litestore @ 12a9933236aa423d673a74b618d8d1e381514a1d

A minimalist nosql document store.

Merge branch 'master' into patch-1
Fabio Cevasco <h3rald@h3rald.com>
Sat, 06 Jan 2024 16:57:22 +0100
commit 12a9933236aa423d673a74b618d8d1e381514a1d

parent 6206ff2634a4b3dfef3ac33ec79d004a99081efd

M .github/workflows/add-artifacts-to-current-release.yml

@@ -76,13 +76,13 @@ if: matrix.os == 'windows-latest'

# Build for Linux - name: Build (Linux) - run: nimble build -v -y --passL:-static -d:release --gcc.exe:musl-gcc --gcc.linkerexe:musl-gcc --gc:orc --opt:size litestore + run: nimble build -v -y --passL:-static -d:release --gcc.exe:musl-gcc --gcc.linkerexe:musl-gcc --mm:refc --opt:size litestore if: matrix.os == 'ubuntu-latest' # Build for macOS/Windows - name: Build (macOS, Windows) shell: bash - run: nimble build -v -y -d:release --gc:orc --opt:size litestore + run: nimble build -v -y -d:release --mm:refc --opt:size litestore if: matrix.os == 'macos-latest' || matrix.os == 'windows-latest' # Import admin directory and create default db
M .github/workflows/ci.yml

@@ -40,4 +40,4 @@ curl https://nim-lang.org/choosenim/init.sh -sSf > init.sh

sh init.sh -y - name: Build - run: nimble build -y -d:release --gcc.exe:musl-gcc --gcc.linkerexe:musl-gcc + run: nimble build -y -d:release --gcc.exe:musl-gcc --gcc.linkerexe:musl-gcc --mm:refc
M .gitignore

@@ -16,8 +16,13 @@ LiteStore_UserGuide.htm

jester_integration js *_backup -./config.json +config.json +*jwks.json *.db-shm *.db-wal *.nim.bak litestore_linkerArgs.txt +token*.txt +x5c*.cert +jwt +middleware
M LICENSE

@@ -1,6 +1,6 @@

The MIT License (MIT) -Copyright (c) 2015-2021 Fabio Cevasco +Copyright (c) 2015-2024 Fabio Cevasco Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal
M build_guide

@@ -32,7 +32,7 @@ for page in ${pages[@]}

do (cat "${page}"; printf "\n\n") >> LiteStore_UserGuide.md done -hastyscribe --field/version:1.11.0 LiteStore_UserGuide.md +hastyscribe --field/version:1.13.0 LiteStore_UserGuide.md rm LiteStore_UserGuide.md mv LiteStore_UserGuide.htm ../.. cd ../..
A description

@@ -0,0 +1,1 @@

+A minimalist nosql document store.
M litestore.nimble

@@ -25,7 +25,7 @@ installExt = @["nim", "c", "h", "json", "ico"]

# Dependencies -requires "nim >= 1.4.4", "https://github.com/h3rald/nim-jwt", "nimgen", "duktape" +requires "nim >= 2.0.0", "db_connector", "nimgen", "duktape" # Build
M src/admin/md/auth.md

@@ -40,8 +40,13 @@ However, users with the **admin:wiki** scope will be able to access documents located under the /docs/wiki/ folder.

Finally, specify the public signature to be used to validate JWT tokens using the **signature** property. Typically, its value should be set to the first value of the [x.509 certificate chain](https://auth0.com/docs/tokens/reference/jwt/jwks-properties) specified in the [JSON Web Key Set](https://auth0.com/docs/jwks) of your API. +> %tip% +> signature vs. jwks_uri +> +> As of version 1.13.0, it is recommended to use the **jwks_uri** property in a LiteStore configuration file instead of the **signature** property. + To use this configuration at runtime, specify it through the **-\-auth** option, like this: `litestore -\-auth:auth.json` -Once enabled, LiteStore will return HTTP 401 error codes if an invalid token or no token is included in the HTTP Authorization header of the request accessing the resource or HTTP 403 error codes in case an authenticated user does not have a valid scope to access a specified resource.+Once enabled, LiteStore will return HTTP 401 error codes if an invalid token or no token is included in the HTTP Authorization header of the request accessing the resource or HTTP 403 error codes in case an authenticated user does not have a valid scope to access a specified resource.
M src/admin/md/configuration-file.md

@@ -101,4 +101,13 @@ * **allowed** — If set to **false**, LiteStore will return a [405 - Method not allowed](class:kwd) error code when accessing the resource with the specified method.

### signature -This section must be set to a valid certificate used to validate JWT tokens. Note that the certificate must follow a specific format and start with the appropriate begin/end blocks.+This section must be set to a valid certificate used to validate JWT tokens. Note that the certificate must follow a specific format and start with the appropriate begin/end blocks. + +### jwks_uri + +As of version 1.13.0, this property can be set to a URI pointing to a valid [JSON Web Key Sets](https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-key-sets) file. If this property is specified, it will be used instead of **signature** to perform signature verification of JWT tokens. + +> %note% +> How JWKS data is managed +> +> If this property is set, LiteStore will attempt to download the specified JWKS file on startup. This file will be cached to a *store-name*_jwks.json file (e.g. `data_jwks.json`) and its contents stored in memory.
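
The behaviour described in the note above can be pictured with a short Nim sketch. This is not LiteStore's implementation, only an illustration of "download the JWKS file on startup, cache it to *store-name*_jwks.json, keep the parsed contents in memory"; the proc name, the fallback to the cached copy on download failure, and the use of std/httpclient are assumptions.

```nim
import std/[httpclient, json, os]

proc loadJwks(storeFile, jwksUri: string): JsonNode =
  ## Illustrative sketch only (not the actual LiteStore code):
  ## fetch the JWKS document, cache it next to the store as
  ## <store-name>_jwks.json, and return the parsed contents.
  let cacheFile = storeFile.changeFileExt("") & "_jwks.json"  # e.g. data.db -> data_jwks.json
  var raw: string
  try:
    let client = newHttpClient()
    defer: client.close()
    raw = client.getContent(jwksUri)
    writeFile(cacheFile, raw)          # refresh the cached copy
  except CatchableError:
    raw = readFile(cacheFile)          # assumption: fall back to the cached copy
  result = raw.parseJson

# e.g. loadJwks("data.db", "https://example.auth0.com/.well-known/jwks.json")
```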
M src/admin/md/global-js-objects.md

@@ -10,8 +10,6 @@ ### $req

The current HTTP request sent to access the current resource. -#### Properties - <dl> <dt>method: string</dt> <dd>The HTTP method used by the request, all uppercase (GET, POST, DELETE, PUT, PATCH, OPTIONS, or HEAD).</dd>

@@ -67,7 +65,7 @@ All methods return a response object containing two String properties, **code** and **content**.

<dl> <dt>function get(resource: string, id: string, parameters: string): object</dt> -<dd>Retrieves the specified resource(s.). +<dd>Retrieves the specified resource(s). <p> Examples: <ul>

@@ -125,4 +123,70 @@ <li><code>$store.head('docs')</code></li>

</ul> </p> </dd> -</dl>+</dl> + +### $http + +Simple synchronous API to perform HTTP requests. + +All methods return a response object containing the following properties: +* **code** (string) +* **content** (string) +* **headers** (object) + +<dl> +<dt>function get(url: string, headers: object): object</dt> +<dd>Executes a GET request. +<p> +Example: +<ul> +<li><code>$http.get('https://reqres.in/api/users', {})</code></li> +</ul> +</p> +</dd> +<dt>function post(url: string, headers: object, body: string): object</dt> +<dd>Executes a POST request. +<p> +Example: +<ul> +<li><code>$http.post('https://reqres.in/api/users', {'Content-Type': 'application/json'}, '{"name": "Test", "job": "Tester"}')</code></li> +</ul> +</p> +</dd> +<dt>function put(url: string, headers: object, body: string): object</dt> +<dd>Executes a PUT request. +<p> +Example: +<ul> +<li><code>$http.put('https://reqres.in/api/users/2', {'Content-Type': 'application/json'}, '{"name": "Test", "job": "Tester"}')</code></li> +</ul> +</p> +</dd> +<dt>function patch(url: string, headers: object, body: string): object</dt> +<dd>Executes a PATCH request. +<p> +Example: +<ul> +<li><code>$http.patch('https://reqres.in/api/users/2', {'Content-Type': 'application/json'}, '{"name": "Test", "job": "Tester"}')</code></li> +</ul> +</p> +</dd> +<dt>function delete(url: string, headers: object): object</dt> +<dd>Executes a DELETE request. +<p> +Example: +<ul> +<li><code>$http.delete('https://reqres.in/api/users/2', {})</code></li> +</ul> +</p> +</dd> +<dt>function head(url: string, headers: object): object</dt> +<dd>Executes a HEAD request. +<p> +Example: +<ul> +<li><code>$http.head('https://reqres.in/api/users', {})</code></li> +</ul> +</p> +</dd> +</dl>
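
The response contract documented above (every $http call yields code, content, and headers) can be illustrated on the Nim side with std/httpclient. This is only a sketch of the shape of the returned object, not LiteStore's actual duktape binding; the HttpResult type and httpGet proc are made up for the example.

```nim
import std/[httpclient, httpcore, json]

type HttpResult = object
  code: string      # e.g. "200 OK"
  content: string   # response body as a string
  headers: JsonNode # response headers as a JSON object

proc httpGet(url: string; headers: JsonNode): HttpResult =
  ## Illustrative only: mirrors the code/content/headers contract
  ## that the $http documentation above describes for every method.
  let client = newHttpClient()
  defer: client.close()
  for k, v in headers.pairs:          # pass request headers through
    client.headers[k] = v.getStr
  let resp = client.get(url)
  result.code = resp.status
  result.content = resp.body
  result.headers = newJObject()
  for k, v in resp.headers:           # collect response headers
    result.headers[k] = %v

# e.g. httpGet("https://reqres.in/api/users", newJObject())
```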
D src/litestore.nim.cfg

@@ -1,19 +0,0 @@

-define:release -threads:on - -@if nimHasWarningObservableStores: - warning[ObservableStores]: off -@end - -# https://blog.filippo.io/easy-windows-and-linux-cross-compilers-for-macos/ - -amd64.windows.gcc.path = "/usr/local/bin" -amd64.windows.gcc.exe = "x86_64-w64-mingw32-gcc" -amd64.windows.gcc.linkerexe = "x86_64-w64-mingw32-gcc" - -amd64.linux.gcc.path = "/usr/local/bin" -amd64.linux.gcc.exe = "x86_64-linux-musl-gcc" -amd64.linux.gcc.linkerexe = "x86_64-linux-musl-gcc" - ---gc = "orc" ---opt = "size"
A src/litestore.nims

@@ -0,0 +1,23 @@

+ +switch("mm", "refc") +switch("opt", "size") +switch("define", "ssl") +switch("define", "release") +switch("threadAnalysis", "off") + +when defined(windows): + switch("dynlibOverride", "sqlite3_64") +else: + switch("dynlibOverride", "sqlite3") + +when defined(ssl): + switch("define", "useOpenSsl3") + when defined(windows): + # TODO: change once issue nim#15220 is resolved + switch("define", "noOpenSSLHacks") + switch("define", "sslVersion:(") + switch("dynlibOverride", "ssl-") + switch("dynlibOverride", "crypto-") + else: + switch("dynlibOverride", "ssl") + switch("dynlibOverride", "crypto")
M src/litestorepkg/lib/api_v1.nim

@@ -35,7 +35,7 @@ if pair.len < 2 or pair[1] == "":

raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) try: pair[1] = pair[1].replace("+", "%2B").decodeURL - except: + except CatchableError: raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) case pair[0]: of "search":
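
The recurring change in this and the following api_v*.nim files is replacing bare `except:` handlers with `except CatchableError:`. Nim 2.x warns on bare except clauses (they also trap defects), so restricting handlers to CatchableError keeps them limited to recoverable runtime errors, which matches the `requires "nim >= 2.0.0"` bump above. A minimal, self-contained sketch of the pattern; the proc and exception type here are illustrative, not the actual LiteStore symbols:

```nim
import std/strutils

type EInvalidRequest = object of CatchableError  # stand-in for LiteStore's own type

proc parseLimit(value: string): int =
  ## Same pattern as the hunks above: catch only recoverable errors
  ## (e.g. ValueError from parseInt), not defects.
  try:
    result = value.parseInt
  except CatchableError:
    raise newException(EInvalidRequest,
      "Invalid limit value: $1" % getCurrentExceptionMsg())

when isMainModule:
  echo parseLimit("20")             # -> 20
  try:
    discard parseLimit("twenty")
  except EInvalidRequest as e:
    echo e.msg                      # -> Invalid limit value: ...
```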

@@ -45,12 +45,12 @@ options.tags = pair[1]

of "limit": try: options.limit = pair[1].parseInt - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) of "offset": try: options.offset = pair[1].parseInt - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) of "sort": let orderby = pair[1].orderByClauses()

@@ -139,7 +139,7 @@ result.headers = newHttpHeaders(TAB_HEADERS)

result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: result = resError(Http500, "Unable to delete document '$1'" % id) proc getRawDocuments(LS: LiteStore, options: QueryOptions = newQueryOptions()): LSResponse =

@@ -208,7 +208,7 @@ result.content = doc

result.code = Http201 else: result = resError(Http500, "Unable to create document.") - except: + except CatchableError: result = resError(Http500, "Unable to create document.") proc putDocument(LS: LiteStore, id: string, body: string, ct: string): LSResponse =

@@ -232,7 +232,7 @@ result.content = doc

result.code = Http200 else: result = resError(Http500, "Unable to update document '$1'." % id) - except: + except CatchableError: result = resError(Http500, "Unable to update document '$1'." % id) proc patchDocument(LS: LiteStore, id: string, body: string): LSResponse =

@@ -258,7 +258,7 @@ try:

apply = applyPatchOperation(tags, item["op"].str, item["path"].str, item["value"].str) if not apply: break - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) else: return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c)

@@ -270,7 +270,7 @@ discard LS.store.destroyTag(t1.str, id, true)

for t2 in tags: if t2 != "": LS.store.createTag(t2, id, true) - except: + except CatchableError: return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) return LS.getRawDocument(id)

@@ -331,7 +331,7 @@ result.content = ""

else: result = LS.getRawDocuments(options) result.content = "" - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) proc get(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =

@@ -350,7 +350,7 @@ else:

return LS.getDocument(id, options) else: return LS.getRawDocuments(options) - except: + except CatchableError: return resError(Http500, "Internal Server Error - $1" % getCurrentExceptionMsg()) of "info": if id != "":

@@ -409,7 +409,7 @@ else:

result.headers = ctHeader("text/plain") result.content = contents result.code = Http200 - except: + except CatchableError: return resError(Http500, "Unable to read file '$1'." % path) else: return resError(Http404, "File '$1' not found." % path)

@@ -444,4 +444,4 @@ if LS.readonly:

return resError(Http405, "Method not allowed: $1" % $req.reqMethod) return validate(req, LS, resource, id, patch) else: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
M src/litestorepkg/lib/api_v2.nim

@@ -34,7 +34,7 @@ if pair.len < 2 or pair[1] == "":

raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) try: pair[1] = pair[1].replace("+", "%2B").decodeURL - except: + except CatchableError: raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) case pair[0]: of "search":

@@ -44,12 +44,12 @@ options.tags = pair[1]

of "limit": try: options.limit = pair[1].parseInt - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) of "offset": try: options.offset = pair[1].parseInt - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) of "sort": let orderby = pair[1].orderByClauses()

@@ -77,7 +77,7 @@ case ct:

of "application/json": try: discard body.parseJson() - except: + except CatchableError: return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) else: discard

@@ -156,7 +156,7 @@ result.headers = newHttpHeaders(TAB_HEADERS)

result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: result = resError(Http500, "Unable to delete document '$1'" % id) proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions()): LSResponse =

@@ -229,7 +229,7 @@ result.content = doc

result.code = Http201 else: result = resError(Http500, "Unable to create document.") - except: + except CatchableError: result = resError(Http500, "Unable to create document.") proc putDocument*(LS: LiteStore, id: string, body: string, ct: string): LSResponse =

@@ -255,7 +255,7 @@ result.content = doc

result.code = Http200 else: result = resError(Http500, "Unable to update document '$1'." % id) - except: + except CatchableError: result = resError(Http500, "Unable to update document '$1'." % id) proc patchDocument*(LS: LiteStore, id: string, body: string): LSResponse =

@@ -281,7 +281,7 @@ try:

apply = applyPatchOperation(tags, item["op"].str, item["path"].str, item["value"].str) if not apply: break - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) else: return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c)

@@ -293,7 +293,7 @@ discard LS.store.destroyTag(t1.str, id, true)

for t2 in tags: if t2 != "": LS.store.createTag(t2, id, true) - except: + except CatchableError: return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) return LS.getRawDocument(id)

@@ -370,7 +370,7 @@ result.content = ""

else: result = LS.getRawDocuments(options) result.content = "" - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =

@@ -391,7 +391,7 @@ else:

return LS.getDocument(id, options) else: return LS.getRawDocuments(options) - except: + except CatchableError: return resError(Http500, "Internal Server Error - $1" % getCurrentExceptionMsg()) of "info": if id != "":

@@ -447,7 +447,7 @@ else:

result.headers = ctHeader("text/plain") result.content = contents result.code = Http200 - except: + except CatchableError: return resError(Http500, "Unable to read file '$1'." % path) else: return resError(Http404, "File '$1' not found." % path)

@@ -482,4 +482,4 @@ if LS.readonly:

return resError(Http405, "Method not allowed: $1" % $req.reqMethod) return validate(req, LS, resource, id, patch) else: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
M src/litestorepkg/lib/api_v3.nim

@@ -141,7 +141,7 @@ if pair.len < 2 or pair[1] == "":

raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) try: pair[1] = pair[1].replace("+", "%2B").decodeURL - except: + except CatchableError: raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) case pair[0]: of "filter":

@@ -159,12 +159,12 @@ options.tags = pair[1]

of "limit": try: options.limit = pair[1].parseInt - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) of "offset": try: options.offset = pair[1].parseInt - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) of "sort": let orderby = pair[1].orderByClauses()

@@ -192,7 +192,7 @@ case ct:

of "application/json": try: discard body.parseJson() - except: + except CatchableError: return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) else: discard

@@ -263,7 +263,7 @@ raise newException(EInvalidRequest, "invalid patch operation: $1" % op)

else: d = d[index] dorig = dorig[index] - except: + except CatchableError: raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path]) else: if c >= keys.len:

@@ -350,7 +350,7 @@ result.headers = newHttpHeaders(TAB_HEADERS)

result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: result = resError(Http500, "Unable to delete document '$1'" % id) proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions()): LSResponse =

@@ -420,7 +420,7 @@ result.content = doc

result.code = Http201 else: result = resError(Http500, "Unable to create document.") - except: + except CatchableError: result = resError(Http500, "Unable to create document.") proc putDocument*(LS: LiteStore, id: string, body: string, ct: string): LSResponse =

@@ -446,7 +446,7 @@ result.content = doc

result.code = Http200 else: result = resError(Http500, "Unable to update document '$1'." % id) - except: + except CatchableError: result = resError(Http500, "Unable to update document '$1'." % id) proc patchDocument*(LS: LiteStore, id: string, body: string): LSResponse =

@@ -471,7 +471,7 @@ if tags.contains("$subtype:json"):

try: origData = jdoc["data"].getStr.parseJson data = origData.copy - except: + except CatchableError: discard var c = 1 for item in jbody.items:

@@ -482,7 +482,7 @@ try:

apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) if not apply: break - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) else: return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c)

@@ -493,7 +493,7 @@ try:

var doc = LS.store.updateDocument(id, data.pretty, "application/json") if doc == "": return resError(Http500, "Unable to patch document '$1'." % id) - except: + except CatchableError: return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg()) if origTags != tags: try:

@@ -502,7 +502,7 @@ discard LS.store.destroyTag(t1.str, id, true)

for t2 in tags: if t2 != "": LS.store.createTag(t2, id, true) - except: + except CatchableError: return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) return LS.getRawDocument(id)

@@ -579,7 +579,7 @@ result.content = ""

else: result = LS.getRawDocuments(options) result.content = "" - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =

@@ -600,7 +600,7 @@ else:

return LS.getDocument(id, options) else: return LS.getRawDocuments(options) - except: + except CatchableError: return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) of "info": if id != "":

@@ -656,7 +656,7 @@ else:

result.headers = ctHeader("text/plain") result.content = contents result.code = Http200 - except: + except CatchableError: return resError(Http500, "Unable to read file '$1'." % path) else: return resError(Http404, "File '$1' not found." % path)

@@ -691,4 +691,4 @@ if LS.readonly:

return resError(Http405, "Method not allowed: $1" % $req.reqMethod) return validate(req, LS, resource, id, patch) else: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
M src/litestorepkg/lib/api_v4.nim

@@ -141,7 +141,7 @@ if pair.len < 2 or pair[1] == "":

raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) try: pair[1] = pair[1].replace("+", "%2B").decodeURL - except: + except CatchableError: raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) case pair[0]: of "filter":

@@ -161,32 +161,32 @@ options.tags = pair[1]

of "created-after": try: options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg()) of "created-before": try: options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg()) of "modified-after": try: options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg()) of "modified-before": try: options.modifiedBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg()) of "limit": try: options.limit = pair[1].parseInt - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) of "offset": try: options.offset = pair[1].parseInt - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) of "sort": let orderby = pair[1].orderByClauses()

@@ -216,7 +216,7 @@ case ct:

of "application/json": try: discard body.parseJson() - except: + except CatchableError: return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) else: discard

@@ -286,7 +286,7 @@ raise newException(EInvalidRequest, "invalid patch operation: $1" % op)

else: d = d[index] dorig = dorig[index] - except: + except CatchableError: raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path]) else: if c >= keys.len:

@@ -386,7 +386,7 @@ setOrigin(LS, req, result.headers)

result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: result = resError(Http500, "Unable to delete document '$1'" % id) proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =

@@ -484,7 +484,7 @@ result.content = doc

result.code = Http201 else: result = resError(Http500, "Unable to create document.") - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to create document.")

@@ -513,7 +513,7 @@ result.content = doc

result.code = Http200 else: result = resError(Http500, "Unable to update document '$1'." % id) - except: + except CatchableError: result = resError(Http500, "Unable to update document '$1'." % id) proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse =

@@ -538,7 +538,7 @@ if tags.contains("$subtype:json"):

try: origData = jdoc["data"].getStr.parseJson data = origData.copy - except: + except CatchableError: discard var c = 1 for item in jbody.items:

@@ -549,7 +549,7 @@ try:

apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) if not apply: break - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) else: return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c)

@@ -560,7 +560,7 @@ try:

var doc = LS.store.updateDocument(id, data.pretty, "application/json") if doc == "": return resError(Http500, "Unable to patch document '$1'." % id) - except: + except CatchableError: return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg()) if origTags != tags: try:

@@ -569,7 +569,7 @@ discard LS.store.destroyTag(t1.str, id, true)

for t2 in tags: if t2 != "": LS.store.createTag(t2, id, true) - except: + except CatchableError: return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) return LS.getRawDocument(id, newQueryOptions(), req)

@@ -661,7 +661,7 @@ result.content = ""

else: result = LS.getRawDocuments(options, req) result.content = "" - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =

@@ -681,7 +681,7 @@ else:

return LS.getDocument(id, options, req) else: return LS.getRawDocuments(options, req) - except: + except CatchableError: let e = getCurrentException() let trace = e.getStackTrace() echo trace

@@ -694,7 +694,7 @@ if id != "":

return LS.getTag(id, options, req) else: return LS.getTags(options, req) - except: + except CatchableError: return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) of "info": if id != "":

@@ -750,7 +750,7 @@ result.headers = ctHeader("text/plain")

setOrigin(LS, req, result.headers) result.content = contents result.code = Http200 - except: + except CatchableError: return resError(Http500, "Unable to read file '$1'." % path) else: return resError(Http404, "File '$1' not found." % path)

@@ -785,4 +785,4 @@ if LS.readonly:

return resError(Http405, "Method not allowed: $1" % $req.reqMethod) return validate(req, LS, resource, id, patch) else: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
M src/litestorepkg/lib/api_v5.nim

@@ -145,7 +145,7 @@ if pair.len < 2 or pair[1] == "":

raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) try: pair[1] = pair[1].replace("+", "%2B").decodeURL - except: + except CatchableError: raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) case pair[0]: of "filter":

@@ -165,32 +165,32 @@ options.tags = pair[1]

of "created-after": try: options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg()) of "created-before": try: options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg()) of "modified-after": try: options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg()) of "modified-before": try: options.modifiedBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg()) of "limit": try: options.limit = pair[1].parseInt - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) of "offset": try: options.offset = pair[1].parseInt - except: + except CatchableError: raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) of "sort": let orderby = pair[1].orderByClauses()

@@ -220,7 +220,7 @@ case ct:

of "application/json": try: discard body.parseJson() - except: + except CatchableError: return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) else: discard

@@ -290,7 +290,7 @@ raise newException(EInvalidRequest, "invalid patch operation: $1" % op)

else: d = d[index] dorig = dorig[index] - except: + except CatchableError: raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path]) else: if c >= keys.len:

@@ -400,7 +400,7 @@ setOrigin(LS, req, result.headers)

result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: result = resError(Http500, "Unable to delete document '$1'" % id) proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =

@@ -524,7 +524,7 @@ result.headers = ctJsonHeader()

setOrigin(LS, req, result.headers) result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field] result.code = Http200 - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to create index.")

@@ -540,7 +540,7 @@ setOrigin(LS, req, result.headers)

result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to delete index.")

@@ -556,7 +556,7 @@ result.content = doc

result.code = Http201 else: result = resError(Http500, "Unable to create document.") - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to create document.")

@@ -585,7 +585,7 @@ result.content = doc

result.code = Http200 else: result = resError(Http500, "Unable to update document '$1'." % id) - except: + except CatchableError: result = resError(Http500, "Unable to update document '$1'." % id) proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse =

@@ -610,7 +610,7 @@ if tags.contains("$subtype:json"):

try: origData = jdoc["data"].getStr.parseJson data = origData.copy - except: + except CatchableError: discard var c = 1 for item in jbody.items:

@@ -621,7 +621,7 @@ try:

apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) if not apply: break - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) else: return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c)

@@ -632,7 +632,7 @@ try:

var doc = LS.store.updateDocument(id, data.pretty, "application/json") if doc == "": return resError(Http500, "Unable to patch document '$1'." % id) - except: + except CatchableError: return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg()) if origTags != tags: try:

@@ -641,7 +641,7 @@ discard LS.store.destroyTag(t1.str, id, true)

for t2 in tags: if t2 != "": LS.store.createTag(t2, id, true) - except: + except CatchableError: return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) return LS.getRawDocument(id, newQueryOptions(), req)

@@ -760,7 +760,7 @@ result.content = ""

else: result = LS.getRawDocuments(options, req) result.content = "" - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =

@@ -780,7 +780,7 @@ else:

return LS.getDocument(id, options, req) else: return LS.getRawDocuments(options, req) - except: + except CatchableError: let e = getCurrentException() let trace = e.getStackTrace() echo trace

@@ -793,7 +793,7 @@ if id != "":

return LS.getTag(id, options, req) else: return LS.getTags(options, req) - except: + except CatchableError: return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) of "indexes": var options = newQueryOptions()

@@ -803,7 +803,7 @@ if id != "":

return LS.getIndex(id, options, req) else: return LS.getIndexes(options, req) - except: + except CatchableError: return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) of "info": if id != "":

@@ -824,7 +824,7 @@ if resource == "indexes":

var field = "" try: field = parseJson(req.body.strip)["field"].getStr - except: + except CatchableError: return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) return LS.putIndex(id, field, req) else: # Assume docs

@@ -870,7 +870,7 @@ result.headers = ctHeader("text/plain")

setOrigin(LS, req, result.headers) result.content = contents result.code = Http200 - except: + except CatchableError: return resError(Http500, "Unable to read file '$1'." % path) else: return resError(Http404, "File '$1' not found." % path)

@@ -905,4 +905,4 @@ if LS.readonly:

return resError(Http405, "Method not allowed: $1" % $req.reqMethod) return validate(req, LS, resource, id, patch) else: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
M src/litestorepkg/lib/api_v6.nim

@@ -1,1131 +1,1131 @@

-import - asynchttpserver, - strutils, - sequtils, - cgi, - strtabs, - pegs, - json, - os, - uri, - times -import - types, - contenttypes, - core, - utils, - logger, - duktape - -# Helper procs - -proc sqlOp(op: string): string = - let table = newStringTable() - table["not eq"] = "<>" - table["eq"] = "==" - table["gt"] = ">" - table["gte"] = ">=" - table["lt"] = "<" - table["lte"] = "<=" - table["contains"] = "contains" - table["like"] = "like" - return table[op] - -proc orderByClauses*(str: string): string = - var clauses = newSeq[string]() - var fragments = str.split(",") - let clause = peg""" - clause <- {[-+]} {field} - field <- ('id' / 'created' / 'modified' / path) - path <- '$' (objField)+ - ident <- [a-zA-Z0-9_]+ - objField <- '.' ident - """ - for f in fragments: - var matches = @["", ""] - if f.find(clause, matches) != -1: - var field = matches[1] - if field[0] == '$': - field = "json_extract(documents.data, '$1')" % matches[1] - if matches[0] == "-": - clauses.add("$1 COLLATE NOCASE DESC" % field) - else: - clauses.add("$1 COLLATE NOCASE ASC" % field) - return clauses.join(", ") - -proc selectClause*(str: string, options: var QueryOptions) = - let tokens = """ - path <- '$' (objItem / objField)+ - ident <- [a-zA-Z0-9_]+ - objIndex <- '[' \d+ ']' - objField <- '.' ident - objItem <- objField objIndex - """ - let fields = peg(""" - fields <- ^{field} (\s* ',' \s* {field})*$ - field <- path \s+ ('as' / 'AS') \s+ ident - """ & tokens) - let field = peg(""" - field <- ^{path} \s+ ('as' / 'AS') \s+ {ident}$ - """ & tokens) - var fieldMatches = newSeq[string](10) - if str.strip.match(fields, fieldMatches): - for m in fieldMatches: - if m.len > 0: - var rawTuple = newSeq[string](2) - if m.match(field, rawTuple): - options.jsonSelect.add((path: rawTuple[0], alias: rawTuple[1])) - -proc filterClauses*(str: string, options: var QueryOptions) = - let tokens = """ - operator <- 'not eq' / 'eq' / 'gte' / 'gt' / 'lte' / 'lt' / 'contains' / 'like' - value <- string / number / 'null' / 'true' / 'false' - string <- '"' ('\\"' . / [^"])* '"' - number <- '-'? '0' / [1-9] [0-9]* ('.' [0-9]+)? (( 'e' / 'E' ) ( '+' / '-' )? [0-9]+)? - path <- '$' (objItem / objField)+ - ident <- [a-zA-Z0-9_]+ - objIndex <- '[' \d+ ']' - objField <- '.' 
ident - objItem <- objField objIndex - """ - let clause = peg(""" - clause <- {path} \s+ {operator} \s+ {value} - """ & tokens) - let andClauses = peg(""" - andClauses <- ^{clause} (\s+ 'and' \s+ {clause})*$ - clause <- path \s+ operator \s+ value - """ & tokens) - let orClauses = peg(""" - orClauses <- ^{andClauses} (\s+ 'or' \s+ {andClauses})*$ - andClauses <- clause (\s+ 'and' \s+ clause)* - clause <- path \s+ operator \s+ value - """ & tokens) - var orClausesMatches = newSeq[string](10) - discard str.strip.match(orClauses, orClausesMatches) - var parsedClauses = newSeq[seq[seq[string]]]() - for orClause in orClausesMatches: - if orClause.len > 0: - var andClausesMatches = newSeq[string](10) - discard orClause.strip.match(andClauses, andClausesMatches) - var parsedAndClauses = newSeq[seq[string]]() - for andClause in andClausesMatches: - if andClause.len > 0: - var clauses = newSeq[string](3) - discard andClause.strip.match(clause, clauses) - clauses[1] = sqlOp(clauses[1]) - if clauses[2] == "true": - clauses[2] = "1" - elif clauses[2] == "false": - clauses[2] = "0" - parsedAndClauses.add clauses - if parsedAndClauses.len > 0: - parsedClauses.add parsedAndClauses - if parsedClauses.len == 0: - return - var currentArr = 0 - var tables = newSeq[string]() - let resOrClauses = parsedClauses.map do (it: seq[seq[string]]) -> string: - let resAndClauses = it.map do (x: seq[string]) -> string: - if x[1] == "contains": - currentArr = currentArr + 1 - tables.add "json_each(documents.data, '$1') AS arr$2" % [x[0], $currentArr] - return "arr$1.value == $2" % [$currentArr, x[2]] - else: - var arr = @[x[0], x[1], x[2]] - if x[1] == "like": - arr[2] = x[2].replace('*', '%') - return "json_extract(documents.data, '$1') $2 $3 " % arr - return resAndClauses.join(" AND ") - options.tables = options.tables & tables - options.jsonFilter = resOrClauses.join(" OR ") - -proc parseQueryOption*(fragment: string, options: var QueryOptions) = - if fragment == "": - return - var pair = fragment.split('=') - if pair.len < 2 or pair[1] == "": - raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) - try: - pair[1] = pair[1].replace("+", "%2B").decodeURL - except: - raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) - case pair[0]: - of "filter": - filterClauses(pair[1], options) - if options.jsonFilter == "": - raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[1].replace("\"", "\\\"")) - of "select": - selectClause(pair[1], options) - if options.jsonSelect.len == 0: - raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[1].replace("\"", "\\\"")) - of "like": - options.like = pair[1] - of "search": - options.search = pair[1] - of "tags": - options.tags = pair[1] - of "created-after": - try: - options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg()) - of "created-before": - try: - options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg()) - of "modified-after": - try: - options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg()) - of "modified-before": - try: - options.modifiedBefore = 
pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg()) - of "limit": - try: - options.limit = pair[1].parseInt - except: - raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) - of "offset": - try: - options.offset = pair[1].parseInt - except: - raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) - of "sort": - let orderby = pair[1].orderByClauses() - if orderby != "": - options.orderby = orderby - else: - raise newException(EInvalidRequest, "Invalid sort value: $1" % pair[1]) - of "contents", "raw": - discard - else: - discard - -proc parseQueryOptions*(querystring: string, options: var QueryOptions) = - var fragments = querystring.split('&') - for f in fragments: - f.parseQueryOption(options) - -proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse = - if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch: - var ct = "" - let body = req.body.strip - if body == "": - return resError(Http400, "Bad request: No content specified for document.") - if req.headers.hasKey("Content-Type"): - ct = req.headers["Content-Type"] - case ct: - of "application/json": - try: - discard body.parseJson() - except: - return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) - else: - discard - return cb(req, LS, resource, id) - -proc patchTag(tags: var seq[string], index: int, op, path, value: string): bool = - LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." % [op, $index, $value, $tags.len]) - case op: - of "remove": - let tag = tags[index] - if not tag.startsWith("$"): - tags[index] = "" # Not removing element, otherwise subsequent indexes won't work! - else: - raise newException(EInvalidRequest, "cannot remove system tag: $1" % tag) - of "add": - if value.match(PEG_USER_TAG): - tags.insert(value, index) - else: - if value.strip == "": - raise newException(EInvalidRequest, "tag not specified." % value) - else: - raise newException(EInvalidRequest, "invalid tag: $1" % value) - of "replace": - if value.match(PEG_USER_TAG): - if tags[index].startsWith("$"): - raise newException(EInvalidRequest, "cannot replace system tag: $1" % tags[index]) - else: - tags[index] = value - else: - if value.strip == "": - raise newException(EInvalidRequest, "tag not specified." 
% value) - else: - raise newException(EInvalidRequest, "invalid tag: $1" % value) - of "test": - if tags[index] != value: - return false - else: - raise newException(EInvalidRequest, "invalid patch operation: $1" % op) - return true - -proc patchData*(data: var JsonNode, origData: JsonNode, op: string, path: string, value: JsonNode): bool = - LOG.debug("- PATCH -> $1 path $2 with $3" % [op, path, $value]) - var keys = path.replace(peg"^\/data\/", "").split("/") - if keys.len == 0: - raise newException(EInvalidRequest, "no valid path specified: $1" % path) - var d = data - var dorig = origData - var c = 1 - for key in keys: - if d.kind == JArray: - try: - var index = key.parseInt - if c >= keys.len: - d.elems[index] = value - case op: - of "remove": - d.elems.del(index) - of "add": - d.elems.insert(value, index) - of "replace": - d.elems[index] = value - of "test": - if d.elems[index] != value: - return false - else: - raise newException(EInvalidRequest, "invalid patch operation: $1" % op) - else: - d = d[index] - dorig = dorig[index] - except: - raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path]) - else: - if c >= keys.len: - case op: - of "remove": - if d.hasKey(key): - d.delete(key) - else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) - of "add": - d[key] = value - of "replace": - if d.hasKey(key): - d[key] = value - else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) - of "test": - if dorig.hasKey(key): - if dorig[key] != value: - return false - else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) - else: - raise newException(EInvalidRequest, "invalid patch operation: $1" % op) - else: - d = d[key] - dorig = dorig[key] - c += 1 - return true - - -proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[string], op: string, path: string, value: JsonNode): bool = - var matches = @[""] - let p = peg""" - path <- ^tagPath / fieldPath$ - tagPath <- '\/tags\/' {\d+} - fieldPath <- '\/data\/' ident ('\/' ident)* - ident <- [a-zA-Z0-9_]+ / '-' - """ - if path.find(p, matches) == -1: - raise newException(EInvalidRequest, "cannot patch path '$1'" % path) - if path.match(peg"^\/tags\/"): - let index = matches[0].parseInt - if value.kind != JString: - raise newException(EInvalidRequest, "tag '$1' is not a string." 
% $value) - let tag = value.getStr - return patchTag(tags, index, op, path, tag) - elif tags.contains("$subtype:json"): - return patchData(data, origData, op, path, value) - else: - raise newException(EInvalidRequest, "cannot patch data of a non-JSON document.") - -# Low level procs - -proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveTag(id, options) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - if doc == newJNull(): - result = resTagNotFound(id) - else: - result.content = $doc - result.code = Http200 - -proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveIndex(id, options) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - if doc == newJNull(): - result = resIndexNotFound(id) - else: - result.content = $doc - result.code = Http200 - -proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveRawDocument(id, options) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - if doc == "": - result = resDocumentNotFound(id) - else: - result.content = doc - result.code = Http200 - -proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveDocument(id, options) - if doc.data == "": - result = resDocumentNotFound(id) - else: - result.headers = doc.contenttype.ctHeader - setOrigin(LS, req, result.headers) - result.content = doc.data - result.code = Http200 - -proc deleteDocument*(LS: LiteStore, id: string, req: LSRequest): LSResponse = - let doc = LS.store.retrieveDocument(id) - if doc.data == "": - result = resDocumentNotFound(id) - else: - try: - let res = LS.store.destroyDocument(id) - if res == 0: - result = resError(Http500, "Unable to delete document '$1'" % id) - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Content-Length"] = "0" - result.content = "" - result.code = Http204 - except: - result = resError(Http500, "Unable to delete document '$1'" % id) - -proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = - var options = options - let t0 = cpuTime() - let docs = LS.store.retrieveTags(options) - let orig_limit = options.limit - let orig_offset = options.offset - options.limit = 0 - options.offset = 0 - options.select = @["COUNT(tag_id)"] - let total = LS.store.countTags(prepareSelectTagsQuery(options), options.like.replace("*", "%")) - var content = newJObject() - if options.like != "": - content["like"] = %(options.like.decodeURL) - if orig_limit > 0: - content["limit"] = %orig_limit - if orig_offset > 0: - content["offset"] = %orig_offset - content["total"] = %total - content["execution_time"] = %(cputime()-t0) - content["results"] = docs - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = - var options = options - let t0 = cpuTime() - let docs = LS.store.retrieveIndexes(options) - let orig_limit = options.limit - let orig_offset = options.offset - options.limit = 0 - options.offset = 0 - options.select = @["COUNT(name)"] - let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), options.like.replace("*", "%")) - var 
content = newJObject() - if options.like != "": - content["like"] = %(options.like.decodeURL) - if orig_limit > 0: - content["limit"] = %orig_limit - if orig_offset > 0: - content["offset"] = %orig_offset - content["total"] = %total - content["execution_time"] = %(cputime()-t0) - content["results"] = docs - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = - var options = options - let t0 = cpuTime() - let docs = LS.store.retrieveRawDocuments(options) - let orig_limit = options.limit - let orig_offset = options.offset - options.limit = 0 - options.offset = 0 - options.select = @["COUNT(docid)"] - let total = LS.store.retrieveRawDocuments(options)[0].num - var content = newJObject() - if options.folder != "": - content["folder"] = %(options.folder) - if options.search != "": - content["search"] = %(options.search.decodeURL) - if options.tags != "": - content["tags"] = newJArray() - for tag in options.tags.replace("+", "%2B").decodeURL.split(","): - content["tags"].add(%tag) - if orig_limit > 0: - content["limit"] = %orig_limit - if orig_offset > 0: - content["offset"] = %orig_offset - if options.orderby != "": - content["sort"] = %options.orderby - content["total"] = %total - content["execution_time"] = %(cputime()-t0) - content["results"] = docs - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc getInfo*(LS: LiteStore, req: LSRequest): LSResponse = - let info = LS.store.retrieveInfo() - let version = info[0] - let total_documents = info[1] - let total_tags = LS.store.countTags() - let tags = LS.store.retrieveTagsWithTotals() - var content = newJObject() - content["version"] = %(LS.appname & " v" & LS.appversion) - content["datastore_version"] = %version - content["size"] = %($((LS.file.getFileSize().float/(1024*1024)).formatFloat(ffDecimal, 2)) & " MB") - content["read_only"] = %LS.readonly - content["log_level"] = %LS.loglevel - if LS.directory.len == 0: - content["directory"] = newJNull() - else: - content["directory"] = %LS.directory - content["mount"] = %LS.mount - content["total_documents"] = %total_documents - content["total_tags"] = %total_tags - content["tags"] = tags - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc putIndex*(LS: LiteStore, id, field: string, req: LSRequest): LSResponse = - try: - if (not id.match(PEG_INDEX)): - return resError(Http400, "invalid index ID: $1" % id) - if (not field.match(PEG_JSON_FIELD)): - return resError(Http400, "invalid field path: $1" % field) - if (LS.store.retrieveIndex(id) != newJNull()): - return resError(Http409, "Index already exists: $1" % id) - LS.store.createIndex(id, field) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field] - result.code = Http200 - except: - eWarn() - result = resError(Http500, "Unable to create index.") - -proc deleteIndex*(LS: LiteStore, id: string, req: LSRequest): LSResponse = - if (not id.match(PEG_INDEX)): - return resError(Http400, "invalid index ID: $1" % id) - if (LS.store.retrieveIndex(id) == newJNull()): - return resError(Http404, "Index not found: $1" % id) - try: - LS.store.dropIndex(id) - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, 
result.headers) - result.headers["Content-Length"] = "0" - result.content = "" - result.code = Http204 - except: - eWarn() - result = resError(Http500, "Unable to delete index.") - -proc postDocument*(LS: LiteStore, body: string, ct: string, folder="", req: LSRequest): LSResponse = - if not folder.isFolder: - return resError(Http400, "Invalid folder specified when creating document: $1" % folder) - try: - var doc = LS.store.createDocument(folder, body, ct) - if doc != "": - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = doc - result.code = Http201 - else: - result = resError(Http500, "Unable to create document.") - except: - eWarn() - result = resError(Http500, "Unable to create document.") - -proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, req: LSRequest): LSResponse = - if id.isFolder: - return resError(Http400, "Invalid ID '$1' (Document IDs cannot end with '/')." % id) - let doc = LS.store.retrieveDocument(id) - if doc.data == "": - # Create a new document - var doc = LS.store.createDocument(id, body, ct) - if doc != "": - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = doc - result.code = Http201 - else: - result = resError(Http500, "Unable to create document.") - else: - # Update existing document - try: - var doc = LS.store.updateDocument(id, body, ct) - if doc != "": - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = doc - result.code = Http200 - else: - result = resError(Http500, "Unable to update document '$1'." % id) - except: - result = resError(Http500, "Unable to update document '$1'." % id) - -proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse = - var apply = true - let jbody = body.parseJson - if jbody.kind != JArray: - return resError(Http400, "Bad request: PATCH request body is not an array.") - var options = newQueryOptions() - options.select = @["documents.id AS id", "created", "modified", "data"] - let doc = LS.store.retrieveRawDocument(id, options) - if doc == "": - return resDocumentNotFound(id) - let jdoc = doc.parseJson - var tags = newSeq[string]() - var origTags = newSeq[string]() - for tag in jdoc["tags"].items: - tags.add(tag.str) - origTags.add(tag.str) - var data: JsonNode - var origData: JsonNode - if tags.contains("$subtype:json"): - try: - origData = jdoc["data"].getStr.parseJson - data = origData.copy - except: - discard - var c = 1 - for item in jbody.items: - if item.hasKey("op") and item.hasKey("path"): - if not item.hasKey("value"): - item["value"] = %"" - try: - apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) - if not apply: - break - except: - return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) - else: - return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c) - c.inc - if apply: - if origData.len > 0 and origData != data: - try: - var doc = LS.store.updateDocument(id, data.pretty, "application/json") - if doc == "": - return resError(Http500, "Unable to patch document '$1'." 
% id) - except: - return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg()) - if origTags != tags: - try: - for t1 in jdoc["tags"].items: - discard LS.store.destroyTag(t1.str, id, true) - for t2 in tags: - if t2 != "": - LS.store.createTag(t2, id, true) - except: - return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) - return LS.getRawDocument(id, newQueryOptions(), req) - -# Main routing - -proc options*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - case resource: - of "info": - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - if id != "": - return resError(Http404, "Info '$1' not found." % id) - else: - result.code = Http204 - result.content = "" - of "dir": - result.code = Http204 - result.content = "" - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - of "tags": - result.code = Http204 - result.content = "" - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - of "indexes": - result.code = Http204 - result.content = "" - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - if id != "": - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - else: - result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE" - else: - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - of "docs": - var folder: string - if id.isFolder: - folder = id - if folder.len > 0: - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS, POST, PUT" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST, PUT" - elif id != "": - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" - result.headers["Allow-Patch"] = "application/json-patch+json" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" - 
else: - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS, POST" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST" - else: - discard # never happens really. - -proc head*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - var options = newQueryOptions() - options.select = @["documents.id AS id", "created", "modified"] - if id.isFolder: - options.folder = id - try: - parseQueryOptions(req.url.query, options); - if id != "" and options.folder == "": - result = LS.getRawDocument(id, options, req) - result.content = "" - else: - result = LS.getRawDocuments(options, req) - result.content = "" - except: - return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) - -proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - case resource: - of "docs": - var options = newQueryOptions() - if id.isFolder: - options.folder = id - if req.url.query.contains("contents=false"): - options.select = @["documents.id AS id", "created", "modified"] - try: - parseQueryOptions(req.url.query, options); - if id != "" and options.folder == "": - if req.url.query.contains("raw=true") or req.headers.hasKey("Accept") and req.headers["Accept"] == "application/json": - return LS.getRawDocument(id, options, req) - else: - return LS.getDocument(id, options, req) - else: - return LS.getRawDocuments(options, req) - except: - let e = getCurrentException() - let trace = e.getStackTrace() - echo trace - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "tags": - var options = newQueryOptions() - try: - parseQueryOptions(req.url.query, options); - if id != "": - return LS.getTag(id, options, req) - else: - return LS.getTags(options, req) - except: - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "indexes": - var options = newQueryOptions() - try: - parseQueryOptions(req.url.query, options); - if id != "": - return LS.getIndex(id, options, req) - else: - return LS.getIndexes(options, req) - except: - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "info": - if id != "": - return resError(Http404, "Info '$1' not found." % id) - return LS.getInfo(req) - else: - discard # never happens really. 
- -proc post*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - var ct = "text/plain" - if req.headers.hasKey("Content-Type"): - ct = req.headers["Content-Type"] - return LS.postDocument(req.body.strip, ct, id, req) - -proc put*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - if id != "": - if resource == "indexes": - var field = "" - try: - field = parseJson(req.body.strip)["field"].getStr - except: - return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) - return LS.putIndex(id, field, req) - else: # Assume docs - var ct = "text/plain" - if req.headers.hasKey("Content-Type"): - ct = req.headers["Content-Type"] - return LS.putDocument(id, req.body.strip, ct, req) - else: - return resError(Http400, "Bad request: document ID must be specified in PUT requests.") - -proc delete*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - if id != "": - if resource == "indexes": - return LS.deleteIndex(id, req) - else: # Assume docs - return LS.deleteDocument(id, req) - else: - return resError(Http400, "Bad request: document ID must be specified in DELETE requests.") - -proc patch*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - if id != "": - return LS.patchDocument(id, req.body, req) - else: - return resError(Http400, "Bad request: document ID must be specified in PATCH requests.") - -proc serveFile*(req: LSRequest, LS: LiteStore, id: string): LSResponse = - let path = LS.directory / id - var reqMethod = $req.reqMethod - if req.headers.hasKey("X-HTTP-Method-Override"): - reqMethod = req.headers["X-HTTP-Method-Override"] - case reqMethod.toUpperAscii: - of "OPTIONS": - return validate(req, LS, "dir", id, options) - of "GET": - if path.fileExists: - try: - let contents = path.readFile - let parts = path.splitFile - if CONTENT_TYPES.hasKey(parts.ext): - result.headers = CONTENT_TYPES[parts.ext].ctHeader - else: - result.headers = ctHeader("text/plain") - setOrigin(LS, req, result.headers) - result.content = contents - result.code = Http200 - except: - return resError(Http500, "Unable to read file '$1'." % path) - else: - return resError(Http404, "File '$1' not found." 
% path) - else: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - -proc route*(req: LSRequest, LS: LiteStore, resource = "docs", id = ""): LSResponse = - var reqMethod = $req.reqMethod - if req.headers.hasKey("X-HTTP-Method-Override"): - reqMethod = req.headers["X-HTTP-Method-Override"] - case reqMethod.toUpperAscii: - of "POST": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, post) - of "PUT": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, put) - of "DELETE": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, delete) - of "HEAD": - return validate(req, LS, resource, id, head) - of "OPTIONS": - return validate(req, LS, resource, id, options) - of "GET": - return validate(req, LS, resource, id, get) - of "PATCH": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, patch) - else: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - -proc newSimpleLSRequest(meth: HttpMethod, resource, id, body = "", params = "", headers = newHttpHeaders()): LSRequest = - result.reqMethod = meth - result.body = body - result.headers = headers - result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", resource, id, params]) - -proc get(resource, id: string, params = ""): LSResponse = - return newSimpleLSRequest(HttpGet, resource, id, "", params).get(LS, resource, id) - -proc post(resource, folder, body: string, ct = ""): LSResponse = - var headers = newHttpHeaders() - if ct != "": - headers["Content-Type"] = ct - return newSimpleLSRequest(HttpPost, resource, "", body, "", headers).post(LS, resource, folder & "/") - -proc put(resource, id, body: string, ct = ""): LSResponse = - var headers = newHttpHeaders() - if ct != "": - headers["Content-Type"] = ct - return newSimpleLSRequest(HttpPut, resource, id, body, "", headers).put(LS, resource, id) - -proc patch(resource, id, body: string): LSResponse = - var headers = newHttpHeaders() - headers["Content-Type"] = "application/json" - return newSimpleLSRequest(HttpPatch, resource, id, body, "", headers).patch(LS, resource, id) - -proc delete(resource, id: string): LSResponse = - return newSimpleLSRequest(HttpPatch, resource, id).delete(LS, resource, id) - -proc head(resource, id: string): LSResponse = - return newSimpleLSRequest(HttpHead, resource, id).head(LS, resource, id) - -proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, origId: string) = - var api_idx = ctx.duk_push_object() - # GET - var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let params = duk_get_string(ctx, 2) - let resp = get($resource, $id, $params) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, get, 3) - discard ctx.duk_put_prop_string(api_idx, "get") - # POST - var post: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let folder = duk_get_string(ctx, 1) - let body = duk_get_string(ctx, 2) - let ct = duk_get_string(ctx, 3) - let resp = 
post($resource, $folder, $body, $ct) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, post, 4) - discard ctx.duk_put_prop_string(api_idx, "post") - # PUT - var put: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let body = duk_get_string(ctx, 2) - let ct = duk_get_string(ctx, 3) - let resp = put($resource, $id, $body, $ct) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, put, 4) - discard ctx.duk_put_prop_string(api_idx, "put") - # PATCH - var patch: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let body = duk_get_string(ctx, 2) - let resp = patch($resource, $id, $body) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, patch, 3) - discard ctx.duk_put_prop_string(api_idx, "patch") - # DELETE - var delete: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let resp = delete($resource, $id) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, delete, 2) - discard ctx.duk_put_prop_string(api_idx, "delete") - # HEAD - var head: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let resp = head($resource, $id) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, head, 2) - discard ctx.duk_put_prop_string(api_idx, "head") - discard ctx.duk_put_global_string("$store") - -proc jError(ctx: DTContext): LSResponse = - return resError(Http500, "Middleware Error: " & $ctx.duk_safe_to_string(-1)) - -proc getMiddleware*(LS: LiteStore, id: string): string = - if not LS.middleware.hasKey(id): - # Attempt to retrieve resource from system documents - let options = newQueryOptions(true) - let doc = LS.store.retrieveDocument("middleware/" & id & ".js", options) - result = doc.data - if result == "": - LOG.warn("Middleware '$1' not found" % id) - else: - result = LS.middleware[id] - -proc getMiddlewareSeq(resource, id, meth: string): seq[string] = - result = newSeq[string]() - if LS.config.kind != JObject or not LS.config.hasKey("resources"): - return - var reqUri = "/" & resource & "/" & id - if reqUri[^1] == '/': - reqUri.removeSuffix({'/'}) - let parts = reqUri.split("/") - let ancestors = parts[1..parts.len-2] - var 
currentPath = "" - var currentPaths = "" - for p in ancestors: - currentPath &= "/" & p - currentPaths = currentPath & "/*" - if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("middleware"): - let mw = LS.config["resources"][currentPaths][meth]["middleware"] - if (mw.kind == JArray): - for m in mw: - result.add m.getStr - if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): - let mw = LS.config["resources"][reqUri][meth]["middleware"] - if (mw.kind == JArray): - for m in mw: - result.add m.getStr - -proc execute*(req: var LSRequest, LS: LiteStore, resource, id: string): LSResponse = - let middleware = getMiddlewareSeq(resource, id, $req.reqMethod) - LOG.debug("Middleware: " & middleware.join(" -> ")); - if middleware.len == 0: - return route(req, LS, resource, id) - var jReq = $(%* req) - LOG.debug("Request: " & jReq) - var jRes = """{ - "code": 200, - "content": {}, - "final": false, - "headers": { - "Access-Control-Allow-Origin": "*", - "Access-Control-Allow-Headers": "Authorization, Content-Type", - "Server": "$1", - "Content-Type": "application/json" - } - }""" % [LS.appname & "/" & LS.appversion] - var context = "{}" - # Create execution context - var ctx = duk_create_heap_default() - duk_console_init(ctx) - duk_print_alert_init(ctx) - LS.registerStoreApi(ctx, resource, id) - if ctx.duk_peval_string(cstring("($1)" % $jReq)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$req") - if ctx.duk_peval_string(cstring("($1)" % $jRes)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$res") - if ctx.duk_peval_string(cstring("($1)" % $context)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$ctx") - # Middleware-specific functions - var i = 0 - var abort = 0 - while abort != 1 and i < middleware.len: - let code = LS.getMiddleware(middleware[i]) - LOG.debug("Evaluating middleware '$1'" % middleware[i]) - if ctx.duk_peval_string(code.cstring) != 0: - return jError(ctx) - abort = ctx.duk_get_boolean(-1) - i.inc - # Retrieve response, and request - if ctx.duk_peval_string("JSON.stringify($res);") != 0: - return jError(ctx) - let fRes = parseJson($(ctx.duk_get_string(-1))).newLSResponse - if ctx.duk_peval_string("JSON.stringify($req);") != 0: - return jError(ctx) - let fReq = parseJson($(ctx.duk_get_string(-1))).newLSRequest() - ctx.duk_destroy_heap(); - LOG.debug("abort: $1", [$abort]) - if abort == 1: - return fRes - return route(fReq, LS, resource, id) +import + asynchttpserver, + strutils, + sequtils, + cgi, + strtabs, + pegs, + json, + os, + uri, + times +import + types, + contenttypes, + core, + utils, + logger, + duktape + +# Helper procs + +proc sqlOp(op: string): string = + let table = newStringTable() + table["not eq"] = "<>" + table["eq"] = "==" + table["gt"] = ">" + table["gte"] = ">=" + table["lt"] = "<" + table["lte"] = "<=" + table["contains"] = "contains" + table["like"] = "like" + return table[op] + +proc orderByClauses*(str: string): string = + var clauses = newSeq[string]() + var fragments = str.split(",") + let clause = peg""" + clause <- {[-+]} {field} + field <- ('id' / 'created' / 'modified' / path) + path <- '$' (objField)+ + ident <- [a-zA-Z0-9_]+ + objField <- '.' 
ident + """ + for f in fragments: + var matches = @["", ""] + if f.find(clause, matches) != -1: + var field = matches[1] + if field[0] == '$': + field = "json_extract(documents.data, '$1')" % matches[1] + if matches[0] == "-": + clauses.add("$1 COLLATE NOCASE DESC" % field) + else: + clauses.add("$1 COLLATE NOCASE ASC" % field) + return clauses.join(", ") + +proc selectClause*(str: string, options: var QueryOptions) = + let tokens = """ + path <- '$' (objItem / objField)+ + ident <- [a-zA-Z0-9_]+ + objIndex <- '[' \d+ ']' + objField <- '.' ident + objItem <- objField objIndex + """ + let fields = peg(""" + fields <- ^{field} (\s* ',' \s* {field})*$ + field <- path \s+ ('as' / 'AS') \s+ ident + """ & tokens) + let field = peg(""" + field <- ^{path} \s+ ('as' / 'AS') \s+ {ident}$ + """ & tokens) + var fieldMatches = newSeq[string](10) + if str.strip.match(fields, fieldMatches): + for m in fieldMatches: + if m.len > 0: + var rawTuple = newSeq[string](2) + if m.match(field, rawTuple): + options.jsonSelect.add((path: rawTuple[0], alias: rawTuple[1])) + +proc filterClauses*(str: string, options: var QueryOptions) = + let tokens = """ + operator <- 'not eq' / 'eq' / 'gte' / 'gt' / 'lte' / 'lt' / 'contains' / 'like' + value <- string / number / 'null' / 'true' / 'false' + string <- '"' ('\\"' . / [^"])* '"' + number <- '-'? '0' / [1-9] [0-9]* ('.' [0-9]+)? (( 'e' / 'E' ) ( '+' / '-' )? [0-9]+)? + path <- '$' (objItem / objField)+ + ident <- [a-zA-Z0-9_]+ + objIndex <- '[' \d+ ']' + objField <- '.' ident + objItem <- objField objIndex + """ + let clause = peg(""" + clause <- {path} \s+ {operator} \s+ {value} + """ & tokens) + let andClauses = peg(""" + andClauses <- ^{clause} (\s+ 'and' \s+ {clause})*$ + clause <- path \s+ operator \s+ value + """ & tokens) + let orClauses = peg(""" + orClauses <- ^{andClauses} (\s+ 'or' \s+ {andClauses})*$ + andClauses <- clause (\s+ 'and' \s+ clause)* + clause <- path \s+ operator \s+ value + """ & tokens) + var orClausesMatches = newSeq[string](10) + discard str.strip.match(orClauses, orClausesMatches) + var parsedClauses = newSeq[seq[seq[string]]]() + for orClause in orClausesMatches: + if orClause.len > 0: + var andClausesMatches = newSeq[string](10) + discard orClause.strip.match(andClauses, andClausesMatches) + var parsedAndClauses = newSeq[seq[string]]() + for andClause in andClausesMatches: + if andClause.len > 0: + var clauses = newSeq[string](3) + discard andClause.strip.match(clause, clauses) + clauses[1] = sqlOp(clauses[1]) + if clauses[2] == "true": + clauses[2] = "1" + elif clauses[2] == "false": + clauses[2] = "0" + parsedAndClauses.add clauses + if parsedAndClauses.len > 0: + parsedClauses.add parsedAndClauses + if parsedClauses.len == 0: + return + var currentArr = 0 + var tables = newSeq[string]() + let resOrClauses = parsedClauses.map do (it: seq[seq[string]]) -> string: + let resAndClauses = it.map do (x: seq[string]) -> string: + if x[1] == "contains": + currentArr = currentArr + 1 + tables.add "json_each(documents.data, '$1') AS arr$2" % [x[0], $currentArr] + return "arr$1.value == $2" % [$currentArr, x[2]] + else: + var arr = @[x[0], x[1], x[2]] + if x[1] == "like": + arr[2] = x[2].replace('*', '%') + return "json_extract(documents.data, '$1') $2 $3 " % arr + return resAndClauses.join(" AND ") + options.tables = options.tables & tables + options.jsonFilter = resOrClauses.join(" OR ") + +proc parseQueryOption*(fragment: string, options: var QueryOptions) = + if fragment == "": + return + var pair = fragment.split('=') + if pair.len < 2 or 
pair[1] == "": + raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) + try: + pair[1] = pair[1].replace("+", "%2B").decodeURL + except CatchableError: + raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) + case pair[0]: + of "filter": + filterClauses(pair[1], options) + if options.jsonFilter == "": + raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[1].replace("\"", "\\\"")) + of "select": + selectClause(pair[1], options) + if options.jsonSelect.len == 0: + raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[1].replace("\"", "\\\"")) + of "like": + options.like = pair[1] + of "search": + options.search = pair[1] + of "tags": + options.tags = pair[1] + of "created-after": + try: + options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg()) + of "created-before": + try: + options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg()) + of "modified-after": + try: + options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg()) + of "modified-before": + try: + options.modifiedBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg()) + of "limit": + try: + options.limit = pair[1].parseInt + except CatchableError: + raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) + of "offset": + try: + options.offset = pair[1].parseInt + except CatchableError: + raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) + of "sort": + let orderby = pair[1].orderByClauses() + if orderby != "": + options.orderby = orderby + else: + raise newException(EInvalidRequest, "Invalid sort value: $1" % pair[1]) + of "contents", "raw": + discard + else: + discard + +proc parseQueryOptions*(querystring: string, options: var QueryOptions) = + var fragments = querystring.split('&') + for f in fragments: + f.parseQueryOption(options) + +proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse = + if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch: + var ct = "" + let body = req.body.strip + if body == "": + return resError(Http400, "Bad request: No content specified for document.") + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + case ct: + of "application/json": + try: + discard body.parseJson() + except CatchableError: + return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) + else: + discard + return cb(req, LS, resource, id) + +proc patchTag(tags: var seq[string], index: int, op, path, value: string): bool = + LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." 
% [op, $index, $value, $tags.len]) + case op: + of "remove": + let tag = tags[index] + if not tag.startsWith("$"): + tags[index] = "" # Not removing element, otherwise subsequent indexes won't work! + else: + raise newException(EInvalidRequest, "cannot remove system tag: $1" % tag) + of "add": + if value.match(PEG_USER_TAG): + tags.insert(value, index) + else: + if value.strip == "": + raise newException(EInvalidRequest, "tag not specified." % value) + else: + raise newException(EInvalidRequest, "invalid tag: $1" % value) + of "replace": + if value.match(PEG_USER_TAG): + if tags[index].startsWith("$"): + raise newException(EInvalidRequest, "cannot replace system tag: $1" % tags[index]) + else: + tags[index] = value + else: + if value.strip == "": + raise newException(EInvalidRequest, "tag not specified." % value) + else: + raise newException(EInvalidRequest, "invalid tag: $1" % value) + of "test": + if tags[index] != value: + return false + else: + raise newException(EInvalidRequest, "invalid patch operation: $1" % op) + return true + +proc patchData*(data: var JsonNode, origData: JsonNode, op: string, path: string, value: JsonNode): bool = + LOG.debug("- PATCH -> $1 path $2 with $3" % [op, path, $value]) + var keys = path.replace(peg"^\/data\/", "").split("/") + if keys.len == 0: + raise newException(EInvalidRequest, "no valid path specified: $1" % path) + var d = data + var dorig = origData + var c = 1 + for key in keys: + if d.kind == JArray: + try: + var index = key.parseInt + if c >= keys.len: + d.elems[index] = value + case op: + of "remove": + d.elems.del(index) + of "add": + d.elems.insert(value, index) + of "replace": + d.elems[index] = value + of "test": + if d.elems[index] != value: + return false + else: + raise newException(EInvalidRequest, "invalid patch operation: $1" % op) + else: + d = d[index] + dorig = dorig[index] + except CatchableError: + raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path]) + else: + if c >= keys.len: + case op: + of "remove": + if d.hasKey(key): + d.delete(key) + else: + raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + of "add": + d[key] = value + of "replace": + if d.hasKey(key): + d[key] = value + else: + raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + of "test": + if dorig.hasKey(key): + if dorig[key] != value: + return false + else: + raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + else: + raise newException(EInvalidRequest, "invalid patch operation: $1" % op) + else: + d = d[key] + dorig = dorig[key] + c += 1 + return true + + +proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[string], op: string, path: string, value: JsonNode): bool = + var matches = @[""] + let p = peg""" + path <- ^tagPath / fieldPath$ + tagPath <- '\/tags\/' {\d+} + fieldPath <- '\/data\/' ident ('\/' ident)* + ident <- [a-zA-Z0-9_]+ / '-' + """ + if path.find(p, matches) == -1: + raise newException(EInvalidRequest, "cannot patch path '$1'" % path) + if path.match(peg"^\/tags\/"): + let index = matches[0].parseInt + if value.kind != JString: + raise newException(EInvalidRequest, "tag '$1' is not a string." 
% $value) + let tag = value.getStr + return patchTag(tags, index, op, path, tag) + elif tags.contains("$subtype:json"): + return patchData(data, origData, op, path, value) + else: + raise newException(EInvalidRequest, "cannot patch data of a non-JSON document.") + +# Low level procs + +proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveTag(id, options) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + if doc == newJNull(): + result = resTagNotFound(id) + else: + result.content = $doc + result.code = Http200 + +proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveIndex(id, options) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + if doc == newJNull(): + result = resIndexNotFound(id) + else: + result.content = $doc + result.code = Http200 + +proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveRawDocument(id, options) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + if doc == "": + result = resDocumentNotFound(id) + else: + result.content = doc + result.code = Http200 + +proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveDocument(id, options) + if doc.data == "": + result = resDocumentNotFound(id) + else: + result.headers = doc.contenttype.ctHeader + setOrigin(LS, req, result.headers) + result.content = doc.data + result.code = Http200 + +proc deleteDocument*(LS: LiteStore, id: string, req: LSRequest): LSResponse = + let doc = LS.store.retrieveDocument(id) + if doc.data == "": + result = resDocumentNotFound(id) + else: + try: + let res = LS.store.destroyDocument(id) + if res == 0: + result = resError(Http500, "Unable to delete document '$1'" % id) + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Content-Length"] = "0" + result.content = "" + result.code = Http204 + except CatchableError: + result = resError(Http500, "Unable to delete document '$1'" % id) + +proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = + var options = options + let t0 = cpuTime() + let docs = LS.store.retrieveTags(options) + let orig_limit = options.limit + let orig_offset = options.offset + options.limit = 0 + options.offset = 0 + options.select = @["COUNT(tag_id)"] + let total = LS.store.countTags(prepareSelectTagsQuery(options), options.like.replace("*", "%")) + var content = newJObject() + if options.like != "": + content["like"] = %(options.like.decodeURL) + if orig_limit > 0: + content["limit"] = %orig_limit + if orig_offset > 0: + content["offset"] = %orig_offset + content["total"] = %total + content["execution_time"] = %(cputime()-t0) + content["results"] = docs + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = + var options = options + let t0 = cpuTime() + let docs = LS.store.retrieveIndexes(options) + let orig_limit = options.limit + let orig_offset = options.offset + options.limit = 0 + options.offset = 0 + options.select = @["COUNT(name)"] + let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), 
options.like.replace("*", "%")) + var content = newJObject() + if options.like != "": + content["like"] = %(options.like.decodeURL) + if orig_limit > 0: + content["limit"] = %orig_limit + if orig_offset > 0: + content["offset"] = %orig_offset + content["total"] = %total + content["execution_time"] = %(cputime()-t0) + content["results"] = docs + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = + var options = options + let t0 = cpuTime() + let docs = LS.store.retrieveRawDocuments(options) + let orig_limit = options.limit + let orig_offset = options.offset + options.limit = 0 + options.offset = 0 + options.select = @["COUNT(docid)"] + let total = LS.store.retrieveRawDocuments(options)[0].num + var content = newJObject() + if options.folder != "": + content["folder"] = %(options.folder) + if options.search != "": + content["search"] = %(options.search.decodeURL) + if options.tags != "": + content["tags"] = newJArray() + for tag in options.tags.replace("+", "%2B").decodeURL.split(","): + content["tags"].add(%tag) + if orig_limit > 0: + content["limit"] = %orig_limit + if orig_offset > 0: + content["offset"] = %orig_offset + if options.orderby != "": + content["sort"] = %options.orderby + content["total"] = %total + content["execution_time"] = %(cputime()-t0) + content["results"] = docs + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc getInfo*(LS: LiteStore, req: LSRequest): LSResponse = + let info = LS.store.retrieveInfo() + let version = info[0] + let total_documents = info[1] + let total_tags = LS.store.countTags() + let tags = LS.store.retrieveTagsWithTotals() + var content = newJObject() + content["version"] = %(LS.appname & " v" & LS.appversion) + content["datastore_version"] = %version + content["size"] = %($((LS.file.getFileSize().float/(1024*1024)).formatFloat(ffDecimal, 2)) & " MB") + content["read_only"] = %LS.readonly + content["log_level"] = %LS.loglevel + if LS.directory.len == 0: + content["directory"] = newJNull() + else: + content["directory"] = %LS.directory + content["mount"] = %LS.mount + content["total_documents"] = %total_documents + content["total_tags"] = %total_tags + content["tags"] = tags + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc putIndex*(LS: LiteStore, id, field: string, req: LSRequest): LSResponse = + try: + if (not id.match(PEG_INDEX)): + return resError(Http400, "invalid index ID: $1" % id) + if (not field.match(PEG_JSON_FIELD)): + return resError(Http400, "invalid field path: $1" % field) + if (LS.store.retrieveIndex(id) != newJNull()): + return resError(Http409, "Index already exists: $1" % id) + LS.store.createIndex(id, field) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field] + result.code = Http200 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to create index.") + +proc deleteIndex*(LS: LiteStore, id: string, req: LSRequest): LSResponse = + if (not id.match(PEG_INDEX)): + return resError(Http400, "invalid index ID: $1" % id) + if (LS.store.retrieveIndex(id) == newJNull()): + return resError(Http404, "Index not found: $1" % id) + try: + LS.store.dropIndex(id) + result.headers = 
newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Content-Length"] = "0" + result.content = "" + result.code = Http204 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to delete index.") + +proc postDocument*(LS: LiteStore, body: string, ct: string, folder="", req: LSRequest): LSResponse = + if not folder.isFolder: + return resError(Http400, "Invalid folder specified when creating document: $1" % folder) + try: + var doc = LS.store.createDocument(folder, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http201 + else: + result = resError(Http500, "Unable to create document.") + except CatchableError: + eWarn() + result = resError(Http500, "Unable to create document.") + +proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, req: LSRequest): LSResponse = + if id.isFolder: + return resError(Http400, "Invalid ID '$1' (Document IDs cannot end with '/')." % id) + let doc = LS.store.retrieveDocument(id) + if doc.data == "": + # Create a new document + var doc = LS.store.createDocument(id, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http201 + else: + result = resError(Http500, "Unable to create document.") + else: + # Update existing document + try: + var doc = LS.store.updateDocument(id, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http200 + else: + result = resError(Http500, "Unable to update document '$1'." % id) + except CatchableError: + result = resError(Http500, "Unable to update document '$1'." % id) + +proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse = + var apply = true + let jbody = body.parseJson + if jbody.kind != JArray: + return resError(Http400, "Bad request: PATCH request body is not an array.") + var options = newQueryOptions() + options.select = @["documents.id AS id", "created", "modified", "data"] + let doc = LS.store.retrieveRawDocument(id, options) + if doc == "": + return resDocumentNotFound(id) + let jdoc = doc.parseJson + var tags = newSeq[string]() + var origTags = newSeq[string]() + for tag in jdoc["tags"].items: + tags.add(tag.str) + origTags.add(tag.str) + var data: JsonNode + var origData: JsonNode + if tags.contains("$subtype:json"): + try: + origData = jdoc["data"].getStr.parseJson + data = origData.copy + except CatchableError: + discard + var c = 1 + for item in jbody.items: + if item.hasKey("op") and item.hasKey("path"): + if not item.hasKey("value"): + item["value"] = %"" + try: + apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) + if not apply: + break + except CatchableError: + return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) + else: + return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c) + c.inc + if apply: + if origData.len > 0 and origData != data: + try: + var doc = LS.store.updateDocument(id, data.pretty, "application/json") + if doc == "": + return resError(Http500, "Unable to patch document '$1'." 
% id) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg()) + if origTags != tags: + try: + for t1 in jdoc["tags"].items: + discard LS.store.destroyTag(t1.str, id, true) + for t2 in tags: + if t2 != "": + LS.store.createTag(t2, id, true) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) + return LS.getRawDocument(id, newQueryOptions(), req) + +# Main routing + +proc options*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + case resource: + of "info": + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + if id != "": + return resError(Http404, "Info '$1' not found." % id) + else: + result.code = Http204 + result.content = "" + of "dir": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "tags": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "indexes": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + if id != "": + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE" + else: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "docs": + var folder: string + if id.isFolder: + folder = id + if folder.len > 0: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, POST, PUT" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST, PUT" + elif id != "": + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" + result.headers["Allow-Patch"] = "application/json-patch+json" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, 
OPTIONS, PUT, PATCH, DELETE" + else: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, POST" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST" + else: + discard # never happens really. + +proc head*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + var options = newQueryOptions() + options.select = @["documents.id AS id", "created", "modified"] + if id.isFolder: + options.folder = id + try: + parseQueryOptions(req.url.query, options); + if id != "" and options.folder == "": + result = LS.getRawDocument(id, options, req) + result.content = "" + else: + result = LS.getRawDocuments(options, req) + result.content = "" + except CatchableError: + return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) + +proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + case resource: + of "docs": + var options = newQueryOptions() + if id.isFolder: + options.folder = id + if req.url.query.contains("contents=false"): + options.select = @["documents.id AS id", "created", "modified"] + try: + parseQueryOptions(req.url.query, options); + if id != "" and options.folder == "": + if req.url.query.contains("raw=true") or req.headers.hasKey("Accept") and req.headers["Accept"] == "application/json": + return LS.getRawDocument(id, options, req) + else: + return LS.getDocument(id, options, req) + else: + return LS.getRawDocuments(options, req) + except CatchableError: + let e = getCurrentException() + let trace = e.getStackTrace() + echo trace + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "tags": + var options = newQueryOptions() + try: + parseQueryOptions(req.url.query, options); + if id != "": + return LS.getTag(id, options, req) + else: + return LS.getTags(options, req) + except CatchableError: + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "indexes": + var options = newQueryOptions() + try: + parseQueryOptions(req.url.query, options); + if id != "": + return LS.getIndex(id, options, req) + else: + return LS.getIndexes(options, req) + except CatchableError: + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "info": + if id != "": + return resError(Http404, "Info '$1' not found." % id) + return LS.getInfo(req) + else: + discard # never happens really. 
+ +proc post*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + var ct = "text/plain" + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + return LS.postDocument(req.body.strip, ct, id, req) + +proc put*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + if resource == "indexes": + var field = "" + try: + field = parseJson(req.body.strip)["field"].getStr + except CatchableError: + return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) + return LS.putIndex(id, field, req) + else: # Assume docs + var ct = "text/plain" + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + return LS.putDocument(id, req.body.strip, ct, req) + else: + return resError(Http400, "Bad request: document ID must be specified in PUT requests.") + +proc delete*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + if resource == "indexes": + return LS.deleteIndex(id, req) + else: # Assume docs + return LS.deleteDocument(id, req) + else: + return resError(Http400, "Bad request: document ID must be specified in DELETE requests.") + +proc patch*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + return LS.patchDocument(id, req.body, req) + else: + return resError(Http400, "Bad request: document ID must be specified in PATCH requests.") + +proc serveFile*(req: LSRequest, LS: LiteStore, id: string): LSResponse = + let path = LS.directory / id + var reqMethod = $req.reqMethod + if req.headers.hasKey("X-HTTP-Method-Override"): + reqMethod = req.headers["X-HTTP-Method-Override"] + case reqMethod.toUpperAscii: + of "OPTIONS": + return validate(req, LS, "dir", id, options) + of "GET": + if path.fileExists: + try: + let contents = path.readFile + let parts = path.splitFile + if CONTENT_TYPES.hasKey(parts.ext): + result.headers = CONTENT_TYPES[parts.ext].ctHeader + else: + result.headers = ctHeader("text/plain") + setOrigin(LS, req, result.headers) + result.content = contents + result.code = Http200 + except CatchableError: + return resError(Http500, "Unable to read file '$1'." % path) + else: + return resError(Http404, "File '$1' not found." 
% path) + else: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + +proc route*(req: LSRequest, LS: LiteStore, resource = "docs", id = ""): LSResponse = + var reqMethod = $req.reqMethod + if req.headers.hasKey("X-HTTP-Method-Override"): + reqMethod = req.headers["X-HTTP-Method-Override"] + case reqMethod.toUpperAscii: + of "POST": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, post) + of "PUT": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, put) + of "DELETE": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, delete) + of "HEAD": + return validate(req, LS, resource, id, head) + of "OPTIONS": + return validate(req, LS, resource, id, options) + of "GET": + return validate(req, LS, resource, id, get) + of "PATCH": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, patch) + else: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + +proc newSimpleLSRequest(meth: HttpMethod, resource = "", id = "", body = "", params = "", headers = newHttpHeaders()): LSRequest = + result.reqMethod = meth + result.body = body + result.headers = headers + result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", resource, id, params]) + +proc get(resource, id: string, params = ""): LSResponse = + return newSimpleLSRequest(HttpGet, resource, id, "", params).get(LS, resource, id) + +proc post(resource, folder, body: string, ct = ""): LSResponse = + var headers = newHttpHeaders() + if ct != "": + headers["Content-Type"] = ct + return newSimpleLSRequest(HttpPost, resource, "", body, "", headers).post(LS, resource, folder & "/") + +proc put(resource, id, body: string, ct = ""): LSResponse = + var headers = newHttpHeaders() + if ct != "": + headers["Content-Type"] = ct + return newSimpleLSRequest(HttpPut, resource, id, body, "", headers).put(LS, resource, id) + +proc patch(resource, id, body: string): LSResponse = + var headers = newHttpHeaders() + headers["Content-Type"] = "application/json" + return newSimpleLSRequest(HttpPatch, resource, id, body, "", headers).patch(LS, resource, id) + +proc delete(resource, id: string): LSResponse = + return newSimpleLSRequest(HttpPatch, resource, id).delete(LS, resource, id) + +proc head(resource, id: string): LSResponse = + return newSimpleLSRequest(HttpHead, resource, id).head(LS, resource, id) + +proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, origId: string) = + var api_idx = ctx.duk_push_object() + # GET + var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let params = duk_get_string(ctx, 2) + let resp = get($resource, $id, $params) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, get, 3) + discard ctx.duk_put_prop_string(api_idx, "get") + # POST + var post: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let folder = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let ct = duk_get_string(ctx, 3) + 
let resp = post($resource, $folder, $body, $ct) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, post, 4) + discard ctx.duk_put_prop_string(api_idx, "post") + # PUT + var put: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let ct = duk_get_string(ctx, 3) + let resp = put($resource, $id, $body, $ct) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, put, 4) + discard ctx.duk_put_prop_string(api_idx, "put") + # PATCH + var patch: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let resp = patch($resource, $id, $body) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, patch, 3) + discard ctx.duk_put_prop_string(api_idx, "patch") + # DELETE + var delete: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let resp = delete($resource, $id) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, delete, 2) + discard ctx.duk_put_prop_string(api_idx, "delete") + # HEAD + var head: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let resp = head($resource, $id) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, head, 2) + discard ctx.duk_put_prop_string(api_idx, "head") + discard ctx.duk_put_global_string("$store") + +proc jError(ctx: DTContext): LSResponse = + return resError(Http500, "Middleware Error: " & $ctx.duk_safe_to_string(-1)) + +proc getMiddleware*(LS: LiteStore, id: string): string = + if not LS.middleware.hasKey(id): + # Attempt to retrieve resource from system documents + let options = newQueryOptions(true) + let doc = LS.store.retrieveDocument("middleware/" & id & ".js", options) + result = doc.data + if result == "": + LOG.warn("Middleware '$1' not found" % id) + else: + result = LS.middleware[id] + +proc getMiddlewareSeq(resource, id, meth: string): seq[string] = + result = newSeq[string]() + if LS.config.kind != JObject or not LS.config.hasKey("resources"): + return + var reqUri = "/" & resource & "/" & id + if reqUri[^1] == '/': + reqUri.removeSuffix({'/'}) + let parts = reqUri.split("/") + let ancestors = parts[1..parts.len-2] + 
var currentPath = "" + var currentPaths = "" + for p in ancestors: + currentPath &= "/" & p + currentPaths = currentPath & "/*" + if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("middleware"): + let mw = LS.config["resources"][currentPaths][meth]["middleware"] + if (mw.kind == JArray): + for m in mw: + result.add m.getStr + if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): + let mw = LS.config["resources"][reqUri][meth]["middleware"] + if (mw.kind == JArray): + for m in mw: + result.add m.getStr + +proc execute*(req: var LSRequest, LS: LiteStore, resource, id: string): LSResponse = + let middleware = getMiddlewareSeq(resource, id, $req.reqMethod) + LOG.debug("Middleware: " & middleware.join(" -> ")); + if middleware.len == 0: + return route(req, LS, resource, id) + var jReq = $(%* req) + LOG.debug("Request: " & jReq) + var jRes = """{ + "code": 200, + "content": {}, + "final": false, + "headers": { + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Headers": "Authorization, Content-Type", + "Server": "$1", + "Content-Type": "application/json" + } + }""" % [LS.appname & "/" & LS.appversion] + var context = "{}" + # Create execution context + var ctx = duk_create_heap_default() + duk_console_init(ctx) + duk_print_alert_init(ctx) + LS.registerStoreApi(ctx, resource, id) + if ctx.duk_peval_string(cstring("($1)" % $jReq)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$req") + if ctx.duk_peval_string(cstring("($1)" % $jRes)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$res") + if ctx.duk_peval_string(cstring("($1)" % $context)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$ctx") + # Middleware-specific functions + var i = 0 + var abort = 0 + while abort != 1 and i < middleware.len: + let code = LS.getMiddleware(middleware[i]) + LOG.debug("Evaluating middleware '$1'" % middleware[i]) + if ctx.duk_peval_string(code.cstring) != 0: + return jError(ctx) + abort = ctx.duk_get_boolean(-1) + i.inc + # Retrieve response, and request + if ctx.duk_peval_string("JSON.stringify($res);") != 0: + return jError(ctx) + let fRes = parseJson($(ctx.duk_get_string(-1))).newLSResponse + if ctx.duk_peval_string("JSON.stringify($req);") != 0: + return jError(ctx) + let fReq = parseJson($(ctx.duk_get_string(-1))).newLSRequest() + ctx.duk_destroy_heap(); + LOG.debug("abort: $1", [$abort]) + if abort == 1: + return fRes + return route(fReq, LS, resource, id)
M src/litestorepkg/lib/api_v7.nim

@@ -1,1269 +1,1269 @@

-import - asynchttpserver, - strutils, - sequtils, - cgi, - strtabs, - pegs, - json, - os, - uri, - tables, - times -import - types, - contenttypes, - core, - utils, - logger, - duktape - -# Helper procs - -proc sqlOp(op: string): string = - let table = newStringTable() - table["not eq"] = "<>" - table["eq"] = "==" - table["gt"] = ">" - table["gte"] = ">=" - table["lt"] = "<" - table["lte"] = "<=" - table["contains"] = "contains" - table["like"] = "like" - return table[op] - -proc orderByClauses*(str: string): string = - var clauses = newSeq[string]() - var fragments = str.split(",") - let clause = peg""" - clause <- {[-+]} {field} - field <- ('id' / 'created' / 'modified' / path) - path <- '$' (objField)+ - ident <- [a-zA-Z0-9_]+ - objField <- '.' ident - """ - for f in fragments: - var matches = @["", ""] - if f.find(clause, matches) != -1: - var field = matches[1] - if field[0] == '$': - field = "json_extract(documents.data, '$1')" % matches[1] - if matches[0] == "-": - clauses.add("$1 COLLATE NOCASE DESC" % field) - else: - clauses.add("$1 COLLATE NOCASE ASC" % field) - return clauses.join(", ") - -proc selectClause*(str: string, options: var QueryOptions) = - let tokens = """ - path <- '$' (objItem / objField)+ - ident <- [a-zA-Z0-9_]+ - objIndex <- '[' \d+ ']' - objField <- '.' ident - objItem <- objField objIndex - """ - let fields = peg(""" - fields <- ^{field} (\s* ',' \s* {field})*$ - field <- path \s+ ('as' / 'AS') \s+ ident - """ & tokens) - let field = peg(""" - field <- ^{path} \s+ ('as' / 'AS') \s+ {ident}$ - """ & tokens) - var fieldMatches = newSeq[string](10) - if str.strip.match(fields, fieldMatches): - for m in fieldMatches: - if m.len > 0: - var rawTuple = newSeq[string](2) - if m.match(field, rawTuple): - options.jsonSelect.add((path: rawTuple[0], alias: rawTuple[1])) - -proc filterClauses*(str: string, options: var QueryOptions) = - let tokens = """ - operator <- 'not eq' / 'eq' / 'gte' / 'gt' / 'lte' / 'lt' / 'contains' / 'like' - value <- string / number / 'null' / 'true' / 'false' - string <- '"' ('\\"' . / [^"])* '"' - number <- '-'? '0' / [1-9] [0-9]* ('.' [0-9]+)? (( 'e' / 'E' ) ( '+' / '-' )? [0-9]+)? - path <- '$' (objItem / objField)+ - ident <- [a-zA-Z0-9_]+ - objIndex <- '[' \d+ ']' - objField <- '.' 
ident - objItem <- objField objIndex - """ - let clause = peg(""" - clause <- {path} \s+ {operator} \s+ {value} - """ & tokens) - let andClauses = peg(""" - andClauses <- ^{clause} (\s+ 'and' \s+ {clause})*$ - clause <- path \s+ operator \s+ value - """ & tokens) - let orClauses = peg(""" - orClauses <- ^{andClauses} (\s+ 'or' \s+ {andClauses})*$ - andClauses <- clause (\s+ 'and' \s+ clause)* - clause <- path \s+ operator \s+ value - """ & tokens) - var orClausesMatches = newSeq[string](10) - discard str.strip.match(orClauses, orClausesMatches) - var parsedClauses = newSeq[seq[seq[string]]]() - for orClause in orClausesMatches: - if orClause.len > 0: - var andClausesMatches = newSeq[string](10) - discard orClause.strip.match(andClauses, andClausesMatches) - var parsedAndClauses = newSeq[seq[string]]() - for andClause in andClausesMatches: - if andClause.len > 0: - var clauses = newSeq[string](3) - discard andClause.strip.match(clause, clauses) - clauses[1] = sqlOp(clauses[1]) - if clauses[2] == "true": - clauses[2] = "1" - elif clauses[2] == "false": - clauses[2] = "0" - parsedAndClauses.add clauses - if parsedAndClauses.len > 0: - parsedClauses.add parsedAndClauses - if parsedClauses.len == 0: - return - var currentArr = 0 - var tables = newSeq[string]() - let resOrClauses = parsedClauses.map do (it: seq[seq[string]]) -> string: - let resAndClauses = it.map do (x: seq[string]) -> string: - if x[1] == "contains": - currentArr = currentArr + 1 - tables.add "json_each(documents.data, '$1') AS arr$2" % [x[0], $currentArr] - return "arr$1.value == $2" % [$currentArr, x[2]] - else: - var arr = @[x[0], x[1], x[2]] - if x[1] == "like": - arr[2] = x[2].replace('*', '%') - return "json_extract(documents.data, '$1') $2 $3 " % arr - return resAndClauses.join(" AND ") - options.tables = options.tables & tables - options.jsonFilter = resOrClauses.join(" OR ") - -proc parseQueryOption*(fragment: string, options: var QueryOptions) = - if fragment == "": - return - var pair = fragment.split('=') - if pair.len < 2 or pair[1] == "": - raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) - try: - pair[1] = pair[1].replace("+", "%2B").decodeURL - except: - raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) - case pair[0]: - of "filter": - filterClauses(pair[1], options) - if options.jsonFilter == "": - raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[1].replace("\"", "\\\"")) - of "select": - selectClause(pair[1], options) - if options.jsonSelect.len == 0: - raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[1].replace("\"", "\\\"")) - of "like": - options.like = pair[1] - of "search": - options.search = pair[1] - of "tags": - options.tags = pair[1] - of "created-after": - try: - options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg()) - of "created-before": - try: - options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg()) - of "modified-after": - try: - options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg()) - of "modified-before": - try: - options.modifiedBefore = 
pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg()) - of "limit": - try: - options.limit = pair[1].parseInt - except: - raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) - of "offset": - try: - options.offset = pair[1].parseInt - except: - raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) - of "sort": - let orderby = pair[1].orderByClauses() - if orderby != "": - options.orderby = orderby - else: - raise newException(EInvalidRequest, "Invalid sort value: $1" % pair[1]) - of "contents", "raw": - discard - else: - discard - -proc parseQueryOptions*(querystring: string, options: var QueryOptions) = - var q = querystring - if q.startsWith("?"): - q = q[1 .. q.len - 1] - var fragments = q.split('&') - for f in fragments: - f.parseQueryOption(options) - -proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse = - if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch: - var ct = "" - let body = req.body.strip - if body == "": - return resError(Http400, "Bad request: No content specified for document.") - if req.headers.hasKey("Content-Type"): - ct = req.headers["Content-Type"] - case ct: - of "application/json": - try: - discard body.parseJson() - except: - return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) - else: - discard - return cb(req, LS, resource, id) - -proc patchTag(tags: var seq[string], index: int, op, path, value: string): bool = - LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." % [op, $index, $value, $tags.len]) - case op: - of "remove": - let tag = tags[index] - if not tag.startsWith("$"): - tags[index] = "" # Not removing element, otherwise subsequent indexes won't work! - else: - raise newException(EInvalidRequest, "cannot remove system tag: $1" % tag) - of "add": - if value.match(PEG_USER_TAG): - tags.insert(value, index) - else: - if value.strip == "": - raise newException(EInvalidRequest, "tag not specified." % value) - else: - raise newException(EInvalidRequest, "invalid tag: $1" % value) - of "replace": - if value.match(PEG_USER_TAG): - if tags[index].startsWith("$"): - raise newException(EInvalidRequest, "cannot replace system tag: $1" % tags[index]) - else: - tags[index] = value - else: - if value.strip == "": - raise newException(EInvalidRequest, "tag not specified." 
% value) - else: - raise newException(EInvalidRequest, "invalid tag: $1" % value) - of "test": - if tags[index] != value: - return false - else: - raise newException(EInvalidRequest, "invalid patch operation: $1" % op) - return true - -proc patchData*(data: var JsonNode, origData: JsonNode, op: string, path: string, value: JsonNode): bool = - LOG.debug("- PATCH -> $1 path $2 with $3" % [op, path, $value]) - var keys = path.replace(peg"^\/data\/", "").split("/") - if keys.len == 0: - raise newException(EInvalidRequest, "no valid path specified: $1" % path) - var d = data - var dorig = origData - var c = 1 - for key in keys: - if d.kind == JArray: - try: - var index = key.parseInt - if c >= keys.len: - d.elems[index] = value - case op: - of "remove": - d.elems.del(index) - of "add": - d.elems.insert(value, index) - of "replace": - d.elems[index] = value - of "test": - if d.elems[index] != value: - return false - else: - raise newException(EInvalidRequest, "invalid patch operation: $1" % op) - else: - d = d[index] - dorig = dorig[index] - except: - raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path]) - else: - if c >= keys.len: - case op: - of "remove": - if d.hasKey(key): - d.delete(key) - else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) - of "add": - d[key] = value - of "replace": - if d.hasKey(key): - d[key] = value - else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) - of "test": - if dorig.hasKey(key): - if dorig[key] != value: - return false - else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) - else: - raise newException(EInvalidRequest, "invalid patch operation: $1" % op) - else: - d = d[key] - dorig = dorig[key] - c += 1 - return true - - -proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[string], op: string, path: string, value: JsonNode): bool = - var matches = @[""] - let p = peg""" - path <- ^tagPath / fieldPath$ - tagPath <- '\/tags\/' {\d+} - fieldPath <- '\/data\/' ident ('\/' ident)* - ident <- [a-zA-Z0-9_]+ / '-' - """ - if path.find(p, matches) == -1: - raise newException(EInvalidRequest, "cannot patch path '$1'" % path) - if path.match(peg"^\/tags\/"): - let index = matches[0].parseInt - if value.kind != JString: - raise newException(EInvalidRequest, "tag '$1' is not a string." 
% $value) - let tag = value.getStr - return patchTag(tags, index, op, path, tag) - elif tags.contains("$subtype:json"): - return patchData(data, origData, op, path, value) - else: - raise newException(EInvalidRequest, "cannot patch data of a non-JSON document.") - -# Low level procs - -proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveTag(id, options) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - if doc == newJNull(): - result = resTagNotFound(id) - else: - result.content = $doc - result.code = Http200 - -proc getStore*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - if (not LSDICT.hasKey(id)): - return resStoreNotFound(id) - let store = LSDICT[id] - var doc = newJObject() - doc["id"] = %id - doc["file"] = %store.file - doc["config"] = store.config - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = $doc - result.code = Http200 - -proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveIndex(id, options) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - if doc == newJNull(): - result = resIndexNotFound(id) - else: - result.content = $doc - result.code = Http200 - -proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveRawDocument(id, options) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - if doc == "": - result = resDocumentNotFound(id) - else: - result.content = doc - result.code = Http200 - -proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveDocument(id, options) - if doc.data == "": - result = resDocumentNotFound(id) - else: - result.headers = doc.contenttype.ctHeader - setOrigin(LS, req, result.headers) - result.content = doc.data - result.code = Http200 - -proc deleteDocument*(LS: LiteStore, id: string, req: LSRequest): LSResponse = - let doc = LS.store.retrieveDocument(id) - if doc.data == "": - result = resDocumentNotFound(id) - else: - try: - let res = LS.store.destroyDocument(id) - if res == 0: - result = resError(Http500, "Unable to delete document '$1'" % id) - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Content-Length"] = "0" - result.content = "" - result.code = Http204 - except: - result = resError(Http500, "Unable to delete document '$1'" % id) - -proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = - var options = options - let t0 = cpuTime() - let docs = LS.store.retrieveTags(options) - let orig_limit = options.limit - let orig_offset = options.offset - options.limit = 0 - options.offset = 0 - options.select = @["COUNT(tag_id)"] - let total = LS.store.countTags(prepareSelectTagsQuery(options), options.like.replace("*", "%")) - var content = newJObject() - if options.like != "": - content["like"] = %(options.like.decodeURL) - if orig_limit > 0: - content["limit"] = %orig_limit - if orig_offset > 0: - content["offset"] = %orig_offset - content["total"] = %total - content["execution_time"] = %(cputime()-t0) - content["results"] = docs - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc getStores(LS: LiteStore, options: 
QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = - let t0 = cpuTime() - var docs = newJArray() - for k, v in LSDICT.pairs: - var store = newJObject() - store["id"] = %k - store["file"] = %v.file - store["config"] = v.config - docs.add(store) - var content = newJObject() - content["total"] = %LSDICT.len - content["execution_time"] = %(cputime()-t0) - content["results"] = docs - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = - var options = options - let t0 = cpuTime() - let docs = LS.store.retrieveIndexes(options) - let orig_limit = options.limit - let orig_offset = options.offset - options.limit = 0 - options.offset = 0 - options.select = @["COUNT(name)"] - let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), options.like.replace("*", "%")) - var content = newJObject() - if options.like != "": - content["like"] = %(options.like.decodeURL) - if orig_limit > 0: - content["limit"] = %orig_limit - if orig_offset > 0: - content["offset"] = %orig_offset - content["total"] = %total - content["execution_time"] = %(cputime()-t0) - content["results"] = docs - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = - var options = options - let t0 = cpuTime() - let docs = LS.store.retrieveRawDocuments(options) - let orig_limit = options.limit - let orig_offset = options.offset - options.limit = 0 - options.offset = 0 - options.select = @["COUNT(docid)"] - let total = LS.store.retrieveRawDocuments(options)[0].num - var content = newJObject() - if options.folder != "": - content["folder"] = %(options.folder) - if options.search != "": - content["search"] = %(options.search.decodeURL) - if options.tags != "": - content["tags"] = newJArray() - for tag in options.tags.replace("+", "%2B").decodeURL.split(","): - content["tags"].add(%tag) - if orig_limit > 0: - content["limit"] = %orig_limit - if orig_offset > 0: - content["offset"] = %orig_offset - if options.orderby != "": - content["sort"] = %options.orderby - content["total"] = %total - content["execution_time"] = %(cputime()-t0) - content["results"] = docs - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc getInfo*(LS: LiteStore, req: LSRequest): LSResponse = - let info = LS.store.retrieveInfo() - let version = info[0] - let total_documents = info[1] - let total_tags = LS.store.countTags() - let tags = LS.store.retrieveTagsWithTotals() - var content = newJObject() - content["version"] = %(LS.appname & " v" & LS.appversion) - content["datastore_version"] = %version - content["api_version"] = %7 - content["size"] = %($((LS.file.getFileSize().float/(1024*1024)).formatFloat(ffDecimal, 2)) & " MB") - content["read_only"] = %LS.readonly - content["log_level"] = %LS.loglevel - if LS.directory.len == 0: - content["directory"] = newJNull() - else: - content["directory"] = %LS.directory - content["mount"] = %LS.mount - if LS.config != newJNull() and LS.config.hasKey("stores") and LS.config["stores"].len > 0: - content["additional_stores"] = %toSeq(LS.config["stores"].keys) - else: - content["additional_stores"] = newJArray() - if LS.auth != newJNull(): - content["auth"] = %true - 
else: - content["auth"] = %false - content["total_documents"] = %total_documents - content["total_tags"] = %total_tags - content["tags"] = tags - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc putIndex*(LS: LiteStore, id, field: string, req: LSRequest): LSResponse = - try: - if (not id.match(PEG_INDEX)): - return resError(Http400, "invalid index ID: $1" % id) - if (not field.match(PEG_JSON_FIELD)): - return resError(Http400, "invalid field path: $1" % field) - if (LS.store.retrieveIndex(id) != newJNull()): - return resError(Http409, "Index already exists: $1" % id) - LS.store.createIndex(id, field) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field] - result.code = Http201 - except: - eWarn() - result = resError(Http500, "Unable to create index.") - -proc putStore*(LS: LiteStore, id: string, config: JsonNode, req: LSRequest): LSResponse = - try: - if (not id.match(PEG_STORE) or id == "master"): - return resError(Http400, "invalid store ID: $1" % id) - if (LSDICT.hasKey(id)): - return resError(Http409, "Store already exists: $1" % id) - let store = LS.addStore(id, id & ".db", config) - LS.updateConfig() - LSDICT[id] = store - result = getStore(LS, id, newQueryOptions(), req) - result.code = Http201 - except: - eWarn() - result = resError(Http500, "Unable to create store.") - -proc deleteIndex*(LS: LiteStore, id: string, req: LSRequest): LSResponse = - if (not id.match(PEG_INDEX)): - return resError(Http400, "invalid index ID: $1" % id) - if (LS.store.retrieveIndex(id) == newJNull()): - return resError(Http404, "Index not found: $1" % id) - try: - LS.store.dropIndex(id) - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Content-Length"] = "0" - result.content = "" - result.code = Http204 - except: - eWarn() - result = resError(Http500, "Unable to delete index.") - -proc deleteStore*(LS: LiteStore, id: string, req: LSRequest): LSResponse = - if (not id.match(PEG_STORE)): - return resError(Http400, "invalid store ID: $1" % id) - if (not LSDICT.hasKey(id)): - return resError(Http404, "Store not found: $1" % id) - try: - LSDICT.del(id) - if LS.config.hasKey("stores") and LS.config["stores"].hasKey(id): - LS.config["stores"].delete(id) - LS.updateConfig() - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Content-Length"] = "0" - result.content = "" - result.code = Http204 - except: - eWarn() - result = resError(Http500, "Unable to delete index.") - -proc postDocument*(LS: LiteStore, body: string, ct: string, folder="", req: LSRequest): LSResponse = - if not folder.isFolder: - return resError(Http400, "Invalid folder specified when creating document: $1" % folder) - try: - var doc = LS.store.createDocument(folder, body, ct) - if doc != "": - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = doc - result.code = Http201 - else: - result = resError(Http500, "Unable to create document.") - except: - eWarn() - result = resError(Http500, "Unable to create document.") - -proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, req: LSRequest): LSResponse = - if id.isFolder: - return resError(Http400, "Invalid ID '$1' (Document IDs cannot end with '/')." 
% id) - let doc = LS.store.retrieveDocument(id) - if doc.data == "": - # Create a new document - var doc = LS.store.createDocument(id, body, ct) - if doc != "": - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = doc - result.code = Http201 - else: - result = resError(Http500, "Unable to create document.") - else: - # Update existing document - try: - var doc = LS.store.updateDocument(id, body, ct) - if doc != "": - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = doc - result.code = Http200 - else: - result = resError(Http500, "Unable to update document '$1'." % id) - except: - result = resError(Http500, "Unable to update document '$1'." % id) - -proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse = - var apply = true - let jbody = body.parseJson - if jbody.kind != JArray: - return resError(Http400, "Bad request: PATCH request body is not an array.") - var options = newQueryOptions() - options.select = @["documents.id AS id", "created", "modified", "data"] - let doc = LS.store.retrieveRawDocument(id, options) - if doc == "": - return resDocumentNotFound(id) - let jdoc = doc.parseJson - var tags = newSeq[string]() - var origTags = newSeq[string]() - for tag in jdoc["tags"].items: - tags.add(tag.str) - origTags.add(tag.str) - var data: JsonNode - var origData: JsonNode - if tags.contains("$subtype:json"): - try: - origData = jdoc["data"].getStr.parseJson - data = origData.copy - except: - discard - var c = 1 - for item in jbody.items: - if item.hasKey("op") and item.hasKey("path"): - if not item.hasKey("value"): - item["value"] = %"" - try: - apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) - if not apply: - break - except: - return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) - else: - return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c) - c.inc - if apply: - # when document is not JSON the origData is not defined - # the extra check allows editing tags for non-JSON documents - if origData != nil and origData.len > 0 and origData != data: - try: - var doc = LS.store.updateDocument(id, data.pretty, "application/json") - if doc == "": - return resError(Http500, "Unable to patch document '$1'." % id) - except: - return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg()) - if origTags != tags: - try: - for t1 in jdoc["tags"].items: - discard LS.store.destroyTag(t1.str, id, true) - for t2 in tags: - if t2 != "": - LS.store.createTag(t2, id, true) - except: - return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) - return LS.getRawDocument(id, newQueryOptions(), req) - -# Main routing - -proc options*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - case resource: - of "info": - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - if id != "": - return resError(Http404, "Info '$1' not found." 
% id) - else: - result.code = Http204 - result.content = "" - of "dir": - result.code = Http204 - result.content = "" - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - of "tags": - result.code = Http204 - result.content = "" - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - of "indexes": - result.code = Http204 - result.content = "" - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - if id != "": - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - else: - result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE" - else: - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - of "docs": - var folder: string - if id.isFolder: - folder = id - if folder.len > 0: - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS, POST, PUT" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST, PUT" - elif id != "": - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" - result.headers["Allow-Patch"] = "application/json-patch+json" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" - else: - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS, POST" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST" - of "stores": - result.code = Http204 - result.content = "" - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - if id != "": - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - else: - 
result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE" - else: - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - else: - discard # never happens really. - -proc head*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - var options = newQueryOptions() - options.select = @["documents.id AS id", "created", "modified"] - if id.isFolder: - options.folder = id - try: - parseQueryOptions(req.url.query, options); - if id != "" and options.folder == "": - result = LS.getRawDocument(id, options, req) - result.content = "" - else: - result = LS.getRawDocuments(options, req) - result.content = "" - except: - return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) - -proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - case resource: - of "docs": - var options = newQueryOptions() - if id.isFolder: - options.folder = id - if req.url.query.contains("contents=false"): - options.select = @["documents.id AS id", "created", "modified"] - try: - parseQueryOptions(req.url.query, options); - if id != "" and options.folder == "": - if req.url.query.contains("raw=true") or req.headers.hasKey("Accept") and req.headers["Accept"] == "application/json": - return LS.getRawDocument(id, options, req) - else: - return LS.getDocument(id, options, req) - else: - return LS.getRawDocuments(options, req) - except: - let e = getCurrentException() - let trace = e.getStackTrace() - echo trace - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "tags": - var options = newQueryOptions() - try: - parseQueryOptions(req.url.query, options); - if id != "": - return LS.getTag(id, options, req) - else: - return LS.getTags(options, req) - except: - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "indexes": - var options = newQueryOptions() - try: - parseQueryOptions(req.url.query, options); - if id != "": - return LS.getIndex(id, options, req) - else: - return LS.getIndexes(options, req) - except: - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "stores": - var options = newQueryOptions() - try: - parseQueryOptions(req.url.query, options); - if id != "": - return LS.getStore(id, options, req) - else: - return LS.getStores(options, req) - except: - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "info": - if id != "": - return resError(Http404, "Info '$1' not found." % id) - return LS.getInfo(req) - else: - discard # never happens really. 
- -proc post*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - var ct = "text/plain" - if req.headers.hasKey("Content-Type"): - ct = req.headers["Content-Type"] - return LS.postDocument(req.body.strip, ct, id, req) - -proc put*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - if id != "": - if resource == "indexes": - var field = "" - try: - field = parseJson(req.body.strip)["field"].getStr - except: - return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) - return LS.putIndex(id, field, req) - elif resource == "stores": - var config = newJNull() - try: - config = parseJson(req.body) - except: - return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) - return LS.putStore(id, config, req) - else: # Assume docs - var ct = "text/plain" - if req.headers.hasKey("Content-Type"): - ct = req.headers["Content-Type"] - return LS.putDocument(id, req.body.strip, ct, req) - else: - return resError(Http400, "Bad request: document ID must be specified in PUT requests.") - -proc delete*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - if id != "": - if resource == "indexes": - return LS.deleteIndex(id, req) - elif resource == "stores": - return LS.deleteStore(id, req) - else: # Assume docs - return LS.deleteDocument(id, req) - else: - return resError(Http400, "Bad request: document ID must be specified in DELETE requests.") - -proc patch*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - if id != "": - return LS.patchDocument(id, req.body, req) - else: - return resError(Http400, "Bad request: document ID must be specified in PATCH requests.") - -proc serveFile*(req: LSRequest, LS: LiteStore, id: string): LSResponse = - let path = LS.directory / id - var reqMethod = $req.reqMethod - if req.headers.hasKey("X-HTTP-Method-Override"): - reqMethod = req.headers["X-HTTP-Method-Override"] - case reqMethod.toUpperAscii: - of "OPTIONS": - return validate(req, LS, "dir", id, options) - of "GET": - if path.fileExists: - try: - let contents = path.readFile - let parts = path.splitFile - if CONTENT_TYPES.hasKey(parts.ext): - result.headers = CONTENT_TYPES[parts.ext].ctHeader - else: - result.headers = ctHeader("text/plain") - setOrigin(LS, req, result.headers) - result.content = contents - result.code = Http200 - except: - return resError(Http500, "Unable to read file '$1'." % path) - else: - return resError(Http404, "File '$1' not found." 
% path) - else: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - -proc route*(req: LSRequest, LS: LiteStore, resource = "docs", id = ""): LSResponse = - var reqMethod = $req.reqMethod - if req.headers.hasKey("X-HTTP-Method-Override"): - reqMethod = req.headers["X-HTTP-Method-Override"] - LOG.debug("ROUTE - resource: " & resource & " id: " & id) - case reqMethod.toUpperAscii: - of "POST": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, post) - of "PUT": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, put) - of "DELETE": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, delete) - of "HEAD": - return validate(req, LS, resource, id, head) - of "OPTIONS": - return validate(req, LS, resource, id, options) - of "GET": - return validate(req, LS, resource, id, get) - of "PATCH": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, patch) - else: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - -proc multiRoute(req: LSRequest, resource, id: string): LSResponse = - var matches = @["", "", ""] - if req.url.path.find(PEG_STORE_URL, matches) != -1: - let id = matches[0] - let path = "/v7/" & matches[1] - matches = @["", "", ""] - discard path.find(PEG_URL, matches) - return req.route(LSDICT[id], matches[1], matches[2]) - return req.route(LS, resource, id) - -proc newSimpleLSRequest(meth: HttpMethod, resource, id, body = "", params = "", headers = newHttpHeaders()): LSRequest = - result.reqMethod = meth - result.body = body - result.headers = headers - result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", resource, id, params]) - -proc get(resource, id: string, params = ""): LSResponse = - return newSimpleLSRequest(HttpGet, resource, id, "", params).multiRoute(resource, id) - -proc post(resource, folder, body: string, ct = ""): LSResponse = - var headers = newHttpHeaders() - if ct != "": - headers["Content-Type"] = ct - return newSimpleLSRequest(HttpPost, resource, folder, body, "", headers).multiRoute(resource, folder & "/") - -proc put(resource, id, body: string, ct = ""): LSResponse = - var headers = newHttpHeaders() - if ct != "": - headers["Content-Type"] = ct - return newSimpleLSRequest(HttpPut, resource, id, body, "", headers).multiRoute(resource, id) - -proc patch(resource, id, body: string): LSResponse = - var headers = newHttpHeaders() - headers["Content-Type"] = "application/json" - return newSimpleLSRequest(HttpPatch, resource, id, body, "", headers).multiRoute(resource, id) - -proc delete(resource, id: string): LSResponse = - return newSimpleLSRequest(HttpDelete, resource, id).multiRoute(resource, id) - -proc head(resource, id: string): LSResponse = - return newSimpleLSRequest(HttpHead, resource, id).multiRoute(resource, id) - -proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, origId: string) = - var api_idx = ctx.duk_push_object() - # GET - var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let params = duk_get_string(ctx, 2) - let resp = get($resource, $id, $params) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard 
ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, get, 3) - discard ctx.duk_put_prop_string(api_idx, "get") - # POST - var post: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let folder = duk_get_string(ctx, 1) - let body = duk_get_string(ctx, 2) - let ct = duk_get_string(ctx, 3) - let resp = post($resource, $folder, $body, $ct) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, post, 4) - discard ctx.duk_put_prop_string(api_idx, "post") - # PUT - var put: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let body = duk_get_string(ctx, 2) - let ct = duk_get_string(ctx, 3) - let resp = put($resource, $id, $body, $ct) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, put, 4) - discard ctx.duk_put_prop_string(api_idx, "put") - # PATCH - var patch: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let body = duk_get_string(ctx, 2) - let resp = patch($resource, $id, $body) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, patch, 3) - discard ctx.duk_put_prop_string(api_idx, "patch") - # DELETE - var delete: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let resp = delete($resource, $id) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, delete, 2) - discard ctx.duk_put_prop_string(api_idx, "delete") - # HEAD - var head: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let resp = head($resource, $id) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, head, 2) - discard ctx.duk_put_prop_string(api_idx, "head") - discard ctx.duk_put_global_string("$store") - -proc jError(ctx: DTContext): LSResponse = - return resError(Http500, "Middleware Error: " & $ctx.duk_safe_to_string(-1)) - -proc getMiddleware*(LS: LiteStore, id: string): string = - if not LS.middleware.hasKey(id): - # Attempt to retrieve resource from system documents - let options = newQueryOptions(true) - let doc = LS.store.retrieveDocument("middleware/" & id & ".js", options) - result = doc.data - if result 
== "": - LOG.warn("Middleware '$1' not found" % id) - else: - result = LS.middleware[id] - -proc getMiddlewareSeq(LS: LiteStore, resource, id, meth: string): seq[string] = - result = newSeq[string]() - if LS.config.kind != JObject or not LS.config.hasKey("resources"): - return - var reqUri = "/" & resource & "/" & id - if reqUri[^1] == '/': - reqUri.removeSuffix({'/'}) - let parts = reqUri.split("/") - let ancestors = parts[1..parts.len-2] - var currentPath = "" - var currentPaths = "" - for p in ancestors: - currentPath &= "/" & p - currentPaths = currentPath & "/*" - if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("middleware"): - let mw = LS.config["resources"][currentPaths][meth]["middleware"] - if (mw.kind == JArray): - for m in mw: - result.add m.getStr - if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): - let mw = LS.config["resources"][reqUri][meth]["middleware"] - if (mw.kind == JArray): - for m in mw: - result.add m.getStr - -proc execute*(req: var LSRequest, LS: LiteStore, resource, id: string): LSResponse = - let middleware = getMiddlewareSeq(LS, resource, id, $req.reqMethod) - if middleware.len > 0: - LOG.debug("Middleware: " & middleware.join(" -> ")); - if middleware.len == 0: - return route(req, LS, resource, id) - var jReq = $(%* req) - LOG.debug("Request: " & jReq) - var jRes = """{ - "code": 200, - "content": {}, - "final": false, - "headers": { - "Access-Control-Allow-Origin": "*", - "Access-Control-Allow-Headers": "Authorization, Content-Type", - "Server": "$1", - "Content-Type": "application/json" - } - }""" % [LS.appname & "/" & LS.appversion] - var context = "{}" - # Create execution context - var ctx = duk_create_heap_default() - duk_console_init(ctx) - duk_print_alert_init(ctx) - LS.registerStoreApi(ctx, resource, id) - if ctx.duk_peval_string(cstring("($1)" % $jReq)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$req") - if ctx.duk_peval_string(cstring("($1)" % $jRes)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$res") - if ctx.duk_peval_string(cstring("($1)" % $context)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$ctx") - # Middleware-specific functions - var i = 0 - var abort = 0 - while abort != 1 and i < middleware.len: - let code = LS.getMiddleware(middleware[i]) - LOG.debug("Evaluating middleware '$1'" % middleware[i]) - if ctx.duk_peval_string(code.cstring) != 0: - return jError(ctx) - abort = ctx.duk_get_boolean(-1) - i.inc - # Retrieve response, and request - if ctx.duk_peval_string("JSON.stringify($res);") != 0: - return jError(ctx) - let fRes = parseJson($(ctx.duk_get_string(-1))).newLSResponse - if ctx.duk_peval_string("JSON.stringify($req);") != 0: - return jError(ctx) - let fReq = parseJson($(ctx.duk_get_string(-1))).newLSRequest() - ctx.duk_destroy_heap(); - LOG.debug("abort: $1", [$abort]) - if abort == 1: - return fRes - return route(fReq, LS, resource, id) +import + asynchttpserver, + strutils, + sequtils, + cgi, + strtabs, + pegs, + json, + os, + uri, + tables, + times +import + types, + contenttypes, + core, + utils, + logger, + duktape + +# Helper procs + +proc sqlOp(op: string): string = + let table = newStringTable() + table["not eq"] = "<>" + table["eq"] = "==" + table["gt"] = ">" + table["gte"] = ">=" + table["lt"] = "<" + table["lte"] = "<=" + table["contains"] = "contains" 
+ table["like"] = "like" + return table[op] + +proc orderByClauses*(str: string): string = + var clauses = newSeq[string]() + var fragments = str.split(",") + let clause = peg""" + clause <- {[-+]} {field} + field <- ('id' / 'created' / 'modified' / path) + path <- '$' (objField)+ + ident <- [a-zA-Z0-9_]+ + objField <- '.' ident + """ + for f in fragments: + var matches = @["", ""] + if f.find(clause, matches) != -1: + var field = matches[1] + if field[0] == '$': + field = "json_extract(documents.data, '$1')" % matches[1] + if matches[0] == "-": + clauses.add("$1 COLLATE NOCASE DESC" % field) + else: + clauses.add("$1 COLLATE NOCASE ASC" % field) + return clauses.join(", ") + +proc selectClause*(str: string, options: var QueryOptions) = + let tokens = """ + path <- '$' (objItem / objField)+ + ident <- [a-zA-Z0-9_]+ + objIndex <- '[' \d+ ']' + objField <- '.' ident + objItem <- objField objIndex + """ + let fields = peg(""" + fields <- ^{field} (\s* ',' \s* {field})*$ + field <- path \s+ ('as' / 'AS') \s+ ident + """ & tokens) + let field = peg(""" + field <- ^{path} \s+ ('as' / 'AS') \s+ {ident}$ + """ & tokens) + var fieldMatches = newSeq[string](10) + if str.strip.match(fields, fieldMatches): + for m in fieldMatches: + if m.len > 0: + var rawTuple = newSeq[string](2) + if m.match(field, rawTuple): + options.jsonSelect.add((path: rawTuple[0], alias: rawTuple[1])) + +proc filterClauses*(str: string, options: var QueryOptions) = + let tokens = """ + operator <- 'not eq' / 'eq' / 'gte' / 'gt' / 'lte' / 'lt' / 'contains' / 'like' + value <- string / number / 'null' / 'true' / 'false' + string <- '"' ('\\"' . / [^"])* '"' + number <- '-'? '0' / [1-9] [0-9]* ('.' [0-9]+)? (( 'e' / 'E' ) ( '+' / '-' )? [0-9]+)? + path <- '$' (objItem / objField)+ + ident <- [a-zA-Z0-9_]+ + objIndex <- '[' \d+ ']' + objField <- '.' 
ident + objItem <- objField objIndex + """ + let clause = peg(""" + clause <- {path} \s+ {operator} \s+ {value} + """ & tokens) + let andClauses = peg(""" + andClauses <- ^{clause} (\s+ 'and' \s+ {clause})*$ + clause <- path \s+ operator \s+ value + """ & tokens) + let orClauses = peg(""" + orClauses <- ^{andClauses} (\s+ 'or' \s+ {andClauses})*$ + andClauses <- clause (\s+ 'and' \s+ clause)* + clause <- path \s+ operator \s+ value + """ & tokens) + var orClausesMatches = newSeq[string](10) + discard str.strip.match(orClauses, orClausesMatches) + var parsedClauses = newSeq[seq[seq[string]]]() + for orClause in orClausesMatches: + if orClause.len > 0: + var andClausesMatches = newSeq[string](10) + discard orClause.strip.match(andClauses, andClausesMatches) + var parsedAndClauses = newSeq[seq[string]]() + for andClause in andClausesMatches: + if andClause.len > 0: + var clauses = newSeq[string](3) + discard andClause.strip.match(clause, clauses) + clauses[1] = sqlOp(clauses[1]) + if clauses[2] == "true": + clauses[2] = "1" + elif clauses[2] == "false": + clauses[2] = "0" + parsedAndClauses.add clauses + if parsedAndClauses.len > 0: + parsedClauses.add parsedAndClauses + if parsedClauses.len == 0: + return + var currentArr = 0 + var tables = newSeq[string]() + let resOrClauses = parsedClauses.map do (it: seq[seq[string]]) -> string: + let resAndClauses = it.map do (x: seq[string]) -> string: + if x[1] == "contains": + currentArr = currentArr + 1 + tables.add "json_each(documents.data, '$1') AS arr$2" % [x[0], $currentArr] + return "arr$1.value == $2" % [$currentArr, x[2]] + else: + var arr = @[x[0], x[1], x[2]] + if x[1] == "like": + arr[2] = x[2].replace('*', '%') + return "json_extract(documents.data, '$1') $2 $3 " % arr + return resAndClauses.join(" AND ") + options.tables = options.tables & tables + options.jsonFilter = resOrClauses.join(" OR ") + +proc parseQueryOption*(fragment: string, options: var QueryOptions) = + if fragment == "": + return + var pair = fragment.split('=') + if pair.len < 2 or pair[1] == "": + raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) + try: + pair[1] = pair[1].replace("+", "%2B").decodeURL + except CatchableError: + raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) + case pair[0]: + of "filter": + filterClauses(pair[1], options) + if options.jsonFilter == "": + raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[1].replace("\"", "\\\"")) + of "select": + selectClause(pair[1], options) + if options.jsonSelect.len == 0: + raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[1].replace("\"", "\\\"")) + of "like": + options.like = pair[1] + of "search": + options.search = pair[1] + of "tags": + options.tags = pair[1] + of "created-after": + try: + options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg()) + of "created-before": + try: + options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg()) + of "modified-after": + try: + options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg()) + of 
"modified-before": + try: + options.modifiedBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg()) + of "limit": + try: + options.limit = pair[1].parseInt + except CatchableError: + raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) + of "offset": + try: + options.offset = pair[1].parseInt + except CatchableError: + raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) + of "sort": + let orderby = pair[1].orderByClauses() + if orderby != "": + options.orderby = orderby + else: + raise newException(EInvalidRequest, "Invalid sort value: $1" % pair[1]) + of "contents", "raw": + discard + else: + discard + +proc parseQueryOptions*(querystring: string, options: var QueryOptions) = + var q = querystring + if q.startsWith("?"): + q = q[1 .. q.len - 1] + var fragments = q.split('&') + for f in fragments: + f.parseQueryOption(options) + +proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse = + if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch: + var ct = "" + let body = req.body.strip + if body == "": + return resError(Http400, "Bad request: No content specified for document.") + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + case ct: + of "application/json": + try: + discard body.parseJson() + except CatchableError: + return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) + else: + discard + return cb(req, LS, resource, id) + +proc patchTag(tags: var seq[string], index: int, op, path, value: string): bool = + LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." % [op, $index, $value, $tags.len]) + case op: + of "remove": + let tag = tags[index] + if not tag.startsWith("$"): + tags[index] = "" # Not removing element, otherwise subsequent indexes won't work! + else: + raise newException(EInvalidRequest, "cannot remove system tag: $1" % tag) + of "add": + if value.match(PEG_USER_TAG): + tags.insert(value, index) + else: + if value.strip == "": + raise newException(EInvalidRequest, "tag not specified." % value) + else: + raise newException(EInvalidRequest, "invalid tag: $1" % value) + of "replace": + if value.match(PEG_USER_TAG): + if tags[index].startsWith("$"): + raise newException(EInvalidRequest, "cannot replace system tag: $1" % tags[index]) + else: + tags[index] = value + else: + if value.strip == "": + raise newException(EInvalidRequest, "tag not specified." 
% value) + else: + raise newException(EInvalidRequest, "invalid tag: $1" % value) + of "test": + if tags[index] != value: + return false + else: + raise newException(EInvalidRequest, "invalid patch operation: $1" % op) + return true + +proc patchData*(data: var JsonNode, origData: JsonNode, op: string, path: string, value: JsonNode): bool = + LOG.debug("- PATCH -> $1 path $2 with $3" % [op, path, $value]) + var keys = path.replace(peg"^\/data\/", "").split("/") + if keys.len == 0: + raise newException(EInvalidRequest, "no valid path specified: $1" % path) + var d = data + var dorig = origData + var c = 1 + for key in keys: + if d.kind == JArray: + try: + var index = key.parseInt + if c >= keys.len: + d.elems[index] = value + case op: + of "remove": + d.elems.del(index) + of "add": + d.elems.insert(value, index) + of "replace": + d.elems[index] = value + of "test": + if d.elems[index] != value: + return false + else: + raise newException(EInvalidRequest, "invalid patch operation: $1" % op) + else: + d = d[index] + dorig = dorig[index] + except CatchableError: + raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path]) + else: + if c >= keys.len: + case op: + of "remove": + if d.hasKey(key): + d.delete(key) + else: + raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + of "add": + d[key] = value + of "replace": + if d.hasKey(key): + d[key] = value + else: + raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + of "test": + if dorig.hasKey(key): + if dorig[key] != value: + return false + else: + raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + else: + raise newException(EInvalidRequest, "invalid patch operation: $1" % op) + else: + d = d[key] + dorig = dorig[key] + c += 1 + return true + + +proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[string], op: string, path: string, value: JsonNode): bool = + var matches = @[""] + let p = peg""" + path <- ^tagPath / fieldPath$ + tagPath <- '\/tags\/' {\d+} + fieldPath <- '\/data\/' ident ('\/' ident)* + ident <- [a-zA-Z0-9_]+ / '-' + """ + if path.find(p, matches) == -1: + raise newException(EInvalidRequest, "cannot patch path '$1'" % path) + if path.match(peg"^\/tags\/"): + let index = matches[0].parseInt + if value.kind != JString: + raise newException(EInvalidRequest, "tag '$1' is not a string." 
% $value) + let tag = value.getStr + return patchTag(tags, index, op, path, tag) + elif tags.contains("$subtype:json"): + return patchData(data, origData, op, path, value) + else: + raise newException(EInvalidRequest, "cannot patch data of a non-JSON document.") + +# Low level procs + +proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveTag(id, options) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + if doc == newJNull(): + result = resTagNotFound(id) + else: + result.content = $doc + result.code = Http200 + +proc getStore*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + if (not LSDICT.hasKey(id)): + return resStoreNotFound(id) + let store = LSDICT[id] + var doc = newJObject() + doc["id"] = %id + doc["file"] = %store.file + doc["config"] = store.config + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = $doc + result.code = Http200 + +proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveIndex(id, options) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + if doc == newJNull(): + result = resIndexNotFound(id) + else: + result.content = $doc + result.code = Http200 + +proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveRawDocument(id, options) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + if doc == "": + result = resDocumentNotFound(id) + else: + result.content = doc + result.code = Http200 + +proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveDocument(id, options) + if doc.data == "": + result = resDocumentNotFound(id) + else: + result.headers = doc.contenttype.ctHeader + setOrigin(LS, req, result.headers) + result.content = doc.data + result.code = Http200 + +proc deleteDocument*(LS: LiteStore, id: string, req: LSRequest): LSResponse = + let doc = LS.store.retrieveDocument(id) + if doc.data == "": + result = resDocumentNotFound(id) + else: + try: + let res = LS.store.destroyDocument(id) + if res == 0: + result = resError(Http500, "Unable to delete document '$1'" % id) + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Content-Length"] = "0" + result.content = "" + result.code = Http204 + except CatchableError: + result = resError(Http500, "Unable to delete document '$1'" % id) + +proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = + var options = options + let t0 = cpuTime() + let docs = LS.store.retrieveTags(options) + let orig_limit = options.limit + let orig_offset = options.offset + options.limit = 0 + options.offset = 0 + options.select = @["COUNT(tag_id)"] + let total = LS.store.countTags(prepareSelectTagsQuery(options), options.like.replace("*", "%")) + var content = newJObject() + if options.like != "": + content["like"] = %(options.like.decodeURL) + if orig_limit > 0: + content["limit"] = %orig_limit + if orig_offset > 0: + content["offset"] = %orig_offset + content["total"] = %total + content["execution_time"] = %(cputime()-t0) + content["results"] = docs + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc getStores(LS: 
LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = + let t0 = cpuTime() + var docs = newJArray() + for k, v in LSDICT.pairs: + var store = newJObject() + store["id"] = %k + store["file"] = %v.file + store["config"] = v.config + docs.add(store) + var content = newJObject() + content["total"] = %LSDICT.len + content["execution_time"] = %(cputime()-t0) + content["results"] = docs + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = + var options = options + let t0 = cpuTime() + let docs = LS.store.retrieveIndexes(options) + let orig_limit = options.limit + let orig_offset = options.offset + options.limit = 0 + options.offset = 0 + options.select = @["COUNT(name)"] + let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), options.like.replace("*", "%")) + var content = newJObject() + if options.like != "": + content["like"] = %(options.like.decodeURL) + if orig_limit > 0: + content["limit"] = %orig_limit + if orig_offset > 0: + content["offset"] = %orig_offset + content["total"] = %total + content["execution_time"] = %(cputime()-t0) + content["results"] = docs + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = + var options = options + let t0 = cpuTime() + let docs = LS.store.retrieveRawDocuments(options) + let orig_limit = options.limit + let orig_offset = options.offset + options.limit = 0 + options.offset = 0 + options.select = @["COUNT(docid)"] + let total = LS.store.retrieveRawDocuments(options)[0].num + var content = newJObject() + if options.folder != "": + content["folder"] = %(options.folder) + if options.search != "": + content["search"] = %(options.search.decodeURL) + if options.tags != "": + content["tags"] = newJArray() + for tag in options.tags.replace("+", "%2B").decodeURL.split(","): + content["tags"].add(%tag) + if orig_limit > 0: + content["limit"] = %orig_limit + if orig_offset > 0: + content["offset"] = %orig_offset + if options.orderby != "": + content["sort"] = %options.orderby + content["total"] = %total + content["execution_time"] = %(cputime()-t0) + content["results"] = docs + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc getInfo*(LS: LiteStore, req: LSRequest): LSResponse = + let info = LS.store.retrieveInfo() + let version = info[0] + let total_documents = info[1] + let total_tags = LS.store.countTags() + let tags = LS.store.retrieveTagsWithTotals() + var content = newJObject() + content["version"] = %(LS.appname & " v" & LS.appversion) + content["datastore_version"] = %version + content["api_version"] = %7 + content["size"] = %($((LS.file.getFileSize().float/(1024*1024)).formatFloat(ffDecimal, 2)) & " MB") + content["read_only"] = %LS.readonly + content["log_level"] = %LS.loglevel + if LS.directory.len == 0: + content["directory"] = newJNull() + else: + content["directory"] = %LS.directory + content["mount"] = %LS.mount + if LS.config != newJNull() and LS.config.hasKey("stores") and LS.config["stores"].len > 0: + content["additional_stores"] = %toSeq(LS.config["stores"].keys) + else: + content["additional_stores"] = newJArray() + if LS.auth != newJNull(): + 
content["auth"] = %true + else: + content["auth"] = %false + content["total_documents"] = %total_documents + content["total_tags"] = %total_tags + content["tags"] = tags + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc putIndex*(LS: LiteStore, id, field: string, req: LSRequest): LSResponse = + try: + if (not id.match(PEG_INDEX)): + return resError(Http400, "invalid index ID: $1" % id) + if (not field.match(PEG_JSON_FIELD)): + return resError(Http400, "invalid field path: $1" % field) + if (LS.store.retrieveIndex(id) != newJNull()): + return resError(Http409, "Index already exists: $1" % id) + LS.store.createIndex(id, field) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field] + result.code = Http201 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to create index.") + +proc putStore*(LS: LiteStore, id: string, config: JsonNode, req: LSRequest): LSResponse = + try: + if (not id.match(PEG_STORE) or id == "master"): + return resError(Http400, "invalid store ID: $1" % id) + if (LSDICT.hasKey(id)): + return resError(Http409, "Store already exists: $1" % id) + let store = LS.addStore(id, id & ".db", config) + LS.updateConfig() + LSDICT[id] = store + result = getStore(LS, id, newQueryOptions(), req) + result.code = Http201 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to create store.") + +proc deleteIndex*(LS: LiteStore, id: string, req: LSRequest): LSResponse = + if (not id.match(PEG_INDEX)): + return resError(Http400, "invalid index ID: $1" % id) + if (LS.store.retrieveIndex(id) == newJNull()): + return resError(Http404, "Index not found: $1" % id) + try: + LS.store.dropIndex(id) + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Content-Length"] = "0" + result.content = "" + result.code = Http204 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to delete index.") + +proc deleteStore*(LS: LiteStore, id: string, req: LSRequest): LSResponse = + if (not id.match(PEG_STORE)): + return resError(Http400, "invalid store ID: $1" % id) + if (not LSDICT.hasKey(id)): + return resError(Http404, "Store not found: $1" % id) + try: + LSDICT.del(id) + if LS.config.hasKey("stores") and LS.config["stores"].hasKey(id): + LS.config["stores"].delete(id) + LS.updateConfig() + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Content-Length"] = "0" + result.content = "" + result.code = Http204 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to delete index.") + +proc postDocument*(LS: LiteStore, body: string, ct: string, folder="", req: LSRequest): LSResponse = + if not folder.isFolder: + return resError(Http400, "Invalid folder specified when creating document: $1" % folder) + try: + var doc = LS.store.createDocument(folder, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http201 + else: + result = resError(Http500, "Unable to create document.") + except CatchableError: + eWarn() + result = resError(Http500, "Unable to create document.") + +proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, req: LSRequest): LSResponse = + if id.isFolder: + return resError(Http400, "Invalid ID '$1' (Document IDs cannot end with '/')." 
% id) + let doc = LS.store.retrieveDocument(id) + if doc.data == "": + # Create a new document + var doc = LS.store.createDocument(id, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http201 + else: + result = resError(Http500, "Unable to create document.") + else: + # Update existing document + try: + var doc = LS.store.updateDocument(id, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http200 + else: + result = resError(Http500, "Unable to update document '$1'." % id) + except CatchableError: + result = resError(Http500, "Unable to update document '$1'." % id) + +proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse = + var apply = true + let jbody = body.parseJson + if jbody.kind != JArray: + return resError(Http400, "Bad request: PATCH request body is not an array.") + var options = newQueryOptions() + options.select = @["documents.id AS id", "created", "modified", "data"] + let doc = LS.store.retrieveRawDocument(id, options) + if doc == "": + return resDocumentNotFound(id) + let jdoc = doc.parseJson + var tags = newSeq[string]() + var origTags = newSeq[string]() + for tag in jdoc["tags"].items: + tags.add(tag.str) + origTags.add(tag.str) + var data: JsonNode + var origData: JsonNode + if tags.contains("$subtype:json"): + try: + origData = jdoc["data"].getStr.parseJson + data = origData.copy + except CatchableError: + discard + var c = 1 + for item in jbody.items: + if item.hasKey("op") and item.hasKey("path"): + if not item.hasKey("value"): + item["value"] = %"" + try: + apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) + if not apply: + break + except CatchableError: + return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) + else: + return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c) + c.inc + if apply: + # when document is not JSON the origData is not defined + # the extra check allows editing tags for non-JSON documents + if origData != nil and origData.len > 0 and origData != data: + try: + var doc = LS.store.updateDocument(id, data.pretty, "application/json") + if doc == "": + return resError(Http500, "Unable to patch document '$1'." % id) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg()) + if origTags != tags: + try: + for t1 in jdoc["tags"].items: + discard LS.store.destroyTag(t1.str, id, true) + for t2 in tags: + if t2 != "": + LS.store.createTag(t2, id, true) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) + return LS.getRawDocument(id, newQueryOptions(), req) + +# Main routing + +proc options*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + case resource: + of "info": + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + if id != "": + return resError(Http404, "Info '$1' not found." 
% id) + else: + result.code = Http204 + result.content = "" + of "dir": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "tags": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "indexes": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + if id != "": + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE" + else: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "docs": + var folder: string + if id.isFolder: + folder = id + if folder.len > 0: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, POST, PUT" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST, PUT" + elif id != "": + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" + result.headers["Allow-Patch"] = "application/json-patch+json" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" + else: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, POST" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST" + of "stores": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + if id != "": + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + 
result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE" + else: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + discard # never happens really. + +proc head*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + var options = newQueryOptions() + options.select = @["documents.id AS id", "created", "modified"] + if id.isFolder: + options.folder = id + try: + parseQueryOptions(req.url.query, options); + if id != "" and options.folder == "": + result = LS.getRawDocument(id, options, req) + result.content = "" + else: + result = LS.getRawDocuments(options, req) + result.content = "" + except CatchableError: + return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) + +proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + case resource: + of "docs": + var options = newQueryOptions() + if id.isFolder: + options.folder = id + if req.url.query.contains("contents=false"): + options.select = @["documents.id AS id", "created", "modified"] + try: + parseQueryOptions(req.url.query, options); + if id != "" and options.folder == "": + if req.url.query.contains("raw=true") or req.headers.hasKey("Accept") and req.headers["Accept"] == "application/json": + return LS.getRawDocument(id, options, req) + else: + return LS.getDocument(id, options, req) + else: + return LS.getRawDocuments(options, req) + except CatchableError: + let e = getCurrentException() + let trace = e.getStackTrace() + echo trace + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "tags": + var options = newQueryOptions() + try: + parseQueryOptions(req.url.query, options); + if id != "": + return LS.getTag(id, options, req) + else: + return LS.getTags(options, req) + except CatchableError: + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "indexes": + var options = newQueryOptions() + try: + parseQueryOptions(req.url.query, options); + if id != "": + return LS.getIndex(id, options, req) + else: + return LS.getIndexes(options, req) + except CatchableError: + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "stores": + var options = newQueryOptions() + try: + parseQueryOptions(req.url.query, options); + if id != "": + return LS.getStore(id, options, req) + else: + return LS.getStores(options, req) + except CatchableError: + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "info": + if id != "": + return resError(Http404, "Info '$1' not found." % id) + return LS.getInfo(req) + else: + discard # never happens really. 
+ +proc post*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + var ct = "text/plain" + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + return LS.postDocument(req.body.strip, ct, id, req) + +proc put*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + if resource == "indexes": + var field = "" + try: + field = parseJson(req.body.strip)["field"].getStr + except CatchableError: + return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) + return LS.putIndex(id, field, req) + elif resource == "stores": + var config = newJNull() + try: + config = parseJson(req.body) + except CatchableError: + return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) + return LS.putStore(id, config, req) + else: # Assume docs + var ct = "text/plain" + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + return LS.putDocument(id, req.body.strip, ct, req) + else: + return resError(Http400, "Bad request: document ID must be specified in PUT requests.") + +proc delete*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + if resource == "indexes": + return LS.deleteIndex(id, req) + elif resource == "stores": + return LS.deleteStore(id, req) + else: # Assume docs + return LS.deleteDocument(id, req) + else: + return resError(Http400, "Bad request: document ID must be specified in DELETE requests.") + +proc patch*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + return LS.patchDocument(id, req.body, req) + else: + return resError(Http400, "Bad request: document ID must be specified in PATCH requests.") + +proc serveFile*(req: LSRequest, LS: LiteStore, id: string): LSResponse = + let path = LS.directory / id + var reqMethod = $req.reqMethod + if req.headers.hasKey("X-HTTP-Method-Override"): + reqMethod = req.headers["X-HTTP-Method-Override"] + case reqMethod.toUpperAscii: + of "OPTIONS": + return validate(req, LS, "dir", id, options) + of "GET": + if path.fileExists: + try: + let contents = path.readFile + let parts = path.splitFile + if CONTENT_TYPES.hasKey(parts.ext): + result.headers = CONTENT_TYPES[parts.ext].ctHeader + else: + result.headers = ctHeader("text/plain") + setOrigin(LS, req, result.headers) + result.content = contents + result.code = Http200 + except CatchableError: + return resError(Http500, "Unable to read file '$1'." % path) + else: + return resError(Http404, "File '$1' not found." 
% path) + else: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + +proc route*(req: LSRequest, LS: LiteStore, resource = "docs", id = ""): LSResponse = + var reqMethod = $req.reqMethod + if req.headers.hasKey("X-HTTP-Method-Override"): + reqMethod = req.headers["X-HTTP-Method-Override"] + LOG.debug("ROUTE - resource: " & resource & " id: " & id) + case reqMethod.toUpperAscii: + of "POST": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, post) + of "PUT": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, put) + of "DELETE": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, delete) + of "HEAD": + return validate(req, LS, resource, id, head) + of "OPTIONS": + return validate(req, LS, resource, id, options) + of "GET": + return validate(req, LS, resource, id, get) + of "PATCH": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, patch) + else: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + +proc multiRoute(req: LSRequest, resource, id: string): LSResponse = + var matches = @["", "", ""] + if req.url.path.find(PEG_STORE_URL, matches) != -1: + let id = matches[0] + let path = "/v7/" & matches[1] + matches = @["", "", ""] + discard path.find(PEG_URL, matches) + return req.route(LSDICT[id], matches[1], matches[2]) + return req.route(LS, resource, id) + +proc newSimpleLSRequest(meth: HttpMethod, resource = "", id = "", body = "", params = "", headers = newHttpHeaders()): LSRequest = + result.reqMethod = meth + result.body = body + result.headers = headers + result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", resource, id, params]) + +proc get(resource, id: string, params = ""): LSResponse = + return newSimpleLSRequest(HttpGet, resource, id, "", params).multiRoute(resource, id) + +proc post(resource, folder, body: string, ct = ""): LSResponse = + var headers = newHttpHeaders() + if ct != "": + headers["Content-Type"] = ct + return newSimpleLSRequest(HttpPost, resource, folder, body, "", headers).multiRoute(resource, folder & "/") + +proc put(resource, id, body: string, ct = ""): LSResponse = + var headers = newHttpHeaders() + if ct != "": + headers["Content-Type"] = ct + return newSimpleLSRequest(HttpPut, resource, id, body, "", headers).multiRoute(resource, id) + +proc patch(resource, id, body: string): LSResponse = + var headers = newHttpHeaders() + headers["Content-Type"] = "application/json" + return newSimpleLSRequest(HttpPatch, resource, id, body, "", headers).multiRoute(resource, id) + +proc delete(resource, id: string): LSResponse = + return newSimpleLSRequest(HttpDelete, resource, id).multiRoute(resource, id) + +proc head(resource, id: string): LSResponse = + return newSimpleLSRequest(HttpHead, resource, id).multiRoute(resource, id) + +proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, origId: string) = + var api_idx = ctx.duk_push_object() + # GET + var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let params = duk_get_string(ctx, 2) + let resp = get($resource, $id, $params) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + 
discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, get, 3) + discard ctx.duk_put_prop_string(api_idx, "get") + # POST + var post: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let folder = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let ct = duk_get_string(ctx, 3) + let resp = post($resource, $folder, $body, $ct) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, post, 4) + discard ctx.duk_put_prop_string(api_idx, "post") + # PUT + var put: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let ct = duk_get_string(ctx, 3) + let resp = put($resource, $id, $body, $ct) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, put, 4) + discard ctx.duk_put_prop_string(api_idx, "put") + # PATCH + var patch: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let resp = patch($resource, $id, $body) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, patch, 3) + discard ctx.duk_put_prop_string(api_idx, "patch") + # DELETE + var delete: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let resp = delete($resource, $id) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, delete, 2) + discard ctx.duk_put_prop_string(api_idx, "delete") + # HEAD + var head: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let resp = head($resource, $id) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, head, 2) + discard ctx.duk_put_prop_string(api_idx, "head") + discard ctx.duk_put_global_string("$store") + +proc jError(ctx: DTContext): LSResponse = + return resError(Http500, "Middleware Error: " & $ctx.duk_safe_to_string(-1)) + +proc getMiddleware*(LS: LiteStore, id: string): string = + if not LS.middleware.hasKey(id): + # Attempt to retrieve resource from system documents + let options = newQueryOptions(true) + let doc = LS.store.retrieveDocument("middleware/" & id & ".js", options) + result = doc.data + if 
result == "": + LOG.warn("Middleware '$1' not found" % id) + else: + result = LS.middleware[id] + +proc getMiddlewareSeq(LS: LiteStore, resource, id, meth: string): seq[string] = + result = newSeq[string]() + if LS.config.kind != JObject or not LS.config.hasKey("resources"): + return + var reqUri = "/" & resource & "/" & id + if reqUri[^1] == '/': + reqUri.removeSuffix({'/'}) + let parts = reqUri.split("/") + let ancestors = parts[1..parts.len-2] + var currentPath = "" + var currentPaths = "" + for p in ancestors: + currentPath &= "/" & p + currentPaths = currentPath & "/*" + if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("middleware"): + let mw = LS.config["resources"][currentPaths][meth]["middleware"] + if (mw.kind == JArray): + for m in mw: + result.add m.getStr + if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): + let mw = LS.config["resources"][reqUri][meth]["middleware"] + if (mw.kind == JArray): + for m in mw: + result.add m.getStr + +proc execute*(req: var LSRequest, LS: LiteStore, resource, id: string): LSResponse = + let middleware = getMiddlewareSeq(LS, resource, id, $req.reqMethod) + if middleware.len > 0: + LOG.debug("Middleware: " & middleware.join(" -> ")); + if middleware.len == 0: + return route(req, LS, resource, id) + var jReq = $(%* req) + LOG.debug("Request: " & jReq) + var jRes = """{ + "code": 200, + "content": {}, + "final": false, + "headers": { + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Headers": "Authorization, Content-Type", + "Server": "$1", + "Content-Type": "application/json" + } + }""" % [LS.appname & "/" & LS.appversion] + var context = "{}" + # Create execution context + var ctx = duk_create_heap_default() + duk_console_init(ctx) + duk_print_alert_init(ctx) + LS.registerStoreApi(ctx, resource, id) + if ctx.duk_peval_string(cstring("($1)" % $jReq)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$req") + if ctx.duk_peval_string(cstring("($1)" % $jRes)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$res") + if ctx.duk_peval_string(cstring("($1)" % $context)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$ctx") + # Middleware-specific functions + var i = 0 + var abort = 0 + while abort != 1 and i < middleware.len: + let code = LS.getMiddleware(middleware[i]) + LOG.debug("Evaluating middleware '$1'" % middleware[i]) + if ctx.duk_peval_string(code.cstring) != 0: + return jError(ctx) + abort = ctx.duk_get_boolean(-1) + i.inc + # Retrieve response, and request + if ctx.duk_peval_string("JSON.stringify($res);") != 0: + return jError(ctx) + let fRes = parseJson($(ctx.duk_get_string(-1))).newLSResponse + if ctx.duk_peval_string("JSON.stringify($req);") != 0: + return jError(ctx) + let fReq = parseJson($(ctx.duk_get_string(-1))).newLSRequest() + ctx.duk_destroy_heap(); + LOG.debug("abort: $1", [$abort]) + if abort == 1: + return fRes + return route(fReq, LS, resource, id)
M src/litestorepkg/lib/api_v8.nim


@@ -1,5 +1,6 @@

import - asynchttpserver, + std/[asynchttpserver, + httpclient, strutils, sequtils, cgi,

@@ -9,7 +10,7 @@ json,

os, uri, tables, - times + times] import types, contenttypes,

@@ -148,17 +149,19 @@ if pair.len < 2 or pair[1] == "":

raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) try: pair[1] = pair[1].replace("+", "%2B").decodeURL - except: + except CatchableError: raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) case pair[0]: of "filter": filterClauses(pair[1], options) if options.jsonFilter == "": - raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[1].replace("\"", "\\\"")) + raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[ + 1].replace("\"", "\\\"")) of "select": selectClause(pair[1], options) if options.jsonSelect.len == 0: - raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[1].replace("\"", "\\\"")) + raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[ + 1].replace("\"", "\\\"")) of "like": options.like = pair[1] of "search":

@@ -168,33 +171,39 @@ options.tags = pair[1]

of "created-after": try: options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, "Invalid created-after value: $1" % + getCurrentExceptionMsg()) of "created-before": try: options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, "Invalid created-before value: $1" % + getCurrentExceptionMsg()) of "modified-after": try: options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, "Invalid modified.after value: $1" % + getCurrentExceptionMsg()) of "modified-before": try: options.modifiedBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, + "Invalid modified-before value: $1" % getCurrentExceptionMsg()) of "limit": try: options.limit = pair[1].parseInt - except: - raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, "Invalid limit value: $1" % + getCurrentExceptionMsg()) of "offset": try: options.offset = pair[1].parseInt - except: - raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, "Invalid offset value: $1" % + getCurrentExceptionMsg()) of "sort": let orderby = pair[1].orderByClauses() if orderby != "":

@@ -214,9 +223,11 @@ var fragments = q.split('&')

for f in fragments: f.parseQueryOption(options) -proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse = +proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, + cb: proc(req: LSRequest, LS: LiteStore, resource: string, + id: string): LSResponse): LSResponse = if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch: - var ct = "" + var ct = "" let body = req.body.strip if body == "": return resError(Http400, "Bad request: No content specified for document.")

@@ -226,14 +237,17 @@ case ct:

of "application/json": try: discard body.parseJson() - except: - return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) + except CatchableError: + return resError(Http400, "Invalid JSON content - $1" % + getCurrentExceptionMsg()) else: discard return cb(req, LS, resource, id) -proc patchTag(tags: var seq[string], index: int, op, path, value: string): bool = - LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." % [op, $index, $value, $tags.len]) +proc patchTag(tags: var seq[string], index: int, op, path, + value: string): bool = + LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." % [op, $index, + $value, $tags.len]) case op: of "remove": let tag = tags[index]

@@ -252,7 +266,8 @@ raise newException(EInvalidRequest, "invalid tag: $1" % value)

of "replace": if value.match(PEG_USER_TAG): if tags[index].startsWith("$"): - raise newException(EInvalidRequest, "cannot replace system tag: $1" % tags[index]) + raise newException(EInvalidRequest, "cannot replace system tag: $1" % + tags[index]) else: tags[index] = value else:

@@ -267,7 +282,8 @@ else:

raise newException(EInvalidRequest, "invalid patch operation: $1" % op) return true -proc patchData*(data: var JsonNode, origData: JsonNode, op: string, path: string, value: JsonNode): bool = +proc patchData*(data: var JsonNode, origData: JsonNode, op: string, + path: string, value: JsonNode): bool = LOG.debug("- PATCH -> $1 path $2 with $3" % [op, path, $value]) var keys = path.replace(peg"^\/data\/", "").split("/") if keys.len == 0:

@@ -292,12 +308,14 @@ of "test":

if d.elems[index] != value: return false else: - raise newException(EInvalidRequest, "invalid patch operation: $1" % op) + raise newException(EInvalidRequest, + "invalid patch operation: $1" % op) else: d = d[index] dorig = dorig[index] - except: - raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path]) + except CatchableError: + raise newException(EInvalidRequest, + "invalid index key '$1' in path '$2'" % [key, path]) else: if c >= keys.len: case op:

@@ -305,20 +323,23 @@ of "remove":

if d.hasKey(key): d.delete(key) else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + raise newException(EInvalidRequest, + "key '$1' not found in path '$2'" % [key, path]) of "add": d[key] = value of "replace": if d.hasKey(key): d[key] = value else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + raise newException(EInvalidRequest, + "key '$1' not found in path '$2'" % [key, path]) of "test": if dorig.hasKey(key): if dorig[key] != value: return false else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + raise newException(EInvalidRequest, + "key '$1' not found in path '$2'" % [key, path]) else: raise newException(EInvalidRequest, "invalid patch operation: $1" % op) else:

@@ -328,7 +349,8 @@ c += 1

return true -proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[string], op: string, path: string, value: JsonNode): bool = +proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[ + string], op: string, path: string, value: JsonNode): bool = var matches = @[""] let p = peg""" path <- ^tagPath / fieldPath$

@@ -351,7 +373,8 @@ raise newException(EInvalidRequest, "cannot patch data of a non-JSON document.")

# Low level procs -proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = +proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), + req: LSRequest): LSResponse = let doc = LS.store.retrieveTag(id, options) result.headers = ctJsonHeader() setOrigin(LS, req, result.headers)

@@ -361,7 +384,8 @@ else:

result.content = $doc result.code = Http200 -proc getStore*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = +proc getStore*(LS: LiteStore, id: string, options = newQueryOptions(), + req: LSRequest): LSResponse = if (not LSDICT.hasKey(id)): return resStoreNotFound(id) let store = LSDICT[id]

@@ -374,7 +398,8 @@ setOrigin(LS, req, result.headers)

result.content = $doc result.code = Http200 -proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = +proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), + req: LSRequest): LSResponse = let doc = LS.store.retrieveIndex(id, options) result.headers = ctJsonHeader() setOrigin(LS, req, result.headers)

@@ -384,7 +409,8 @@ else:

result.content = $doc result.code = Http200 -proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = +proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), + req: LSRequest): LSResponse = let doc = LS.store.retrieveRawDocument(id, options) result.headers = ctJsonHeader() setOrigin(LS, req, result.headers)

@@ -394,7 +420,8 @@ else:

result.content = doc result.code = Http200 -proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = +proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), + req: LSRequest): LSResponse = let doc = LS.store.retrieveDocument(id, options) if doc.data == "": result = resDocumentNotFound(id)

@@ -419,10 +446,11 @@ setOrigin(LS, req, result.headers)

result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: result = resError(Http500, "Unable to delete document '$1'" % id) -proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = +proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), + req: LSRequest): LSResponse = var options = options let t0 = cpuTime() let docs = LS.store.retrieveTags(options)

@@ -431,7 +459,8 @@ let orig_offset = options.offset

options.limit = 0 options.offset = 0 options.select = @["COUNT(tag_id)"] - let total = LS.store.countTags(prepareSelectTagsQuery(options), options.like.replace("*", "%")) + let total = LS.store.countTags(prepareSelectTagsQuery(options), + options.like.replace("*", "%")) var content = newJObject() if options.like != "": content["like"] = %(options.like.decodeURL)

@@ -447,7 +476,8 @@ setOrigin(LS, req, result.headers)

result.content = content.pretty result.code = Http200 -proc getStores(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = +proc getStores(LS: LiteStore, options: QueryOptions = newQueryOptions(), + req: LSRequest): LSResponse = let t0 = cpuTime() var docs = newJArray() for k, v in LSDICT.pairs:

@@ -465,7 +495,8 @@ setOrigin(LS, req, result.headers)

result.content = content.pretty result.code = Http200 -proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = +proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), + req: LSRequest): LSResponse = var options = options let t0 = cpuTime() let docs = LS.store.retrieveIndexes(options)

@@ -474,7 +505,8 @@ let orig_offset = options.offset

options.limit = 0 options.offset = 0 options.select = @["COUNT(name)"] - let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), options.like.replace("*", "%")) + let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), + options.like.replace("*", "%")) var content = newJObject() if options.like != "": content["like"] = %(options.like.decodeURL)

@@ -490,7 +522,8 @@ setOrigin(LS, req, result.headers)

result.content = content.pretty result.code = Http200 -proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = +proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), + req: LSRequest): LSResponse = var options = options let t0 = cpuTime() let docs = LS.store.retrieveRawDocuments(options)

@@ -533,7 +566,8 @@ var content = newJObject()

content["version"] = %(LS.appname & " v" & LS.appversion) content["datastore_version"] = %version content["api_version"] = %7 - content["size"] = %($((LS.file.getFileSize().float/(1024*1024)).formatFloat(ffDecimal, 2)) & " MB") + content["size"] = %($((LS.file.getFileSize().float/(1024*1024)).formatFloat( + ffDecimal, 2)) & " MB") content["read_only"] = %LS.readonly content["log_level"] = %LS.loglevel if LS.directory.len == 0:

@@ -541,7 +575,8 @@ content["directory"] = newJNull()

else: content["directory"] = %LS.directory content["mount"] = %LS.mount - if LS.config != newJNull() and LS.config.hasKey("stores") and LS.config["stores"].len > 0: + if LS.config != newJNull() and LS.config.hasKey("stores") and LS.config[ + "stores"].len > 0: content["additional_stores"] = %toSeq(LS.config["stores"].keys) else: content["additional_stores"] = newJArray()

@@ -570,11 +605,12 @@ result.headers = ctJsonHeader()

setOrigin(LS, req, result.headers) result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field] result.code = Http201 - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to create index.") -proc putStore*(LS: LiteStore, id: string, config: JsonNode, req: LSRequest): LSResponse = +proc putStore*(LS: LiteStore, id: string, config: JsonNode, + req: LSRequest): LSResponse = try: if (not id.match(PEG_STORE) or id == "master"): return resError(Http400, "invalid store ID: $1" % id)

@@ -585,7 +621,7 @@ LS.updateConfig()

LSDICT[id] = store result = getStore(LS, id, newQueryOptions(), req) result.code = Http201 - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to create store.")

@@ -601,7 +637,7 @@ setOrigin(LS, req, result.headers)

result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to delete index.")

@@ -620,11 +656,12 @@ setOrigin(LS, req, result.headers)

result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to delete index.") -proc postDocument*(LS: LiteStore, body: string, ct: string, folder="", req: LSRequest): LSResponse = +proc postDocument*(LS: LiteStore, body: string, ct: string, folder = "", + req: LSRequest): LSResponse = if not folder.isFolder: return resError(Http400, "Invalid folder specified when creating document: $1" % folder) try:

@@ -636,11 +673,12 @@ result.content = doc

result.code = Http201 else: result = resError(Http500, "Unable to create document.") - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to create document.") -proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, req: LSRequest): LSResponse = +proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, + req: LSRequest): LSResponse = if id.isFolder: return resError(Http400, "Invalid ID '$1' (Document IDs cannot end with '/')." % id) let doc = LS.store.retrieveDocument(id)

@@ -665,10 +703,11 @@ result.content = doc

result.code = Http200 else: result = resError(Http500, "Unable to update document '$1'." % id) - except: + except CatchableError: result = resError(Http500, "Unable to update document '$1'." % id) -proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse = +proc patchDocument*(LS: LiteStore, id: string, body: string, + req: LSRequest): LSResponse = var apply = true let jbody = body.parseJson if jbody.kind != JArray:

@@ -690,7 +729,7 @@ if tags.contains("$subtype:json"):

try: origData = jdoc["data"].getStr.parseJson data = origData.copy - except: + except CatchableError: discard var c = 1 for item in jbody.items:

@@ -698,13 +737,14 @@ if item.hasKey("op") and item.hasKey("path"):

if not item.hasKey("value"): item["value"] = %"" try: - apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) + apply = applyPatchOperation(data, origData, tags, item["op"].str, item[ + "path"].str, item["value"]) if not apply: break - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) else: - return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c) + return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c) c.inc if apply: # when document is not JSON the origData is not defined

@@ -714,8 +754,9 @@ try:

var doc = LS.store.updateDocument(id, data.pretty, "application/json") if doc == "": return resError(Http500, "Unable to patch document '$1'." % id) - except: - return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg()) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % id, + getCurrentExceptionMsg()) if origTags != tags: try: for t1 in jdoc["tags"].items:

@@ -723,13 +764,15 @@ discard LS.store.destroyTag(t1.str, id, true)

for t2 in tags: if t2 != "": LS.store.createTag(t2, id, true) - except: - return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % [id, + getCurrentExceptionMsg()]) return LS.getRawDocument(id, newQueryOptions(), req) # Main routing -proc options*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc options*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = case resource: of "info": result.headers = newHttpHeaders(TAB_HEADERS)
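patchDocument expects a JSON Patch array in the request body: paths under /data/ are handled by patchData, tag paths by patchTag, and the supported operations are add, remove, replace and test. A hedged sketch of driving it through the injected $store helper (the document id and fields are illustrative):

// Inside a middleware script: apply a JSON Patch to a document's data.
var ops = [
  { "op": "test",    "path": "/data/owner",  "value": "admin" },
  { "op": "replace", "path": "/data/status", "value": "archived" }
];
var resp = $store.patch("docs", "invoices/2024-001.json", JSON.stringify(ops));
if (resp.code != 200) {
  $res.code = resp.code;                 // propagate the error response to the client
  $res.content = JSON.parse(resp.content);
}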

@@ -863,7 +906,8 @@ result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS"

else: discard # never happens really. -proc head*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc head*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = var options = newQueryOptions() options.select = @["documents.id AS id", "created", "modified"] if id.isFolder:

@@ -876,10 +920,11 @@ result.content = ""

else: result = LS.getRawDocuments(options, req) result.content = "" - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) -proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc get*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = case resource: of "docs": var options = newQueryOptions()

@@ -890,13 +935,14 @@ options.select = @["documents.id AS id", "created", "modified"]

try: parseQueryOptions(req.url.query, options); if id != "" and options.folder == "": - if req.url.query.contains("raw=true") or req.headers.hasKey("Accept") and req.headers["Accept"] == "application/json": + if req.url.query.contains("raw=true") or req.headers.hasKey( + "Accept") and req.headers["Accept"] == "application/json": return LS.getRawDocument(id, options, req) else: return LS.getDocument(id, options, req) else: return LS.getRawDocuments(options, req) - except: + except CatchableError: let e = getCurrentException() let trace = e.getStackTrace() echo trace

@@ -913,9 +959,9 @@ else:

options.folder = "" result = LS.getDocument(id & "index.html", options, req) if result.code == Http404: - result = LS.getDocument(id & "index.htm", options, req) + result = LS.getDocument(id & "index.htm", options, req) return result - except: + except CatchableError: let e = getCurrentException() let trace = e.getStackTrace() echo trace

@@ -928,7 +974,7 @@ if id != "":

return LS.getTag(id, options, req) else: return LS.getTags(options, req) - except: + except CatchableError: return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) of "indexes": var options = newQueryOptions()

@@ -938,7 +984,7 @@ if id != "":

return LS.getIndex(id, options, req) else: return LS.getIndexes(options, req) - except: + except CatchableError: return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) of "stores": var options = newQueryOptions()

@@ -948,7 +994,7 @@ if id != "":

return LS.getStore(id, options, req) else: return LS.getStores(options, req) - except: + except CatchableError: return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) of "info": if id != "":

@@ -957,27 +1003,31 @@ return LS.getInfo(req)

else: discard # never happens really. -proc post*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc post*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = var ct = "text/plain" if req.headers.hasKey("Content-Type"): ct = req.headers["Content-Type"] return LS.postDocument(req.body.strip, ct, id, req) -proc put*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc put*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = if id != "": if resource == "indexes": var field = "" try: field = parseJson(req.body.strip)["field"].getStr - except: - return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) + except CatchableError: + return resError(Http400, "Bad Request - Invalid JSON body - $1" % + getCurrentExceptionMsg()) return LS.putIndex(id, field, req) elif resource == "stores": var config = newJNull() try: config = parseJson(req.body) - except: - return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) + except CatchableError: + return resError(Http400, "Bad Request - Invalid JSON body - $1" % + getCurrentExceptionMsg()) return LS.putStore(id, config, req) else: # Assume docs var ct = "text/plain"
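PUT requests against indexes and stores expect a small JSON body: putIndex reads a field property (a JSON path handed to SQLite's json_extract), while putStore takes the new store's configuration object and persists the store as <id>.db. A sketch of creating an index through the injected $store helper, assuming an illustrative index id and field path:

// Inside a middleware script: index the top-level "title" field of JSON documents.
// (Index id and field path are illustrative; the field uses SQLite json_extract syntax.)
var resp = $store.put("indexes", "title", JSON.stringify({ "field": "$.title" }));
// On success, putIndex answers 201 with {"id": "title", "field": "$.title"}.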

@@ -987,7 +1037,8 @@ return LS.putDocument(id, req.body.strip, ct, req)

else: return resError(Http400, "Bad request: document ID must be specified in PUT requests.") -proc delete*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc delete*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = if id != "": if resource == "indexes": return LS.deleteIndex(id, req)

@@ -998,7 +1049,8 @@ return LS.deleteDocument(id, req)

else: return resError(Http400, "Bad request: document ID must be specified in DELETE requests.") -proc patch*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc patch*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = if id != "": return LS.patchDocument(id, req.body, req) else:

@@ -1024,14 +1076,15 @@ result.headers = ctHeader("text/plain")

setOrigin(LS, req, result.headers) result.content = contents result.code = Http200 - except: + except CatchableError: return resError(Http500, "Unable to read file '$1'." % path) else: return resError(Http404, "File '$1' not found." % path) else: return resError(Http405, "Method not allowed: $1" % $req.reqMethod) -proc route*(req: LSRequest, LS: LiteStore, resource = "docs", id = ""): LSResponse = +proc route*(req: LSRequest, LS: LiteStore, resource = "docs", + id = ""): LSResponse = var reqMethod = $req.reqMethod if req.headers.hasKey("X-HTTP-Method-Override"): reqMethod = req.headers["X-HTTP-Method-Override"]

@@ -1072,31 +1125,37 @@ discard path.find(PEG_URL, matches)

return req.route(LSDICT[id], matches[1], matches[2]) return req.route(LS, resource, id) -proc newSimpleLSRequest(meth: HttpMethod, resource, id, body = "", params = "", headers = newHttpHeaders()): LSRequest = +proc newSimpleLSRequest(meth: HttpMethod, resource = "", id = "", body = "", + params = "", headers = newHttpHeaders()): LSRequest = result.reqMethod = meth result.body = body result.headers = headers - result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", resource, id, params]) - + result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", + resource, id, params]) + proc get(resource, id: string, params = ""): LSResponse = - return newSimpleLSRequest(HttpGet, resource, id, "", params).multiRoute(resource, id) + return newSimpleLSRequest(HttpGet, resource, id, "", params).multiRoute( + resource, id) proc post(resource, folder, body: string, ct = ""): LSResponse = var headers = newHttpHeaders() if ct != "": headers["Content-Type"] = ct - return newSimpleLSRequest(HttpPost, resource, folder, body, "", headers).multiRoute(resource, folder & "/") + return newSimpleLSRequest(HttpPost, resource, folder, body, "", + headers).multiRoute(resource, folder & "/") proc put(resource, id, body: string, ct = ""): LSResponse = var headers = newHttpHeaders() if ct != "": headers["Content-Type"] = ct - return newSimpleLSRequest(HttpPut, resource, id, body, "", headers).multiRoute(resource, id) + return newSimpleLSRequest(HttpPut, resource, id, body, "", + headers).multiRoute(resource, id) proc patch(resource, id, body: string): LSResponse = var headers = newHttpHeaders() headers["Content-Type"] = "application/json" - return newSimpleLSRequest(HttpPatch, resource, id, body, "", headers).multiRoute(resource, id) + return newSimpleLSRequest(HttpPatch, resource, id, body, "", + headers).multiRoute(resource, id) proc delete(resource, id: string): LSResponse = return newSimpleLSRequest(HttpDelete, resource, id).multiRoute(resource, id)

@@ -1104,7 +1163,133 @@

proc head(resource, id: string): LSResponse = return newSimpleLSRequest(HttpHead, resource, id).multiRoute(resource, id) -proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, origId: string) = +proc toHeaders(obj: JsonNode): HttpHeaders = + var headers = newSeq[tuple[key: string, val: string]](0) + for k, v in obj.pairs: + headers.add (key: k, val: v.getstr) + return newHttpHeaders(headers) + +proc toJson(headers: HttpHeaders): JsonNode = + result = newJObject() + for k, v in headers.pairs: + result[k] = newJString(v) + +proc getHeadersArg(ctx: DTContext): HttpHeaders = + duk_enum(ctx, 1, 0) + var headers = newSeq[tuple[key: string, val: string]](0) + while duk_next(ctx, -1, 1) == 1: + let key = $duk_safe_to_string(ctx, -2) + let val = $duk_safe_to_string(ctx, -1) + duk_pop_2(ctx) + headers.add (key: key, val: val) + return newHttpHeaders(headers) + + +proc registerHttpApi(LS: LiteStore, ctx: DTContext) = + var api_idx = ctx.duk_push_object() + # GET + var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let resp = client.get(url) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, get, 2) + discard ctx.duk_put_prop_string(api_idx, "get") + # HEAD + var head: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let resp = client.head(url) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, head, 2) + discard ctx.duk_put_prop_string(api_idx, "head") + # DELETE + var delete: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let resp = client.delete(url) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, delete, 2) + discard ctx.duk_put_prop_string(api_idx, "head") + # POST + var post: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let body = $duk_get_string(ctx, 2) + let resp = client.post(url, body) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard 
ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, post, 3) + discard ctx.duk_put_prop_string(api_idx, "post") + # PUT + var put: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let body = $duk_get_string(ctx, 2) + let resp = client.put(url, body) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, put, 3) + discard ctx.duk_put_prop_string(api_idx, "put") + # PATCH + var patch: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let body = $duk_get_string(ctx, 2) + let resp = client.patch(url, body) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, patch, 3) + discard ctx.duk_put_prop_string(api_idx, "patch") + discard ctx.duk_put_global_string("$http") + +proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, + origId: string) = var api_idx = ctx.duk_push_object() # GET var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} =

@@ -1198,7 +1383,7 @@ discard duk_push_c_function(ctx, head, 2)

discard ctx.duk_put_prop_string(api_idx, "head") discard ctx.duk_put_global_string("$store") -proc jError(ctx: DTContext): LSResponse = +proc jError(ctx: DTContext): LSResponse = return resError(Http500, "Middleware Error: " & $ctx.duk_safe_to_string(-1)) proc getMiddleware*(LS: LiteStore, id: string): string =

@@ -1213,9 +1398,9 @@ else:

result = LS.middleware[id] proc getMiddlewareSeq(LS: LiteStore, resource, id, meth: string): seq[string] = - result = newSeq[string]() + result = newSeq[string]() if LS.config.kind != JObject or not LS.config.hasKey("resources"): - return + return var reqUri = "/" & resource & "/" & id if reqUri[^1] == '/': reqUri.removeSuffix({'/'})

@@ -1226,24 +1411,28 @@ var currentPaths = ""

for p in ancestors: currentPath &= "/" & p currentPaths = currentPath & "/*" - if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("middleware"): + if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][ + currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][ + meth].hasKey("middleware"): let mw = LS.config["resources"][currentPaths][meth]["middleware"] if (mw.kind == JArray): for m in mw: result.add m.getStr - if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): + if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][ + reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): let mw = LS.config["resources"][reqUri][meth]["middleware"] if (mw.kind == JArray): for m in mw: result.add m.getStr -proc execute*(req: var LSRequest, LS: LiteStore, resource, id: string): LSResponse = +proc execute*(req: var LSRequest, LS: LiteStore, resource, + id: string): LSResponse = let middleware = getMiddlewareSeq(LS, resource, id, $req.reqMethod) if middleware.len > 0: LOG.debug("Middleware: " & middleware.join(" -> ")); if middleware.len == 0: return route(req, LS, resource, id) - var jReq = $(%* req) + var jReq = $( %* req) LOG.debug("Request: " & jReq) var jRes = """{ "code": 200,

@@ -1262,6 +1451,7 @@ var ctx = duk_create_heap_default()

duk_console_init(ctx) duk_print_alert_init(ctx) LS.registerStoreApi(ctx, resource, id) + LS.registerHttpApi(ctx) if ctx.duk_peval_string(cstring("($1)" % $jReq)) != 0: return jError(ctx) discard ctx.duk_put_global_string("$req")

@@ -1276,7 +1466,7 @@ var i = 0

var abort = 0 while abort != 1 and i < middleware.len: let code = LS.getMiddleware(middleware[i]) - LOG.debug("Evaluating middleware '$1'" % middleware[i]) + LOG.debug("Evaluating middleware '$1'" % middleware[i]) if ctx.duk_peval_string(code.cstring) != 0: return jError(ctx) abort = ctx.duk_get_boolean(-1)
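The $http global registered above wraps std/httpclient for middleware scripts: each helper takes a URL and a headers object (plus a body string for POST, PUT and PATCH) and returns the status code, the body as a string and the response headers as a JSON-encoded object, which makes it possible, for instance, to fetch a JSON Web Key Set at runtime. A hedged sketch (the URL is illustrative):

// Inside a middleware script: retrieve a JSON resource through the injected $http client.
var resp = $http.get("https://example.auth0.com/.well-known/jwks.json", { "Accept": "application/json" });
if (resp.code == 200) {
  var jwks = JSON.parse(resp.content);        // body is returned as a string
  var respHeaders = JSON.parse(resp.headers); // headers come back JSON-encoded
  $ctx.keys = jwks.keys;
}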
M src/litestorepkg/lib/config.nim

@@ -1,12 +1,12 @@

const - pkgName* = "litestore" - pkgVersion* = "1.12.1" - pkgAuthor* = "Fabio Cevasco" + pkgName* = "litestore" + pkgVersion* = "1.13.0" + pkgAuthor* = "Fabio Cevasco" pkgDescription* = "Self-contained, lightweight, RESTful document store." - pkgLicense* = "MIT" - appname* = "LiteStore" + pkgLicense* = "MIT" + appname* = "LiteStore" var - file* = "data.db" - address* = "127.0.0.1" - port* = 9500 + file* = "data.db" + address* = "127.0.0.1" + port* = 9500
M src/litestorepkg/lib/core.nim

@@ -1,15 +1,18 @@

import - x_sqlite3, - x_db_sqlite as db, + db_connector/sqlite3, + db_connector/db_sqlite as db, + std/[ os, + paths, oids, json, pegs, strtabs, strutils, sequtils, + httpclient, base64, - math + math] import types, contenttypes,

@@ -52,7 +55,7 @@

proc closeDatastore*(store: Datastore) = try: db.close(store.db) - except: + except CatchableError: raise newException(EDatastoreUnavailable, "Datastore '$1' cannot be closed." % store.path)

@@ -61,7 +64,7 @@ try:

if store.path.fileExists(): store.closeDataStore() store.path.removeFile() - except: + except CatchableError: raise newException(EDatastoreUnavailable, "Datastore '$1' cannot destroyed." % store.path)

@@ -79,7 +82,7 @@ try:

store.db.exec(SQL_CREATE_SYSTEM_DOCUMENTS_TABLE) store.db.exec(SQL_UPDATE_VERSION, 2) LOG.debug("Done.") - except: + except CatchableError: store.closeDatastore() store.path.removeFile() copyFile(bkp_path, store.path)

@@ -106,7 +109,7 @@ discard result.db.tryExec("PRAGMA foreign_keys = ON".sql)

LOG.debug("Done.") result.path = file result.mount = "" - except: + except CatchableError: raise newException(EDatastoreUnavailable, "Datastore '$1' cannot be opened." % file)

@@ -136,7 +139,8 @@

# Manage Indexes proc createIndex*(store: Datastore, indexId, field: string) = - let query = sql("CREATE INDEX json_index_$1 ON documents(json_extract(data, ?) COLLATE NOCASE) WHERE json_valid(data)" % [indexId]) + let query = sql("CREATE INDEX json_index_$1 ON documents(json_extract(data, ?) COLLATE NOCASE) WHERE json_valid(data)" % + [indexId]) store.begin() store.db.exec(query, field) store.commit()

@@ -147,7 +151,8 @@ store.begin()

store.db.exec(query) store.commit() -proc retrieveIndex*(store: Datastore, id: string, options: QueryOptions = newQueryOptions()): JsonNode = +proc retrieveIndex*(store: Datastore, id: string, + options: QueryOptions = newQueryOptions()): JsonNode = var options = options options.single = true let query = prepareSelectIndexesQuery(options)

@@ -167,7 +172,7 @@ if (options.like[options.like.len-1] == '*' and options.like[0] != '*'):

let str = "json_index_" & options.like.substr(0, options.like.len-2) raw_indexes = store.db.getAllRows(query.sql, str, str & "{") else: - let str = "json_index_" & options.like.replace("*", "%") + let str = "json_index_" & options.like.replace("*", "%") raw_indexes = store.db.getAllRows(query.sql, str) else: raw_indexes = store.db.getAllRows(query.sql)

@@ -176,7 +181,8 @@ for index in raw_indexes:

var matches: array[0..0, string] let fieldPeg = peg"'CREATE INDEX json_index_test ON documents(json_extract(data, \'' {[^']+}" discard index[1].match(fieldPeg, matches) - indexes.add(%[("id", %index[0].replace("json_index_", "")), ("field", %matches[0])]) + indexes.add(%[("id", %index[0].replace("json_index_", "")), ("field", + %matches[0])]) return %indexes proc countIndexes*(store: Datastore, q = "", like = ""): int64 =

@@ -286,7 +292,7 @@ if contenttype == "application/json":

# Validate JSON data try: discard data.parseJson - except: + except CatchableError: raise newException(JsonParsingError, "Invalid JSON content - " & getCurrentExceptionMsg()) if id == "":

@@ -318,7 +324,7 @@ raise newException(EFileExists, "File already exists: $1" % filename)

if singleOp: store.commit() return $store.retrieveRawDocument(id) - except: + except CatchableError: store.rollback() eWarn() raise

@@ -334,7 +340,7 @@ if contenttype == "application/json":

# Validate JSON data try: discard data.parseJson - except: + except CatchableError: raise newException(JsonParsingError, "Invalid JSON content - " & getCurrentExceptionMsg()) if id == "":

@@ -350,7 +356,7 @@ binary, currentTime())

if singleOp: store.commit() return $store.retrieveRawDocument(id) - except: + except CatchableError: store.rollback() eWarn() raise

@@ -365,21 +371,21 @@ if contenttype == "application/json":

# Validate JSON data try: discard data.parseJson - except: + except CatchableError: raise newException(JsonParsingError, "Invalid JSON content - " & getCurrentExceptionMsg()) try: LOG.debug("Updating system document '$1'" % id) store.begin() - var res = store.db.execAffectedRows(SQL_UPDATE_SYSTEM_DOCUMENT, data, contenttype, - binary, currentTime(), id) + var res = store.db.execAffectedRows(SQL_UPDATE_SYSTEM_DOCUMENT, data, + contenttype, binary, currentTime(), id) if res > 0: result = $store.retrieveRawDocument(id) else: result = "" if singleOp: store.commit() - except: + except CatchableError: eWarn() store.rollback() raise

@@ -394,7 +400,7 @@ if contenttype == "application/json":

# Validate JSON data try: discard data.parseJson - except: + except CatchableError: raise newException(JsonParsingError, "Invalid JSON content - " & getCurrentExceptionMsg()) var searchable = searchable

@@ -421,7 +427,7 @@ else:

result = "" if singleOp: store.commit() - except: + except CatchableError: eWarn() store.rollback() raise

@@ -446,11 +452,11 @@ else:

raise newException(EFileNotFound, "File not found: $1" % filename) if singleOp: store.commit() - except: + except CatchableError: eWarn() store.rollback() -proc findDocumentId*(store: Datastore, pattern: string): string = +proc findDocumentId*(store: Datastore, pattern: string): string = var select = "SELECT id FROM documents WHERE id LIKE ? ESCAPE '\\' " var raw_document = store.db.getRow(select.sql, pattern) LOG.debug("Retrieving document '$1'" % pattern)

@@ -496,7 +502,8 @@

proc countDocuments*(store: Datastore): int64 = return store.db.getRow(SQL_COUNT_DOCUMENTS)[0].parseInt -proc importFile*(store: Datastore, f: string, dir = "/", system = false, notSearchable = false): string = +proc importFile*(store: Datastore, f: string, dir = "/", system = false, + notSearchable = false): string = if not f.fileExists: raise newException(EFileNotFound, "File '$1' not found." % f) let split = f.splitFile

@@ -530,7 +537,7 @@ else:

discard store.createDocument(d_id, d_contents, d_ct, d_binary, d_searchable) if dir != "/" and not system: store.db.exec(SQL_INSERT_TAG, "$dir:"&dir, d_id) - except: + except CatchableError: store.rollback() eWarn() raise

@@ -544,7 +551,7 @@ store.begin()

try: for tag in tags: store.db.exec(SQL_INSERT_TAG, tag, d_id) - except: + except CatchableError: store.rollback() eWarn() raise

@@ -562,7 +569,7 @@ LOG.debug("Optimixing full-text index...")

store.db.exec(SQL_OPTIMIZE) store.commit() LOG.debug("Done") - except: + except CatchableError: eWarn() proc vacuum*(file: string) =

@@ -570,7 +577,7 @@ let data = db.open(file, "", "", "")

try: data.exec(SQL_VACUUM) db.close(data) - except: + except CatchableError: eWarn() quit(203) quit(0)

@@ -580,21 +587,22 @@ result = newSeq[string]()

let tags_file = f.splitFile.dir / "_tags" if tags_file.fileExists: for tag in tags_file.lines: - result.add(tag) + result.add(tag) -proc importDir*(store: Datastore, dir: string, system = false, importTags = false, notSearchable = false) = +proc importDir*(store: Datastore, dir: string, system = false, + importTags = false, notSearchable = false) = var files = newSeq[string]() if not dir.dirExists: raise newException(EDirectoryNotFound, "Directory '$1' not found." % dir) for f in dir.walkDirRec(): if f.dirExists: continue - let dirs = f.split(DirSep) + let dirs = f.split(DirSep) if dirs.any(proc (s: string): bool = return s.startsWith(".")): # Ignore hidden directories and files - continue - let fileName = f.splitFile.name + continue + let fileName = f.splitFile.name if fileName == "_tags" and not importTags: # Ignore tags file unless the CLI flag was set continue
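Per the getTagsFromFile/importDir hunks above, tags for an imported directory come from a plain `_tags` file sitting next to the documents, one tag per line, and are only read when the import-tags flag is set. A small sketch with made-up file and tag names:

    import std/os

    # Hypothetical layout: a document plus a "_tags" file listing its tags.
    createDir("docs")
    writeFile("docs" / "readme.md", "# Hello")
    writeFile("docs" / "_tags", "customer-docs\nv2\n")
    # store.importDir("docs", importTags = true) would then pick up both tags via
    # getTagsFromFile for every file imported from that directory.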

@@ -621,7 +629,7 @@ cBatches.inc

store.commit() LOG.info("Importing batch $1/$2...", cBatches, nBatches) store.begin() - except: + except CatchableError: LOG.warn("Unable to import file: $1", f) eWarn() store.rollback()

@@ -685,17 +693,37 @@ LOG.level = lvNone

else: fail(103, "Invalid log level '$1'" % val) -proc processAuthConfig(configuration: JsonNode, auth: var JsonNode) = - if auth == newJNull() and configuration != newJNull() and configuration.hasKey("signature"): - LOG.debug("Authentication: Signature found, processing authentication rules in configuration.") - auth = newJObject(); - auth["access"] = newJObject(); - auth["signature"] = configuration["signature"] - for k, v in configuration["resources"].pairs: - auth["access"][k] = newJObject() +proc downloadJwks*(LS: LiteStore, uri: string) = + let file = LS.jwksPath + let client = newHttpClient() + client.downloadFile(uri, file) + +proc processAuthConfig(LS: var LiteStore) = + if LS.auth == newJNull() and LS.config != newJNull(): + LS.auth = newJObject(); + LS.auth["access"] = newJObject(); + if LS.config.hasKey("jwks_uri"): + LOG.debug("Authentication: Downloading JWKS file.") + try: + LS.downloadJwks(LS.config["jwks_uri"].getStr) + except CatchableError: + LOG.warn "Unable to download JWKS file." + eWarn() + try: + LS.jwks = LS.jwksPath.parseFile + except: + LOG.warn "Unable to parse JWKS file." + eWarn() + elif LS.config.hasKey("signature"): + LOG.debug("Authentication: Signature found, processing authentication rules in configuration.") + LS.auth["signature"] = LS.config["signature"].getStr.replace( + "-----BEGIN CERTIFICATE-----\n", "").replace( + "\n-----END CERTIFICATE-----").strip().newJString + for k, v in LS.config["resources"].pairs: + LS.auth["access"][k] = newJObject() for meth, content in v.pairs: if content.hasKey("auth"): - auth["access"][k][meth] = content["auth"] + LS.auth["access"][k][meth] = content["auth"] proc processConfigSettings(LS: var LiteStore) = # Process config settings if present and if no cli settings are set
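A sketch of a configuration the rewritten processAuthConfig accepts (the URL and scope names are placeholders): when jwks_uri is present, the key set is downloaded to the path returned by LS.jwksPath and parsed into LS.jwks; otherwise the signature string is normalised by stripping the PEM markers.

    import std/json

    let config = %*{
      "jwks_uri": "https://example.auth0.com/.well-known/jwks.json",
      "resources": {
        "/docs/*": {
          "GET":  {"auth": ["admin:server"]},
          "POST": {"auth": ["admin:server"]}
        }
      }
    }
    # processAuthConfig copies each "auth" array into LS.auth["access"][path][method].
    echo config["resources"]["/docs/*"]["GET"]["auth"]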

@@ -727,7 +755,7 @@ proc setup*(LS: var LiteStore, open = true) {.gcsafe.} =

if not LS.file.fileExists: try: LS.file.createDatastore() - except: + except CatchableError: eWarn() fail(200, "Unable to create datastore '$1'" % [LS.file]) if (open):

@@ -735,15 +763,15 @@ try:

LS.store = LS.file.openDatastore() try: LS.store.upgradeDatastore() - except: + except CatchableError: fail(203, "Unable to upgrade datastore '$1'" % [LS.file]) if LS.mount: try: LS.store.mountDir(LS.directory) - except: + except CatchableError: eWarn() fail(202, "Unable to mount directory '$1'" % [LS.directory]) - except: + except CatchableError: fail(201, "Unable to open datastore '$1'" % [LS.file]) proc initStore*(LS: var LiteStore) =

@@ -759,7 +787,7 @@ # Process config settings

LS.processConfigSettings() # Process auth from config settings LOG.debug("Authentication: Checking configuration for auth rules - Store file: " & LS.file) - processAuthConfig(LS.config, LS.auth) + LS.processAuthConfig() if LS.auth == newJNull(): # Attempt to retrieve auth.json from system documents
A src/litestorepkg/lib/jwt.nim

@@ -0,0 +1,142 @@

+import std/[ + openssl, base64, strutils, macros, json, times, pegs, sequtils, os + ] +import types, core + +when defined(windows) and defined(amd64): + {.passL: "-static -L"&getProjectPath()&"/litestorepkg/vendor/openssl/windows -lssl -lcrypto -lbcrypt".} +elif defined(linux) and defined(amd64): + {.passL: "-static -L"&getProjectPath()&"/litestorepkg/vendor/openssl/linux -lssl -lcrypto".} +elif defined(macosx) and defined(amd64): + {.passL: "-Bstatic -L"&getProjectPath()&"/litestorepkg/vendor/openssl/macosx -lssl -lcrypto -Bdynamic".} + + +proc EVP_PKEY_new(): EVP_PKEY {.cdecl, importc.} +proc X509_get_pubkey(cert: PX509): EVP_PKEY {.cdecl, importc.} +proc EVP_DigestVerifyInit(ctx: EVP_MD_CTX; pctx: ptr EVP_PKEY_CTX; typ: EVP_MD; + e: ENGINE; pkey: EVP_PKEY): cint {.cdecl, importc.} +proc EVP_DigestVerifyUpdate(ctx: EVP_MD_CTX; data: pointer; + len: cuint): cint {.cdecl, importc.} +proc EVP_DigestVerifyFinal(ctx: EVP_MD_CTX; data: pointer; + len: cuint): cint {.cdecl, importc.} + +proc getLastError(): string = + return $ERR_error_string(ERR_get_error(), nil) + +proc raiseJwtError(msg: string) = + let err = getLastError() + raise newException(EJwtValidationError, msg&"\n"&err) + +proc raiseX509Error(msg: string) = + let err = getLastError() + raise newException(EX509Error, msg&"\n"&err) + +proc getX5c*(LS: LiteStore; token: JWT): string = + let keys = LS.jwks["keys"] + if token.header.hasKey("kid"): + let kid = token.header["kid"].getStr + return keys.filterIt(it["kid"].getStr == kid)[0]["x5c"][0].getStr + return keys[0]["x5c"][0].getStr + +proc base64UrlDecode(encoded: string): string = + let padding = 4 - (encoded.len mod 4) + let base64String = encoded.replace("-", "+").replace("_", "/") & repeat("=", padding) + result = base64.decode(base64String) + +proc newJwt*(token: string): JWT = + let parts = token.split(".") + result.token = token + result.payload = parts[0]&"."&parts[1] + result.header = parts[0].base64UrlDecode.parseJson + result.claims = parts[1].base64UrlDecode.parseJson + result.signature = parts[2].base64UrlDecode + +proc verifyTimeClaims*(jwt: JWT) = + let t = now().toTime.toUnix + if jwt.claims.hasKey("nbf") and jwt.claims["nbf"].getInt > t: + raiseJwtError("Token cannot be used yet.") + if jwt.claims.hasKey("exp") and jwt.claims["exp"].getInt < t: + raiseJwtError("Token has expired.") + +proc verifyAlgorithm*(jwt: JWT) = + let alg = jwt.header["alg"].getStr + if alg != "RS256": + raiseJwtError("Algorithm not supported: " & alg) + +proc verifyScope*(jwt: JWT; reqScope: seq[string] = @[]) = + if reqScope.len == 0: + return + var scp = newSeq[string](0) + if jwt.claims.hasKey("scp"): + scp = jwt.claims["scp"].getStr.split(peg"\s") + elif jwt.claims.hasKey("scope"): + scp = jwt.claims["scope"].getStr.split(peg"\s") + if scp.len == 0: + raiseJwtError("No scp or scope claim found in token") + var authorized = "" + for s in scp: + for r in reqScope: + if r == s: + authorized = s + break + if authorized == "": + raise newException(EUnauthorizedError, "Unauthorized") + +proc verifySignature*(jwt: JWT; x5c: string) = + let sig = jwt.signature + let payload = jwt.payload + let cert = x5c.decode + var pkeyctx: EVP_PKEY_CTX + var mdctx: EVP_MD_CTX + let alg = EVP_sha256(); + var x509: PX509 + var pubkey = EVP_PKEY_new() + try: + ### Validate Signature (Only RS256 supported) + x509 = d2i_X509(cert) + if x509.isNil: + raiseX509Error("Invalid X509 certificate") + + pubkey = X509_get_pubkey(x509) + if pubkey.isNil: + raiseX509Error("An error occurred while retrieving the public key") + 
+ mdctx = EVP_MD_CTX_create() + if mdctx.isNil: + raiseX509Error("Unable to initialize MD CTX") + + pkeyctx = EVP_PKEY_CTX_new(pubkey, nil) + if pkeyctx.isNil: + raiseX509Error("Unable to initialize PKEY CTX") + + if EVP_DigestVerifyInit(mdctx, addr pkeyctx, alg, nil, pubkey) != 1: + raiseJwtError("Unable to initialize digest verification") + + if EVP_DigestVerifyUpdate(mdctx, addr payload[0], payload.len.cuint) != 1: + raiseJwtError("Unable to update digest verification") + + if EVP_DigestVerifyFinal(mdctx, addr sig[0], sig.len.cuint) != 1: + raiseJwtError("Verification failed") + except CatchableError: + let err = getCurrentException() + if not mdctx.isNil: + EVP_MD_CTX_destroy(mdctx) + if not pkeyctx.isNil: + EVP_PKEY_CTX_free(pkeyctx) + if not pubkey.isNil: + EVP_PKEY_free(pubkey) + if not x509.isNil: + X509_free(x509) + raise err + + +when isMainModule: + + let token = "token.txt".readFile + let x5c = "x5c.cert".readFile + let jwt = token.newJwt + + echo token + echo "---" + echo x5c + jwt.verifySignature(x5c)
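Read together with the rewritten auth template in server.nim below, the call order the new module expects is roughly the following sketch (token.txt and x5c.cert are the same throw-away files used by the isMainModule block above; the scope name is hypothetical):

    import jwt   # i.e. src/litestorepkg/lib/jwt.nim added by this commit

    let parsed = "token.txt".readFile.newJwt      # split + base64url-decode header/claims/signature
    parsed.verifyAlgorithm()                      # only RS256 is accepted
    parsed.verifySignature("x5c.cert".readFile)   # base64 certificate, e.g. a JWKS x5c entry
    parsed.verifyTimeClaims()                     # nbf / exp against the current time
    parsed.verifyScope(@["admin:server"])         # raises EUnauthorizedError if nothing matches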
M src/litestorepkg/lib/queries.nim

@@ -1,4 +1,4 @@

-import x_db_sqlite +import db_connector/db_sqlite # SQL QUERIES
M src/litestorepkg/lib/server.nim

@@ -1,22 +1,19 @@

import asynchttpserver, asyncdispatch, - times, strutils, pegs, logger, cgi, - os, json, tables, strtabs, - base64, asyncnet, - jwt, sequtils -import - types, - utils, +import + types, + utils, + jwt, api_v1, api_v2, api_v3,

@@ -27,17 +24,7 @@ api_v7,

api_v8 export - api_v5 - - -proc decodeUrlSafeAsString*(s: string): string = - var s = s.replace('-', '+').replace('_', '/') - while s.len mod 4 > 0: - s &= "=" - base64.decode(s) - -proc decodeUrlSafe*(s: string): seq[byte] = - cast[seq[byte]](decodeUrlSafeAsString(s)) + api_v8 proc getReqInfo(req: LSRequest): string = var url = req.url.path

@@ -51,37 +38,44 @@ proc handleCtrlC() {.noconv.} =

echo "" LOG.info("Exiting...") quit() - -template auth(uri: string, jwt: JWT, LS: LiteStore): void = + +template auth(uri: string, LS: LiteStore, jwt: JWT): void = let cfg = access[uri] if cfg.hasKey(reqMethod): LOG.debug("Authenticating: " & reqMethod & " " & uri) - if not req.headers.hasKey("Authorization"): + if not req.headers.hasKey("Authorization"): return resError(Http401, "Unauthorized - No token") let token = req.headers["Authorization"].replace(peg"^ 'Bearer '", "") # Validate token try: - jwt = token.toJwt() - let parts = token.split(".") - var sig = LS.auth["signature"].getStr - discard verifySignature(parts[0] & "." & parts[1], decodeUrlSafe(parts[2]), sig, RS256) - verifyTimeClaims(jwt) - let scopes = cfg[reqMethod] - # Validate scope - var authorized = "" - let reqScopes = ($jwt.claims["scope"].node.str).split(peg"\s+") - LOG.debug("Resource scopes: " & $scopes) - LOG.debug("Request scopes: " & $reqScopes) - for scope in scopes: - for reqScope in reqScopes: - if reqScope == scope.getStr: - authorized = scope.getStr - break - if authorized == "": - return resError(Http403, "Forbidden - You are not permitted to access this resource") - LOG.debug("Authorization successful: " & authorized) - except: - echo getCurrentExceptionMsg() + jwt = token.newJwt + var x5c: string + if LS.config.hasKey("jwks_uri"): + LOG.debug("Selecting x5c...") + x5c = LS.getX5c(jwt) + else: + LOG.debug("Using stored signature...") + x5c = LS.config["signature"].getStr + LOG.debug("Verifying algorithm...") + jwt.verifyAlgorithm() + LOG.debug("Verifying signature...") + try: + jwt.verifySignature(x5c) + except EX509Error: + LOG.warn getCurrentExceptionMsg() + writeStackTrace() + LOG.debug("Verifying claims...") + jwt.verifyTimeClaims() + let scope = cfg[reqMethod].mapIt(it.getStr) + LOG.debug("Verifying scope...") + jwt.verifyScope(scope) + LOG.debug("Authorization successful") + except EUnauthorizedError: + LOG.warn getCurrentExceptionMsg() + writeStackTrace() + return resError(Http403, "Forbidden - You are not permitted to access this resource") + except CatchableError: + LOG.warn getCurrentExceptionMsg() writeStackTrace() return resError(Http401, "Unauthorized - Invalid token")

@@ -100,17 +94,21 @@ var currentPaths = ""

for p in ancestors: currentPath &= "/" & p currentPaths = currentPath & "/*" - if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("allowed"): + if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][ + currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][ + meth].hasKey("allowed"): let allowed = LS.config["resources"][currentPaths][meth]["allowed"] if (allowed == %false): return false; - if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("allowed"): + if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][ + reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("allowed"): let allowed = LS.config["resources"][reqUri][meth]["allowed"] if (allowed == %false): return false return true -proc processApiUrl(req: LSRequest, LS: LiteStore, info: ResourceInfo): LSResponse = +proc processApiUrl(req: LSRequest, LS: LiteStore, + info: ResourceInfo): LSResponse = var reqUri = "/" & info.resource & "/" & info.id if reqUri[^1] == '/': reqUri.removeSuffix({'/'})

@@ -125,12 +123,12 @@ let access = LS.auth["access"]

while true: # Match exact url if access.hasKey(uri): - auth(uri, jwt, LS) + auth(uri, LS, jwt) break # Match exact url adding /* (e.g. /docs would match also /docs/* in auth.json) elif uri[^1] != '*' and uri[^1] != '/': if access.hasKey(uri & "/*"): - auth(uri & "/*", jwt, LS) + auth(uri & "/*", LS, jwt) break var parts = uri.split("/") if parts[^1] == "*":

@@ -143,7 +141,7 @@ else:

# If at the end of the URL, check generic URL uri = "/*" if access.hasKey(uri): - auth(uri, jwt, LS) + auth(uri, LS, jwt) break if info.version == "v8": if info.resource.match(peg"^assets / docs / info / tags / indexes / stores$"):

@@ -235,7 +233,8 @@ return resError(Http400, "Bad Request - Not serving any directory." % info.version)

else: return resError(Http404, "Resource Not Found: $1" % info.resource) else: - if info.version == "v1" or info.version == "v2" or info.version == "v3" or info.version == "v4" or info.version == "v5": + if info.version == "v1" or info.version == "v2" or info.version == "v3" or + info.version == "v4" or info.version == "v5": return resError(Http400, "Bad Request - Invalid API version: $1" % info.version) else: if info.resource.decodeURL.strip == "":

@@ -243,7 +242,7 @@ return resError(Http400, "Bad Request - No resource specified." % info.resource)

else: return resError(Http404, "Resource Not Found: $1" % info.resource) -proc process*(req: LSRequest, LS: LiteStore): LSResponse {.gcsafe.}= +proc process*(req: LSRequest, LS: LiteStore): LSResponse {.gcsafe.} = var matches = @["", "", ""] template route(req: LSRequest, peg: Peg, op: untyped): untyped = if req.url.path.find(peg, matches) != -1:

@@ -273,14 +272,17 @@ raise newException(EInvalidRequest, req.getReqInfo())

except EInvalidRequest: let e = (ref EInvalidRequest)(getCurrentException()) let trace = e.getStackTrace() - return resError(Http404, "Resource Not Found: $1" % getCurrentExceptionMsg().split(" ")[2], trace) - except: + return resError(Http404, "Resource Not Found: $1" % getCurrentExceptionMsg( + ).split(" ")[2], trace) + except CatchableError: let e = getCurrentException() let trace = e.getStackTrace() - return resError(Http500, "Internal Server Error: $1" % getCurrentExceptionMsg(), trace) + return resError(Http500, "Internal Server Error: $1" % + getCurrentExceptionMsg(), trace) -proc process*(req: LSRequest, LSDICT: OrderedTable[string, LiteStore]): LSResponse {.gcsafe.}= +proc process*(req: LSRequest, LSDICT: OrderedTable[string, + LiteStore]): LSResponse {.gcsafe.} = var matches = @["", ""] if req.url.path.find(PEG_STORE_URL, matches) != -1: let id = matches[0]

@@ -335,7 +337,8 @@ LOG.info(getReqInfo(req).replace("$", "$$"))

let res = req.process(LSDICT) var newReq = newRequest(req, client) await newReq.respond(res.code, res.content, res.headers) - echo(LS.appname & " v" & LS.appversion & " started on " & LS.address & ":" & $LS.port & ".") + echo(LS.appname & " v" & LS.appversion & " started on " & LS.address & ":" & + $LS.port & ".") printCfg("master") let storeIds = toSeq(LSDICT.keys) if (storeIds.len > 1):
M src/litestorepkg/lib/types.nim

@@ -1,21 +1,21 @@

-import - x_db_sqlite, - asynchttpserver, +import + db_connector/db_sqlite, + std/[asynchttpserver, asyncnet, uri, - pegs, + pegs, json, strtabs, + os, strutils, sequtils, nativesockets, - jwt, - tables + tables] import config type - EDatastoreExists* = object of CatchableError + EDatastoreExists* = object of CatchableError EDatastoreDoesNotExist* = object of CatchableError EDatastoreUnavailable* = object of CatchableError EInvalidTag* = object of CatchableError

@@ -23,6 +23,9 @@ EDirectoryNotFound* = object of CatchableError

EFileNotFound* = object of CatchableError EFileExists* = object of CatchableError EInvalidRequest* = object of CatchableError + EJwtValidationError* = object of CatchableError + EUnauthorizedError* = object of CatchableError + EX509Error* = object of CatchableError ConfigFiles* = object auth*: string config*: string

@@ -41,8 +44,8 @@ tables*: seq[string]

jsonFilter*: string jsonSelect*: seq[tuple[path: string, alias: string]] select*: seq[string] - single*:bool - system*:bool + single*: bool + system*: bool limit*: int offset*: int orderby*: string

@@ -59,6 +62,12 @@ tag*: string

startswith*: bool endswith*: bool negated*: bool + JWT* = object + header*: JsonNode + claims*: JsonNode + signature*: string + payload*: string + token*: string Operation* = enum opRun, opImport,

@@ -83,6 +92,7 @@ port*: int

operation*: Operation config*: JsonNode configFile*: string + jwks*: JsonNode cliSettings*: JsonNode directory*: string manageSystemData*: bool

@@ -96,15 +106,15 @@ middleware*: StringTableRef

appversion*: string auth*: JsonNode authFile*: string - favicon*:string - loglevel*:string + favicon*: string + loglevel*: string LSRequest* = object reqMethod*: HttpMethod headers*: HttpHeaders protocol*: tuple[orig: string, major, minor: int] url*: Uri jwt*: JWT - hostname*: string + hostname*: string body*: string LSResponse* = object code*: HttpCode

@@ -116,7 +126,10 @@ id: string,

version: string ] -proc initLiteStore*(): LiteStore = +proc jwksPath*(LS: LiteStore): string = + return "$#/$#_jwks.json" % [getCurrentDir(), LS.file.splitFile.name] + +proc initLiteStore*(): LiteStore = result.config = newJNull() result.configFile = "" result.cliSettings = newJObject()
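For reference, the new jwksPath helper derives the JWKS cache file from the store file name and the current working directory; a worked example with the default data.db store (paths are illustrative):

    import std/[os, strutils]

    # Mirrors jwksPath: "<current dir>/<store name without extension>_jwks.json"
    let storeFile = "data.db"
    echo "$#/$#_jwks.json" % [getCurrentDir(), storeFile.splitFile.name]
    # e.g. /opt/litestore/data_jwks.json when LiteStore is started from /opt/litestore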

@@ -147,7 +160,7 @@ of "DELETE":

return HttpDelete else: return HttpGet - + proc `%`*(protocol: tuple[orig: string, major: int, minor: int]): JsonNode = result = newJObject()

@@ -201,7 +214,7 @@ result.headers = newHttpHeaders()

for k, v in req["headers"].pairs: result.headers[k] = v.getStr let protocol = req["protocol"].getStr - let parts = protocol.split("/") + let parts = protocol.split("/") let version = parts[1].split(".") result.protocol = (orig: parts[0], major: version[0].parseInt, minor: version[1].parseInt) result.url = initUri()

@@ -232,8 +245,8 @@

var PEG_TAG* {.threadvar.}: Peg PEG_USER_TAG* {.threadvar.}: Peg - PEG_INDEX* {.threadvar}: Peg - PEG_STORE* {.threadvar}: Peg + PEG_INDEX* {.threadvar.}: Peg + PEG_STORE* {.threadvar.}: Peg PEG_JSON_FIELD* {.threadvar.}: Peg PEG_DEFAULT_URL* {.threadvar.}: Peg PEG_STORE_URL* {.threadvar.}: Peg

@@ -252,7 +265,7 @@ # Initialize LiteStore

var LS* {.threadvar.}: LiteStore var LSDICT* {.threadvar.}: OrderedTable[string, LiteStore] var TAB_HEADERS* {.threadvar.}: array[0..2, (string, string)] -LSDICT = initOrderedTable[string, LiteStore]() +LSDICT = initOrderedTable[string, LiteStore]() LS.appversion = pkgVersion LS.appname = appname

@@ -264,9 +277,14 @@ "Server": LS.appname & "/" & LS.appversion

} proc newQueryOptions*(system = false): QueryOptions = - var select = @["documents.id AS id", "documents.data AS data", "content_type", "binary", "searchable", "created", "modified"] + var select = @["documents.id AS id", "documents.data AS data", "content_type", + "binary", "searchable", "created", "modified"] if system: - select = @["system_documents.id AS id", "system_documents.data AS data", "content_type", "binary", "created", "modified"] + select = @["system_documents.id AS id", "system_documents.data AS data", + "content_type", "binary", "created", "modified"] return QueryOptions(select: select, - single: false, limit: 0, offset: 0, orderby: "", tags: "", search: "", folder: "", like: "", system: system, - createdAfter: "", createdBefore: "", modifiedAfter: "", modifiedBefore: "", jsonFilter: "", jsonSelect: newSeq[tuple[path: string, alias: string]](), tables: newSeq[string]()) + single: false, limit: 0, offset: 0, orderby: "", tags: "", search: "", + folder: "", like: "", system: system, + createdAfter: "", createdBefore: "", modifiedAfter: "", modifiedBefore: "", + jsonFilter: "", jsonSelect: newSeq[tuple[path: string, alias: string]](), + tables: newSeq[string]())
M src/litestorepkg/lib/utils.nim

@@ -1,6 +1,6 @@

import - x_sqlite3, - x_db_sqlite, + db_connector/sqlite3, + db_connector/db_sqlite, json, strutils, pegs,

@@ -248,7 +248,7 @@ var tags = peg"""'<' [^>]+ '>'"""

var special_chars = peg"""\*\*+ / \_\_+ / \-\-+ / \#\#+ / \+\++ / \~\~+ / \`\`+ """ try: json = s.parseJson() - except: + except CatchableError: discard if not json.isNil: if json.kind == JObject:

@@ -324,7 +324,7 @@ var e = getCurrentException()

LOG.warn(e.msg) LOG.debug(getStackTrace(e)) -proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse = +proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse {.gcsafe.} = if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch: var ct = "" let body = req.body.strip

@@ -336,7 +336,7 @@ case ct:

of "application/json": try: discard body.parseJson() - except: + except CatchableError: return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) else: discard
D src/litestorepkg/lib/x_db_sqlite.nim

@@ -1,638 +0,0 @@

-# -# -# Nim's Runtime Library -# (c) Copyright 2015 Andreas Rumpf -# -# See the file "copying.txt", included in this -# distribution, for details about the copyright. -# - -## A higher level `SQLite`:idx: database wrapper. This interface -## is implemented for other databases too. -## -## Basic usage -## =========== -## -## The basic flow of using this module is: -## -## 1. Open database connection -## 2. Execute SQL query -## 3. Close database connection -## -## Parameter substitution -## ---------------------- -## -## All ``db_*`` modules support the same form of parameter substitution. -## That is, using the ``?`` (question mark) to signify the place where a -## value should be placed. For example: -## -## .. code-block:: Nim -## -## sql"INSERT INTO my_table (colA, colB, colC) VALUES (?, ?, ?)" -## -## Opening a connection to a database -## ---------------------------------- -## -## .. code-block:: Nim -## -## import db_sqlite -## -## # user, password, database name can be empty. -## # These params are not used on db_sqlite module. -## let db = open("mytest.db", "", "", "") -## db.close() -## -## Creating a table -## ---------------- -## -## .. code-block:: Nim -## -## db.exec(sql"DROP TABLE IF EXISTS my_table") -## db.exec(sql"""CREATE TABLE my_table ( -## id INTEGER, -## name VARCHAR(50) NOT NULL -## )""") -## -## Inserting data -## -------------- -## -## .. code-block:: Nim -## -## db.exec(sql"INSERT INTO my_table (id, name) VALUES (0, ?)", -## "Jack") -## -## Larger example -## -------------- -## -## .. code-block:: nim -## -## import db_sqlite, math -## -## let db = open("mytest.db", "", "", "") -## -## db.exec(sql"DROP TABLE IF EXISTS my_table") -## db.exec(sql"""CREATE TABLE my_table ( -## id INTEGER PRIMARY KEY, -## name VARCHAR(50) NOT NULL, -## i INT(11), -## f DECIMAL(18, 10) -## )""") -## -## db.exec(sql"BEGIN") -## for i in 1..1000: -## db.exec(sql"INSERT INTO my_table (name, i, f) VALUES (?, ?, ?)", -## "Item#" & $i, i, sqrt(i.float)) -## db.exec(sql"COMMIT") -## -## for x in db.fastRows(sql"SELECT * FROM my_table"): -## echo x -## -## let id = db.tryInsertId(sql"""INSERT INTO my_table (name, i, f) -## VALUES (?, ?, ?)""", -## "Item#1001", 1001, sqrt(1001.0)) -## echo "Inserted item: ", db.getValue(sql"SELECT name FROM my_table WHERE id=?", id) -## -## db.close() -## -## See also -## ======== -## -## * `db_odbc module <db_odbc.html>`_ for ODBC database wrapper -## * `db_mysql module <db_mysql.html>`_ for MySQL database wrapper -## * `db_postgres module <db_postgres.html>`_ for PostgreSQL database wrapper - -{.deadCodeElim: on.} # dce option deprecated - -import x_sqlite3 as sqlite3 # h3rald - -import db_common -export db_common - -type - DbConn* = PSqlite3 ## Encapsulates a database connection. - Row* = seq[string] ## A row of a dataset. `NULL` database values will be - ## converted to an empty string. - InstantRow* = PStmt ## A handle that can be used to get a row's column - ## text on demand. - -proc dbError*(db: DbConn) {.noreturn.} = - ## Raises a `DbError` exception. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## if not db.tryExec(sql"SELECT * FROM not_exist_table"): - ## dbError(db) - ## db.close() - var e: ref DbError - new(e) - e.msg = $sqlite3.errmsg(db) - raise e - -proc dbQuote*(s: string): string = - ## Escapes the `'` (single quote) char to `''`. - ## Because single quote is used for defining `VARCHAR` in SQL. 
- runnableExamples: - doAssert dbQuote("'") == "''''" - doAssert dbQuote("A Foobar's pen.") == "'A Foobar''s pen.'" - - result = "'" - for c in items(s): - if c == '\'': add(result, "''") - else: add(result, c) - add(result, '\'') - -proc dbFormat(formatstr: SqlQuery, args: varargs[string]): string = - result = "" - var a = 0 - for c in items(string(formatstr)): - if c == '?': - add(result, dbQuote(args[a])) - inc(a) - else: - add(result, c) - -proc tryExec*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): bool {. - tags: [ReadDbEffect, WriteDbEffect].} = - ## Tries to execute the query and returns `true` if successful, `false` otherwise. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## if not db.tryExec(sql"SELECT * FROM my_table"): - ## dbError(db) - ## db.close() - assert(not db.isNil, "Database not connected.") - var q = dbFormat(query, args) - var stmt: sqlite3.PStmt - if prepare_v2(db, q.cstring, q.cstring.len.cint, stmt, nil) == SQLITE_OK: - let x = step(stmt) - if x in {SQLITE_DONE, SQLITE_ROW}: - result = finalize(stmt) == SQLITE_OK - -proc exec*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]) {. - tags: [ReadDbEffect, WriteDbEffect].} = - ## Executes the query and raises a `DbError` exception if not successful. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## try: - ## db.exec(sql"INSERT INTO my_table (id, name) VALUES (?, ?)", - ## 1, "item#1") - ## except: - ## stderr.writeLine(getCurrentExceptionMsg()) - ## finally: - ## db.close() - if not tryExec(db, query, args): dbError(db) - -proc newRow(L: int): Row = - newSeq(result, L) - for i in 0..L-1: result[i] = "" - -proc setupQuery(db: DbConn, query: SqlQuery, - args: varargs[string]): PStmt = - assert(not db.isNil, "Database not connected.") - var q = dbFormat(query, args) - if prepare_v2(db, q.cstring, q.len.cint, result, nil) != SQLITE_OK: dbError(db) - -proc setRow(stmt: PStmt, r: var Row, cols: cint) = - for col in 0'i32..cols-1: - setLen(r[col], column_bytes(stmt, col)) # set capacity - setLen(r[col], 0) - let x = column_text(stmt, col) - if not isNil(x): add(r[col], x) - -iterator fastRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = - ## Executes the query and iterates over the result dataset. - ## - ## This is very fast, but potentially dangerous. Use this iterator only - ## if you require **ALL** the rows. - ## - ## **Note:** Breaking the `fastRows()` iterator during a loop will cause the - ## next database query to raise a `DbError` exception ``unable to close due - ## to ...``. - ## - ## **Examples:** - ## - ## .. 
code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## for row in db.fastRows(sql"SELECT id, name FROM my_table"): - ## echo row - ## - ## # Output: - ## # @["1", "item#1"] - ## # @["2", "item#2"] - ## - ## db.close() - var stmt = setupQuery(db, query, args) - var L = (column_count(stmt)) - var result = newRow(L) - try: - while step(stmt) == SQLITE_ROW: - setRow(stmt, result, L) - yield result - finally: - if finalize(stmt) != SQLITE_OK: dbError(db) - -iterator instantRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): InstantRow - {.tags: [ReadDbEffect].} = - ## Similar to `fastRows iterator <#fastRows.i,DbConn,SqlQuery,varargs[string,]>`_ - ## but returns a handle that can be used to get column text - ## on demand using `[]`. Returned handle is valid only within the iterator body. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## for row in db.instantRows(sql"SELECT * FROM my_table"): - ## echo "id:" & row[0] - ## echo "name:" & row[1] - ## echo "length:" & $len(row) - ## - ## # Output: - ## # id:1 - ## # name:item#1 - ## # length:2 - ## # id:2 - ## # name:item#2 - ## # length:2 - ## - ## db.close() - var stmt = setupQuery(db, query, args) - try: - while step(stmt) == SQLITE_ROW: - yield stmt - finally: - if finalize(stmt) != SQLITE_OK: dbError(db) - -proc toTypeKind(t: var DbType; x: int32) = - case x - of SQLITE_INTEGER: - t.kind = dbInt - t.size = 8 - of SQLITE_FLOAT: - t.kind = dbFloat - t.size = 8 - of SQLITE_BLOB: t.kind = dbBlob - of SQLITE_NULL: t.kind = dbNull - of SQLITE_TEXT: t.kind = dbVarchar - else: t.kind = dbUnknown - -proc setColumns(columns: var DbColumns; x: PStmt) = - let L = column_count(x) - setLen(columns, L) - for i in 0'i32 ..< L: - columns[i].name = $column_name(x, i) - columns[i].typ.name = $column_decltype(x, i) - toTypeKind(columns[i].typ, column_type(x, i)) - columns[i].tableName = $column_table_name(x, i) - -iterator instantRows*(db: DbConn; columns: var DbColumns; query: SqlQuery, - args: varargs[string, `$`]): InstantRow - {.tags: [ReadDbEffect].} = - ## Similar to `instantRows iterator <#instantRows.i,DbConn,SqlQuery,varargs[string,]>`_, - ## but sets information about columns to `columns`. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## var columns: DbColumns - ## for row in db.instantRows(columns, sql"SELECT * FROM my_table"): - ## discard - ## echo columns[0] - ## - ## # Output: - ## # (name: "id", tableName: "my_table", typ: (kind: dbNull, - ## # notNull: false, name: "INTEGER", size: 0, maxReprLen: 0, precision: 0, - ## # scale: 0, min: 0, max: 0, validValues: @[]), primaryKey: false, - ## # foreignKey: false) - ## - ## db.close() - var stmt = setupQuery(db, query, args) - setColumns(columns, stmt) - try: - while step(stmt) == SQLITE_ROW: - yield stmt - finally: - if finalize(stmt) != SQLITE_OK: dbError(db) - -proc `[]`*(row: InstantRow, col: int32): string {.inline.} = - ## Returns text for given column of the row. 
- ## - ## See also: - ## * `instantRows iterator <#instantRows.i,DbConn,SqlQuery,varargs[string,]>`_ - ## example code - $column_text(row, col) - -proc unsafeColumnAt*(row: InstantRow, index: int32): cstring {.inline.} = - ## Returns cstring for given column of the row. - ## - ## See also: - ## * `instantRows iterator <#instantRows.i,DbConn,SqlQuery,varargs[string,]>`_ - ## example code - column_text(row, index) - -proc len*(row: InstantRow): int32 {.inline.} = - ## Returns number of columns in a row. - ## - ## See also: - ## * `instantRows iterator <#instantRows.i,DbConn,SqlQuery,varargs[string,]>`_ - ## example code - column_count(row) - -proc getRow*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = - ## Retrieves a single row. If the query doesn't return any rows, this proc - ## will return a `Row` with empty strings for each column. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## doAssert db.getRow(sql"SELECT id, name FROM my_table" - ## ) == Row(@["1", "item#1"]) - ## doAssert db.getRow(sql"SELECT id, name FROM my_table WHERE id = ?", - ## 2) == Row(@["2", "item#2"]) - ## - ## # Returns empty. - ## doAssert db.getRow(sql"INSERT INTO my_table (id, name) VALUES (?, ?)", - ## 3, "item#3") == @[] - ## doAssert db.getRow(sql"DELETE FROM my_table WHERE id = ?", 3) == @[] - ## doAssert db.getRow(sql"UPDATE my_table SET name = 'ITEM#1' WHERE id = ?", - ## 1) == @[] - ## db.close() - var stmt = setupQuery(db, query, args) - var L = (column_count(stmt)) - result = newRow(L) - if step(stmt) == SQLITE_ROW: - setRow(stmt, result, L) - if finalize(stmt) != SQLITE_OK: dbError(db) - -proc getAllRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): seq[Row] {.tags: [ReadDbEffect].} = - ## Executes the query and returns the whole result dataset. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## doAssert db.getAllRows(sql"SELECT id, name FROM my_table") == @[Row(@["1", "item#1"]), Row(@["2", "item#2"])] - ## db.close() - result = @[] - for r in fastRows(db, query, args): - result.add(r) - -iterator rows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = - ## Similar to `fastRows iterator <#fastRows.i,DbConn,SqlQuery,varargs[string,]>`_, - ## but slower and safe. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## for row in db.rows(sql"SELECT id, name FROM my_table"): - ## echo row - ## - ## ## Output: - ## ## @["1", "item#1"] - ## ## @["2", "item#2"] - ## - ## db.close() - for r in fastRows(db, query, args): yield r - -proc getValue*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): string {.tags: [ReadDbEffect].} = - ## Executes the query and returns the first column of the first row of the - ## result dataset. Returns `""` if the dataset contains no rows or the database - ## value is `NULL`. - ## - ## **Examples:** - ## - ## .. 
code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## doAssert db.getValue(sql"SELECT name FROM my_table WHERE id = ?", - ## 2) == "item#2" - ## doAssert db.getValue(sql"SELECT id, name FROM my_table") == "1" - ## doAssert db.getValue(sql"SELECT name, id FROM my_table") == "item#1" - ## - ## db.close() - var stmt = setupQuery(db, query, args) - if step(stmt) == SQLITE_ROW: - let cb = column_bytes(stmt, 0) - if cb == 0: - result = "" - else: - result = newStringOfCap(cb) - add(result, column_text(stmt, 0)) - else: - result = "" - if finalize(stmt) != SQLITE_OK: dbError(db) - -proc tryInsertID*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): int64 - {.tags: [WriteDbEffect], raises: [].} = - ## Executes the query (typically "INSERT") and returns the - ## generated ID for the row or -1 in case of an error. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## db.exec(sql"CREATE TABLE my_table (id INTEGER, name VARCHAR(50) NOT NULL)") - ## - ## doAssert db.tryInsertID(sql"INSERT INTO not_exist_table (id, name) VALUES (?, ?)", - ## 1, "item#1") == -1 - ## db.close() - assert(not db.isNil, "Database not connected.") - var q = dbFormat(query, args) - var stmt: sqlite3.PStmt - result = -1 - if prepare_v2(db, q.cstring, q.cstring.len.cint, stmt, nil) == SQLITE_OK: - if step(stmt) == SQLITE_DONE: - result = last_insert_rowid(db) - if finalize(stmt) != SQLITE_OK: - result = -1 - -proc insertID*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): int64 {.tags: [WriteDbEffect].} = - ## Executes the query (typically "INSERT") and returns the - ## generated ID for the row. - ## - ## Raises a `DbError` exception when failed to insert row. - ## For Postgre this adds ``RETURNING id`` to the query, so it only works - ## if your primary key is named ``id``. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## db.exec(sql"CREATE TABLE my_table (id INTEGER, name VARCHAR(50) NOT NULL)") - ## - ## for i in 0..2: - ## let id = db.insertID(sql"INSERT INTO my_table (id, name) VALUES (?, ?)", i, "item#" & $i) - ## echo "LoopIndex = ", i, ", InsertID = ", id - ## - ## # Output: - ## # LoopIndex = 0, InsertID = 1 - ## # LoopIndex = 1, InsertID = 2 - ## # LoopIndex = 2, InsertID = 3 - ## - ## db.close() - result = tryInsertID(db, query, args) - if result < 0: dbError(db) - -proc execAffectedRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): int64 {. - tags: [ReadDbEffect, WriteDbEffect].} = - ## Executes the query (typically "UPDATE") and returns the - ## number of affected rows. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## doAssert db.execAffectedRows(sql"UPDATE my_table SET name = 'TEST'") == 2 - ## - ## db.close() - exec(db, query, args) - result = changes(db) - -proc close*(db: DbConn) {.tags: [DbEffect].} = - ## Closes the database connection. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## db.close() - if sqlite3.close(db) != SQLITE_OK: dbError(db) - -proc open*(connection, user, password, database: string): DbConn {. - tags: [DbEffect].} = - ## Opens a database connection. 
Raises a `DbError` exception if the connection - ## could not be established. - ## - ## **Note:** Only the ``connection`` parameter is used for ``sqlite``. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## try: - ## let db = open("mytest.db", "", "", "") - ## ## do something... - ## ## db.getAllRows(sql"SELECT * FROM my_table") - ## db.close() - ## except: - ## stderr.writeLine(getCurrentExceptionMsg()) - var db: DbConn - if sqlite3.open(connection, db) == SQLITE_OK: - result = db - else: - dbError(db) - -proc setEncoding*(connection: DbConn, encoding: string): bool {. - tags: [DbEffect].} = - ## Sets the encoding of a database connection, returns `true` for - ## success, `false` for failure. - ## - ## **Note:** The encoding cannot be changed once it's been set. - ## According to SQLite3 documentation, any attempt to change - ## the encoding after the database is created will be silently - ## ignored. - exec(connection, sql"PRAGMA encoding = ?", [encoding]) - result = connection.getValue(sql"PRAGMA encoding") == encoding - -when not defined(testing) and isMainModule: - var db = open("db.sql", "", "", "") - exec(db, sql"create table tbl1(one varchar(10), two smallint)", []) - exec(db, sql"insert into tbl1 values('hello!',10)", []) - exec(db, sql"insert into tbl1 values('goodbye', 20)", []) - #db.query("create table tbl1(one varchar(10), two smallint)") - #db.query("insert into tbl1 values('hello!',10)") - #db.query("insert into tbl1 values('goodbye', 20)") - for r in db.rows(sql"select * from tbl1", []): - echo(r[0], r[1]) - for r in db.instantRows(sql"select * from tbl1", []): - echo(r[0], r[1]) - - x_db_sqlite.close(db) # h3rald
D src/litestorepkg/lib/x_sqlite3.nim

@@ -1,379 +0,0 @@

-# -# -# Nim's Runtime Library -# (c) Copyright 2012 Andreas Rumpf -# -# See the file "copying.txt", included in this -# distribution, for details about the copyright. -# - -{.deadCodeElim: on.} # dce option deprecated - -when defined(nimHasStyleChecks): - {.push styleChecks: off.} - -# START - Removed by h3rald -#when defined(windows): -# when defined(nimOldDlls): -# const Lib = "sqlite3.dll" -# elif defined(cpu64): -# const Lib = "sqlite3_64.dll" -# else: -# const Lib = "sqlite3_32.dll" -#elif defined(macosx): -# const -# Lib = "libsqlite3(|.0).dylib" -#else: -# const -# Lib = "libsqlite3.so(|.0)" -# -#when defined(staticSqlite): -{.pragma: mylib.} -# {.compile: "sqlite3.c".} -#else: -# {.pragma: mylib, dynlib: Lib.} -# END - Removed by h3rald - -const - SQLITE_INTEGER* = 1 - SQLITE_FLOAT* = 2 - SQLITE_BLOB* = 4 - SQLITE_NULL* = 5 - SQLITE_TEXT* = 3 - SQLITE_UTF8* = 1 - SQLITE_UTF16LE* = 2 - SQLITE_UTF16BE* = 3 # Use native byte order - SQLITE_UTF16* = 4 # sqlite3_create_function only - SQLITE_ANY* = 5 #sqlite_exec return values - SQLITE_OK* = 0 - SQLITE_ERROR* = 1 # SQL error or missing database - SQLITE_INTERNAL* = 2 # An internal logic error in SQLite - SQLITE_PERM* = 3 # Access permission denied - SQLITE_ABORT* = 4 # Callback routine requested an abort - SQLITE_BUSY* = 5 # The database file is locked - SQLITE_LOCKED* = 6 # A table in the database is locked - SQLITE_NOMEM* = 7 # A malloc() failed - SQLITE_READONLY* = 8 # Attempt to write a readonly database - SQLITE_INTERRUPT* = 9 # Operation terminated by sqlite3_interrupt() - SQLITE_IOERR* = 10 # Some kind of disk I/O error occurred - SQLITE_CORRUPT* = 11 # The database disk image is malformed - SQLITE_NOTFOUND* = 12 # (Internal Only) Table or record not found - SQLITE_FULL* = 13 # Insertion failed because database is full - SQLITE_CANTOPEN* = 14 # Unable to open the database file - SQLITE_PROTOCOL* = 15 # Database lock protocol error - SQLITE_EMPTY* = 16 # Database is empty - SQLITE_SCHEMA* = 17 # The database schema changed - SQLITE_TOOBIG* = 18 # Too much data for one row of a table - SQLITE_CONSTRAINT* = 19 # Abort due to constraint violation - SQLITE_MISMATCH* = 20 # Data type mismatch - SQLITE_MISUSE* = 21 # Library used incorrectly - SQLITE_NOLFS* = 22 # Uses OS features not supported on host - SQLITE_AUTH* = 23 # Authorization denied - SQLITE_FORMAT* = 24 # Auxiliary database format error - SQLITE_RANGE* = 25 # 2nd parameter to sqlite3_bind out of range - SQLITE_NOTADB* = 26 # File opened that is not a database file - SQLITE_ROW* = 100 # sqlite3_step() has another row ready - SQLITE_DONE* = 101 # sqlite3_step() has finished executing - SQLITE_COPY* = 0 - SQLITE_CREATE_INDEX* = 1 - SQLITE_CREATE_TABLE* = 2 - SQLITE_CREATE_TEMP_INDEX* = 3 - SQLITE_CREATE_TEMP_TABLE* = 4 - SQLITE_CREATE_TEMP_TRIGGER* = 5 - SQLITE_CREATE_TEMP_VIEW* = 6 - SQLITE_CREATE_TRIGGER* = 7 - SQLITE_CREATE_VIEW* = 8 - SQLITE_DELETE* = 9 - SQLITE_DROP_INDEX* = 10 - SQLITE_DROP_TABLE* = 11 - SQLITE_DROP_TEMP_INDEX* = 12 - SQLITE_DROP_TEMP_TABLE* = 13 - SQLITE_DROP_TEMP_TRIGGER* = 14 - SQLITE_DROP_TEMP_VIEW* = 15 - SQLITE_DROP_TRIGGER* = 16 - SQLITE_DROP_VIEW* = 17 - SQLITE_INSERT* = 18 - SQLITE_PRAGMA* = 19 - SQLITE_READ* = 20 - SQLITE_SELECT* = 21 - SQLITE_TRANSACTION* = 22 - SQLITE_UPDATE* = 23 - SQLITE_ATTACH* = 24 - SQLITE_DETACH* = 25 - SQLITE_ALTER_TABLE* = 26 - SQLITE_REINDEX* = 27 - SQLITE_DENY* = 1 - SQLITE_IGNORE* = 2 # Original from sqlite3.h: - #define SQLITE_STATIC ((void(*)(void *))0) - #define SQLITE_TRANSIENT ((void(*)(void *))-1) - 
SQLITE_DETERMINISTIC* = 0x800 - -type - Sqlite3 {.pure, final.} = object - PSqlite3* = ptr Sqlite3 - PPSqlite3* = ptr PSqlite3 - Context{.pure, final.} = object - Pcontext* = ptr Context - TStmt{.pure, final.} = object - PStmt* = ptr TStmt - Value{.pure, final.} = object - PValue* = ptr Value - PValueArg* = array[0..127, PValue] - - Callback* = proc (para1: pointer, para2: int32, para3, - para4: cstringArray): int32{.cdecl.} - Tbind_destructor_func* = proc (para1: pointer){.cdecl, locks: 0, tags: [], gcsafe.} - Create_function_step_func* = proc (para1: Pcontext, para2: int32, - para3: PValueArg){.cdecl.} - Create_function_func_func* = proc (para1: Pcontext, para2: int32, - para3: PValueArg){.cdecl.} - Create_function_final_func* = proc (para1: Pcontext){.cdecl.} - Result_func* = proc (para1: pointer){.cdecl.} - Create_collation_func* = proc (para1: pointer, para2: int32, para3: pointer, - para4: int32, para5: pointer): int32{.cdecl.} - Collation_needed_func* = proc (para1: pointer, para2: PSqlite3, eTextRep: int32, - para4: cstring){.cdecl.} - -const - SQLITE_STATIC* = nil - SQLITE_TRANSIENT* = cast[Tbind_destructor_func](-1) - -proc close*(para1: PSqlite3): int32{.cdecl, mylib, importc: "sqlite3_close".} -proc exec*(para1: PSqlite3, sql: cstring, para3: Callback, para4: pointer, - errmsg: var cstring): int32{.cdecl, mylib, - importc: "sqlite3_exec".} -proc last_insert_rowid*(para1: PSqlite3): int64{.cdecl, mylib, - importc: "sqlite3_last_insert_rowid".} -proc changes*(para1: PSqlite3): int32{.cdecl, mylib, importc: "sqlite3_changes".} -proc total_changes*(para1: PSqlite3): int32{.cdecl, mylib, - importc: "sqlite3_total_changes".} -proc interrupt*(para1: PSqlite3){.cdecl, mylib, importc: "sqlite3_interrupt".} -proc complete*(sql: cstring): int32{.cdecl, mylib, - importc: "sqlite3_complete".} -proc complete16*(sql: pointer): int32{.cdecl, mylib, - importc: "sqlite3_complete16".} -proc busy_handler*(para1: PSqlite3, - para2: proc (para1: pointer, para2: int32): int32{.cdecl.}, - para3: pointer): int32{.cdecl, mylib, - importc: "sqlite3_busy_handler".} -proc busy_timeout*(para1: PSqlite3, ms: int32): int32{.cdecl, mylib, - importc: "sqlite3_busy_timeout".} -proc get_table*(para1: PSqlite3, sql: cstring, resultp: var cstringArray, - nrow, ncolumn: var cint, errmsg: ptr cstring): int32{.cdecl, - mylib, importc: "sqlite3_get_table".} -proc free_table*(result: cstringArray){.cdecl, mylib, - importc: "sqlite3_free_table".} - # Todo: see how translate sqlite3_mprintf, sqlite3_vmprintf, sqlite3_snprintf - # function sqlite3_mprintf(_para1:Pchar; args:array of const):Pchar;cdecl; external Sqlite3Lib name 'sqlite3_mprintf'; -proc mprintf*(para1: cstring): cstring{.cdecl, varargs, mylib, - importc: "sqlite3_mprintf".} - #function sqlite3_vmprintf(_para1:Pchar; _para2:va_list):Pchar;cdecl; external Sqlite3Lib name 'sqlite3_vmprintf'; -proc free*(z: cstring){.cdecl, mylib, importc: "sqlite3_free".} - #function sqlite3_snprintf(_para1:longint; _para2:Pchar; _para3:Pchar; args:array of const):Pchar;cdecl; external Sqlite3Lib name 'sqlite3_snprintf'; -proc snprintf*(para1: int32, para2: cstring, para3: cstring): cstring{.cdecl, - mylib, varargs, importc: "sqlite3_snprintf".} -proc set_authorizer*(para1: PSqlite3, xAuth: proc (para1: pointer, para2: int32, - para3: cstring, para4: cstring, para5: cstring, para6: cstring): int32{. 
- cdecl.}, pUserData: pointer): int32{.cdecl, mylib, - importc: "sqlite3_set_authorizer".} -proc trace*(para1: PSqlite3, xTrace: proc (para1: pointer, para2: cstring){.cdecl.}, - para3: pointer): pointer{.cdecl, mylib, - importc: "sqlite3_trace".} -proc progress_handler*(para1: PSqlite3, para2: int32, - para3: proc (para1: pointer): int32{.cdecl.}, - para4: pointer){.cdecl, mylib, - importc: "sqlite3_progress_handler".} -proc commit_hook*(para1: PSqlite3, para2: proc (para1: pointer): int32{.cdecl.}, - para3: pointer): pointer{.cdecl, mylib, - importc: "sqlite3_commit_hook".} -proc open*(filename: cstring, ppDb: var PSqlite3): int32{.cdecl, mylib, - importc: "sqlite3_open".} -proc open16*(filename: pointer, ppDb: var PSqlite3): int32{.cdecl, mylib, - importc: "sqlite3_open16".} -proc errcode*(db: PSqlite3): int32{.cdecl, mylib, importc: "sqlite3_errcode".} -proc errmsg*(para1: PSqlite3): cstring{.cdecl, mylib, importc: "sqlite3_errmsg".} -proc errmsg16*(para1: PSqlite3): pointer{.cdecl, mylib, - importc: "sqlite3_errmsg16".} -proc prepare*(db: PSqlite3, zSql: cstring, nBytes: int32, ppStmt: var PStmt, - pzTail: ptr cstring): int32{.cdecl, mylib, - importc: "sqlite3_prepare".} - -proc prepare_v2*(db: PSqlite3, zSql: cstring, nByte: cint, ppStmt: var PStmt, - pzTail: ptr cstring): cint {. - importc: "sqlite3_prepare_v2", cdecl, mylib.} - -proc prepare16*(db: PSqlite3, zSql: pointer, nBytes: int32, ppStmt: var PStmt, - pzTail: var pointer): int32{.cdecl, mylib, - importc: "sqlite3_prepare16".} -proc bind_blob*(para1: PStmt, para2: int32, para3: pointer, n: int32, - para5: Tbind_destructor_func): int32{.cdecl, mylib, - importc: "sqlite3_bind_blob".} -proc bind_double*(para1: PStmt, para2: int32, para3: float64): int32{.cdecl, - mylib, importc: "sqlite3_bind_double".} -proc bind_int*(para1: PStmt, para2: int32, para3: int32): int32{.cdecl, - mylib, importc: "sqlite3_bind_int".} -proc bind_int64*(para1: PStmt, para2: int32, para3: int64): int32{.cdecl, - mylib, importc: "sqlite3_bind_int64".} -proc bind_null*(para1: PStmt, para2: int32): int32{.cdecl, mylib, - importc: "sqlite3_bind_null".} -proc bind_text*(para1: PStmt, para2: int32, para3: cstring, n: int32, - para5: Tbind_destructor_func): int32{.cdecl, mylib, - importc: "sqlite3_bind_text".} -proc bind_text16*(para1: PStmt, para2: int32, para3: pointer, para4: int32, - para5: Tbind_destructor_func): int32{.cdecl, mylib, - importc: "sqlite3_bind_text16".} - #function sqlite3_bind_value(_para1:Psqlite3_stmt; _para2:longint; _para3:Psqlite3_value):longint;cdecl; external Sqlite3Lib name 'sqlite3_bind_value'; - #These overloaded functions were introduced to allow the use of SQLITE_STATIC and SQLITE_TRANSIENT - #It's the c world man ;-) -proc bind_blob*(para1: PStmt, para2: int32, para3: pointer, n: int32, - para5: int32): int32{.cdecl, mylib, - importc: "sqlite3_bind_blob".} -proc bind_text*(para1: PStmt, para2: int32, para3: cstring, n: int32, - para5: int32): int32{.cdecl, mylib, - importc: "sqlite3_bind_text".} -proc bind_text16*(para1: PStmt, para2: int32, para3: pointer, para4: int32, - para5: int32): int32{.cdecl, mylib, - importc: "sqlite3_bind_text16".} -proc bind_parameter_count*(para1: PStmt): int32{.cdecl, mylib, - importc: "sqlite3_bind_parameter_count".} -proc bind_parameter_name*(para1: PStmt, para2: int32): cstring{.cdecl, - mylib, importc: "sqlite3_bind_parameter_name".} -proc bind_parameter_index*(para1: PStmt, zName: cstring): int32{.cdecl, - mylib, importc: "sqlite3_bind_parameter_index".} -proc clear_bindings*(para1: PStmt): 
int32 {.cdecl, - mylib, importc: "sqlite3_clear_bindings".} -proc column_count*(PStmt: PStmt): int32{.cdecl, mylib, - importc: "sqlite3_column_count".} -proc column_name*(para1: PStmt, para2: int32): cstring{.cdecl, mylib, - importc: "sqlite3_column_name".} -proc column_table_name*(para1: PStmt; para2: int32): cstring{.cdecl, mylib, - importc: "sqlite3_column_table_name".} -proc column_name16*(para1: PStmt, para2: int32): pointer{.cdecl, mylib, - importc: "sqlite3_column_name16".} -proc column_decltype*(para1: PStmt, i: int32): cstring{.cdecl, mylib, - importc: "sqlite3_column_decltype".} -proc column_decltype16*(para1: PStmt, para2: int32): pointer{.cdecl, - mylib, importc: "sqlite3_column_decltype16".} -proc step*(para1: PStmt): int32{.cdecl, mylib, importc: "sqlite3_step".} -proc data_count*(PStmt: PStmt): int32{.cdecl, mylib, - importc: "sqlite3_data_count".} -proc column_blob*(para1: PStmt, iCol: int32): pointer{.cdecl, mylib, - importc: "sqlite3_column_blob".} -proc column_bytes*(para1: PStmt, iCol: int32): int32{.cdecl, mylib, - importc: "sqlite3_column_bytes".} -proc column_bytes16*(para1: PStmt, iCol: int32): int32{.cdecl, mylib, - importc: "sqlite3_column_bytes16".} -proc column_double*(para1: PStmt, iCol: int32): float64{.cdecl, mylib, - importc: "sqlite3_column_double".} -proc column_int*(para1: PStmt, iCol: int32): int32{.cdecl, mylib, - importc: "sqlite3_column_int".} -proc column_int64*(para1: PStmt, iCol: int32): int64{.cdecl, mylib, - importc: "sqlite3_column_int64".} -proc column_text*(para1: PStmt, iCol: int32): cstring{.cdecl, mylib, - importc: "sqlite3_column_text".} -proc column_text16*(para1: PStmt, iCol: int32): pointer{.cdecl, mylib, - importc: "sqlite3_column_text16".} -proc column_type*(para1: PStmt, iCol: int32): int32{.cdecl, mylib, - importc: "sqlite3_column_type".} -proc finalize*(PStmt: PStmt): int32{.cdecl, mylib, - importc: "sqlite3_finalize".} -proc reset*(PStmt: PStmt): int32{.cdecl, mylib, importc: "sqlite3_reset".} -proc create_function*(para1: PSqlite3, zFunctionName: cstring, nArg: int32, - eTextRep: int32, para5: pointer, - xFunc: Create_function_func_func, - xStep: Create_function_step_func, - xFinal: Create_function_final_func): int32{.cdecl, - mylib, importc: "sqlite3_create_function".} -proc create_function16*(para1: PSqlite3, zFunctionName: pointer, nArg: int32, - eTextRep: int32, para5: pointer, - xFunc: Create_function_func_func, - xStep: Create_function_step_func, - xFinal: Create_function_final_func): int32{.cdecl, - mylib, importc: "sqlite3_create_function16".} -proc aggregate_count*(para1: Pcontext): int32{.cdecl, mylib, - importc: "sqlite3_aggregate_count".} -proc value_blob*(para1: PValue): pointer{.cdecl, mylib, - importc: "sqlite3_value_blob".} -proc value_bytes*(para1: PValue): int32{.cdecl, mylib, - importc: "sqlite3_value_bytes".} -proc value_bytes16*(para1: PValue): int32{.cdecl, mylib, - importc: "sqlite3_value_bytes16".} -proc value_double*(para1: PValue): float64{.cdecl, mylib, - importc: "sqlite3_value_double".} -proc value_int*(para1: PValue): int32{.cdecl, mylib, - importc: "sqlite3_value_int".} -proc value_int64*(para1: PValue): int64{.cdecl, mylib, - importc: "sqlite3_value_int64".} -proc value_text*(para1: PValue): cstring{.cdecl, mylib, - importc: "sqlite3_value_text".} -proc value_text16*(para1: PValue): pointer{.cdecl, mylib, - importc: "sqlite3_value_text16".} -proc value_text16le*(para1: PValue): pointer{.cdecl, mylib, - importc: "sqlite3_value_text16le".} -proc value_text16be*(para1: PValue): pointer{.cdecl, mylib, 
- importc: "sqlite3_value_text16be".} -proc value_type*(para1: PValue): int32{.cdecl, mylib, - importc: "sqlite3_value_type".} -proc aggregate_context*(para1: Pcontext, nBytes: int32): pointer{.cdecl, - mylib, importc: "sqlite3_aggregate_context".} -proc user_data*(para1: Pcontext): pointer{.cdecl, mylib, - importc: "sqlite3_user_data".} -proc get_auxdata*(para1: Pcontext, para2: int32): pointer{.cdecl, mylib, - importc: "sqlite3_get_auxdata".} -proc set_auxdata*(para1: Pcontext, para2: int32, para3: pointer, - para4: proc (para1: pointer){.cdecl.}){.cdecl, mylib, - importc: "sqlite3_set_auxdata".} -proc result_blob*(para1: Pcontext, para2: pointer, para3: int32, - para4: Result_func){.cdecl, mylib, - importc: "sqlite3_result_blob".} -proc result_double*(para1: Pcontext, para2: float64){.cdecl, mylib, - importc: "sqlite3_result_double".} -proc result_error*(para1: Pcontext, para2: cstring, para3: int32){.cdecl, - mylib, importc: "sqlite3_result_error".} -proc result_error16*(para1: Pcontext, para2: pointer, para3: int32){.cdecl, - mylib, importc: "sqlite3_result_error16".} -proc result_int*(para1: Pcontext, para2: int32){.cdecl, mylib, - importc: "sqlite3_result_int".} -proc result_int64*(para1: Pcontext, para2: int64){.cdecl, mylib, - importc: "sqlite3_result_int64".} -proc result_null*(para1: Pcontext){.cdecl, mylib, - importc: "sqlite3_result_null".} -proc result_text*(para1: Pcontext, para2: cstring, para3: int32, - para4: Result_func){.cdecl, mylib, - importc: "sqlite3_result_text".} -proc result_text16*(para1: Pcontext, para2: pointer, para3: int32, - para4: Result_func){.cdecl, mylib, - importc: "sqlite3_result_text16".} -proc result_text16le*(para1: Pcontext, para2: pointer, para3: int32, - para4: Result_func){.cdecl, mylib, - importc: "sqlite3_result_text16le".} -proc result_text16be*(para1: Pcontext, para2: pointer, para3: int32, - para4: Result_func){.cdecl, mylib, - importc: "sqlite3_result_text16be".} -proc result_value*(para1: Pcontext, para2: PValue){.cdecl, mylib, - importc: "sqlite3_result_value".} -proc create_collation*(para1: PSqlite3, zName: cstring, eTextRep: int32, - para4: pointer, xCompare: Create_collation_func): int32{. - cdecl, mylib, importc: "sqlite3_create_collation".} -proc create_collation16*(para1: PSqlite3, zName: cstring, eTextRep: int32, - para4: pointer, xCompare: Create_collation_func): int32{. - cdecl, mylib, importc: "sqlite3_create_collation16".} -proc collation_needed*(para1: PSqlite3, para2: pointer, para3: Collation_needed_func): int32{. - cdecl, mylib, importc: "sqlite3_collation_needed".} -proc collation_needed16*(para1: PSqlite3, para2: pointer, para3: Collation_needed_func): int32{. 
- cdecl, mylib, importc: "sqlite3_collation_needed16".} -proc libversion*(): cstring{.cdecl, mylib, importc: "sqlite3_libversion".} - #Alias for allowing better code portability (win32 is not working with external variables) -proc version*(): cstring{.cdecl, mylib, importc: "sqlite3_libversion".} - # Not published functions -proc libversion_number*(): int32{.cdecl, mylib, - importc: "sqlite3_libversion_number".} - #function sqlite3_key(db:Psqlite3; pKey:pointer; nKey:longint):longint;cdecl; external Sqlite3Lib name 'sqlite3_key'; - #function sqlite3_rekey(db:Psqlite3; pKey:pointer; nKey:longint):longint;cdecl; external Sqlite3Lib name 'sqlite3_rekey'; - #function sqlite3_sleep(_para1:longint):longint;cdecl; external Sqlite3Lib name 'sqlite3_sleep'; - #function sqlite3_expired(_para1:Psqlite3_stmt):longint;cdecl; external Sqlite3Lib name 'sqlite3_expired'; - #function sqlite3_global_recover:longint;cdecl; external Sqlite3Lib name 'sqlite3_global_recover'; -# implementation - -when defined(nimHasStyleChecks): - {.pop.}
M src/litestorepkg/vendor/sqlite/sqlite3.c

@@ -1,6 +1,6 @@

/****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.40.0. By combining all the individual C code files into this +** version 3.44.2. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements

@@ -16,6 +16,9 @@ ** of the embedded sqlite3.h header file.) Additional code files may be needed

** if you want a wrapper to interface SQLite with your choice of programming ** language. The code for the "sqlite3" command-line shell is also in a ** separate file. This file contains only code for the core SQLite library. +** +** The content in this amalgamation comes from Fossil check-in +** ebead0e7230cd33bcec9f95d2183069565b9. */ #define SQLITE_CORE 1 #define SQLITE_AMALGAMATION 1

@@ -50,11 +53,11 @@ ** measured by branch coverage. This is

** used on lines of code that actually ** implement parts of coverage testing. ** -** OPTIMIZATION-IF-TRUE - This branch is allowed to alway be false +** OPTIMIZATION-IF-TRUE - This branch is allowed to always be false ** and the correct answer is still obtained, ** though perhaps more slowly. ** -** OPTIMIZATION-IF-FALSE - This branch is allowed to alway be true +** OPTIMIZATION-IF-FALSE - This branch is allowed to always be true ** and the correct answer is still obtained, ** though perhaps more slowly. **

@@ -123,6 +126,10 @@ #undef SQLITE_4_BYTE_ALIGNED_MALLOC

#define SQLITE_4_BYTE_ALIGNED_MALLOC #endif /* defined(_MSC_VER) && !defined(_WIN64) */ +#if !defined(HAVE_LOG2) && defined(_MSC_VER) && _MSC_VER<1800 +#define HAVE_LOG2 0 +#endif /* !defined(HAVE_LOG2) && defined(_MSC_VER) && _MSC_VER<1800 */ + #endif /* SQLITE_MSVC_H */ /************** End of msvc.h ************************************************/

@@ -452,9 +459,9 @@ ** See also: [sqlite3_libversion()],

** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.40.0" -#define SQLITE_VERSION_NUMBER 3040000 -#define SQLITE_SOURCE_ID "2022-11-16 12:10:08 89c459e766ea7e9165d0beeb124708b955a4950d0f4792f457465d71b158d318" +#define SQLITE_VERSION "3.44.2" +#define SQLITE_VERSION_NUMBER 3044002 +#define SQLITE_SOURCE_ID "2023-11-24 11:41:44 ebead0e7230cd33bcec9f95d2183069565b9e709bf745c9b5db65cc0cbf92c0f" /* ** CAPI3REF: Run-Time Library Version Numbers

@@ -834,6 +841,7 @@ #define SQLITE_IOERR_COMMIT_ATOMIC (SQLITE_IOERR | (30<<8))

#define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8)) #define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8)) #define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8)) +#define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8)) #define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) #define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8)) #define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))

@@ -869,6 +877,7 @@ #define SQLITE_CONSTRAINT_PINNED (SQLITE_CONSTRAINT |(11<<8))

#define SQLITE_CONSTRAINT_DATATYPE (SQLITE_CONSTRAINT |(12<<8)) #define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) #define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) +#define SQLITE_NOTICE_RBU (SQLITE_NOTICE | (3<<8)) #define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) #define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) #define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8))

@@ -1481,7 +1490,6 @@ ** The [SQLITE_FCNTL_CKPT_DONE] opcode is invoked from within a checkpoint

** in wal mode after the client has finished copying pages from the wal ** file to the database file, but before the *-shm file is updated to ** record the fact that the pages have been checkpointed. -** </ul> ** ** <li>[[SQLITE_FCNTL_EXTERNAL_READER]] ** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect

@@ -1494,10 +1502,16 @@ ** currently has an SQL transaction open on the database. It is set to 0 if

** the database is not a wal-mode db, or if there is no such connection in any ** other process. This opcode cannot be used to detect transactions opened ** by clients within the current process, only within other processes. -** </ul> ** ** <li>[[SQLITE_FCNTL_CKSM_FILE]] -** Used by the cksmvfs VFS module only. +** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the +** [checksum VFS shim] only. +** +** <li>[[SQLITE_FCNTL_RESET_CACHE]] +** If there is currently no transaction open on the database, and the +** database is not a temp db, then the [SQLITE_FCNTL_RESET_CACHE] file-control +** purges the contents of the in-memory page cache. If there is an open +** transaction, or if the db is a temp-db, this opcode is a no-op, not an error. ** </ul> */ #define SQLITE_FCNTL_LOCKSTATE 1

@@ -1540,6 +1554,7 @@ #define SQLITE_FCNTL_RESERVE_BYTES 38

#define SQLITE_FCNTL_CKPT_START 39 #define SQLITE_FCNTL_EXTERNAL_READER 40 #define SQLITE_FCNTL_CKSM_FILE 41 +#define SQLITE_FCNTL_RESET_CACHE 42 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE

@@ -1954,19 +1969,22 @@ ** <b>The sqlite3_config() interface is not threadsafe. The application

** must ensure that no other SQLite interfaces are invoked by other ** threads while sqlite3_config() is running.</b> ** -** The sqlite3_config() interface +** The first argument to sqlite3_config() is an integer +** [configuration option] that determines +** what property of SQLite is to be configured. Subsequent arguments +** vary depending on the [configuration option] +** in the first argument. +** +** For most configuration options, the sqlite3_config() interface ** may only be invoked prior to library initialization using ** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. +** The exceptional configuration options that may be invoked at any time +** are called "anytime configuration options". ** ^If sqlite3_config() is called after [sqlite3_initialize()] and before -** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. +** [sqlite3_shutdown()] with a first argument that is not an anytime +** configuration option, then the sqlite3_config() call will return SQLITE_MISUSE. ** Note, however, that ^sqlite3_config() can be called as part of the ** implementation of an application-defined [sqlite3_os_init()]. -** -** The first argument to sqlite3_config() is an integer -** [configuration option] that determines -** what property of SQLite is to be configured. Subsequent arguments -** vary depending on the [configuration option] -** in the first argument. ** ** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. ** ^If the option is unknown or SQLite is unable to set the option

@@ -2074,6 +2092,23 @@ ** KEYWORDS: {configuration option}

** ** These constants are the available integer configuration options that ** can be passed as the first argument to the [sqlite3_config()] interface. +** +** Most of the configuration options for sqlite3_config() +** will only work if invoked prior to [sqlite3_initialize()] or after +** [sqlite3_shutdown()]. The few exceptions to this rule are called +** "anytime configuration options". +** ^Calling [sqlite3_config()] with a first argument that is not an +** anytime configuration option in between calls to [sqlite3_initialize()] and +** [sqlite3_shutdown()] is a no-op that returns SQLITE_MISUSE. +** +** The set of anytime configuration options can change (by insertions +** and/or deletions) from one release of SQLite to the next. +** As of SQLite version 3.42.0, the complete set of anytime configuration +** options is: +** <ul> +** <li> SQLITE_CONFIG_LOG +** <li> SQLITE_CONFIG_PCACHE_HDRSZ +** </ul> ** ** New configuration options may be added in future releases of SQLite. ** Existing configuration options might be discontinued. Applications
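
Illustrative sketch (not part of the diff): SQLITE_CONFIG_LOG is one of the "anytime configuration options" named above, so installing a logger remains legal even after sqlite3_initialize(), where most other options would return SQLITE_MISUSE.

    #include <stdio.h>
    #include <sqlite3.h>

    static void log_callback(void *arg, int err_code, const char *msg) {
      (void)arg;
      fprintf(stderr, "sqlite3 log (%d): %s\n", err_code, msg);
    }

    /* SQLITE_CONFIG_LOG is an "anytime" option, so this call is permitted
       even after the library has been initialized. */
    static void install_logger(void) {
      sqlite3_config(SQLITE_CONFIG_LOG, log_callback, (void*)0);
    }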

@@ -2405,7 +2440,7 @@ ** than the configured sorter-reference size threshold - then a reference

** is stored in each sorted record and the required column values loaded ** from the database as records are returned in sorted order. The default ** value for this option is to never use this optimization. Specifying a -** negative value for this option restores the default behaviour. +** negative value for this option restores the default behavior. ** This option is only available if SQLite is compiled with the ** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option. **

@@ -2421,28 +2456,28 @@ ** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that

** compile-time option is not set, then the default maximum is 1073741824. ** </dl> */ -#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ -#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ -#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ -#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ -#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ -#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ -#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ -#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ -#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ -/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ -#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ -#define SQLITE_CONFIG_PCACHE 14 /* no-op */ -#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ -#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ -#define SQLITE_CONFIG_URI 17 /* int */ -#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ +#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ +#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ +#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ +#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ +#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ +#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ +#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ +#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ +#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ +#define SQLITE_CONFIG_PCACHE 14 /* no-op */ +#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ +#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ +#define SQLITE_CONFIG_URI 17 /* int */ +#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ #define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ -#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ -#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ +#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ +#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ #define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ #define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */ #define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */

@@ -2483,7 +2518,7 @@ ** rounded down to the next smaller multiple of 8. ^(The lookaside memory

** configuration for a database connection can only be changed when that ** connection is not currently using lookaside memory, or in other words ** when the "current value" returned by -** [sqlite3_db_status](D,[SQLITE_CONFIG_LOOKASIDE],...) is zero. +** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero. ** Any attempt to change the lookaside memory configuration when lookaside ** memory is in use leaves the configuration unchanged and returns ** [SQLITE_BUSY].)^</dd>

@@ -2580,7 +2615,7 @@ ** <dd> Usually, when a database in wal mode is closed or detached from a

** database handle, SQLite checks if this will mean that there are now no ** connections at all to the database. If so, it performs a checkpoint ** operation before closing the connection. This option may be used to -** override this behaviour. The first parameter passed to this operation +** override this behavior. The first parameter passed to this operation ** is an integer - positive to disable checkpoints-on-close, or zero (the ** default) to enable them, and negative to leave the setting unchanged. ** The second parameter is a pointer to an integer

@@ -2633,8 +2668,12 @@ ** <li> [sqlite3_exec](db, "[VACUUM]", 0, 0, 0);

** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0); ** </ol> ** Because resetting a database is destructive and irreversible, the -** process requires the use of this obscure API and multiple steps to help -** ensure that it does not happen by accident. +** process requires the use of this obscure API and multiple steps to +** help ensure that it does not happen by accident. Because this +** feature must be capable of resetting corrupt databases, and +** shutting down virtual tables may require access to that corrupt +** storage, the library must abandon any installed virtual tables +** without calling their xDestroy() methods. ** ** [[SQLITE_DBCONFIG_DEFENSIVE]] <dt>SQLITE_DBCONFIG_DEFENSIVE</dt> ** <dd>The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the

@@ -2673,7 +2712,7 @@ ** using the [PRAGMA legacy_alter_table] statement.

** </dd> ** ** [[SQLITE_DBCONFIG_DQS_DML]] -** <dt>SQLITE_DBCONFIG_DQS_DML</td> +** <dt>SQLITE_DBCONFIG_DQS_DML</dt> ** <dd>The SQLITE_DBCONFIG_DQS_DML option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DML statements ** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The

@@ -2682,7 +2721,7 @@ ** compile-time option.

** </dd> ** ** [[SQLITE_DBCONFIG_DQS_DDL]] -** <dt>SQLITE_DBCONFIG_DQS_DDL</td> +** <dt>SQLITE_DBCONFIG_DQS_DDL</dt> ** <dd>The SQLITE_DBCONFIG_DQS option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DDL statements, ** such as CREATE TABLE and CREATE INDEX. The

@@ -2691,7 +2730,7 @@ ** compile-time option.

** </dd> ** ** [[SQLITE_DBCONFIG_TRUSTED_SCHEMA]] -** <dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA</td> +** <dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA</dt> ** <dd>The SQLITE_DBCONFIG_TRUSTED_SCHEMA option tells SQLite to ** assume that database schemas are untainted by malicious content. ** When the SQLITE_DBCONFIG_TRUSTED_SCHEMA option is disabled, SQLite

@@ -2711,7 +2750,7 @@ ** can also be controlled using the [PRAGMA trusted_schema] statement.

** </dd> ** ** [[SQLITE_DBCONFIG_LEGACY_FILE_FORMAT]] -** <dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT</td> +** <dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT</dt> ** <dd>The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates ** the legacy file format flag. When activated, this flag causes all newly ** created database file to have a schema format version number (the 4-byte

@@ -2720,7 +2759,7 @@ ** means that the resulting database file will be readable and writable by

** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting, ** newly created databases are generally not understandable by SQLite versions ** prior to 3.3.0 ([dateof:3.3.0]). As these words are written, there -** is now scarcely any need to generated database files that are compatible +** is now scarcely any need to generate database files that are compatible ** all the way back to version 3.0.0, and so this setting is of little ** practical use, but is provided so that SQLite can continue to claim the ** ability to generate new database files that are compatible with version

@@ -2729,8 +2768,40 @@ ** <p>Note that when the SQLITE_DBCONFIG_LEGACY_FILE_FORMAT setting is on,

** the [VACUUM] command will fail with an obscure error when attempting to ** process a table with generated columns and a descending index. This is ** not considered a bug since SQLite versions 3.3.0 and earlier do not support -** either generated columns or decending indexes. +** either generated columns or descending indexes. ** </dd> +** +** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]] +** <dt>SQLITE_DBCONFIG_STMT_SCANSTATUS</dt> +** <dd>The SQLITE_DBCONFIG_STMT_SCANSTATUS option is only useful in +** SQLITE_ENABLE_STMT_SCANSTATUS builds. In this case, it sets or clears +** a flag that enables collection of the sqlite3_stmt_scanstatus_v2() +** statistics. For statistics to be collected, the flag must be set on +** the database handle both when the SQL statement is prepared and when it +** is stepped. The flag is set (collection of statistics is enabled) +** by default. This option takes two arguments: an integer and a pointer to +** an integer.. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the statement scanstatus option. If the second argument +** is not NULL, then the value of the statement scanstatus setting after +** processing the first argument is written into the integer that the second +** argument points to. +** </dd> +** +** [[SQLITE_DBCONFIG_REVERSE_SCANORDER]] +** <dt>SQLITE_DBCONFIG_REVERSE_SCANORDER</dt> +** <dd>The SQLITE_DBCONFIG_REVERSE_SCANORDER option changes the default order +** in which tables and indexes are scanned so that the scans start at the end +** and work toward the beginning rather than starting at the beginning and +** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the +** same as setting [PRAGMA reverse_unordered_selects]. This option takes +** two arguments which are an integer and a pointer to an integer. The first +** argument is 1, 0, or -1 to enable, disable, or leave unchanged the +** reverse scan order flag, respectively. If the second argument is not NULL, +** then 0 or 1 is written into the integer that the second argument points to +** depending on if the reverse scan order flag is set after processing the +** first argument. +** </dd> +** ** </dl> */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */

@@ -2751,7 +2822,9 @@ #define SQLITE_DBCONFIG_DQS_DDL 1014 /* int int* */

#define SQLITE_DBCONFIG_ENABLE_VIEW 1015 /* int int* */ #define SQLITE_DBCONFIG_LEGACY_FILE_FORMAT 1016 /* int int* */ #define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */ -#define SQLITE_DBCONFIG_MAX 1017 /* Largest DBCONFIG */ +#define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */ +#define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */ +#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */ /* ** CAPI3REF: Enable Or Disable Extended Result Codes
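
A minimal sketch of driving the two new DBCONFIG options from application code, assuming an already-open handle `db`; note that SQLITE_DBCONFIG_STMT_SCANSTATUS only has an effect in SQLITE_ENABLE_STMT_SCANSTATUS builds, and the trailing int* may be NULL if the resulting state is not needed.

    #include <sqlite3.h>

    static void tune_connection(sqlite3 *db) {
      int state = 0;
      /* Scan tables/indexes back-to-front, same effect as PRAGMA reverse_unordered_selects. */
      sqlite3_db_config(db, SQLITE_DBCONFIG_REVERSE_SCANORDER, 1, &state);
      /* Pass -1 to leave the flag unchanged and merely read its current value. */
      sqlite3_db_config(db, SQLITE_DBCONFIG_STMT_SCANSTATUS, -1, &state);
    }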

@@ -2973,8 +3046,13 @@ ** not effected by the sqlite3_interrupt().

** ^A call to sqlite3_interrupt(D) that occurs when there are no running ** SQL statements is a no-op and has no effect on SQL statements ** that are started after the sqlite3_interrupt() call returns. +** +** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether +** or not an interrupt is currently in effect for [database connection] D. +** It returns 1 if an interrupt is currently in effect, or 0 otherwise. */ SQLITE_API void sqlite3_interrupt(sqlite3*); +SQLITE_API int sqlite3_is_interrupted(sqlite3*); /* ** CAPI3REF: Determine If An SQL Statement Is Complete
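
For illustration, the new query interface pairs with sqlite3_interrupt() roughly as follows; `db` is assumed to be a connection shared with a worker thread.

    #include <sqlite3.h>

    /* Returns non-zero while an interrupt requested via sqlite3_interrupt(db)
       is still in effect for this connection (available since 3.41.0). */
    static int cancellation_pending(sqlite3 *db) {
      return sqlite3_is_interrupted(db);
    }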

@@ -3592,8 +3670,8 @@ ** [[SQLITE_TRACE_PROFILE]] <dt>SQLITE_TRACE_PROFILE</dt>

** <dd>^An SQLITE_TRACE_PROFILE callback provides approximately the same ** information as is provided by the [sqlite3_profile()] callback. ** ^The P argument is a pointer to the [prepared statement] and the -** X argument points to a 64-bit integer which is the estimated of -** the number of nanosecond that the prepared statement took to run. +** X argument points to a 64-bit integer which is approximately +** the number of nanoseconds that the prepared statement took to run. ** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes. ** ** [[SQLITE_TRACE_ROW]] <dt>SQLITE_TRACE_ROW</dt>
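
A sketch of consuming the SQLITE_TRACE_PROFILE event described above; the X argument is read as an approximate nanosecond count, and the logging destination is an arbitrary choice.

    #include <stdio.h>
    #include <sqlite3.h>

    static int trace_callback(unsigned type, void *ctx, void *p, void *x) {
      (void)ctx;
      if (type == SQLITE_TRACE_PROFILE) {
        sqlite3_stmt *stmt = (sqlite3_stmt*)p;
        sqlite3_int64 ns = *(sqlite3_int64*)x;   /* approximate run time in nanoseconds */
        const char *sql = sqlite3_sql(stmt);
        fprintf(stderr, "%s -> %lld ns\n", sql ? sql : "?", (long long)ns);
      }
      return 0;
    }

    static void enable_profiling(sqlite3 *db) {
      sqlite3_trace_v2(db, SQLITE_TRACE_PROFILE, trace_callback, (void*)0);
    }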

@@ -3625,8 +3703,10 @@ ** NULL or if the M mask is zero, then tracing is disabled. The

** M argument should be the bitwise OR-ed combination of ** zero or more [SQLITE_TRACE] constants. ** -** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides -** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2(). +** ^Each call to either sqlite3_trace(D,X,P) or sqlite3_trace_v2(D,M,X,P) +** overrides (cancels) all prior calls to sqlite3_trace(D,X,P) or +** sqlite3_trace_v2(D,M,X,P) for the [database connection] D. Each +** database connection may have at most one trace callback. ** ** ^The X callback is invoked whenever any of the events identified by ** mask M occur. ^The integer return value from the callback is currently

@@ -3656,7 +3736,7 @@ ** METHOD: sqlite3

** ** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback ** function X to be invoked periodically during long running calls to -** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for +** [sqlite3_step()] and [sqlite3_prepare()] and similar for ** database connection D. An example use for this ** interface is to keep a GUI updated during a large query. **

@@ -3681,6 +3761,13 @@ ** the database connection that invoked the progress handler.

** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their ** database connections for the meaning of "modify" in this paragraph. ** +** The progress handler callback would originally only be invoked from the +** bytecode engine. It still might be invoked during [sqlite3_prepare()] +** and similar because those routines might force a reparse of the schema +** which involves running the bytecode engine. However, beginning with +** SQLite version 3.41.0, the progress handler callback might also be +** invoked directly from [sqlite3_prepare()] while analyzing and generating +** code for complex queries. */ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
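
As an illustration of the expanded wording, a handler that lets the application cancel long-running work; `cancel_requested` is an assumed flag owned by the caller.

    #include <sqlite3.h>

    /* Returning non-zero from the callback aborts the current operation;
       since 3.41.0 it may also fire from within sqlite3_prepare() itself. */
    static int progress_callback(void *arg) {
      volatile int *cancel_requested = (volatile int*)arg;
      return *cancel_requested;
    }

    static void watch_connection(sqlite3 *db, volatile int *cancel_requested) {
      sqlite3_progress_handler(db, 1000, progress_callback, (void*)cancel_requested);
    }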

@@ -3717,13 +3804,18 @@ ** three flag combinations:)^

** ** <dl> ** ^(<dt>[SQLITE_OPEN_READONLY]</dt> -** <dd>The database is opened in read-only mode. If the database does not -** already exist, an error is returned.</dd>)^ +** <dd>The database is opened in read-only mode. If the database does +** not already exist, an error is returned.</dd>)^ ** ** ^(<dt>[SQLITE_OPEN_READWRITE]</dt> -** <dd>The database is opened for reading and writing if possible, or reading -** only if the file is write protected by the operating system. In either -** case the database must already exist, otherwise an error is returned.</dd>)^ +** <dd>The database is opened for reading and writing if possible, or +** reading only if the file is write protected by the operating +** system. In either case the database must already exist, otherwise +** an error is returned. For historical reasons, if opening in +** read-write mode fails due to OS-level permissions, an attempt is +** made to open it in read-only mode. [sqlite3_db_readonly()] can be +** used to determine whether the database is actually +** read-write.</dd>)^ ** ** ^(<dt>[SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]</dt> ** <dd>The database is opened for reading and writing, and is created if
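
A sketch of the read-only fallback noted above, using a hypothetical database path; sqlite3_db_readonly() distinguishes the two outcomes after a nominally read-write open.

    #include <sqlite3.h>

    static int open_store(const char *path, sqlite3 **out_db) {
      int rc = sqlite3_open_v2(path, out_db, SQLITE_OPEN_READWRITE, (const char*)0);
      if (rc == SQLITE_OK && sqlite3_db_readonly(*out_db, "main") == 1) {
        /* OS-level permissions forced the historical read-only fallback. */
      }
      return rc;
    }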

@@ -3983,7 +4075,7 @@ ** The first parameter to these interfaces (hereafter referred to

** as F) must be one of: ** <ul> ** <li> A database filename pointer created by the SQLite core and -** passed into the xOpen() method of a VFS implemention, or +** passed into the xOpen() method of a VFS implementation, or ** <li> A filename obtained from [sqlite3_db_filename()], or ** <li> A new filename constructed using [sqlite3_create_filename()]. ** </ul>

@@ -4096,7 +4188,7 @@

/* ** CAPI3REF: Create and Destroy VFS Filenames ** -** These interfces are provided for use by [VFS shim] implementations and +** These interfaces are provided for use by [VFS shim] implementations and ** are not useful outside of that context. ** ** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of

@@ -4176,6 +4268,7 @@ ** </ul>

** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language ** text that describes the error, as either UTF-8 or UTF-16 respectively. +** (See how SQLite handles [invalid UTF] for exceptions to this rule.) ** ^(Memory to hold the error message string is managed internally. ** The application does not need to worry about freeing the result. ** However, the error string might be overwritten or deallocated by

@@ -4644,6 +4737,41 @@ */

SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt); /* +** CAPI3REF: Change The EXPLAIN Setting For A Prepared Statement +** METHOD: sqlite3_stmt +** +** The sqlite3_stmt_explain(S,E) interface changes the EXPLAIN +** setting for [prepared statement] S. If E is zero, then S becomes +** a normal prepared statement. If E is 1, then S behaves as if +** its SQL text began with "[EXPLAIN]". If E is 2, then S behaves as if +** its SQL text began with "[EXPLAIN QUERY PLAN]". +** +** Calling sqlite3_stmt_explain(S,E) might cause S to be reprepared. +** SQLite tries to avoid a reprepare, but a reprepare might be necessary +** on the first transition into EXPLAIN or EXPLAIN QUERY PLAN mode. +** +** Because of the potential need to reprepare, a call to +** sqlite3_stmt_explain(S,E) will fail with SQLITE_ERROR if S cannot be +** reprepared because it was created using [sqlite3_prepare()] instead of +** the newer [sqlite3_prepare_v2()] or [sqlite3_prepare_v3()] interfaces and +** hence has no saved SQL text with which to reprepare. +** +** Changing the explain setting for a prepared statement does not change +** the original SQL text for the statement. Hence, if the SQL text originally +** began with EXPLAIN or EXPLAIN QUERY PLAN, but sqlite3_stmt_explain(S,0) +** is called to convert the statement into an ordinary statement, the EXPLAIN +** or EXPLAIN QUERY PLAN keywords will still appear in the sqlite3_sql(S) +** output, even though the statement now acts like a normal SQL statement. +** +** This routine returns SQLITE_OK if the explain mode is successfully +** changed, or an error code if the explain mode could not be changed. +** The explain mode cannot be changed while a statement is active. +** Hence, it is good practice to call [sqlite3_reset(S)] +** immediately prior to calling sqlite3_stmt_explain(S,E). +*/ +SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode); + +/* ** CAPI3REF: Determine If A Prepared Statement Has Been Reset ** METHOD: sqlite3_stmt **
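
A sketch of flipping an existing prepared statement (`stmt`, assumed idle) into EXPLAIN QUERY PLAN mode and back, per the new interface above.

    #include <stdio.h>
    #include <sqlite3.h>

    static void dump_query_plan(sqlite3_stmt *stmt) {
      sqlite3_reset(stmt);                            /* explain mode cannot change while active */
      if (sqlite3_stmt_explain(stmt, 2) == SQLITE_OK) {
        while (sqlite3_step(stmt) == SQLITE_ROW) {
          printf("%s\n", (const char*)sqlite3_column_text(stmt, 3));  /* EQP "detail" column */
        }
        sqlite3_reset(stmt);
        sqlite3_stmt_explain(stmt, 0);                /* restore normal statement behavior */
      }
    }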

@@ -4806,7 +4934,7 @@ ** ^ (1) A destructor to dispose of the BLOB or string after SQLite has finished

** with it may be passed. ^It is called to dispose of the BLOB or string even ** if the call to the bind API fails, except the destructor is not called if ** the third parameter is a NULL pointer or the fourth parameter is negative. -** ^ (2) The special constant, [SQLITE_STATIC], may be passsed to indicate that +** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that ** the application remains responsible for disposing of the object. ^In this ** case, the object and the provided pointer to it must remain valid until ** either the prepared statement is finalized or the same SQL parameter is

@@ -5485,19 +5613,32 @@ **

** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S ** back to the beginning of its program. ** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], -** or if [sqlite3_step(S)] has never before been called on S, -** then [sqlite3_reset(S)] returns [SQLITE_OK]. +** ^The return code from [sqlite3_reset(S)] indicates whether or not +** the previous evaluation of prepared statement S completed successfully. +** ^If [sqlite3_step(S)] has never before been called on S or if +** [sqlite3_step(S)] has not been called since the previous call +** to [sqlite3_reset(S)], then [sqlite3_reset(S)] will return +** [SQLITE_OK]. ** ** ^If the most recent call to [sqlite3_step(S)] for the ** [prepared statement] S indicated an error, then ** [sqlite3_reset(S)] returns an appropriate [error code]. +** ^The [sqlite3_reset(S)] interface might also return an [error code] +** if there were no prior errors but the process of resetting +** the prepared statement caused a new error. ^For example, if an +** [INSERT] statement with a [RETURNING] clause is only stepped one time, +** that one call to [sqlite3_step(S)] might return SQLITE_ROW but +** the overall statement might still fail and the [sqlite3_reset(S)] call +** might return SQLITE_BUSY if locking constraints prevent the +** database change from committing. Therefore, it is important that +** applications check the return code from [sqlite3_reset(S)] even if +** no prior call to [sqlite3_step(S)] indicated a problem. ** ** ^The [sqlite3_reset(S)] interface does not change the values ** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. */ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); + /* ** CAPI3REF: Create Or Redefine SQL Functions
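
The RETURNING scenario above in miniature; the statement text is hypothetical, and the point is only that the return code of sqlite3_reset() must be checked even after a successful step.

    #include <sqlite3.h>

    static int insert_row(sqlite3_stmt *ins) {   /* e.g. an INSERT ... RETURNING id statement */
      int rc = sqlite3_step(ins);
      if (rc == SQLITE_ROW) {
        /* ... read the RETURNING columns here ... */
      }
      rc = sqlite3_reset(ins);                   /* the write can still fail here, e.g. SQLITE_BUSY */
      return rc;
    }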

@@ -5704,10 +5845,21 @@ ** The SQLITE_DIRECTONLY flag means that the function may only be invoked

** from top-level SQL, and cannot be used in VIEWs or TRIGGERs nor in ** schema structures such as [CHECK constraints], [DEFAULT clauses], ** [expression indexes], [partial indexes], or [generated columns]. -** The SQLITE_DIRECTONLY flags is a security feature which is recommended -** for all [application-defined SQL functions], and especially for functions -** that have side-effects or that could potentially leak sensitive -** information. +** <p> +** The SQLITE_DIRECTONLY flag is recommended for any +** [application-defined SQL function] +** that has side-effects or that could potentially leak sensitive information. +** This will prevent attacks in which an application is tricked +** into using a database file that has had its schema surreptitiously +** modified to invoke the application-defined function in ways that are +** harmful. +** <p> +** Some people say it is good practice to set SQLITE_DIRECTONLY on all +** [application-defined SQL functions], regardless of whether or not they +** are security sensitive, as doing so prevents those functions from being used +** inside of the database schema, and thus ensures that the database +** can be inspected and modified using generic tools (such as the [CLI]) +** that do not have access to the application-defined functions. ** </dd> ** ** [[SQLITE_INNOCUOUS]] <dt>SQLITE_INNOCUOUS</dt><dd>
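
A sketch of registering a side-effecting application function with SQLITE_DIRECTONLY, under a made-up name; the flag keeps it callable from top-level SQL only.

    #include <sqlite3.h>

    static void purge_cache_func(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
      (void)argc; (void)argv;
      /* ... perform the application side effect ... */
      sqlite3_result_int(ctx, 1);
    }

    static int register_purge(sqlite3 *db) {
      /* SQLITE_DIRECTONLY keeps this out of views, triggers, and schema expressions. */
      return sqlite3_create_function_v2(db, "app_purge_cache", 0,
               SQLITE_UTF8 | SQLITE_DIRECTONLY, (void*)0,
               purge_cache_func, (void*)0, (void*)0, (void*)0);
    }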

@@ -5734,13 +5886,27 @@ ** security-adverse side-effects and information-leaks.

** </dd> ** ** [[SQLITE_SUBTYPE]] <dt>SQLITE_SUBTYPE</dt><dd> -** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call +** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call ** [sqlite3_value_subtype()] to inspect the sub-types of its arguments. -** Specifying this flag makes no difference for scalar or aggregate user -** functions. However, if it is not specified for a user-defined window -** function, then any sub-types belonging to arguments passed to the window -** function may be discarded before the window function is called (i.e. -** sqlite3_value_subtype() will always return 0). +** This flag instructs SQLite to omit some corner-case optimizations that +** might disrupt the operation of the [sqlite3_value_subtype()] function, +** causing it to return zero rather than the correct subtype(). +** SQL functions that invokes [sqlite3_value_subtype()] should have this +** property. If the SQLITE_SUBTYPE property is omitted, then the return +** value from [sqlite3_value_subtype()] might sometimes be zero even though +** a non-zero subtype was specified by the function argument expression. +** +** [[SQLITE_RESULT_SUBTYPE]] <dt>SQLITE_RESULT_SUBTYPE</dt><dd> +** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call +** [sqlite3_result_subtype()] to cause a sub-type to be associated with its +** result. +** Every function that invokes [sqlite3_result_subtype()] should have this +** property. If it does not, then the call to [sqlite3_result_subtype()] +** might become a no-op if the function is used as term in an +** [expression index]. On the other hand, SQL functions that never invoke +** [sqlite3_result_subtype()] should avoid setting this property, as the +** purpose of this property is to disable certain optimizations that are +** incompatible with subtypes. ** </dd> ** </dl> */

@@ -5748,6 +5914,7 @@ #define SQLITE_DETERMINISTIC 0x000000800

#define SQLITE_DIRECTONLY 0x000080000 #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 +#define SQLITE_RESULT_SUBTYPE 0x001000000 /* ** CAPI3REF: Deprecated Functions

@@ -5848,16 +6015,6 @@ ** words, if the value is a string that looks like a number)

** then the conversion is performed. Otherwise no conversion occurs. ** The [SQLITE_INTEGER | datatype] after conversion is returned.)^ ** -** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8], -** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current encoding -** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X) -** returns something other than SQLITE_TEXT, then the return value from -** sqlite3_value_encoding(X) is meaningless. ^Calls to -** sqlite3_value_text(X), sqlite3_value_text16(X), sqlite3_value_text16be(X), -** sqlite3_value_text16le(X), sqlite3_value_bytes(X), or -** sqlite3_value_bytes16(X) might change the encoding of the value X and -** thus change the return from subsequent calls to sqlite3_value_encoding(X). -** ** ^Within the [xUpdate] method of a [virtual table], the ** sqlite3_value_nochange(X) interface returns true if and only if ** the column corresponding to X is unchanged by the UPDATE operation

@@ -5922,6 +6079,27 @@ SQLITE_API int sqlite3_value_type(sqlite3_value*);

SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*); SQLITE_API int sqlite3_value_nochange(sqlite3_value*); SQLITE_API int sqlite3_value_frombind(sqlite3_value*); + +/* +** CAPI3REF: Report the internal text encoding state of an sqlite3_value object +** METHOD: sqlite3_value +** +** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8], +** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current text encoding +** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X) +** returns something other than SQLITE_TEXT, then the return value from +** sqlite3_value_encoding(X) is meaningless. ^Calls to +** [sqlite3_value_text(X)], [sqlite3_value_text16(X)], [sqlite3_value_text16be(X)], +** [sqlite3_value_text16le(X)], [sqlite3_value_bytes(X)], or +** [sqlite3_value_bytes16(X)] might change the encoding of the value X and +** thus change the return from subsequent calls to sqlite3_value_encoding(X). +** +** This routine is intended for used by applications that test and validate +** the SQLite implementation. This routine is inquiring about the opaque +** internal state of an [sqlite3_value] object. Ordinary applications should +** not need to know what the internal state of an sqlite3_value object is and +** hence should not need to use this interface. +*/ SQLITE_API int sqlite3_value_encoding(sqlite3_value*); /*

@@ -5933,6 +6111,12 @@ ** an [application-defined SQL function] argument V. The subtype

** information can be used to pass a limited amount of context from ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. +** +** Every [application-defined SQL function] that invoke this interface +** should include the [SQLITE_SUBTYPE] property in the text +** encoding argument when the function is [sqlite3_create_function|registered]. +** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() +** might return zero instead of the upstream subtype in some corner cases. */ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);

@@ -6031,48 +6215,56 @@ ** CAPI3REF: Function Auxiliary Data

** METHOD: sqlite3_context ** ** These functions may be used by (non-aggregate) SQL functions to -** associate metadata with argument values. If the same value is passed to -** multiple invocations of the same SQL function during query execution, under -** some circumstances the associated metadata may be preserved. An example -** of where this might be useful is in a regular-expression matching -** function. The compiled version of the regular expression can be stored as -** metadata associated with the pattern string. +** associate auxiliary data with argument values. If the same argument +** value is passed to multiple invocations of the same SQL function during +** query execution, under some circumstances the associated auxiliary data +** might be preserved. An example of where this might be useful is in a +** regular-expression matching function. The compiled version of the regular +** expression can be stored as auxiliary data associated with the pattern string. ** Then as long as the pattern string remains the same, ** the compiled regular expression can be reused on multiple ** invocations of the same function. ** -** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata +** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the auxiliary data ** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument ** value to the application-defined function. ^N is zero for the left-most -** function argument. ^If there is no metadata +** function argument. ^If there is no auxiliary data ** associated with the function argument, the sqlite3_get_auxdata(C,N) interface ** returns a NULL pointer. ** -** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th -** argument of the application-defined function. ^Subsequent +** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as auxiliary data for the +** N-th argument of the application-defined function. ^Subsequent ** calls to sqlite3_get_auxdata(C,N) return P from the most recent -** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or -** NULL if the metadata has been discarded. +** sqlite3_set_auxdata(C,N,P,X) call if the auxiliary data is still valid or +** NULL if the auxiliary data has been discarded. ** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL, ** SQLite will invoke the destructor function X with parameter P exactly -** once, when the metadata is discarded. -** SQLite is free to discard the metadata at any time, including: <ul> +** once, when the auxiliary data is discarded. +** SQLite is free to discard the auxiliary data at any time, including: <ul> ** <li> ^(when the corresponding function parameter changes)^, or ** <li> ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the ** SQL statement)^, or ** <li> ^(when sqlite3_set_auxdata() is invoked again on the same ** parameter)^, or ** <li> ^(during the original sqlite3_set_auxdata() call when a memory -** allocation error occurs.)^ </ul> +** allocation error occurs.)^ +** <li> ^(during the original sqlite3_set_auxdata() call if the function +** is evaluated during query planning instead of during query execution, +** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ </ul> ** -** Note the last bullet in particular. The destructor X in +** Note the last two bullets in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the ** sqlite3_set_auxdata() interface even returns. 
Hence sqlite3_set_auxdata() ** should be called near the end of the function implementation and the ** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. +** sqlite3_set_auxdata() has been called. Furthermore, a call to +** sqlite3_get_auxdata() that occurs immediately after a corresponding call +** to sqlite3_set_auxdata() might still return NULL if an out-of-memory +** condition occurred during the sqlite3_set_auxdata() call or if the +** function is being evaluated during query planning rather than during +** query execution. ** -** ^(In practice, metadata is preserved between function calls for +** ^(In practice, auxiliary data is preserved between function calls for ** function parameters that are compile-time constants, including literal ** values and [parameters] and expressions composed from the same.)^ **
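
The caching pattern described above, as a sketch; compile_pattern(), run_match(), and free_pattern() are hypothetical stand-ins for a real matching engine, and the re-fetch after sqlite3_set_auxdata() follows the newly documented possibility that the data is discarded immediately.

    #include <sqlite3.h>

    /* Hypothetical helpers standing in for a real regular-expression engine. */
    void *compile_pattern(const char *pattern);
    int   run_match(void *compiled, const char *subject);
    void  free_pattern(void *compiled);

    static void match_func(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
      void *re = sqlite3_get_auxdata(ctx, 0);            /* reuse a cached compile, if any      */
      (void)argc;
      if (re == 0) {
        re = compile_pattern((const char*)sqlite3_value_text(argv[0]));
        sqlite3_set_auxdata(ctx, 0, re, free_pattern);   /* SQLite may discard this at any time */
        re = sqlite3_get_auxdata(ctx, 0);                /* per the new docs, may already be 0  */
        if (re == 0) { sqlite3_result_error_nomem(ctx); return; }
      }
      sqlite3_result_int(ctx, run_match(re, (const char*)sqlite3_value_text(argv[1])));
    }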

@@ -6082,10 +6274,67 @@ ** kinds of function caching behavior.

** ** These routines must be called from the same thread in which ** the SQL function is running. +** +** See also: [sqlite3_get_clientdata()] and [sqlite3_set_clientdata()]. */ SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N); SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); +/* +** CAPI3REF: Database Connection Client Data +** METHOD: sqlite3 +** +** These functions are used to associate one or more named pointers +** with a [database connection]. +** A call to sqlite3_set_clientdata(D,N,P,X) causes the pointer P +** to be attached to [database connection] D using name N. Subsequent +** calls to sqlite3_get_clientdata(D,N) will return a copy of pointer P +** or a NULL pointer if there were no prior calls to +** sqlite3_set_clientdata() with the same values of D and N. +** Names are compared using strcmp() and are thus case sensitive. +** +** If P and X are both non-NULL, then the destructor X is invoked with +** argument P on the first of the following occurrences: +** <ul> +** <li> An out-of-memory error occurs during the call to +** sqlite3_set_clientdata() which attempts to register pointer P. +** <li> A subsequent call to sqlite3_set_clientdata(D,N,P,X) is made +** with the same D and N parameters. +** <li> The database connection closes. SQLite does not make any guarantees +** about the order in which destructors are called, only that all +** destructors will be called exactly once at some point during the +** database connection closing process. +** </ul> +** +** SQLite does not do anything with client data other than invoke +** destructors on the client data at the appropriate time. The intended +** use for client data is to provide a mechanism for wrapper libraries +** to store additional information about an SQLite database connection. +** +** There is no limit (other than available memory) on the number of different +** client data pointers (with different names) that can be attached to a +** single database connection. However, the implementation is optimized +** for the case of having only one or two different client data names. +** Applications and wrapper libraries are discouraged from using more than +** one client data name each. +** +** There is no way to enumerate the client data pointers +** associated with a database connection. The N parameter can be thought +** of as a secret key such that only code that knows the secret key is able +** to access the associated data. +** +** Security Warning: These interfaces should not be exposed in scripting +** languages or in other circumstances where it might be possible for an +** an attacker to invoke them. Any agent that can invoke these interfaces +** can probably also take control of the process. +** +** Database connection client data is only available for SQLite +** version 3.44.0 ([dateof:3.44.0]) and later. +** +** See also: [sqlite3_set_auxdata()] and [sqlite3_get_auxdata()]. +*/ +SQLITE_API void *sqlite3_get_clientdata(sqlite3*,const char*); +SQLITE_API int sqlite3_set_clientdata(sqlite3*, const char*, void*, void(*)(void*)); /* ** CAPI3REF: Constants Defining Special Destructor Behavior
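
A sketch of the wrapper-library use case for the new client-data interfaces, with a made-up key name and state struct.

    #include <sqlite3.h>

    typedef struct { int request_count; } WrapperState;   /* hypothetical per-connection state */

    static void free_wrapper_state(void *p) { sqlite3_free(p); }

    static int attach_wrapper_state(sqlite3 *db) {
      WrapperState *st = (WrapperState*)sqlite3_malloc(sizeof(WrapperState));
      if (st == 0) return SQLITE_NOMEM;
      st->request_count = 0;
      /* The name acts as a private key; only code that knows it can read the pointer back. */
      return sqlite3_set_clientdata(db, "litestore-wrapper", st, free_wrapper_state);
    }

    static WrapperState *wrapper_state(sqlite3 *db) {
      return (WrapperState*)sqlite3_get_clientdata(db, "litestore-wrapper");
    }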

@@ -6287,6 +6536,20 @@ ** of the subtype T are preserved in current versions of SQLite;

** higher order bits are discarded. ** The number of subtype bytes preserved by SQLite might increase ** in future releases of SQLite. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_RESULT_SUBTYPE] property in its +** text encoding argument when the SQL function is +** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE] +** property is omitted from the function that invokes sqlite3_result_subtype(), +** then in some cases the sqlite3_result_subtype() might fail to set +** the result subtype. +** +** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any +** SQL function that invokes the sqlite3_result_subtype() interface +** and that does not have the SQLITE_RESULT_SUBTYPE property will raise +** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1 +** by default. */ SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);
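
A sketch of pairing sqlite3_result_subtype() with the new registration flag; the function name and subtype value are arbitrary.

    #include <sqlite3.h>

    static void tag_value_func(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
      (void)argc;
      sqlite3_result_value(ctx, argv[0]);
      sqlite3_result_subtype(ctx, 0x4a);   /* arbitrary tag; only the low 8 bits are preserved */
    }

    static int register_tagger(sqlite3 *db) {
      /* Without SQLITE_RESULT_SUBTYPE the subtype call above may silently become a no-op. */
      return sqlite3_create_function_v2(db, "tag_value", 1,
               SQLITE_UTF8 | SQLITE_RESULT_SUBTYPE, (void*)0,
               tag_value_func, (void*)0, (void*)0, (void*)0);
    }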

@@ -6458,6 +6721,13 @@ ** method of the default [sqlite3_vfs] object. If the xSleep() method

** of the default VFS is not implemented correctly, or not implemented at ** all, then the behavior of sqlite3_sleep() may deviate from the description ** in the previous paragraphs. +** +** If a negative argument is passed to sqlite3_sleep() the results vary by +** VFS and operating system. Some system treat a negative argument as an +** instruction to sleep forever. Others understand it to mean do not sleep +** at all. ^In SQLite version 3.42.0 and later, a negative +** argument passed into sqlite3_sleep() is changed to zero before it is relayed +** down into the xSleep method of the VFS. */ SQLITE_API int sqlite3_sleep(int);

@@ -6711,7 +6981,7 @@ */

SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); /* -** CAPI3REF: Allowed return values from [sqlite3_txn_state()] +** CAPI3REF: Allowed return values from sqlite3_txn_state() ** KEYWORDS: {transaction state} ** ** These constants define the current transaction state of a database file.

@@ -6843,7 +7113,7 @@ ** <p>^There is only one autovacuum pages callback per database connection.

** ^Each call to the sqlite3_autovacuum_pages() interface overrides all ** previous invocations for that database connection. ^If the callback ** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer, -** then the autovacuum steps callback is cancelled. The return value +** then the autovacuum steps callback is canceled. The return value ** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might ** be some other error code if something goes wrong. The current ** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other

@@ -7303,15 +7573,6 @@ */

SQLITE_API void sqlite3_reset_auto_extension(void); /* -** The interface to the virtual-table mechanism is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* ** Structures used by the virtual table interface */ typedef struct sqlite3_vtab sqlite3_vtab;

@@ -7371,6 +7632,10 @@ int (*xRollbackTo)(sqlite3_vtab *pVTab, int);

/* The methods above are in versions 1 and 2 of the sqlite_module object. ** Those below are for version 3 and greater. */ int (*xShadowName)(const char*); + /* The methods above are in versions 1 through 3 of the sqlite_module object. + ** Those below are for version 4 and greater. */ + int (*xIntegrity)(sqlite3_vtab *pVTab, const char *zSchema, + const char *zTabName, int mFlags, char **pzErr); }; /*
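
A sketch of opting a virtual table module into the new version-4 field using designated initializers; every other method is omitted here, so this shows the shape of the change rather than a working module.

    #include <sqlite3.h>

    /* New optional hook for reporting the virtual table's own consistency (3.44+). */
    static int vtab_integrity(sqlite3_vtab *pVTab, const char *zSchema,
                              const char *zTabName, int mFlags, char **pzErr) {
      (void)pVTab; (void)zSchema; (void)zTabName; (void)mFlags;
      *pzErr = 0;                /* leave NULL to report that nothing is wrong */
      return SQLITE_OK;
    }

    static sqlite3_module example_module = {
      .iVersion   = 4,           /* advertise the version-4 fields */
      .xIntegrity = vtab_integrity
      /* xCreate, xConnect, xBestIndex, ... omitted in this sketch */
    };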

@@ -7429,10 +7694,10 @@ ** the constraint may or may not be checked in byte code. In other words,

** when the omit flag is true there is no guarantee that the constraint will ** not be checked again using byte code.)^ ** -** ^The idxNum and idxPtr values are recorded and passed into the +** ^The idxNum and idxStr values are recorded and passed into the ** [xFilter] method. -** ^[sqlite3_free()] is used to free idxPtr if and only if -** needToFreeIdxPtr is true. +** ^[sqlite3_free()] is used to free idxStr if and only if +** needToFreeIdxStr is true. ** ** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in ** the correct order to satisfy the ORDER BY clause so that no separate

@@ -7552,7 +7817,7 @@ ** The collating sequence to be used for comparison can be found using

** the [sqlite3_vtab_collation()] interface. For most real-world virtual ** tables, the collating sequence of constraints does not matter (for example ** because the constraints are numeric) and so the sqlite3_vtab_collation() -** interface is no commonly needed. +** interface is not commonly needed. */ #define SQLITE_INDEX_CONSTRAINT_EQ 2 #define SQLITE_INDEX_CONSTRAINT_GT 4

@@ -7712,16 +7977,6 @@ */

SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); /* -** The interface to the virtual-table mechanism defined above (back up -** to a comment remarkably similar to this one) is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* ** CAPI3REF: A Handle To An Open BLOB ** KEYWORDS: {BLOB handle} {BLOB handles} **

@@ -7868,7 +8123,7 @@ ** committed. ^If an error occurs while committing the transaction, an error

** code is returned and the transaction rolled back. ** ** Calling this function with an argument that is not a NULL pointer or an -** open blob handle results in undefined behaviour. ^Calling this routine +** open blob handle results in undefined behavior. ^Calling this routine ** with a null pointer (such as would be returned by a failed call to ** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function ** is passed a valid open blob handle, the values returned by the

@@ -8104,9 +8359,9 @@ ** previously entered by the same thread. The behavior

** is undefined if the mutex is not currently entered by the ** calling thread or is not currently allocated. ** -** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or -** sqlite3_mutex_leave() is a NULL pointer, then all three routines -** behave as no-ops. +** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), +** sqlite3_mutex_leave(), or sqlite3_mutex_free() is a NULL pointer, +** then any of the four routines behaves as a no-op. ** ** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. */

@@ -8348,6 +8603,7 @@ #define SQLITE_TESTCTRL_FIRST 5

#define SQLITE_TESTCTRL_PRNG_SAVE 5 #define SQLITE_TESTCTRL_PRNG_RESTORE 6 #define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */ +#define SQLITE_TESTCTRL_FK_NO_ACTION 7 #define SQLITE_TESTCTRL_BITVEC_TEST 8 #define SQLITE_TESTCTRL_FAULT_INSTALL 9 #define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10

@@ -8376,7 +8632,8 @@ #define SQLITE_TESTCTRL_SEEK_COUNT 30

#define SQLITE_TESTCTRL_TRACEFLAGS 31 #define SQLITE_TESTCTRL_TUNE 32 #define SQLITE_TESTCTRL_LOGEST 33 -#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */ +#define SQLITE_TESTCTRL_USELONGDOUBLE 34 +#define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */ /* ** CAPI3REF: SQL Keyword Checking

@@ -9832,7 +10089,7 @@ **

** [[SQLITE_VTAB_DIRECTONLY]]<dt>SQLITE_VTAB_DIRECTONLY</dt> ** <dd>Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** the [xConnect] or [xCreate] methods of a [virtual table] implementation ** prohibits that virtual table from being used from within triggers and ** views. ** </dd>

@@ -9840,18 +10097,28 @@ **

** [[SQLITE_VTAB_INNOCUOUS]]<dt>SQLITE_VTAB_INNOCUOUS</dt> ** <dd>Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** the [xConnect] or [xCreate] methods of a [virtual table] implementation ** identify that virtual table as being safe to use from within triggers ** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the ** virtual table can do no serious harm even if it is controlled by a ** malicious hacker. Developers should avoid setting the SQLITE_VTAB_INNOCUOUS ** flag unless absolutely necessary. ** </dd> +** +** [[SQLITE_VTAB_USES_ALL_SCHEMAS]]<dt>SQLITE_VTAB_USES_ALL_SCHEMAS</dt> +** <dd>Calls of the form +** [sqlite3_vtab_config](db,SQLITE_VTAB_USES_ALL_SCHEMA) from within the +** the [xConnect] or [xCreate] methods of a [virtual table] implementation +** instruct the query planner to begin at least a read transaction on +** all schemas ("main", "temp", and any ATTACH-ed databases) whenever the +** virtual table is used. +** </dd> ** </dl> */ #define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 #define SQLITE_VTAB_INNOCUOUS 2 #define SQLITE_VTAB_DIRECTONLY 3 +#define SQLITE_VTAB_USES_ALL_SCHEMAS 4 /* ** CAPI3REF: Determine The Virtual Table Conflict Policy
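As an illustration only (module shape and names are invented), the new SQLITE_VTAB_USES_ALL_SCHEMAS flag would typically be set from within xCreate/xConnect right after declaring the table schema:

#include <string.h>
#include <sqlite3.h>

/* Hypothetical xConnect/xCreate fragment. Declaring
** SQLITE_VTAB_USES_ALL_SCHEMAS tells the planner to start at least a read
** transaction on every attached schema whenever this table is used. */
static int exampleConnect(
  sqlite3 *db, void *pAux, int argc, const char *const *argv,
  sqlite3_vtab **ppVtab, char **pzErr
){
  int rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a, b)");
  (void)pAux; (void)argc; (void)argv; (void)pzErr;
  if( rc==SQLITE_OK ){
    sqlite3_vtab_config(db, SQLITE_VTAB_USES_ALL_SCHEMAS);
    *ppVtab = sqlite3_malloc(sizeof(sqlite3_vtab));
    if( *ppVtab==0 ) return SQLITE_NOMEM;
    memset(*ppVtab, 0, sizeof(sqlite3_vtab));
  }
  return rc;
}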

@@ -9924,7 +10191,7 @@ ** name of that alternative collating sequence is returned.

** <li><p> Otherwise, "BINARY" is returned. ** </ol> */ -SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_info*,int); +SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); /* ** CAPI3REF: Determine if a virtual table query is DISTINCT

@@ -10012,7 +10279,7 @@ ** "[IN operator|column IN (...)]" is

** communicated to the xBestIndex method as a ** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use ** this constraint, it must set the corresponding -** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under +** aConstraintUsage[].argvIndex to a positive integer. ^(Then, under ** the usual mode of handling IN operators, SQLite generates [bytecode] ** that invokes the [xFilter|xFilter() method] once for each value ** on the right-hand side of the IN operator.)^ Thus the virtual table

@@ -10081,21 +10348,20 @@ ** The result of invoking these interfaces from any other context

** is undefined and probably harmful. ** ** The X parameter in a call to sqlite3_vtab_in_first(X,P) or -** sqlite3_vtab_in_next(X,P) must be one of the parameters to the +** sqlite3_vtab_in_next(X,P) should be one of the parameters to the ** xFilter method which invokes these routines, and specifically ** a parameter that was previously selected for all-at-once IN constraint ** processing use the [sqlite3_vtab_in()] interface in the ** [xBestIndex|xBestIndex method]. ^(If the X parameter is not ** an xFilter argument that was selected for all-at-once IN constraint -** processing, then these routines return [SQLITE_MISUSE])^ or perhaps -** exhibit some other undefined or harmful behavior. +** processing, then these routines return [SQLITE_ERROR].)^ ** ** ^(Use these routines to access all values on the right-hand side ** of the IN constraint using code like the following: ** ** <blockquote><pre> ** &nbsp; for(rc=sqlite3_vtab_in_first(pList, &pVal); -** &nbsp; rc==SQLITE_OK && pVal +** &nbsp; rc==SQLITE_OK && pVal; ** &nbsp; rc=sqlite3_vtab_in_next(pList, &pVal) ** &nbsp; ){ ** &nbsp; // do something with pVal
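Expanding the snippet above into a compilable fragment (the helper name is invented; error handling for both the SQLITE_OK and SQLITE_DONE termination conventions is kept deliberately permissive):

#include <sqlite3.h>

/* Hypothetical xFilter fragment: pList is the argv[] entry that xBestIndex
** previously selected for all-at-once IN processing via sqlite3_vtab_in(). */
static int consumeInList(sqlite3_value *pList){
  sqlite3_value *pVal = 0;
  int rc;
  for(rc=sqlite3_vtab_in_first(pList, &pVal);
      rc==SQLITE_OK && pVal;
      rc=sqlite3_vtab_in_next(pList, &pVal)
  ){
    /* do something with pVal, e.g. sqlite3_value_int64(pVal) */
  }
  return (rc==SQLITE_OK || rc==SQLITE_DONE) ? SQLITE_OK : rc;
}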

@@ -10193,6 +10459,10 @@ ** When the value returned to V is a string, space to hold that string is

** managed by the prepared statement S and will be automatically freed when ** S is finalized. ** +** Not all values are available for all query elements. When a value is +** not available, the output variable is set to -1 if the value is numeric, +** or to NULL if it is a string (SQLITE_SCANSTAT_NAME). +** ** <dl> ** [[SQLITE_SCANSTAT_NLOOP]] <dt>SQLITE_SCANSTAT_NLOOP</dt> ** <dd>^The [sqlite3_int64] variable pointed to by the V parameter will be

@@ -10220,12 +10490,24 @@ ** <dd>^The "const char *" variable pointed to by the V parameter will be set

** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN] ** description for the X-th loop. ** -** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECT</dt> +** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECTID</dt> ** <dd>^The "int" variable pointed to by the V parameter will be set to the -** "select-id" for the X-th loop. The select-id identifies which query or -** subquery the loop is part of. The main query has a select-id of zero. -** The select-id is the same value as is output in the first column -** of an [EXPLAIN QUERY PLAN] query. +** id for the X-th query plan element. The id value is unique within the +** statement. The select-id is the same value as is output in the first +** column of an [EXPLAIN QUERY PLAN] query. +** +** [[SQLITE_SCANSTAT_PARENTID]] <dt>SQLITE_SCANSTAT_PARENTID</dt> +** <dd>The "int" variable pointed to by the V parameter will be set to the +** the id of the parent of the current query element, if applicable, or +** to zero if the query element has no parent. This is the same value as +** returned in the second column of an [EXPLAIN QUERY PLAN] query. +** +** [[SQLITE_SCANSTAT_NCYCLE]] <dt>SQLITE_SCANSTAT_NCYCLE</dt> +** <dd>The sqlite3_int64 output value is set to the number of cycles, +** according to the processor time-stamp counter, that elapsed while the +** query element was being processed. This value is not available for +** all query elements - if it is unavailable the output variable is +** set to -1. ** </dl> */ #define SQLITE_SCANSTAT_NLOOP 0

@@ -10234,12 +10516,14 @@ #define SQLITE_SCANSTAT_EST 2

#define SQLITE_SCANSTAT_NAME 3 #define SQLITE_SCANSTAT_EXPLAIN 4 #define SQLITE_SCANSTAT_SELECTID 5 +#define SQLITE_SCANSTAT_PARENTID 6 +#define SQLITE_SCANSTAT_NCYCLE 7 /* ** CAPI3REF: Prepared Statement Scan Status ** METHOD: sqlite3_stmt ** -** This interface returns information about the predicted and measured +** These interfaces return information about the predicted and measured ** performance for pStmt. Advanced applications can use this ** interface to compare the predicted and the measured performance and ** issue warnings and/or rerun [ANALYZE] if discrepancies are found.

@@ -10250,19 +10534,25 @@ ** compile-time option.

** ** The "iScanStatusOp" parameter determines which status information to return. ** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior -** of this interface is undefined. -** ^The requested measurement is written into a variable pointed to by -** the "pOut" parameter. -** Parameter "idx" identifies the specific loop to retrieve statistics for. -** Loops are numbered starting from zero. ^If idx is out of range - less than -** zero or greater than or equal to the total number of loops used to implement -** the statement - a non-zero value is returned and the variable that pOut -** points to is unchanged. +** of this interface is undefined. ^The requested measurement is written into +** a variable pointed to by the "pOut" parameter. ** -** ^Statistics might not be available for all loops in all statements. ^In cases -** where there exist loops with no available statistics, this function behaves -** as if the loop did not exist - it returns non-zero and leave the variable -** that pOut points to unchanged. +** The "flags" parameter must be passed a mask of flags. At present only +** one flag is defined - SQLITE_SCANSTAT_COMPLEX. If SQLITE_SCANSTAT_COMPLEX +** is specified, then status information is available for all elements +** of a query plan that are reported by "EXPLAIN QUERY PLAN" output. If +** SQLITE_SCANSTAT_COMPLEX is not specified, then only query plan elements +** that correspond to query loops (the "SCAN..." and "SEARCH..." elements of +** the EXPLAIN QUERY PLAN output) are available. Invoking API +** sqlite3_stmt_scanstatus() is equivalent to calling +** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter. +** +** Parameter "idx" identifies the specific query element to retrieve statistics +** for. Query elements are numbered starting from zero. A value of -1 may be +** to query for statistics regarding the entire query. ^If idx is out of range +** - less than -1 or greater than or equal to the total number of query +** elements used to implement the statement - a non-zero value is returned and +** the variable that pOut points to is unchanged. ** ** See also: [sqlite3_stmt_scanstatus_reset()] */

@@ -10272,6 +10562,19 @@ int idx, /* Index of loop to report on */

int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */ void *pOut /* Result written here */ ); +SQLITE_API int sqlite3_stmt_scanstatus_v2( + sqlite3_stmt *pStmt, /* Prepared statement for which info desired */ + int idx, /* Index of loop to report on */ + int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */ + int flags, /* Mask of flags defined below */ + void *pOut /* Result written here */ +); + +/* +** CAPI3REF: Prepared Statement Scan Status +** KEYWORDS: {scan status flags} +*/ +#define SQLITE_SCANSTAT_COMPLEX 0x0001 /* ** CAPI3REF: Zero Scan-Status Counters
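A small usage sketch of the new v2 interface (assumes a build with SQLITE_ENABLE_STMT_SCANSTATUS; the helper name is invented):

#include <stdio.h>
#include <sqlite3.h>

/* Hypothetical: print loop and cycle counts for every query-plan element of
** a prepared statement, including non-loop elements, by passing the
** SQLITE_SCANSTAT_COMPLEX flag. */
static void dumpScanStatus(sqlite3_stmt *pStmt){
  int idx;
  for(idx=0; ; idx++){
    sqlite3_int64 nLoop = -1, nCycle = -1;
    const char *zExplain = 0;
    if( sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_NLOOP,
                                   SQLITE_SCANSTAT_COMPLEX, &nLoop) ){
      break;  /* idx out of range: no more query elements */
    }
    sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_NCYCLE,
                               SQLITE_SCANSTAT_COMPLEX, &nCycle);
    sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_EXPLAIN,
                               SQLITE_SCANSTAT_COMPLEX, &zExplain);
    printf("%d: nLoop=%lld nCycle=%lld %s\n", idx,
           (long long)nLoop, (long long)nCycle, zExplain ? zExplain : "");
  }
}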

@@ -10362,6 +10665,10 @@ ** or updated. The value of the seventh parameter passed to the callback

** function is not defined for operations on WITHOUT ROWID tables, or for ** DELETE operations on rowid tables. ** +** ^The sqlite3_preupdate_hook(D,C,P) function returns the P argument from +** the previous call on the same [database connection] D, or NULL for +** the first call on D. +** ** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()], ** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces ** provide additional information about a preupdate event. These routines
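For example (requires SQLITE_ENABLE_PREUPDATE_HOOK; the callback and context names are placeholders):

#include <sqlite3.h>

/* Hypothetical pre-update callback. */
static void onPreUpdate(void *pCtx, sqlite3 *db, int op, const char *zDb,
                        const char *zName, sqlite3_int64 iKey1,
                        sqlite3_int64 iKey2){
  (void)pCtx; (void)db; (void)op; (void)zDb; (void)zName;
  (void)iKey1; (void)iKey2;
}

/* Installing the hook returns the context pointer registered by the previous
** call on the same connection, or NULL on the first call. */
static void *installHook(sqlite3 *db, void *pNewCtx){
  return sqlite3_preupdate_hook(db, onPreUpdate, pNewCtx);
}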

@@ -10401,7 +10708,7 @@ **

** When the [sqlite3_blob_write()] API is used to update a blob column, ** the pre-update hook is invoked with SQLITE_DELETE. This is because the ** in this case the new values are not available. In this case, when a -** callback made with op==SQLITE_DELETE is actuall a write using the +** callback made with op==SQLITE_DELETE is actually a write using the ** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns ** the index of the column being written. In other cases, where the ** pre-update hook is being invoked for some other reason, including a

@@ -10662,6 +10969,13 @@ ** The size of the database is written into *P even if the

** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy ** of the database exists. ** +** After the call, if the SQLITE_SERIALIZE_NOCOPY bit had been set, +** the returned buffer content will remain accessible and unchanged +** until either the next write operation on the connection or when +** the connection is closed, and applications must not modify the +** buffer. If the bit had been clear, the returned buffer will not +** be accessed by SQLite after the call. +** ** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the ** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory ** allocation error occurs.
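A possible calling pattern given the clarified ownership rules (all names other than the SQLite APIs are placeholders):

#include <sqlite3.h>

/* Hypothetical: prefer a zero-copy view of the "main" database, falling back
** to a heap copy that the caller must free. */
static unsigned char *snapshotMain(sqlite3 *db, sqlite3_int64 *pn, int *pOwned){
  unsigned char *p = sqlite3_serialize(db, "main", pn, SQLITE_SERIALIZE_NOCOPY);
  if( p ){
    *pOwned = 0;  /* Buffer belongs to SQLite: treat as read-only, valid only
                  ** until the next write on db or until db is closed. */
  }else{
    p = sqlite3_serialize(db, "main", pn, 0);
    *pOwned = 1;  /* Caller owns the copy and must sqlite3_free() it. */
  }
  return p;
}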

@@ -10710,6 +11024,9 @@ ** connection closes. If the SQLITE_DESERIALIZE_RESIZEABLE bit is set, then

** SQLite will try to increase the buffer size using sqlite3_realloc64() ** if writes on the database cause it to grow larger than M bytes. ** +** Applications must not modify the buffer P or invalidate it before +** the database connection D is closed. +** ** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the ** database is currently in a read transaction or is involved in a backup ** operation.

@@ -10718,6 +11035,13 @@ ** It is not possible to deserialized into the TEMP database. If the

** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the ** function returns SQLITE_ERROR. ** +** The deserialized database should not be in [WAL mode]. If the database +** is in WAL mode, then any attempt to use the database file will result +** in an [SQLITE_CANTOPEN] error. The application can set the +** [file format version numbers] (bytes 18 and 19) of the input database P +** to 0x01 prior to invoking sqlite3_deserialize(D,S,P,N,M,F) to force the +** database file into rollback mode and work around this limitation. +** ** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the ** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then ** [sqlite3_free()] is invoked on argument P prior to returning.
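The WAL workaround described above might look roughly like this (the buffer is assumed to hold a complete database image; offsets 18 and 19 are the file-format read/write version bytes, where 2 indicates WAL):

#include <sqlite3.h>

/* Hypothetical: force a serialized WAL-mode image into rollback mode before
** deserializing it, as suggested by the comment above. */
static int deserializeImage(sqlite3 *db, unsigned char *pData,
                            sqlite3_int64 nData){
  if( nData>=20 && (pData[18]==2 || pData[19]==2) ){
    pData[18] = 0x01;   /* file format read version  -> rollback journal */
    pData[19] = 0x01;   /* file format write version -> rollback journal */
  }
  return sqlite3_deserialize(db, "main", pData, nData, nData,
             SQLITE_DESERIALIZE_FREEONCLOSE|SQLITE_DESERIALIZE_RESIZEABLE);
}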

@@ -10765,6 +11089,19 @@ ** builds on processors without floating point support.

*/ #ifdef SQLITE_OMIT_FLOATING_POINT # undef double +#endif + +#if defined(__wasi__) +# undef SQLITE_WASI +# define SQLITE_WASI 1 +# undef SQLITE_OMIT_WAL +# define SQLITE_OMIT_WAL 1/* because it requires shared memory APIs */ +# ifndef SQLITE_OMIT_LOAD_EXTENSION +# define SQLITE_OMIT_LOAD_EXTENSION +# endif +# ifndef SQLITE_THREADSAFE +# define SQLITE_THREADSAFE 0 +# endif #endif #if 0

@@ -10973,16 +11310,20 @@ */

SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); /* -** CAPIREF: Conigure a Session Object +** CAPI3REF: Configure a Session Object ** METHOD: sqlite3_session ** ** This method is used to configure a session object after it has been -** created. At present the only valid value for the second parameter is -** [SQLITE_SESSION_OBJCONFIG_SIZE]. +** created. At present the only valid values for the second parameter are +** [SQLITE_SESSION_OBJCONFIG_SIZE] and [SQLITE_SESSION_OBJCONFIG_ROWID]. ** -** Arguments for sqlite3session_object_config() +*/ +SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); + +/* +** CAPI3REF: Options for sqlite3session_object_config ** -** The following values may passed as the the 4th parameter to +** The following values may passed as the the 2nd parameter to ** sqlite3session_object_config(). ** ** <dt>SQLITE_SESSION_OBJCONFIG_SIZE <dd>

@@ -10998,12 +11339,21 @@ ** enabled following the current call, or 0 otherwise.

** ** It is an error (SQLITE_MISUSE) to attempt to modify this setting after ** the first table has been attached to the session object. -*/ -SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); - -/* +** +** <dt>SQLITE_SESSION_OBJCONFIG_ROWID <dd> +** This option is used to set, clear or query the flag that enables +** collection of data for tables with no explicit PRIMARY KEY. +** +** Normally, tables with no explicit PRIMARY KEY are simply ignored +** by the sessions module. However, if this flag is set, it behaves +** as if such tables have a column "_rowid_ INTEGER PRIMARY KEY" inserted +** as their leftmost columns. +** +** It is an error (SQLITE_MISUSE) to attempt to modify this setting after +** the first table has been attached to the session object. */ -#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_ROWID 2 /* ** CAPI3REF: Enable Or Disable A Session Object
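A likely calling sequence for the new option; the set/clear/query convention via an int pointer mirrors SQLITE_SESSION_OBJCONFIG_SIZE and is an assumption here, not stated by the diff:

#include <sqlite3.h>

/* Hypothetical: enable rowid collection so that tables without an explicit
** PRIMARY KEY are also tracked. Must happen before the first
** sqlite3session_attach() call on the session object. */
static int enableRowidTracking(sqlite3_session *pSession){
  int bEnable = 1;  /* 1 = set, 0 = clear, -1 would query (assumed) */
  return sqlite3session_object_config(pSession,
             SQLITE_SESSION_OBJCONFIG_ROWID, &bEnable);
}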

@@ -11765,6 +12115,18 @@ );

/* +** CAPI3REF: Upgrade the Schema of a Changeset/Patchset +*/ +SQLITE_API int sqlite3changeset_upgrade( + sqlite3 *db, + const char *zDb, + int nIn, const void *pIn, /* Input changeset */ + int *pnOut, void **ppOut /* OUT: Inverse of input */ +); + + + +/* ** CAPI3REF: Changegroup Handle ** ** A changegroup is an object used to combine two or more

@@ -11811,6 +12173,38 @@ */

SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp); /* +** CAPI3REF: Add a Schema to a Changegroup +** METHOD: sqlite3_changegroup_schema +** +** This method may be used to optionally enforce the rule that the changesets +** added to the changegroup handle must match the schema of database zDb +** ("main", "temp", or the name of an attached database). If +** sqlite3changegroup_add() is called to add a changeset that is not compatible +** with the configured schema, SQLITE_SCHEMA is returned and the changegroup +** object is left in an undefined state. +** +** A changeset schema is considered compatible with the database schema in +** the same way as for sqlite3changeset_apply(). Specifically, for each +** table in the changeset, there exists a database table with: +** +** <ul> +** <li> The name identified by the changeset, and +** <li> at least as many columns as recorded in the changeset, and +** <li> the primary key columns in the same position as recorded in +** the changeset. +** </ul> +** +** The output of the changegroup object always has the same schema as the +** database nominated using this function. In cases where changesets passed +** to sqlite3changegroup_add() have fewer columns than the corresponding table +** in the database schema, these are filled in using the default column +** values from the database schema. This makes it possible to combined +** changesets that have different numbers of columns for a single table +** within a changegroup, provided that they are otherwise compatible. +*/ +SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const char *zDb); + +/* ** CAPI3REF: Add A Changeset To A Changegroup ** METHOD: sqlite3_changegroup **
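A sketch of how the new call slots into the usual changegroup flow (buffer and helper names are placeholders):

#include <sqlite3.h>

/* Hypothetical: pin a changegroup to the schema of "main" so that changesets
** with fewer columns than the database tables can still be combined. */
static int combineWithSchema(sqlite3 *db, int nA, void *pA, int nB, void *pB,
                             int *pnOut, void **ppOut){
  sqlite3_changegroup *pGrp = 0;
  int rc = sqlite3changegroup_new(&pGrp);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_schema(pGrp, db, "main");
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
  sqlite3changegroup_delete(pGrp);
  return rc;
}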

@@ -11878,13 +12272,18 @@ **

** If the new changeset contains changes to a table that is already present ** in the changegroup, then the number of columns and the position of the ** primary key columns for the table must be consistent. If this is not the -** case, this function fails with SQLITE_SCHEMA. If the input changeset -** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is -** returned. Or, if an out-of-memory condition occurs during processing, this -** function returns SQLITE_NOMEM. In all cases, if an error occurs the state -** of the final contents of the changegroup is undefined. +** case, this function fails with SQLITE_SCHEMA. Except, if the changegroup +** object has been configured with a database schema using the +** sqlite3changegroup_schema() API, then it is possible to combine changesets +** with different numbers of columns for a single table, provided that +** they are otherwise compatible. +** +** If the input changeset appears to be corrupt and the corruption is +** detected, SQLITE_CORRUPT is returned. Or, if an out-of-memory condition +** occurs during processing, this function returns SQLITE_NOMEM. ** -** If no error occurs, SQLITE_OK is returned. +** In all cases, if an error occurs the state of the final contents of the +** changegroup is undefined. If no error occurs, SQLITE_OK is returned. */ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);

@@ -12136,9 +12535,30 @@ ** <dt>SQLITE_CHANGESETAPPLY_INVERT <dd>

** Invert the changeset before applying it. This is equivalent to inverting ** a changeset using sqlite3changeset_invert() before applying it. It is ** an error to specify this flag with a patchset. +** +** <dt>SQLITE_CHANGESETAPPLY_IGNORENOOP <dd> +** Do not invoke the conflict handler callback for any changes that +** would not actually modify the database even if they were applied. +** Specifically, this means that the conflict handler is not invoked +** for: +** <ul> +** <li>a delete change if the row being deleted cannot be found, +** <li>an update change if the modified fields are already set to +** their new values in the conflicting row, or +** <li>an insert change if all fields of the conflicting row match +** the row being inserted. +** </ul> +** +** <dt>SQLITE_CHANGESETAPPLY_FKNOACTION <dd> +** If this flag it set, then all foreign key constraints in the target +** database behave as if they were declared with "ON UPDATE NO ACTION ON +** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL +** or SET DEFAULT. */ #define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001 #define SQLITE_CHANGESETAPPLY_INVERT 0x0002 +#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004 +#define SQLITE_CHANGESETAPPLY_FKNOACTION 0x0008 /* ** CAPI3REF: Constants Passed To The Conflict Handler
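One way the two new flags might be combined in a call to sqlite3changeset_apply_v2() (the conflict handler below is a trivial placeholder):

#include <sqlite3.h>

/* Hypothetical conflict handler: keep whatever is already in the database. */
static int onConflict(void *pCtx, int eConflict, sqlite3_changeset_iter *pIter){
  (void)pCtx; (void)eConflict; (void)pIter;
  return SQLITE_CHANGESET_OMIT;
}

/* Hypothetical: apply a changeset while skipping no-op conflicts and treating
** every foreign key as ON UPDATE/DELETE NO ACTION for the duration. */
static int applyQuietly(sqlite3 *db, int nChangeset, void *pChangeset){
  return sqlite3changeset_apply_v2(
      db, nChangeset, pChangeset,
      0,              /* xFilter: apply changes to all tables */
      onConflict, 0,  /* conflict handler and its context pointer */
      0, 0,           /* no rebase blob requested */
      SQLITE_CHANGESETAPPLY_IGNORENOOP|SQLITE_CHANGESETAPPLY_FKNOACTION);
}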

@@ -12879,7 +13299,7 @@ ** xPhraseNextColumn()

** See xPhraseFirstColumn above. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 2 */ void *(*xUserData)(Fts5Context*);

@@ -13108,8 +13528,8 @@ ** On the other hand, it may require more CPU cycles to run MATCH queries,

** as separate queries of the FTS index are required for each synonym. ** ** When using methods (2) or (3), it is important that the tokenizer only -** provide synonyms when tokenizing document text (method (2)) or query -** text (method (3)), not both. Doing so will not cause any errors, but is +** provide synonyms when tokenizing document text (method (3)) or query +** text (method (2)), not both. Doing so will not cause any errors, but is ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer;

@@ -13157,7 +13577,7 @@ /* Create a new tokenizer */

int (*xCreateTokenizer)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_tokenizer *pTokenizer, void (*xDestroy)(void*) );

@@ -13166,7 +13586,7 @@ /* Find an existing tokenizer */

int (*xFindTokenizer)( fts5_api *pApi, const char *zName, - void **ppContext, + void **ppUserData, fts5_tokenizer *pTokenizer );

@@ -13174,7 +13594,7 @@ /* Create a new auxiliary function */

int (*xCreateFunction)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_extension_function xFunction, void (*xDestroy)(void*) );

@@ -13285,7 +13705,7 @@ ** The code generator for compound SELECT statements does one

** level of recursion for each term. A stack overflow can result ** if the number of terms is too large. In practice, most SQL ** never has more than 3 or 4 terms. Use a value of 0 to disable -** any limit on the number of terms in a compount SELECT. +** any limit on the number of terms in a compound SELECT. */ #ifndef SQLITE_MAX_COMPOUND_SELECT # define SQLITE_MAX_COMPOUND_SELECT 500

@@ -13435,8 +13855,8 @@ #pragma warn -spa /* Suspicious pointer arithmetic */

#endif /* -** WAL mode depends on atomic aligned 32-bit loads and stores in a few -** places. The following macros try to make this explicit. +** A few places in the code require atomic load/store of aligned +** integer values. */ #ifndef __has_extension # define __has_extension(x) 0 /* compatibility with non-clang compilers */

@@ -13492,15 +13912,22 @@ # define SQLITE_PTR_TO_INT(X) ((int)(X))

#endif /* -** A macro to hint to the compiler that a function should not be +** Macros to hint to the compiler that a function should or should not be ** inlined. */ #if defined(__GNUC__) # define SQLITE_NOINLINE __attribute__((noinline)) +# define SQLITE_INLINE __attribute__((always_inline)) inline #elif defined(_MSC_VER) && _MSC_VER>=1310 # define SQLITE_NOINLINE __declspec(noinline) +# define SQLITE_INLINE __forceinline #else # define SQLITE_NOINLINE +# define SQLITE_INLINE +#endif +#if defined(SQLITE_COVERAGE_TEST) || defined(__STRICT_ANSI__) +# undef SQLITE_INLINE +# define SQLITE_INLINE #endif /*

@@ -13523,6 +13950,16 @@ # endif

#endif /* +** Enable SQLITE_USE_SEH by default on MSVC builds. Only omit +** SEH support if the -DSQLITE_OMIT_SEH option is given. +*/ +#if defined(_MSC_VER) && !defined(SQLITE_OMIT_SEH) +# define SQLITE_USE_SEH 1 +#else +# undef SQLITE_USE_SEH +#endif + +/* ** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2. ** 0 means mutexes are permanently disable and the library is never ** threadsafe. 1 means the library is serialized which is the highest

@@ -14318,15 +14755,9 @@ #define SQLITE_MAX_U32 ((((u64)1)<<32)-1)

/* ** The datatype used to store estimates of the number of rows in a -** table or index. This is an unsigned integer type. For 99.9% of -** the world, a 32-bit integer is sufficient. But a 64-bit integer -** can be used at compile-time if desired. +** table or index. */ -#ifdef SQLITE_64BIT_STATS - typedef u64 tRowcnt; /* 64-bit only if requested at compile-time */ -#else - typedef u32 tRowcnt; /* 32-bit is the default */ -#endif +typedef u64 tRowcnt; /* ** Estimated quantities used for query planning are stored as 16-bit

@@ -14387,8 +14818,31 @@ ** In other words, S is a buffer and E is a pointer to the first byte after

** the end of buffer S. This macro returns true if P points to something ** contained within the buffer S. */ -#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E))) +#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E))) +/* +** P is one byte past the end of a large buffer. Return true if a span of bytes +** between S..E crosses the end of that buffer. In other words, return true +** if the sub-buffer S..E-1 overflows the buffer whose last byte is P-1. +** +** S is the start of the span. E is one byte past the end of end of span. +** +** P +** |-----------------| FALSE +** |-------| +** S E +** +** P +** |-----------------| +** |-------| TRUE +** S E +** +** P +** |-----------------| +** |-------| FALSE +** S E +*/ +#define SQLITE_OVERFLOW(P,S,E) (((uptr)(S)<(uptr)(P))&&((uptr)(E)>(uptr)(P))) /* ** Macros to determine whether the machine is big or little endian,

@@ -14398,16 +14852,33 @@ ** For best performance, an attempt is made to guess at the byte-order

** using C-preprocessor macros. If that is unsuccessful, or if ** -DSQLITE_BYTEORDER=0 is set, then byte-order is determined ** at run-time. +** +** If you are building SQLite on some obscure platform for which the +** following ifdef magic does not work, you can always include either: +** +** -DSQLITE_BYTEORDER=1234 +** +** or +** +** -DSQLITE_BYTEORDER=4321 +** +** to cause the build to work for little-endian or big-endian processors, +** respectively. */ -#ifndef SQLITE_BYTEORDER -# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \ +#ifndef SQLITE_BYTEORDER /* Replicate changes at tag-20230904a */ +# if defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__ +# define SQLITE_BYTEORDER 4321 +# elif defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__ +# define SQLITE_BYTEORDER 1234 +# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__==1 +# define SQLITE_BYTEORDER 4321 +# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \ defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64) -# define SQLITE_BYTEORDER 1234 -# elif defined(sparc) || defined(__ppc__) || \ - defined(__ARMEB__) || defined(__AARCH64EB__) -# define SQLITE_BYTEORDER 4321 +# define SQLITE_BYTEORDER 1234 +# elif defined(sparc) || defined(__ARMEB__) || defined(__AARCH64EB__) +# define SQLITE_BYTEORDER 4321 # else # define SQLITE_BYTEORDER 0 # endif

@@ -14472,9 +14943,9 @@ ** underlying malloc() implementation might return us 4-byte aligned

** pointers. In that case, only verify 4-byte alignment. */ #ifdef SQLITE_4_BYTE_ALIGNED_MALLOC -# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&3)==0) +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&3)==0) #else -# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0) +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&7)==0) #endif /*

@@ -14528,15 +14999,38 @@ #if defined(SQLITE_DEBUG) \

&& (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_SELECTTRACE) \ || defined(SQLITE_ENABLE_TREETRACE)) # define TREETRACE_ENABLED 1 -# define SELECTTRACE(K,P,S,X) \ +# define TREETRACE(K,P,S,X) \ if(sqlite3TreeTrace&(K)) \ sqlite3DebugPrintf("%u/%d/%p: ",(S)->selId,(P)->addrExplain,(S)),\ sqlite3DebugPrintf X #else -# define SELECTTRACE(K,P,S,X) +# define TREETRACE(K,P,S,X) # define TREETRACE_ENABLED 0 #endif +/* TREETRACE flag meanings: +** +** 0x00000001 Beginning and end of SELECT processing +** 0x00000002 WHERE clause processing +** 0x00000004 Query flattener +** 0x00000008 Result-set wildcard expansion +** 0x00000010 Query name resolution +** 0x00000020 Aggregate analysis +** 0x00000040 Window functions +** 0x00000080 Generated column names +** 0x00000100 Move HAVING terms into WHERE +** 0x00000200 Count-of-view optimization +** 0x00000400 Compound SELECT processing +** 0x00000800 Drop superfluous ORDER BY +** 0x00001000 LEFT JOIN simplifies to JOIN +** 0x00002000 Constant propagation +** 0x00004000 Push-down optimization +** 0x00008000 After all FROM-clause analysis +** 0x00010000 Beginning of DELETE/INSERT/UPDATE processing +** 0x00020000 Transform DISTINCT into GROUP BY +** 0x00040000 SELECT tree dump after all code has been generated +*/ + /* ** Macros for "wheretrace" */

@@ -14549,6 +15043,36 @@ #else

# define WHERETRACE(K,X) #endif +/* +** Bits for the sqlite3WhereTrace mask: +** +** (---any--) Top-level block structure +** 0x-------F High-level debug messages +** 0x----FFF- More detail +** 0xFFFF---- Low-level debug messages +** +** 0x00000001 Code generation +** 0x00000002 Solver +** 0x00000004 Solver costs +** 0x00000008 WhereLoop inserts +** +** 0x00000010 Display sqlite3_index_info xBestIndex calls +** 0x00000020 Range an equality scan metrics +** 0x00000040 IN operator decisions +** 0x00000080 WhereLoop cost adjustements +** 0x00000100 +** 0x00000200 Covering index decisions +** 0x00000400 OR optimization +** 0x00000800 Index scanner +** 0x00001000 More details associated with code generation +** 0x00002000 +** 0x00004000 Show all WHERE terms at key points +** 0x00008000 Show the full SELECT statement at key places +** +** 0x00010000 Show more detail when printing WHERE terms +** 0x00020000 Show WHERE terms returned from whereScanNext() +*/ + /* ** An instance of the following structure is used to store the busy-handler

@@ -14569,7 +15093,7 @@

/* ** Name of table that holds the database schema. ** -** The PREFERRED names are used whereever possible. But LEGACY is also +** The PREFERRED names are used wherever possible. But LEGACY is also ** used for backwards compatibility. ** ** 1. Queries can use either the PREFERRED or the LEGACY names

@@ -14678,11 +15202,13 @@ typedef struct Column Column;

typedef struct Cte Cte; typedef struct CteUse CteUse; typedef struct Db Db; +typedef struct DbClientData DbClientData; typedef struct DbFixer DbFixer; typedef struct Schema Schema; typedef struct Expr Expr; typedef struct ExprList ExprList; typedef struct FKey FKey; +typedef struct FpDecode FpDecode; typedef struct FuncDestructor FuncDestructor; typedef struct FuncDef FuncDef; typedef struct FuncDefHash FuncDefHash;

@@ -14701,6 +15227,7 @@ typedef struct Parse Parse;

typedef struct ParseCleanup ParseCleanup; typedef struct PreUpdate PreUpdate; typedef struct PrintfArguments PrintfArguments; +typedef struct RCStr RCStr; typedef struct RenameToken RenameToken; typedef struct Returning Returning; typedef struct RowSet RowSet;

@@ -15338,6 +15865,10 @@ # define disable_simulated_io_errors()

# define enable_simulated_io_errors() #endif +#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL) +SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager*); +#endif + #endif /* SQLITE_PAGER_H */ /************** End of pager.h ***********************************************/

@@ -15529,7 +16060,7 @@ ** implementations with limits on what needs to be prefetched and thereby

** reduce network bandwidth. ** ** Note that BTREE_HINT_FLAGS with BTREE_BULKLOAD is the only hint used by -** standard SQLite. The other hints are provided for extentions that use +** standard SQLite. The other hints are provided for extensions that use ** the SQLite parser and code generator but substitute their own storage ** engine. */

@@ -15667,15 +16198,21 @@ SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int flags);

SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor*); SQLITE_PRIVATE void sqlite3BtreeCursorPin(BtCursor*); SQLITE_PRIVATE void sqlite3BtreeCursorUnpin(BtCursor*); -#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC SQLITE_PRIVATE i64 sqlite3BtreeOffset(BtCursor*); -#endif SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor*, u32 offset, u32 amt, void*); SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt); SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*); SQLITE_PRIVATE sqlite3_int64 sqlite3BtreeMaxRecordSize(BtCursor*); -SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(sqlite3*,Btree*,Pgno*aRoot,int nRoot,int,int*); +SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( + sqlite3 *db, /* Database connection that is running the check */ + Btree *p, /* The btree to be checked */ + Pgno *aRoot, /* An array of root pages numbers for individual trees */ + int nRoot, /* Number of entries in aRoot[] */ + int mxErr, /* Stop reporting errors after this many */ + int *pnErr, /* OUT: Write number of errors seen to this variable */ + char **pzOut /* OUT: Write the error message string here */ +); SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*); SQLITE_PRIVATE i64 sqlite3BtreeRowCountEst(BtCursor*);

@@ -15713,6 +16250,8 @@ SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *);

#endif SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor*, BtCursor*, i64); + +SQLITE_PRIVATE void sqlite3BtreeClearCache(Btree*); /* ** If we are not using shared cache, then there is no need to

@@ -15830,13 +16369,13 @@ } p4;

#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS char *zComment; /* Comment to improve readability */ #endif -#ifdef VDBE_PROFILE - u32 cnt; /* Number of times this instruction was executed */ - u64 cycles; /* Total time spent executing this instruction */ -#endif #ifdef SQLITE_VDBE_COVERAGE u32 iSrcLine; /* Source-code line that generated this opcode ** with flags in the upper 8 bits */ +#endif +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE) + u64 nExec; + u64 nCycle; #endif }; typedef struct VdbeOp VdbeOp;

@@ -16104,19 +16643,20 @@ #define OP_VBegin 170

#define OP_VCreate 171 #define OP_VDestroy 172 #define OP_VOpen 173 -#define OP_VInitIn 174 /* synopsis: r[P2]=ValueList(P1,P3) */ -#define OP_VColumn 175 /* synopsis: r[P3]=vcolumn(P2) */ -#define OP_VRename 176 -#define OP_Pagecount 177 -#define OP_MaxPgcnt 178 -#define OP_ClrSubtype 179 /* synopsis: r[P1].subtype = 0 */ -#define OP_FilterAdd 180 /* synopsis: filter(P1) += key(P3@P4) */ -#define OP_Trace 181 -#define OP_CursorHint 182 -#define OP_ReleaseReg 183 /* synopsis: release r[P1@P2] mask P3 */ -#define OP_Noop 184 -#define OP_Explain 185 -#define OP_Abortable 186 +#define OP_VCheck 174 +#define OP_VInitIn 175 /* synopsis: r[P2]=ValueList(P1,P3) */ +#define OP_VColumn 176 /* synopsis: r[P3]=vcolumn(P2) */ +#define OP_VRename 177 +#define OP_Pagecount 178 +#define OP_MaxPgcnt 179 +#define OP_ClrSubtype 180 /* synopsis: r[P1].subtype = 0 */ +#define OP_FilterAdd 181 /* synopsis: filter(P1) += key(P3@P4) */ +#define OP_Trace 182 +#define OP_CursorHint 183 +#define OP_ReleaseReg 184 /* synopsis: release r[P1@P2] mask P3 */ +#define OP_Noop 185 +#define OP_Explain 186 +#define OP_Abortable 187 /* Properties such as "out2" or "jump" that are specified in ** comments following the "case" for each opcode in the vdbe.c

@@ -16128,31 +16668,32 @@ #define OPFLG_IN2 0x04 /* in2: P2 is an input */

#define OPFLG_IN3 0x08 /* in3: P3 is an input */ #define OPFLG_OUT2 0x10 /* out2: P2 is an output */ #define OPFLG_OUT3 0x20 /* out3: P3 is an output */ +#define OPFLG_NCYCLE 0x40 /* ncycle:Cycles count against P1 */ #define OPFLG_INITIALIZER {\ -/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x01, 0x00,\ +/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x41, 0x00,\ /* 8 */ 0x01, 0x01, 0x01, 0x01, 0x03, 0x03, 0x01, 0x01,\ -/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x09, 0x09, 0x09,\ -/* 24 */ 0x09, 0x01, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,\ -/* 32 */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,\ -/* 40 */ 0x01, 0x01, 0x01, 0x26, 0x26, 0x01, 0x23, 0x0b,\ +/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x49, 0x49, 0x49,\ +/* 24 */ 0x49, 0x01, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49,\ +/* 32 */ 0x41, 0x01, 0x41, 0x41, 0x41, 0x01, 0x41, 0x41,\ +/* 40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x23, 0x0b,\ /* 48 */ 0x01, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ -/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x01,\ +/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x41,\ /* 64 */ 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10,\ /* 72 */ 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10, 0x00,\ /* 80 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x02, 0x02,\ -/* 88 */ 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x00, 0x00,\ -/* 96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x26, 0x26,\ +/* 88 */ 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x40, 0x00,\ +/* 96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x40, 0x26, 0x26,\ /* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\ -/* 112 */ 0x00, 0x00, 0x12, 0x00, 0x00, 0x10, 0x00, 0x00,\ -/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10,\ -/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,\ -/* 136 */ 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x10, 0x00,\ +/* 112 */ 0x40, 0x00, 0x12, 0x40, 0x40, 0x10, 0x40, 0x00,\ +/* 120 */ 0x00, 0x00, 0x40, 0x00, 0x40, 0x40, 0x10, 0x10,\ +/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x50,\ +/* 136 */ 0x00, 0x40, 0x04, 0x04, 0x00, 0x40, 0x50, 0x40,\ /* 144 */ 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\ /* 152 */ 0x00, 0x10, 0x00, 0x00, 0x06, 0x10, 0x00, 0x04,\ /* 160 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,\ -/* 176 */ 0x00, 0x10, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00,\ -/* 184 */ 0x00, 0x00, 0x00,} +/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x50,\ +/* 176 */ 0x40, 0x00, 0x10, 0x10, 0x02, 0x00, 0x00, 0x00,\ +/* 184 */ 0x00, 0x00, 0x00, 0x00,} /* The resolve3P2Values() routine is able to run faster if it knows ** the value of the largest JUMP opcode. The smaller the maximum

@@ -16205,14 +16746,20 @@ # define sqlite3VdbeNoJumpsOutsideSubrtn(A,B,C,D)

#endif SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp,int iLineno); #ifndef SQLITE_OMIT_EXPLAIN -SQLITE_PRIVATE void sqlite3VdbeExplain(Parse*,u8,const char*,...); +SQLITE_PRIVATE int sqlite3VdbeExplain(Parse*,u8,const char*,...); SQLITE_PRIVATE void sqlite3VdbeExplainPop(Parse*); SQLITE_PRIVATE int sqlite3VdbeExplainParent(Parse*); # define ExplainQueryPlan(P) sqlite3VdbeExplain P +# ifdef SQLITE_ENABLE_STMT_SCANSTATUS +# define ExplainQueryPlan2(V,P) (V = sqlite3VdbeExplain P) +# else +# define ExplainQueryPlan2(V,P) ExplainQueryPlan(P) +# endif # define ExplainQueryPlanPop(P) sqlite3VdbeExplainPop(P) # define ExplainQueryPlanParent(P) sqlite3VdbeExplainParent(P) #else # define ExplainQueryPlan(P) +# define ExplainQueryPlan2(V,P) # define ExplainQueryPlanPop(P) # define ExplainQueryPlanParent(P) 0 # define sqlite3ExplainBreakpoint(A,B) /*no-op*/

@@ -16321,7 +16868,7 @@ /*

** The VdbeCoverage macros are used to set a coverage testing point ** for VDBE branch instructions. The coverage testing points are line ** numbers in the sqlite3.c source file. VDBE branch coverage testing -** only works with an amalagmation build. That's ok since a VDBE branch +** only works with an amalgamation build. That's ok since a VDBE branch ** coverage build designed for testing the test suite only. No application ** should ever ship with VDBE branch coverage measuring turned on. **

@@ -16339,7 +16886,7 @@ ** // taken on the first two ways. The

** // NULL option is not possible ** ** VdbeCoverageEqNe(v) // Previous OP_Jump is only interested -** // in distingishing equal and not-equal. +** // in distinguishing equal and not-equal. ** ** Every VDBE branch operation must be tagged with one of the macros above. ** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and

@@ -16349,7 +16896,7 @@ **

** During testing, the test application will invoke ** sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE,...) to set a callback ** routine that is invoked as each bytecode branch is taken. The callback -** contains the sqlite3.c source line number ov the VdbeCoverage macro and +** contains the sqlite3.c source line number of the VdbeCoverage macro and ** flags to indicate whether or not the branch was taken. The test application ** is responsible for keeping track of this and reporting byte-code branches ** that are never taken.

@@ -16385,14 +16932,22 @@ #endif

#ifdef SQLITE_ENABLE_STMT_SCANSTATUS SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const char*); +SQLITE_PRIVATE void sqlite3VdbeScanStatusRange(Vdbe*, int, int, int); +SQLITE_PRIVATE void sqlite3VdbeScanStatusCounters(Vdbe*, int, int, int); #else -# define sqlite3VdbeScanStatus(a,b,c,d,e) +# define sqlite3VdbeScanStatus(a,b,c,d,e,f) +# define sqlite3VdbeScanStatusRange(a,b,c,d) +# define sqlite3VdbeScanStatusCounters(a,b,c,d) #endif #if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, VdbeOp*); #endif +#if defined(SQLITE_ENABLE_CURSOR_HINTS) && defined(SQLITE_DEBUG) +SQLITE_PRIVATE int sqlite3CursorRangeHintExprCheck(Walker *pWalker, Expr *pExpr); +#endif + #endif /* SQLITE_VDBE_H */ /************** End of vdbe.h ************************************************/

@@ -16441,7 +16996,7 @@ ** Elements above, except pCache, are public. All that follow are

** private to pcache.c and should not be accessed by other modules. ** pCache is grouped with the public elements for efficiency. */ - i16 nRef; /* Number of users of this page */ + i64 nRef; /* Number of users of this page */ PgHdr *pDirtyNext; /* Next element in list of dirty pages */ PgHdr *pDirtyPrev; /* Previous element in list of dirty pages */ /* NB: pDirtyNext and pDirtyPrev are undefined if the

@@ -16522,12 +17077,12 @@ /* Discard the contents of the cache */

SQLITE_PRIVATE void sqlite3PcacheClear(PCache*); /* Return the total number of outstanding page references */ -SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache*); +SQLITE_PRIVATE i64 sqlite3PcacheRefCount(PCache*); /* Increment the reference count of an existing page */ SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr*); -SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr*); +SQLITE_PRIVATE i64 sqlite3PcachePageRefcount(PgHdr*); /* Return the total number of pages stored in the cache */ SQLITE_PRIVATE int sqlite3PcachePagecount(PCache*);

@@ -16680,7 +17235,7 @@

/* ** Default synchronous levels. ** -** Note that (for historcal reasons) the PAGER_SYNCHRONOUS_* macros differ +** Note that (for historical reasons) the PAGER_SYNCHRONOUS_* macros differ ** from the SQLITE_DEFAULT_SYNCHRONOUS value by 1. ** ** PAGER_SYNCHRONOUS DEFAULT_SYNCHRONOUS

@@ -16719,7 +17274,7 @@ /*

** An instance of the following structure stores a database schema. ** ** Most Schema objects are associated with a Btree. The exception is -** the Schema for the TEMP databaes (sqlite3.aDb[1]) which is free-standing. +** the Schema for the TEMP database (sqlite3.aDb[1]) which is free-standing. ** In shared cache mode, a single Schema object can be shared by multiple ** Btrees that refer to the same underlying BtShared object. **

@@ -16830,7 +17385,7 @@ u32 anStat[3]; /* 0: hits. 1: size misses. 2: full misses */

LookasideSlot *pInit; /* List of buffers not previously used */ LookasideSlot *pFree; /* List of available buffers */ #ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE - LookasideSlot *pSmallInit; /* List of small buffers not prediously used */ + LookasideSlot *pSmallInit; /* List of small buffers not previously used */ LookasideSlot *pSmallFree; /* List of available small buffers */ void *pMiddle; /* First byte past end of full-size buffers and ** the first byte of LOOKASIDE_SMALL buffers */

@@ -16847,7 +17402,7 @@ #define DisableLookaside db->lookaside.bDisable++;db->lookaside.sz=0

#define EnableLookaside db->lookaside.bDisable--;\ db->lookaside.sz=db->lookaside.bDisable?0:db->lookaside.szTrue -/* Size of the smaller allocations in two-size lookside */ +/* Size of the smaller allocations in two-size lookaside */ #ifdef SQLITE_OMIT_TWOSIZE_LOOKASIDE # define LOOKASIDE_SMALL 0 #else

@@ -17047,6 +17602,7 @@ int nStatement; /* Number of nested statement-transactions */

i64 nDeferredCons; /* Net deferred constraints this transaction. */ i64 nDeferredImmCons; /* Net deferred immediate constraints */ int *pnBytesFreed; /* If not NULL, increment this in DbFree() */ + DbClientData *pDbData; /* sqlite3_set_clientdata() content */ #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY /* The following variables are all protected by the STATIC_MAIN ** mutex, not by sqlite3.mutex. They are used by code in notify.c.

@@ -17102,7 +17658,7 @@ ** vtabs in the schema definition */

#define SQLITE_NullCallback 0x00000100 /* Invoke the callback once if the */ /* result set is empty */ #define SQLITE_IgnoreChecks 0x00000200 /* Do not enforce check constraints */ -#define SQLITE_ReadUncommit 0x00000400 /* READ UNCOMMITTED in shared-cache */ +#define SQLITE_StmtScanStatus 0x00000400 /* Enable stmt_scanstats() counters */ #define SQLITE_NoCkptOnClose 0x00000800 /* No checkpoint on close()/DETACH */ #define SQLITE_ReverseOrder 0x00001000 /* Reverse unordered SELECTs */ #define SQLITE_RecTriggers 0x00002000 /* Enable recursive triggers */

@@ -17128,6 +17684,8 @@ #define SQLITE_CountRows HI(0x00001) /* Count rows changed by INSERT, */

/* DELETE, or UPDATE and return */ /* the count using a callback. */ #define SQLITE_CorruptRdOnly HI(0x00002) /* Prohibit writes due to error */ +#define SQLITE_ReadUncommit HI(0x00004) /* READ UNCOMMITTED in shared-cache */ +#define SQLITE_FkNoAction HI(0x00008) /* Treat all FK as NO ACTION */ /* Flags used only if debugging */ #ifdef SQLITE_DEBUG

@@ -17183,6 +17741,9 @@ #define SQLITE_ReleaseReg 0x00400000 /* Use OP_ReleaseReg for testing */

#define SQLITE_FlttnUnionAll 0x00800000 /* Disable the UNION ALL flattener */ /* TH3 expects this value ^^^^^^^^^^ See flatten04.test */ #define SQLITE_IndexedExpr 0x01000000 /* Pull exprs from index when able */ +#define SQLITE_Coroutines 0x02000000 /* Co-routines for subqueries */ +#define SQLITE_NullUnusedCols 0x04000000 /* NULL unused columns in subqueries */ +#define SQLITE_OnePass 0x08000000 /* Single-pass DELETE and UPDATE */ #define SQLITE_AllOpts 0xffffffff /* All optimizations */ /*

@@ -17265,10 +17826,17 @@ ** SQLITE_FUNC_MINMAX == NC_MinMaxAgg == SF_MinMaxAgg

** SQLITE_FUNC_ANYORDER == NC_OrderAgg == SF_OrderByReqd ** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG ** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG +** SQLITE_FUNC_BYTELEN == OPFLAG_BYTELENARG ** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API ** SQLITE_FUNC_DIRECT == SQLITE_DIRECTONLY from the API -** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS +** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS -- opposite meanings!!! ** SQLITE_FUNC_ENCMASK depends on SQLITE_UTF* macros in the API +** +** Note that even though SQLITE_FUNC_UNSAFE and SQLITE_INNOCUOUS have the +** same bit value, their meanings are inverted. SQLITE_FUNC_UNSAFE is +** used internally and if set means that the function has side effects. +** SQLITE_INNOCUOUS is used by application code and means "not unsafe". +** See multiple instances of tag-20230109-1. */ #define SQLITE_FUNC_ENCMASK 0x0003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */ #define SQLITE_FUNC_LIKE 0x0004 /* Candidate for the LIKE optimization */

@@ -17277,6 +17845,7 @@ #define SQLITE_FUNC_EPHEM 0x0010 /* Ephemeral. Delete with VDBE */

#define SQLITE_FUNC_NEEDCOLL 0x0020 /* sqlite3GetFuncCollSeq() might be called*/ #define SQLITE_FUNC_LENGTH 0x0040 /* Built-in length() function */ #define SQLITE_FUNC_TYPEOF 0x0080 /* Built-in typeof() function */ +#define SQLITE_FUNC_BYTELEN 0x00c0 /* Built-in octet_length() function */ #define SQLITE_FUNC_COUNT 0x0100 /* Built-in count(*) aggregate */ /* 0x0200 -- available for reuse */ #define SQLITE_FUNC_UNLIKELY 0x0400 /* Built-in unlikely() function */

@@ -17285,14 +17854,15 @@ #define SQLITE_FUNC_MINMAX 0x1000 /* True for min() and max() aggregates */

#define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". Value constant during a ** single query - might change over time */ #define SQLITE_FUNC_TEST 0x4000 /* Built-in testing functions */ -/* 0x8000 -- available for reuse */ +#define SQLITE_FUNC_RUNONLY 0x8000 /* Cannot be used by valueFromFunction */ #define SQLITE_FUNC_WINDOW 0x00010000 /* Built-in window-only function */ #define SQLITE_FUNC_INTERNAL 0x00040000 /* For use by NestedParse() only */ #define SQLITE_FUNC_DIRECT 0x00080000 /* Not for use in TRIGGERs or VIEWs */ -#define SQLITE_FUNC_SUBTYPE 0x00100000 /* Result likely to have sub-type */ +/* SQLITE_SUBTYPE 0x00100000 // Consumer of subtypes */ #define SQLITE_FUNC_UNSAFE 0x00200000 /* Function has side effects */ #define SQLITE_FUNC_INLINE 0x00400000 /* Functions implemented in-line */ #define SQLITE_FUNC_BUILTIN 0x00800000 /* This is a built-in function */ +/* SQLITE_RESULT_SUBTYPE 0x01000000 // Generator of subtypes */ #define SQLITE_FUNC_ANYORDER 0x08000000 /* count/min/max aggregate */ /* Identifier numbers for each in-line function */

@@ -17384,9 +17954,10 @@ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }

#define MFUNCTION(zName, nArg, xPtr, xFunc) \ {nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ xPtr, 0, xFunc, 0, 0, 0, #zName, {0} } -#define JFUNCTION(zName, nArg, iArg, xFunc) \ - {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS|\ - SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ +#define JFUNCTION(zName, nArg, bUseCache, bWS, bRS, iArg, xFunc) \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_FUNC_CONSTANT|\ + SQLITE_UTF8|((bUseCache)*SQLITE_FUNC_RUNONLY)|\ + ((bRS)*SQLITE_SUBTYPE)|((bWS)*SQLITE_RESULT_SUBTYPE), \ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } #define INLINE_FUNC(zName, nArg, iArg, mFlags) \ {nArg, SQLITE_FUNC_BUILTIN|\

@@ -17577,6 +18148,7 @@ #define SQLITE_AFF_TEXT 0x42 /* 'B' */

#define SQLITE_AFF_NUMERIC 0x43 /* 'C' */ #define SQLITE_AFF_INTEGER 0x44 /* 'D' */ #define SQLITE_AFF_REAL 0x45 /* 'E' */ +#define SQLITE_AFF_FLEXNUM 0x46 /* 'F' */ #define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC)

@@ -17647,6 +18219,7 @@ Module *pMod; /* Pointer to module implementation */

sqlite3_vtab *pVtab; /* Pointer to vtab instance */ int nRef; /* Number of pointers to this structure */ u8 bConstraint; /* True if constraints are supported */ + u8 bAllSchemas; /* True if might use any attached schema */ u8 eVtabRisk; /* Riskiness of allowing hacker access */ int iSavepoint; /* Depth of the SAVEPOINT stack */ VTable *pNext; /* Next in linked list (see above) */

@@ -17854,7 +18427,7 @@ ** referenced table row is propagated into the row that holds the

** foreign key. ** ** The OE_Default value is a place holder that means to use whatever -** conflict resolution algorthm is required from context. +** conflict resolution algorithm is required from context. ** ** The following symbolic values are used to record which type ** of conflict resolution action to take.

@@ -18027,6 +18600,7 @@ unsigned bHasExpr:1; /* Index contains an expression, either a literal

** expression, or a reference to a VIRTUAL column */ #ifdef SQLITE_ENABLE_STAT4 int nSample; /* Number of elements in aSample[] */ + int mxSample; /* Number of slots allocated to aSample[] */ int nSampleCol; /* Size of IndexSample.anEq[] and so on */ tRowcnt *aAvgEq; /* Average nEq values for keys not in aSample */ IndexSample *aSample; /* Samples of the left-most key */

@@ -18108,16 +18682,15 @@ u8 directMode; /* Direct rendering mode means take data directly

** from source tables rather than from accumulators */ u8 useSortingIdx; /* In direct mode, reference the sorting index rather ** than the source table */ + u16 nSortingColumn; /* Number of columns in the sorting index */ int sortingIdx; /* Cursor number of the sorting index */ int sortingIdxPTab; /* Cursor number of pseudo-table */ - int nSortingColumn; /* Number of columns in the sorting index */ - int mnReg, mxReg; /* Range of registers allocated for aCol and aFunc */ + int iFirstReg; /* First register in range for aCol[] and aFunc[] */ ExprList *pGroupBy; /* The group by clause */ struct AggInfo_col { /* For each column used in source tables */ Table *pTab; /* Source table */ Expr *pCExpr; /* The original expression */ int iTable; /* Cursor number of the source table */ - int iMem; /* Memory location that acts as accumulator */ i16 iColumn; /* Column number within the source table */ i16 iSorterColumn; /* Column number in the sorting index */ } *aCol;

@@ -18128,15 +18701,31 @@ ** aggregate functions */

struct AggInfo_func { /* For each aggregate function */ Expr *pFExpr; /* Expression encoding the function */ FuncDef *pFunc; /* The aggregate function implementation */ - int iMem; /* Memory location that acts as accumulator */ int iDistinct; /* Ephemeral table used to enforce DISTINCT */ int iDistAddr; /* Address of OP_OpenEphemeral */ + int iOBTab; /* Ephemeral table to implement ORDER BY */ + u8 bOBPayload; /* iOBTab has payload columns separate from key */ + u8 bOBUnique; /* Enforce uniqueness on iOBTab keys */ } *aFunc; int nFunc; /* Number of entries in aFunc[] */ u32 selId; /* Select to which this AggInfo belongs */ +#ifdef SQLITE_DEBUG + Select *pSelect; /* SELECT statement that this AggInfo supports */ +#endif }; /* +** Macros to compute aCol[] and aFunc[] register numbers. +** +** These macros should not be used prior to the call to +** assignAggregateRegisters() that computes the value of pAggInfo->iFirstReg. +** The assert()s that are part of this macro verify that constraint. +*/ +#define AggInfoColumnReg(A,I) (assert((A)->iFirstReg),(A)->iFirstReg+(I)) +#define AggInfoFuncReg(A,I) \ + (assert((A)->iFirstReg),(A)->iFirstReg+(A)->nColumn+(I)) + +/* ** The datatype ynVar is a signed integer, either 16-bit or 32-bit. ** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater ** than 32767 we have to make it 32-bit. 16-bit is preferred because

@@ -18255,7 +18844,7 @@ int iTable; /* TK_COLUMN: cursor number of table holding column

** TK_REGISTER: register number ** TK_TRIGGER: 1 -> new, 0 -> old ** EP_Unlikely: 134217728 times likelihood - ** TK_IN: ephemerial table holding RHS + ** TK_IN: ephemeral table holding RHS ** TK_SELECT_COLUMN: Number of columns on the LHS ** TK_SELECT: 1st register of result vector */ ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid.

@@ -18301,7 +18890,7 @@ #define EP_Skip 0x002000 /* Operator does not contribute to affinity */

#define EP_Reduced 0x004000 /* Expr struct EXPR_REDUCEDSIZE bytes only */ #define EP_Win 0x008000 /* Contains window functions */ #define EP_TokenOnly 0x010000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */ - /* 0x020000 // Available for reuse */ +#define EP_FullSize 0x020000 /* Expr structure must remain full sized */ #define EP_IfNullRow 0x040000 /* The TK_IF_NULL_ROW opcode */ #define EP_Unlikely 0x080000 /* unlikely() or likelihood() function */ #define EP_ConstFunc 0x100000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */

@@ -18331,12 +18920,15 @@ #define ExprSetProperty(E,P) (E)->flags|=(P)

#define ExprClearProperty(E,P) (E)->flags&=~(P) #define ExprAlwaysTrue(E) (((E)->flags&(EP_OuterON|EP_IsTrue))==EP_IsTrue) #define ExprAlwaysFalse(E) (((E)->flags&(EP_OuterON|EP_IsFalse))==EP_IsFalse) +#define ExprIsFullSize(E) (((E)->flags&(EP_Reduced|EP_TokenOnly))==0) /* Macros used to ensure that the correct members of unions are accessed ** in Expr. */ #define ExprUseUToken(E) (((E)->flags&EP_IntValue)==0) #define ExprUseUValue(E) (((E)->flags&EP_IntValue)!=0) +#define ExprUseWOfst(E) (((E)->flags&(EP_InnerON|EP_OuterON))==0) +#define ExprUseWJoin(E) (((E)->flags&(EP_InnerON|EP_OuterON))!=0) #define ExprUseXList(E) (((E)->flags&EP_xIsSelect)==0) #define ExprUseXSelect(E) (((E)->flags&EP_xIsSelect)!=0) #define ExprUseYTab(E) (((E)->flags&(EP_WinFunc|EP_Subrtn))==0)

@@ -18446,6 +19038,7 @@ */

#define ENAME_NAME 0 /* The AS clause of a result set */ #define ENAME_SPAN 1 /* Complete text of the result set expression */ #define ENAME_TAB 2 /* "DB.TABLE.NAME" for the result set */ +#define ENAME_ROWID 3 /* "DB.TABLE._rowid_" for * expansion of rowid */ /* ** An instance of this structure can hold a simple list of identifiers,

@@ -18525,7 +19118,7 @@ unsigned isCte :1; /* This is a CTE */

unsigned notCte :1; /* This item may not match a CTE */ unsigned isUsing :1; /* u3.pUsing is valid */ unsigned isOn :1; /* u3.pOn was once valid and non-NULL */ - unsigned isSynthUsing :1; /* u3.pUsing is synthensized from NATURAL */ + unsigned isSynthUsing :1; /* u3.pUsing is synthesized from NATURAL */ unsigned isNestedFrom :1; /* pSelect is a SF_NestedFrom subquery */ } fg; int iCursor; /* The VDBE cursor number used to access this table */

@@ -18666,7 +19259,7 @@ #define NC_GenCol 0x000008 /* True for a GENERATED ALWAYS AS clause */

#define NC_HasAgg 0x000010 /* One or more aggregate functions seen */ #define NC_IdxExpr 0x000020 /* True if resolving columns of CREATE INDEX */ #define NC_SelfRef 0x00002e /* Combo: PartIdx, isCheck, GenCol, and IdxExpr */ -#define NC_VarSelect 0x000040 /* A correlated subquery has been seen */ +#define NC_Subquery 0x000040 /* A subquery has been seen */ #define NC_UEList 0x000080 /* True if uNC.pEList is used */ #define NC_UAggInfo 0x000100 /* True if uNC.pAggInfo is used */ #define NC_UUpsert 0x000200 /* True if uNC.pUpsert is used */

@@ -18795,6 +19388,7 @@ #define SF_PushDown 0x1000000 /* SELECT has be modified by push-down opt */

#define SF_MultiPart 0x2000000 /* Has multiple incompatible PARTITIONs */ #define SF_CopyCte 0x4000000 /* SELECT statement is a copy of a CTE */ #define SF_OrderByReqd 0x8000000 /* The ORDER BY clause may not be omitted */ +#define SF_UpdateFrom 0x10000000 /* Query originates with UPDATE FROM */ /* True if S exists and has SF_NestedFrom */ #define IsNestedFrom(S) ((S)!=0 && ((S)->selFlags&SF_NestedFrom)!=0)

@@ -18903,7 +19497,7 @@ int iSDParm; /* A parameter used by the eDest disposal method */

int iSDParm2; /* A second parameter for the eDest disposal method */ int iSdst; /* Base register where results are written */ int nSdst; /* Number of registers allocated */ - char *zAffSdst; /* Affinity used for SRT_Set, SRT_Table, and similar */ + char *zAffSdst; /* Affinity used for SRT_Set */ ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */ };

@@ -18962,10 +19556,10 @@ # define DbMaskNonZero(M) (sqlite3DbMaskAllZero(M)==0)

#else typedef unsigned int yDbMask; # define DbMaskTest(M,I) (((M)&(((yDbMask)1)<<(I)))!=0) -# define DbMaskZero(M) (M)=0 -# define DbMaskSet(M,I) (M)|=(((yDbMask)1)<<(I)) -# define DbMaskAllZero(M) (M)==0 -# define DbMaskNonZero(M) (M)!=0 +# define DbMaskZero(M) ((M)=0) +# define DbMaskSet(M,I) ((M)|=(((yDbMask)1)<<(I))) +# define DbMaskAllZero(M) ((M)==0) +# define DbMaskNonZero(M) ((M)!=0) #endif /*
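A note on why the added parentheses matter: without them the macro body can bind to operators at the call site. The sketch below copies the old and new DbMaskSet definitions under illustrative names and shows a hypothetical misuse (not taken from SQLite) where the two forms parse differently:

    #include <stdio.h>

    typedef unsigned int yDbMask;

    #define OldDbMaskSet(M,I)  (M)|=(((yDbMask)1)<<(I))     /* pre-change form */
    #define NewDbMaskSet(M,I)  ((M)|=(((yDbMask)1)<<(I)))   /* post-change form */

    int main(void){
      yDbMask a = 0, b = 0;
      int changedA, changedB;

      /* Intent: set bit 3, then record whether the mask is now non-zero. */
      changedA = OldDbMaskSet(a, 3) != 0;  /* parses as a |= ((1u<<3)!=0), so a ends up 1 */
      changedB = NewDbMaskSet(b, 3) != 0;  /* parses as (b |= (1u<<3)) != 0, so b ends up 8 */

      printf("old macro: mask=%u changed=%d\n", a, changedA);  /* mask=1 changed=1 */
      printf("new macro: mask=%u changed=%d\n", b, changedB);  /* mask=8 changed=1 */
      return 0;
    }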

@@ -18984,6 +19578,7 @@ int iDataCur; /* The data cursor associated with the index */

int iIdxCur; /* The index cursor */ int iIdxCol; /* The index column that contains value of pExpr */ u8 bMaybeNullRow; /* True if we need an OP_IfNullRow check */ + u8 aff; /* Affinity of the pExpr expression */ IndexedExpr *pIENext; /* Next in a list of all indexed expressions */ #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS const char *zIdxName; /* Name of index, used only for bytecode comments */

@@ -19036,6 +19631,9 @@ u8 withinRJSubrtn; /* Nesting level for RIGHT JOIN body subroutines */

#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) u8 earlyCleanup; /* OOM inside sqlite3ParserAddCleanup() */ #endif +#ifdef SQLITE_DEBUG + u8 ifNotExists; /* Might be true if IF NOT EXISTS. Assert()s only */ +#endif int nRangeReg; /* Size of the temporary register block */ int iRangeReg; /* First register in temporary register block */ int nErr; /* Number of errors seen */

@@ -19048,7 +19646,8 @@ int nLabel; /* The *negative* of the number of labels used */

int nLabelAlloc; /* Number of slots in aLabel */ int *aLabel; /* Space to hold the labels */ ExprList *pConstExpr;/* Constant expressions */ - IndexedExpr *pIdxExpr;/* List of expressions used by active indexes */ + IndexedExpr *pIdxEpr;/* List of expressions used by active indexes */ + IndexedExpr *pIdxPartExpr; /* Exprs constrained by index WHERE clauses */ Token constraintName;/* Name of the constraint currently being parsed */ yDbMask writeMask; /* Start a write transaction on these databases */ yDbMask cookieMask; /* Bitmask of schema verified databases */

@@ -19056,6 +19655,9 @@ int regRowid; /* Register holding rowid of CREATE TABLE entry */

int regRoot; /* Register holding root page number for new objects */ int nMaxArg; /* Max args passed to user function by sub-program */ int nSelect; /* Number of SELECT stmts. Counter for Select.selId */ +#ifndef SQLITE_OMIT_PROGRESS_CALLBACK + u32 nProgressSteps; /* xProgress steps taken during sqlite3_prepare() */ +#endif #ifndef SQLITE_OMIT_SHARED_CACHE int nTableLock; /* Number of locks in aTableLock */ TableLock *aTableLock; /* Required table locks for shared-cache mode */

@@ -19069,9 +19671,9 @@ union {

int addrCrTab; /* Address of OP_CreateBtree on CREATE TABLE */ Returning *pReturning; /* The RETURNING clause */ } u1; - u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */ u32 oldmask; /* Mask of old.* columns referenced */ u32 newmask; /* Mask of new.* columns referenced */ + LogEst nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */ u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */ u8 bReturning; /* Coding a RETURNING trigger */ u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */

@@ -19195,6 +19797,7 @@ #define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */

#define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */ #define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */ #define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */ +#define OPFLAG_BYTELENARG 0xc0 /* OP_Column only for octet_length() */ #define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */ #define OPFLAG_SEEKEQ 0x02 /* OP_Open** cursor uses EQ seek only */ #define OPFLAG_FORDELETE 0x08 /* OP_Open should use BTREE_FORDELETE */

@@ -19316,6 +19919,7 @@ TriggerStep retTStep; /* The trigger step */

int iRetCur; /* Transient table holding RETURNING results */ int nRetCol; /* Number of in pReturnEL after expansion */ int iRetReg; /* Register array for holding a row of RETURNING */ + char zName[40]; /* Name of trigger: "sqlite_returning_%p" */ }; /*

@@ -19337,6 +19941,25 @@ #define SQLITE_PRINTF_MALLOCED 0x04 /* True if xText is allocated space */

#define isMalloced(X) (((X)->printfFlags & SQLITE_PRINTF_MALLOCED)!=0) +/* +** The following object is the header for an "RCStr" or "reference-counted +** string". An RCStr is passed around and used like any other char* +** that has been dynamically allocated. The important interface +** differences: +** +** 1. RCStr strings are reference counted. They are deallocated +** when the reference count reaches zero. +** +** 2. Use sqlite3RCStrUnref() to free an RCStr string rather than +** sqlite3_free() +** +** 3. Make a (read-only) copy of a read-only RCStr string using +** sqlite3RCStrRef(). +*/ +struct RCStr { + u64 nRCRef; /* Number of references */ + /* Total structure size should be a multiple of 8 bytes for alignment */ +}; /* ** A pointer to this structure is used to communicate information
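As a standalone illustration of the pattern described above: the reference count sits in a small header immediately before the character data, so callers keep passing an ordinary char* around. The mini* names below are invented; the real sqlite3RCStrNew()/Ref()/Unref() routines appear later in this diff:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct MiniRCStr { unsigned long long nRef; } MiniRCStr;

    static char *miniRCStrNew(size_t n){
      MiniRCStr *p = malloc(sizeof(*p) + n + 1);
      if( p==0 ) return 0;
      p->nRef = 1;
      return (char*)&p[1];           /* caller only ever sees the text area */
    }
    static char *miniRCStrRef(char *z){
      ((MiniRCStr*)z)[-1].nRef++;    /* bump the count stored just before z */
      return z;
    }
    static void miniRCStrUnref(char *z){
      MiniRCStr *p = ((MiniRCStr*)z) - 1;
      if( --p->nRef==0 ) free(p);    /* last reference frees header and text */
    }

    int main(void){
      char *z = miniRCStrNew(5);
      if( z==0 ) return 1;
      memcpy(z, "hello", 6);
      char *z2 = miniRCStrRef(z);    /* second owner, no copy made */
      printf("%s %s\n", z, z2);
      miniRCStrUnref(z2);
      miniRCStrUnref(z);
      return 0;
    }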

@@ -19363,7 +19986,7 @@

/* Tuning parameters are set using SQLITE_TESTCTRL_TUNE and are controlled ** on debug-builds of the CLI using ".testctrl tune ID VALUE". Tuning ** parameters are for temporary use during development, to help find -** optimial values for parameters in the query planner. The should not +** optimal values for parameters in the query planner. The should not ** be used on trunk check-ins. They are a temporary mechanism available ** for transient development builds only. **

@@ -19389,6 +20012,7 @@ u8 bOpenUri; /* True to interpret filenames as URIs */

u8 bUseCis; /* Use covering indices for full-scans */ u8 bSmallMalloc; /* Avoid large memory allocations if true */ u8 bExtraSchemaChecks; /* Verify type,name,tbl_name in schema */ + u8 bUseLongDouble; /* Make use of long double */ int mxStrlen; /* Maximum string length */ int neverCorrupt; /* Database is always well-formed */ int szLookaside; /* Default lookaside buffer size */

@@ -19475,6 +20099,7 @@ int (*xSelectCallback)(Walker*,Select*); /* Callback for SELECTs */

void (*xSelectCallback2)(Walker*,Select*);/* Second callback for SELECTs */ int walkerDepth; /* Number of subqueries */ u16 eCode; /* A small processing code */ + u16 mWFlags; /* Use-dependent flags */ union { /* Extra data for callback */ NameContext *pNC; /* Naming context */ int n; /* A counter */

@@ -19493,6 +20118,7 @@ struct Table *pTab; /* Table of generated column */

struct CoveringIndexCheck *pCovIdxCk; /* Check for covering index */ SrcItem *pSrcItem; /* A single FROM clause item */ DbFixer *pFix; /* See sqlite3FixSelect() */ + Mem *aMem; /* See sqlite3BtreeCursorHint() */ } u; };

@@ -19513,6 +20139,7 @@ };

/* Forward declarations */ SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*); +SQLITE_PRIVATE int sqlite3WalkExprNN(Walker*, Expr*); SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*); SQLITE_PRIVATE int sqlite3WalkSelect(Walker*, Select*); SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker*, Select*);

@@ -19593,6 +20220,16 @@ u8 eM10d; /* The MATERIALIZED flag */

}; +/* Client data associated with sqlite3_set_clientdata() and +** sqlite3_get_clientdata(). +*/ +struct DbClientData { + DbClientData *pNext; /* Next in a linked list */ + void *pData; /* The data */ + void (*xDestructor)(void*); /* Destructor. Might be NULL */ + char zName[1]; /* Name of this client data. MUST BE LAST */ +}; + #ifdef SQLITE_DEBUG /* ** An instance of the TreeView object is used for printing the content of
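This structure backs the public sqlite3_set_clientdata()/sqlite3_get_clientdata() pair. A hedged usage sketch follows; it assumes a libsqlite3 recent enough to export those two functions (they are a recent addition, around 3.44.0), links with -lsqlite3, and the "my-counter" key and integer payload are invented for the example:

    #include <sqlite3.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void){
      sqlite3 *db;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

      int *counter = malloc(sizeof(int));
      *counter = 42;

      /* Attach the pointer under a name; the destructor runs when the entry
      ** is overwritten or the connection is closed. */
      sqlite3_set_clientdata(db, "my-counter", counter, free);

      int *again = sqlite3_get_clientdata(db, "my-counter");
      printf("%d\n", *again);        /* prints 42 */

      sqlite3_close(db);             /* destructor frees the counter */
      return 0;
    }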

@@ -19762,6 +20399,8 @@ # define sqlite3Isdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x04)

# define sqlite3Isxdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x08) # define sqlite3Tolower(x) (sqlite3UpperToLower[(unsigned char)(x)]) # define sqlite3Isquote(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x80) +# define sqlite3JsonId1(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x42) +# define sqlite3JsonId2(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x46) #else # define sqlite3Toupper(x) toupper((unsigned char)(x)) # define sqlite3Isspace(x) isspace((unsigned char)(x))

@@ -19771,6 +20410,8 @@ # define sqlite3Isdigit(x) isdigit((unsigned char)(x))

# define sqlite3Isxdigit(x) isxdigit((unsigned char)(x)) # define sqlite3Tolower(x) tolower((unsigned char)(x)) # define sqlite3Isquote(x) ((x)=='"'||(x)=='\''||(x)=='['||(x)=='`') +# define sqlite3JsonId1(x) (sqlite3IsIdChar(x)&&(x)<'0') +# define sqlite3JsonId2(x) sqlite3IsIdChar(x) #endif SQLITE_PRIVATE int sqlite3IsIdChar(u8);

@@ -19820,13 +20461,11 @@ */

#ifdef SQLITE_USE_ALLOCA # define sqlite3StackAllocRaw(D,N) alloca(N) # define sqlite3StackAllocRawNN(D,N) alloca(N) -# define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N) # define sqlite3StackFree(D,P) # define sqlite3StackFreeNN(D,P) #else # define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N) # define sqlite3StackAllocRawNN(D,N) sqlite3DbMallocRawNN(D,N) -# define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N) # define sqlite3StackFree(D,P) sqlite3DbFree(D,P) # define sqlite3StackFreeNN(D,P) sqlite3DbFreeNN(D,P) #endif

@@ -19892,6 +20531,20 @@ int nUsed; /* Number of arguments used so far */

sqlite3_value **apArg; /* The argument values */ }; +/* +** An instance of this object receives the decoding of a floating point +** value into an approximate decimal representation. +*/ +struct FpDecode { + char sign; /* '+' or '-' */ + char isSpecial; /* 1: Infinity 2: NaN */ + int n; /* Significant digits in the decode */ + int iDP; /* Location of the decimal point */ + char *z; /* Start of significant digits */ + char zBuf[24]; /* Storage for significant digits */ +}; + +SQLITE_PRIVATE void sqlite3FpDecode(FpDecode*,double,int,int); SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3*,const char*, ...); SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3*,const char*, va_list); #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
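The struct above only receives the result of SQLite's own binary-to-decimal conversion; as a rough standalone approximation of what lands in those fields, the sketch below fills a similar (invented) MiniFpDecode from snprintf() output instead of sqlite3FpDecode(), and ignores the Infinity/NaN cases the real code handles:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct MiniFpDecode {
      char sign;       /* '+' or '-' */
      int  n;          /* number of significant digits in zBuf */
      int  iDP;        /* decimal point belongs after zBuf[iDP-1] */
      char zBuf[24];   /* the digits, with no sign and no '.' */
    } MiniFpDecode;

    static void miniFpDecode(MiniFpDecode *p, double r, int nDigit){
      char tmp[48];
      snprintf(tmp, sizeof(tmp), "%+.*e", nDigit-1, r);  /* e.g. "+3.14159e+00" */
      p->sign = tmp[0];
      p->zBuf[0] = tmp[1];                               /* digit before the '.' */
      memcpy(&p->zBuf[1], &tmp[3], nDigit-1);            /* digits after the '.' */
      p->zBuf[nDigit] = 0;
      p->n = nDigit;
      p->iDP = atoi(strchr(tmp, 'e')+1) + 1;             /* 3.14159 -> iDP==1 */
    }

    int main(void){
      MiniFpDecode d;
      miniFpDecode(&d, 3.14159, 6);
      printf("sign=%c digits=%s iDP=%d\n", d.sign, d.zBuf, d.iDP);
      /* prints: sign=+ digits=314159 iDP=1 */
      return 0;
    }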

@@ -19951,6 +20604,7 @@ #endif

#endif SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*); +SQLITE_PRIVATE void sqlite3ProgressCheck(Parse*); SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...); SQLITE_PRIVATE int sqlite3ErrorToParser(sqlite3*,int); SQLITE_PRIVATE void sqlite3Dequote(char*);

@@ -19965,6 +20619,10 @@ SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse*,int);

SQLITE_PRIVATE int sqlite3GetTempRange(Parse*,int); SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse*,int,int); SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse*); +SQLITE_PRIVATE void sqlite3TouchRegister(Parse*,int); +#if defined(SQLITE_ENABLE_STAT4) || defined(SQLITE_DEBUG) +SQLITE_PRIVATE int sqlite3FirstAvailableRegister(Parse*,int); +#endif #ifdef SQLITE_DEBUG SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse*,int,int); #endif

@@ -19976,6 +20634,8 @@ SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*);

SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse*,Expr*, Expr*); SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr*); SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, const Token*, int); +SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy(Parse*,Expr*,ExprList*); +SQLITE_PRIVATE void sqlite3ExprOrderByAggregateError(Parse*,Expr*); SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*); SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*);

@@ -20008,7 +20668,7 @@ SQLITE_PRIVATE const char *sqlite3ColumnColl(Column*);

SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3*,Table*); SQLITE_PRIVATE void sqlite3GenerateColumnNames(Parse *pParse, Select *pSelect); SQLITE_PRIVATE int sqlite3ColumnsFromExprList(Parse*,ExprList*,i16*,Column**); -SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation(Parse*,Table*,Select*,char); +SQLITE_PRIVATE void sqlite3SubqueryColumnTypes(Parse*,Table*,Select*,char); SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*,char); SQLITE_PRIVATE void sqlite3OpenSchemaTable(Parse *, int); SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table*);

@@ -20115,7 +20775,7 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*,

Expr*,ExprList*,u32,Expr*); SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*); SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*); -SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, int); +SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, Trigger*); SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); #if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) SQLITE_PRIVATE Expr *sqlite3LimitWhere(Parse*,SrcList*,Expr*,ExprList*,Expr*,char*);

@@ -20177,7 +20837,7 @@ SQLITE_PRIVATE int sqlite3ExprCompare(const Parse*,const Expr*,const Expr*, int);

SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr*,Expr*,int); SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList*,const ExprList*, int); SQLITE_PRIVATE int sqlite3ExprImpliesExpr(const Parse*,const Expr*,const Expr*, int); -SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int); +SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int,int); SQLITE_PRIVATE void sqlite3AggInfoPersistWalkerInit(Walker*,Parse*); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*);

@@ -20204,7 +20864,7 @@ SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*);

SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8); SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse*, Expr*, ExprList*); SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int); -SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr*,const SrcItem*); +SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint(Expr*,const SrcList*,int); #ifdef SQLITE_ENABLE_CURSOR_HINTS SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*); #endif

@@ -20212,6 +20872,7 @@ SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr*, int*);

SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*); SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char); SQLITE_PRIVATE int sqlite3IsRowid(const char*); +SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab); SQLITE_PRIVATE void sqlite3GenerateRowDelete( Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8,int); SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*, int);

@@ -20326,9 +20987,10 @@ SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*);

SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*); SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*); SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*); + SQLITE_PRIVATE int sqlite3RealSameAsInt(double,sqlite3_int64); SQLITE_PRIVATE i64 sqlite3RealToI64(double); -SQLITE_PRIVATE void sqlite3Int64ToText(i64,char*); +SQLITE_PRIVATE int sqlite3Int64ToText(i64,char*); SQLITE_PRIVATE int sqlite3AtoF(const char *z, double*, int, u8); SQLITE_PRIVATE int sqlite3GetInt32(const char *, int*); SQLITE_PRIVATE int sqlite3GetUInt32(const char*, u32*);

@@ -20379,6 +21041,7 @@ SQLITE_PRIVATE char sqlite3CompareAffinity(const Expr *pExpr, char aff2);

SQLITE_PRIVATE int sqlite3IndexAffinityOk(const Expr *pExpr, char idx_affinity); SQLITE_PRIVATE char sqlite3TableColumnAffinity(const Table*,int); SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr); +SQLITE_PRIVATE int sqlite3ExprDataType(const Expr *pExpr); SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8); SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*); SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...);

@@ -20395,6 +21058,9 @@ #endif

#ifndef SQLITE_OMIT_DESERIALIZE SQLITE_PRIVATE int sqlite3MemdbInit(void); +SQLITE_PRIVATE int sqlite3IsMemdb(const sqlite3_vfs*); +#else +# define sqlite3IsMemdb(X) 0 #endif SQLITE_PRIVATE const char *sqlite3ErrStr(int);

@@ -20426,6 +21092,7 @@ #endif

SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,u8); SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value*, u8); +SQLITE_PRIVATE int sqlite3ValueIsOfClass(const sqlite3_value*, void(*)(void*)); SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value*, u8); SQLITE_PRIVATE void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8, void(*)(void*));

@@ -20477,7 +21144,8 @@ SQLITE_PRIVATE int sqlite3MatchEName(

const struct ExprList_item*, const char*, const char*, - const char* + const char*, + int* ); SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr*); SQLITE_PRIVATE u8 sqlite3StrIHash(const char*);

@@ -20533,8 +21201,13 @@ SQLITE_PRIVATE void sqlite3OomClear(sqlite3*);

SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int); SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *); +SQLITE_PRIVATE char *sqlite3RCStrRef(char*); +SQLITE_PRIVATE void sqlite3RCStrUnref(void*); +SQLITE_PRIVATE char *sqlite3RCStrNew(u64); +SQLITE_PRIVATE char *sqlite3RCStrResize(char*,u64); + SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int); -SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum*, int); +SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum*, i64); SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*); SQLITE_PRIVATE void sqlite3StrAccumSetError(StrAccum*, u8); SQLITE_PRIVATE void sqlite3ResultStrAccum(sqlite3_context*,StrAccum*);

@@ -20648,10 +21321,7 @@ SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3*, int, const char *);

SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *, VTable *); SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*); -#if (defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)) \ - && !defined(SQLITE_OMIT_VIRTUALTABLE) -SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(sqlite3_index_info*); -#endif +SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(Parse*); SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*); SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int); SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *);

@@ -20787,6 +21457,7 @@ #else

#define sqlite3SelectExprHeight(x) 0 #define sqlite3ExprCheckHeight(x,y) #endif +SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr*,int); SQLITE_PRIVATE u32 sqlite3Get4byte(const u8*); SQLITE_PRIVATE void sqlite3Put4byte(u8*, u32);

@@ -20892,6 +21563,18 @@ #if SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL)

SQLITE_PRIVATE int sqlite3KvvfsInit(void); #endif +#if defined(VDBE_PROFILE) \ + || defined(SQLITE_PERFORMANCE_TRACE) \ + || defined(SQLITE_ENABLE_STMT_SCANSTATUS) +SQLITE_PRIVATE sqlite3_uint64 sqlite3Hwtime(void); +#endif + +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS +# define IS_STMT_SCANSTATUS(db) (db->flags & SQLITE_StmtScanStatus) +#else +# define IS_STMT_SCANSTATUS(db) 0 +#endif + #endif /* SQLITEINT_H */ /************** End of sqliteInt.h *******************************************/

@@ -20933,101 +21616,6 @@ ** on i486 hardware.

*/ #ifdef SQLITE_PERFORMANCE_TRACE -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. -*/ -/************** Include hwtime.h in the middle of os_common.h ****************/ -/************** Begin file hwtime.h ******************************************/ -/* -** 2008 May 27 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains inline asm code for retrieving "high-performance" -** counters for x86 and x86_64 class CPUs. -*/ -#ifndef SQLITE_HWTIME_H -#define SQLITE_HWTIME_H - -/* -** The following routine only works on pentium-class (or newer) processors. -** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. -*/ -#if !defined(__STRICT_ANSI__) && \ - (defined(__GNUC__) || defined(_MSC_VER)) && \ - (defined(i386) || defined(__i386__) || defined(_M_IX86)) - - #if defined(__GNUC__) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - - #elif defined(_MSC_VER) - - __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ - __asm { - rdtsc - ret ; return value at EDX:EAX - } - } - - #endif - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long val; - __asm__ __volatile__ ("rdtsc" : "=A" (val)); - return val; - } - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long long retval; - unsigned long junk; - __asm__ __volatile__ ("\n\ - 1: mftbu %1\n\ - mftb %L0\n\ - mftbu %0\n\ - cmpw %0,%1\n\ - bne 1b" - : "=r" (retval), "=r" (junk)); - return retval; - } - -#else - - /* - ** asm() is needed for hardware timing support. Without asm(), - ** disable the sqlite3Hwtime() routine. - ** - ** sqlite3Hwtime() is only used for some obscure debugging - ** and analysis configurations, not in any deliverable, so this - ** should not be a great loss. - */ -SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } - -#endif - -#endif /* !defined(SQLITE_HWTIME_H) */ - -/************** End of hwtime.h **********************************************/ -/************** Continuing where we left off in os_common.h ******************/ - static sqlite_uint64 g_start; static sqlite_uint64 g_elapsed; #define TIMER_START g_start=sqlite3Hwtime()

@@ -21155,9 +21743,6 @@ #endif

#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC "4_BYTE_ALIGNED_MALLOC", #endif -#ifdef SQLITE_64BIT_STATS - "64BIT_STATS", -#endif #ifdef SQLITE_ALLOW_COVERING_INDEX_SCAN # if SQLITE_ALLOW_COVERING_INDEX_SCAN != 1 "ALLOW_COVERING_INDEX_SCAN=" CTIMEOPT_VAL(SQLITE_ALLOW_COVERING_INDEX_SCAN),

@@ -21453,6 +22038,9 @@ #endif

#ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS "EXPLAIN_ESTIMATED_ROWS", #endif +#ifdef SQLITE_EXTRA_AUTOEXT + "EXTRA_AUTOEXT=" CTIMEOPT_VAL(SQLITE_EXTRA_AUTOEXT), +#endif #ifdef SQLITE_EXTRA_IFNULLROW "EXTRA_IFNULLROW", #endif

@@ -21493,6 +22081,9 @@ "INT64_TYPE",

#endif #ifdef SQLITE_INTEGRITY_CHECK_ERROR_MAX "INTEGRITY_CHECK_ERROR_MAX=" CTIMEOPT_VAL(SQLITE_INTEGRITY_CHECK_ERROR_MAX), +#endif +#ifdef SQLITE_LEGACY_JSON_VALID + "LEGACY_JSON_VALID", #endif #ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS "LIKE_DOESNT_MATCH_BLOBS",

@@ -21731,6 +22322,9 @@ #endif

#ifdef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS "OMIT_SCHEMA_VERSION_PRAGMAS", #endif +#ifdef SQLITE_OMIT_SEH + "OMIT_SEH", +#endif #ifdef SQLITE_OMIT_SHARED_CACHE "OMIT_SHARED_CACHE", #endif

@@ -21982,7 +22576,7 @@ ** isdigit() 0x04

** isalnum() 0x06 ** isxdigit() 0x08 ** toupper() 0x20 -** SQLite identifier character 0x40 +** SQLite identifier character 0x40 $, _, or non-ascii ** Quote character 0x80 ** ** Bit 0x20 is set if the mapped character requires translation to upper

@@ -22128,6 +22722,7 @@ SQLITE_USE_URI, /* bOpenUri */

SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */ 0, /* bSmallMalloc */ 1, /* bExtraSchemaChecks */ + sizeof(LONGDOUBLE_TYPE)>8, /* bUseLongDouble */ 0x7ffffffe, /* mxStrlen */ 0, /* neverCorrupt */ SQLITE_DEFAULT_LOOKASIDE, /* szLookaside, nLookaside */

@@ -22176,7 +22771,7 @@ 0x7ffffffe, /* iOnceResetThreshold */

SQLITE_DEFAULT_SORTERREF_SIZE, /* szSorterRef */ 0, /* iPrngSeed */ #ifdef SQLITE_DEBUG - {0,0,0,0,0,0} /* aTune */ + {0,0,0,0,0,0}, /* aTune */ #endif };

@@ -22357,6 +22952,9 @@

/* Elements of the linked list at Vdbe.pAuxData */ typedef struct AuxData AuxData; +/* A cache of large TEXT or BLOB values in a VdbeCursor */ +typedef struct VdbeTxtBlbCache VdbeTxtBlbCache; + /* Types of VDBE cursors */ #define CURTYPE_BTREE 0 #define CURTYPE_SORTER 1

@@ -22388,6 +22986,7 @@ Bool isEphemeral:1; /* True for an ephemeral table */

Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */ Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */ Bool noReuse:1; /* OpenEphemeral may not reuse this cursor */ + Bool colCache:1; /* pCache pointer is initialized and non-NULL */ u16 seekHit; /* See the OP_SeekHit and OP_IfNoHope opcodes */ union { /* pBtx for isEphermeral. pAltMap otherwise */ Btree *pBtx; /* Separate file holding temporary table */

@@ -22428,6 +23027,7 @@ u32 szRow; /* Byte available in aRow */

#ifdef SQLITE_ENABLE_COLUMN_USED_MASK u64 maskUsed; /* Mask of columns used by this cursor */ #endif + VdbeTxtBlbCache *pCache; /* Cache of large TEXT or BLOB values */ /* 2*nField extra array elements allocated for aType[], beyond the one ** static element declared in the structure. nField total array slots for

@@ -22440,13 +23040,26 @@ */

#define IsNullCursor(P) \ ((P)->eCurType==CURTYPE_PSEUDO && (P)->nullRow && (P)->seekResult==0) - /* ** A value for VdbeCursor.cacheStatus that means the cache is always invalid. */ #define CACHE_STALE 0 /* +** Large TEXT or BLOB values can be slow to load, so we want to avoid +** loading them more than once. For that reason, large TEXT and BLOB values +** can be stored in a cache defined by this object, and attached to the +** VdbeCursor using the pCache field. +*/ +struct VdbeTxtBlbCache { + char *pCValue; /* A RCStr buffer to hold the value */ + i64 iOffset; /* File offset of the row being cached */ + int iCol; /* Column for which the cache is valid */ + u32 cacheStatus; /* Vdbe.cacheCtr value */ + u32 colCacheCtr; /* Column cache counter */ +}; + +/* ** When a sub-program is executed (OP_Program), a structure of this type ** is allocated to store the current value of the program counter, as ** well as the current memory cell array and various other frame specific

@@ -22472,7 +23085,6 @@ struct VdbeFrame {

Vdbe *v; /* VM this frame belongs to */ VdbeFrame *pParent; /* Parent of this frame, or NULL if parent is main */ Op *aOp; /* Program instructions for parent frame */ - i64 *anExec; /* Event counters from parent frame */ Mem *aMem; /* Array of memory cells for parent frame */ VdbeCursor **apCsr; /* Array of Vdbe cursors for parent frame */ u8 *aOnce; /* Bitmask used by OP_Once */

@@ -22688,10 +23300,19 @@ typedef unsigned bft; /* Bit Field Type */

/* The ScanStatus object holds a single value for the ** sqlite3_stmt_scanstatus() interface. +** +** aAddrRange[]: +** This array is used by ScanStatus elements associated with EQP +** notes that make an SQLITE_SCANSTAT_NCYCLE value available. It is +** an array of up to 3 ranges of VM addresses for which the Vdbe.anCycle[] +** values should be summed to calculate the NCYCLE value. Each pair of +** integer addresses is a start and end address (both inclusive) for a range +** instructions. A start value of 0 indicates an empty range. */ typedef struct ScanStatus ScanStatus; struct ScanStatus { int addrExplain; /* OP_Explain for loop */ + int aAddrRange[6]; int addrLoop; /* Address of "loops" counter */ int addrVisit; /* Address of "rows visited" counter */ int iSelectID; /* The "Select-ID" for this loop */
/* The ScanStatus object holds a single value for the ** sqlite3_stmt_scanstatus() interface. +** +** aAddrRange[]: +** This array is used by ScanStatus elements associated with EQP +** notes that make an SQLITE_SCANSTAT_NCYCLE value available. It is +** an array of up to 3 ranges of VM addresses for which the Vdbe.anCycle[] +** values should be summed to calculate the NCYCLE value. Each pair of +** integer addresses is a start and end address (both inclusive) for a range of +** instructions. A start value of 0 indicates an empty range. */ typedef struct ScanStatus ScanStatus; struct ScanStatus { int addrExplain; /* OP_Explain for loop */ + int aAddrRange[6]; int addrLoop; /* Address of "loops" counter */ int addrVisit; /* Address of "rows visited" counter */ int iSelectID; /* The "Select-ID" for this loop */

@@ -22747,7 +23368,7 @@ Op *aOp; /* Space to hold the virtual machine's program */

int nOp; /* Number of instructions in the program */ int nOpAlloc; /* Slots allocated for aOp[] */ Mem *aColName; /* Column names to return */ - Mem *pResultSet; /* Pointer to an array of results */ + Mem *pResultRow; /* Current output row */ char *zErrMsg; /* Error message written here */ VList *pVList; /* Name of variables */ #ifndef SQLITE_OMIT_TRACE

@@ -22758,16 +23379,18 @@ int rcApp; /* errcode set by sqlite3_result_error_code() */

u32 nWrite; /* Number of write operations that have occurred */ #endif u16 nResColumn; /* Number of columns in one row of the result set */ + u16 nResAlloc; /* Column slots allocated to aColName[] */ u8 errorAction; /* Recovery action to do in case of an error */ u8 minWriteFileFormat; /* Minimum file format for writable database files */ u8 prepFlags; /* SQLITE_PREPARE_* flags */ u8 eVdbeState; /* On of the VDBE_*_STATE values */ bft expired:2; /* 1: recompile VM immediately 2: when convenient */ - bft explain:2; /* True if EXPLAIN present on SQL command */ + bft explain:2; /* 0: normal, 1: EXPLAIN, 2: EXPLAIN QUERY PLAN */ bft changeCntOn:1; /* True to update the change-counter */ bft usesStmtJournal:1; /* True if uses a statement journal */ bft readOnly:1; /* True for statements that do not write */ bft bIsReader:1; /* True for statements that read */ + bft haveEqpOps:1; /* Bytecode supports EXPLAIN QUERY PLAN */ yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */ yDbMask lockMask; /* Subset of btreeMask that requires a lock */ u32 aCounter[9]; /* Counters used by sqlite3_stmt_status() */

@@ -22784,7 +23407,6 @@ u32 expmask; /* Binding to these vars invalidates VM */

SubProgram *pProgram; /* Linked list of all sub-programs used by VM */ AuxData *pAuxData; /* Linked list of auxdata allocations */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS - i64 *anExec; /* Number of times each op has been executed */ int nScan; /* Entries in aScan[] */ ScanStatus *aScan; /* Scan definitions for sqlite3_stmt_scanstatus() */ #endif

@@ -22815,7 +23437,7 @@ int iBlobWrite; /* Value returned by preupdate_blobwrite() */

i64 iKey1; /* First key value passed to hook */ i64 iKey2; /* Second key value passed to hook */ Mem *aNew; /* Array of new.* values */ - Table *pTab; /* Schema object being upated */ + Table *pTab; /* Schema object being updated */ Index *pPk; /* PK index if pTab is WITHOUT ROWID */ };

@@ -22905,6 +23527,7 @@ #ifdef SQLITE_DEBUG

SQLITE_PRIVATE int sqlite3VdbeMemIsRowSet(const Mem*); #endif SQLITE_PRIVATE int sqlite3VdbeMemSetRowSet(Mem*); +SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8); SQLITE_PRIVATE int sqlite3IntFloatCompare(i64,double);

@@ -22950,6 +23573,8 @@ SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *, const VdbeCursor *);

SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *, int *); SQLITE_PRIVATE int sqlite3VdbeSorterWrite(const VdbeCursor *, Mem *); SQLITE_PRIVATE int sqlite3VdbeSorterCompare(const VdbeCursor *, Mem *, int, int *); + +SQLITE_PRIVATE void sqlite3VdbeValueListFree(void*); #ifdef SQLITE_DEBUG SQLITE_PRIVATE void sqlite3VdbeIncrWriteCounter(Vdbe*, VdbeCursor*);

@@ -23466,6 +24091,7 @@ char validHMS; /* True (1) if h,m,s are valid */

char validTZ; /* True (1) if tz is valid */ char tzSet; /* Timezone was set explicitly */ char isError; /* An overflow has occurred */ + char useSubsec; /* Display subsecond precision */ };

@@ -23498,8 +24124,8 @@ ** The function returns the number of successful conversions.

*/ static int getDigits(const char *zDate, const char *zFormat, ...){ /* The aMx[] array translates the 3rd character of each format - ** spec into a max size: a b c d e f */ - static const u16 aMx[] = { 12, 14, 24, 31, 59, 9999 }; + ** spec into a max size: a b c d e f */ + static const u16 aMx[] = { 12, 14, 24, 31, 59, 14712 }; va_list ap; int cnt = 0; char nextC;

@@ -23780,6 +24406,11 @@ return setDateTimeToCurrent(context, p);

}else if( sqlite3AtoF(zDate, &r, sqlite3Strlen30(zDate), SQLITE_UTF8)>0 ){ setRawDateNumber(p, r); return 0; + }else if( (sqlite3StrICmp(zDate,"subsec")==0 + || sqlite3StrICmp(zDate,"subsecond")==0) + && sqlite3NotPureFunc(context) ){ + p->useSubsec = 1; + return setDateTimeToCurrent(context, p); } return 1; }

@@ -23835,17 +24466,14 @@ /*

** Compute the Hour, Minute, and Seconds from the julian day number. */ static void computeHMS(DateTime *p){ - int s; + int day_ms, day_min; /* milliseconds, minutes into the day */ if( p->validHMS ) return; computeJD(p); - s = (int)((p->iJD + 43200000) % 86400000); - p->s = s/1000.0; - s = (int)p->s; - p->s -= s; - p->h = s/3600; - s -= p->h*3600; - p->m = s/60; - p->s += s - p->m*60; + day_ms = (int)((p->iJD + 43200000) % 86400000); + p->s = (day_ms % 60000)/1000.0; + day_min = day_ms/60000; + p->m = day_min % 60; + p->h = day_min / 60; p->rawS = 0; p->validHMS = 1; }
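The rewritten body works from the number of milliseconds into the (noon-based julian) day, which it gets from (iJD + 43200000) % 86400000, and then splits that with integer division instead of the old chain of float adjustments. The same arithmetic on its own, with a hard-coded example value:

    #include <stdio.h>

    int main(void){
      int day_ms  = 49500123;             /* 13:45:00.123 into the day */
      int day_min = day_ms / 60000;       /* whole minutes into the day */
      double s    = (day_ms % 60000) / 1000.0;
      int m       = day_min % 60;
      int h       = day_min / 60;
      printf("%02d:%02d:%06.3f\n", h, m, s);   /* prints 13:45:00.123 */
      return 0;
    }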

@@ -24025,6 +24653,25 @@ { 4, "year", 14713.0, 31536000.0 },

}; /* +** If the DateTime p is raw number, try to figure out if it is +** a julian day number of a unix timestamp. Set the p value +** appropriately. +*/ +static void autoAdjustDate(DateTime *p){ + if( !p->rawS || p->validJD ){ + p->rawS = 0; + }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */ + && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */ + ){ + double r = p->s*1000.0 + 210866760000000.0; + clearYMD_HMS_TZ(p); + p->iJD = (sqlite3_int64)(r + 0.5); + p->validJD = 1; + p->rawS = 0; + } +} + +/* ** Process a modifier to a date-time stamp. The modifiers are ** as follows: **
}; /* +** If the DateTime p is a raw number, try to figure out if it is +** a julian day number or a unix timestamp. Set the p value +** appropriately. +*/ +static void autoAdjustDate(DateTime *p){ + if( !p->rawS || p->validJD ){ + p->rawS = 0; + }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */ + && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */ + ){ + double r = p->s*1000.0 + 210866760000000.0; + clearYMD_HMS_TZ(p); + p->iJD = (sqlite3_int64)(r + 0.5); + p->validJD = 1; + p->rawS = 0; + } +} + +/* ** Process a modifier to a date-time stamp. The modifiers are ** as follows: **

@@ -24067,19 +24714,8 @@ ** a unix timestamp, depending on its magnitude.

*/ if( sqlite3_stricmp(z, "auto")==0 ){ if( idx>1 ) return 1; /* IMP: R-33611-57934 */ - if( !p->rawS || p->validJD ){ - rc = 0; - p->rawS = 0; - }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */ - && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */ - ){ - r = p->s*1000.0 + 210866760000000.0; - clearYMD_HMS_TZ(p); - p->iJD = (sqlite3_int64)(r + 0.5); - p->validJD = 1; - p->rawS = 0; - rc = 0; - } + autoAdjustDate(p); + rc = 0; } break; }

@@ -24138,7 +24774,7 @@ if( p->tzSet==0 ){

i64 iOrigJD; /* Original localtime */ i64 iGuess; /* Guess at the corresponding utc time */ int cnt = 0; /* Safety to prevent infinite loop */ - int iErr; /* Guess is off by this much */ + i64 iErr; /* Guess is off by this much */ computeJD(p); iGuess = iOrigJD = p->iJD;

@@ -24194,8 +24830,22 @@ ** start of TTTTT

** ** Move the date backwards to the beginning of the current day, ** or month or year. + ** + ** subsecond + ** subsec + ** + ** Show subsecond precision in the output of datetime() and + ** unixepoch() and strftime('%s'). */ - if( sqlite3_strnicmp(z, "start of ", 9)!=0 ) break; + if( sqlite3_strnicmp(z, "start of ", 9)!=0 ){ + if( sqlite3_stricmp(z, "subsec")==0 + || sqlite3_stricmp(z, "subsecond")==0 + ){ + p->useSubsec = 1; + rc = 0; + } + break; + } if( !p->validJD && !p->validYMD && !p->validHMS ) break; z += 9; computeYMD(p);
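With this change sub-second output becomes opt-in per call. A hedged usage sketch through the public API (it assumes the linked libsqlite3 already understands 'subsec', roughly 3.43 or later; error checking is trimmed for brevity; build with -lsqlite3):

    #include <sqlite3.h>
    #include <stdio.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      sqlite3_prepare_v2(db,
          "SELECT datetime('now'), datetime('now','subsec'), unixepoch('now','subsec')",
          -1, &pStmt, 0);
      if( pStmt && sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("whole seconds: %s\n", (const char*)sqlite3_column_text(pStmt, 0));
        printf("with subsec  : %s\n", (const char*)sqlite3_column_text(pStmt, 1));
        printf("epoch, subsec: %s\n", (const char*)sqlite3_column_text(pStmt, 2));
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }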

@@ -24231,18 +24881,73 @@ case '8':

case '9': { double rRounder; int i; - for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){} + int Y,M,D,h,m,x; + const char *z2 = z; + char z0 = z[0]; + for(n=1; z[n]; n++){ + if( z[n]==':' ) break; + if( sqlite3Isspace(z[n]) ) break; + if( z[n]=='-' ){ + if( n==5 && getDigits(&z[1], "40f", &Y)==1 ) break; + if( n==6 && getDigits(&z[1], "50f", &Y)==1 ) break; + } + } if( sqlite3AtoF(z, &r, n, SQLITE_UTF8)<=0 ){ - rc = 1; + assert( rc==1 ); break; } - if( z[n]==':' ){ + if( z[n]=='-' ){ + /* A modifier of the form (+|-)YYYY-MM-DD adds or subtracts the + ** specified number of years, months, and days. MM is limited to + ** the range 0-11 and DD is limited to 0-30. + */ + if( z0!='+' && z0!='-' ) break; /* Must start with +/- */ + if( n==5 ){ + if( getDigits(&z[1], "40f-20a-20d", &Y, &M, &D)!=3 ) break; + }else{ + assert( n==6 ); + if( getDigits(&z[1], "50f-20a-20d", &Y, &M, &D)!=3 ) break; + z++; + } + if( M>=12 ) break; /* M range 0..11 */ + if( D>=31 ) break; /* D range 0..30 */ + computeYMD_HMS(p); + p->validJD = 0; + if( z0=='-' ){ + p->Y -= Y; + p->M -= M; + D = -D; + }else{ + p->Y += Y; + p->M += M; + } + x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; + p->Y += x; + p->M -= x*12; + computeJD(p); + p->validHMS = 0; + p->validYMD = 0; + p->iJD += (i64)D*86400000; + if( z[11]==0 ){ + rc = 0; + break; + } + if( sqlite3Isspace(z[11]) + && getDigits(&z[12], "20c:20e", &h, &m)==2 + ){ + z2 = &z[12]; + n = 2; + }else{ + break; + } + } + if( z2[n]==':' ){ /* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the ** specified number of hours, minutes, seconds, and fractional seconds ** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be ** omitted. */ - const char *z2 = z; + DateTime tx; sqlite3_int64 day; if( !sqlite3Isdigit(*z2) ) z2++;
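After adding or subtracting the MM field in the new (+|-)YYYY-MM-DD modifier, the month can fall outside 1..12; the small x = (M-1)/12 step above folds any overflow into the year. That normalisation in isolation (normalizeYM is an illustrative name and the example values are arbitrary):

    #include <stdio.h>

    static void normalizeYM(int *pY, int *pM){
      int x = *pM>0 ? (*pM-1)/12 : (*pM-12)/12;   /* years of overflow, may be negative */
      *pY += x;
      *pM -= x*12;
    }

    int main(void){
      int Y = 2023, M = 14;          /* month 14 of 2023 -> 2024-02 */
      normalizeYM(&Y, &M);
      printf("%04d-%02d\n", Y, M);
      Y = 2024; M = -1;              /* month -1 of 2024 -> 2023-11 */
      normalizeYM(&Y, &M);
      printf("%04d-%02d\n", Y, M);
      return 0;
    }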

@@ -24252,7 +24957,7 @@ computeJD(&tx);

tx.iJD -= 43200000; day = tx.iJD/86400000; tx.iJD -= day*86400000; - if( z[0]=='-' ) tx.iJD = -tx.iJD; + if( z0=='-' ) tx.iJD = -tx.iJD; computeJD(p); clearYMD_HMS_TZ(p); p->iJD += tx.iJD;

@@ -24268,7 +24973,7 @@ n = sqlite3Strlen30(z);

if( n>10 || n<3 ) break; if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--; computeJD(p); - rc = 1; + assert( rc==1 ); rRounder = r<0 ? -0.5 : +0.5; for(i=0; i<ArraySize(aXformType); i++){ if( aXformType[i].nName==n

@@ -24277,7 +24982,6 @@ && r>-aXformType[i].rLimit && r<aXformType[i].rLimit

){ switch( i ){ case 4: { /* Special processing to add months */ - int x; assert( strcmp(aXformType[i].zName,"month")==0 ); computeYMD_HMS(p); p->M += (int)r;

@@ -24393,7 +25097,11 @@ ){

DateTime x; if( isDate(context, argc, argv, &x)==0 ){ computeJD(&x); - sqlite3_result_int64(context, x.iJD/1000 - 21086676*(i64)10000); + if( x.useSubsec ){ + sqlite3_result_double(context, (x.iJD - 21086676*(i64)10000000)/1000.0); + }else{ + sqlite3_result_int64(context, x.iJD/1000 - 21086676*(i64)10000); + } } }

@@ -24409,8 +25117,8 @@ sqlite3_value **argv

){ DateTime x; if( isDate(context, argc, argv, &x)==0 ){ - int Y, s; - char zBuf[24]; + int Y, s, n; + char zBuf[32]; computeYMD_HMS(&x); Y = x.Y; if( Y<0 ) Y = -Y;

@@ -24431,15 +25139,28 @@ zBuf[14] = ':';

zBuf[15] = '0' + (x.m/10)%10; zBuf[16] = '0' + (x.m)%10; zBuf[17] = ':'; - s = (int)x.s; - zBuf[18] = '0' + (s/10)%10; - zBuf[19] = '0' + (s)%10; - zBuf[20] = 0; + if( x.useSubsec ){ + s = (int)(1000.0*x.s + 0.5); + zBuf[18] = '0' + (s/10000)%10; + zBuf[19] = '0' + (s/1000)%10; + zBuf[20] = '.'; + zBuf[21] = '0' + (s/100)%10; + zBuf[22] = '0' + (s/10)%10; + zBuf[23] = '0' + (s)%10; + zBuf[24] = 0; + n = 24; + }else{ + s = (int)x.s; + zBuf[18] = '0' + (s/10)%10; + zBuf[19] = '0' + (s)%10; + zBuf[20] = 0; + n = 20; + } if( x.Y<0 ){ zBuf[0] = '-'; - sqlite3_result_text(context, zBuf, 20, SQLITE_TRANSIENT); + sqlite3_result_text(context, zBuf, n, SQLITE_TRANSIENT); }else{ - sqlite3_result_text(context, &zBuf[1], 19, SQLITE_TRANSIENT); + sqlite3_result_text(context, &zBuf[1], n-1, SQLITE_TRANSIENT); } } }

@@ -24456,7 +25177,7 @@ sqlite3_value **argv

){ DateTime x; if( isDate(context, argc, argv, &x)==0 ){ - int s; + int s, n; char zBuf[16]; computeHMS(&x); zBuf[0] = '0' + (x.h/10)%10;

@@ -24465,11 +25186,24 @@ zBuf[2] = ':';

zBuf[3] = '0' + (x.m/10)%10; zBuf[4] = '0' + (x.m)%10; zBuf[5] = ':'; - s = (int)x.s; - zBuf[6] = '0' + (s/10)%10; - zBuf[7] = '0' + (s)%10; - zBuf[8] = 0; - sqlite3_result_text(context, zBuf, 8, SQLITE_TRANSIENT); + if( x.useSubsec ){ + s = (int)(1000.0*x.s + 0.5); + zBuf[6] = '0' + (s/10000)%10; + zBuf[7] = '0' + (s/1000)%10; + zBuf[8] = '.'; + zBuf[9] = '0' + (s/100)%10; + zBuf[10] = '0' + (s/10)%10; + zBuf[11] = '0' + (s)%10; + zBuf[12] = 0; + n = 12; + }else{ + s = (int)x.s; + zBuf[6] = '0' + (s/10)%10; + zBuf[7] = '0' + (s)%10; + zBuf[8] = 0; + n = 8; + } + sqlite3_result_text(context, zBuf, n, SQLITE_TRANSIENT); } }

@@ -24524,7 +25258,7 @@ ** %m month 01-12

** %M minute 00-59 ** %s seconds since 1970-01-01 ** %S seconds 00-59 -** %w day of week 0-6 sunday==0 +** %w day of week 0-6 Sunday==0 ** %W week of year 00-53 ** %Y year 0000-9999 ** %% %

@@ -24550,13 +25284,16 @@

computeJD(&x); computeYMD_HMS(&x); for(i=j=0; zFmt[i]; i++){ + char cf; if( zFmt[i]!='%' ) continue; if( j<i ) sqlite3_str_append(&sRes, zFmt+j, (int)(i-j)); i++; j = i + 1; - switch( zFmt[i] ){ - case 'd': { - sqlite3_str_appendf(&sRes, "%02d", x.D); + cf = zFmt[i]; + switch( cf ){ + case 'd': /* Fall thru */ + case 'e': { + sqlite3_str_appendf(&sRes, cf=='d' ? "%02d" : "%2d", x.D); break; } case 'f': {

@@ -24565,8 +25302,21 @@ if( s>59.999 ) s = 59.999;

sqlite3_str_appendf(&sRes, "%06.3f", s); break; } - case 'H': { - sqlite3_str_appendf(&sRes, "%02d", x.h); + case 'F': { + sqlite3_str_appendf(&sRes, "%04d-%02d-%02d", x.Y, x.M, x.D); + break; + } + case 'H': + case 'k': { + sqlite3_str_appendf(&sRes, cf=='H' ? "%02d" : "%2d", x.h); + break; + } + case 'I': /* Fall thru */ + case 'l': { + int h = x.h; + if( h>12 ) h -= 12; + if( h==0 ) h = 12; + sqlite3_str_appendf(&sRes, cf=='I' ? "%02d" : "%2d", h); break; } case 'W': /* Fall thru */

@@ -24578,7 +25328,7 @@ y.M = 1;

y.D = 1; computeJD(&y); nDay = (int)((x.iJD-y.iJD+43200000)/86400000); - if( zFmt[i]=='W' ){ + if( cf=='W' ){ int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ wd = (int)(((x.iJD+43200000)/86400000)%7); sqlite3_str_appendf(&sRes,"%02d",(nDay+7-wd)/7);

@@ -24599,18 +25349,42 @@ case 'M': {

sqlite3_str_appendf(&sRes,"%02d",x.m); break; } + case 'p': /* Fall thru */ + case 'P': { + if( x.h>=12 ){ + sqlite3_str_append(&sRes, cf=='p' ? "PM" : "pm", 2); + }else{ + sqlite3_str_append(&sRes, cf=='p' ? "AM" : "am", 2); + } + break; + } + case 'R': { + sqlite3_str_appendf(&sRes, "%02d:%02d", x.h, x.m); + break; + } case 's': { - i64 iS = (i64)(x.iJD/1000 - 21086676*(i64)10000); - sqlite3_str_appendf(&sRes,"%lld",iS); + if( x.useSubsec ){ + sqlite3_str_appendf(&sRes,"%.3f", + (x.iJD - 21086676*(i64)10000000)/1000.0); + }else{ + i64 iS = (i64)(x.iJD/1000 - 21086676*(i64)10000); + sqlite3_str_appendf(&sRes,"%lld",iS); + } break; } case 'S': { sqlite3_str_appendf(&sRes,"%02d",(int)x.s); break; } + case 'T': { + sqlite3_str_appendf(&sRes,"%02d:%02d:%02d", x.h, x.m, (int)x.s); + break; + } + case 'u': /* Fall thru */ case 'w': { - sqlite3_str_appendchar(&sRes, 1, - (char)(((x.iJD+129600000)/86400000) % 7) + '0'); + char c = (char)(((x.iJD+129600000)/86400000) % 7) + '0'; + if( c=='0' && cf=='u' ) c = '7'; + sqlite3_str_appendchar(&sRes, 1, c); break; } case 'Y': {

@@ -24660,6 +25434,117 @@ dateFunc(context, 0, 0);

} /* +** timediff(DATE1, DATE2) +** +** Return the amount of time that must be added to DATE2 in order to +** convert it into DATE2. The time difference format is: +** +** +YYYY-MM-DD HH:MM:SS.SSS +** +** The initial "+" becomes "-" if DATE1 occurs before DATE2. For +** date/time values A and B, the following invariant should hold: +** +** datetime(A) == (datetime(B, timediff(A,B)) +** +** Both DATE arguments must be either a julian day number, or an +** ISO-8601 string. The unix timestamps are not supported by this +** routine. +*/ +static void timediffFunc( + sqlite3_context *context, + int NotUsed1, + sqlite3_value **argv +){ + char sign; + int Y, M; + DateTime d1, d2; + sqlite3_str sRes; + UNUSED_PARAMETER(NotUsed1); + if( isDate(context, 1, &argv[0], &d1) ) return; + if( isDate(context, 1, &argv[1], &d2) ) return; + computeYMD_HMS(&d1); + computeYMD_HMS(&d2); + if( d1.iJD>=d2.iJD ){ + sign = '+'; + Y = d1.Y - d2.Y; + if( Y ){ + d2.Y = d1.Y; + d2.validJD = 0; + computeJD(&d2); + } + M = d1.M - d2.M; + if( M<0 ){ + Y--; + M += 12; + } + if( M!=0 ){ + d2.M = d1.M; + d2.validJD = 0; + computeJD(&d2); + } + while( d1.iJD<d2.iJD ){ + M--; + if( M<0 ){ + M = 11; + Y--; + } + d2.M--; + if( d2.M<1 ){ + d2.M = 12; + d2.Y--; + } + d2.validJD = 0; + computeJD(&d2); + } + d1.iJD -= d2.iJD; + d1.iJD += (u64)1486995408 * (u64)100000; + }else /* d1<d2 */{ + sign = '-'; + Y = d2.Y - d1.Y; + if( Y ){ + d2.Y = d1.Y; + d2.validJD = 0; + computeJD(&d2); + } + M = d2.M - d1.M; + if( M<0 ){ + Y--; + M += 12; + } + if( M!=0 ){ + d2.M = d1.M; + d2.validJD = 0; + computeJD(&d2); + } + while( d1.iJD>d2.iJD ){ + M--; + if( M<0 ){ + M = 11; + Y--; + } + d2.M++; + if( d2.M>12 ){ + d2.M = 1; + d2.Y++; + } + d2.validJD = 0; + computeJD(&d2); + } + d1.iJD = d2.iJD - d1.iJD; + d1.iJD += (u64)1486995408 * (u64)100000; + } + d1.validYMD = 0; + d1.validHMS = 0; + d1.validTZ = 0; + computeYMD_HMS(&d1); + sqlite3StrAccumInit(&sRes, 0, 0, 0, 100); + sqlite3_str_appendf(&sRes, "%c%04d-%02d-%02d %02d:%02d:%06.3f", + sign, Y, M, d1.D-1, d1.h, d1.m, d1.s); + sqlite3ResultStrAccum(context, &sRes); +} + + +/* ** current_timestamp() ** ** This function returns the same value as datetime('now').
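The output of timediff() is the same (+|-)YYYY-MM-DD HH:MM:SS.SSS form that the modifier code above accepts, which is what makes the documented round-trip invariant hold. A hedged check from C, assuming timediff() is available in the linked library (roughly 3.43 or later) and building with -lsqlite3; the two dates are arbitrary:

    #include <sqlite3.h>
    #include <stdio.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *pStmt;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      sqlite3_prepare_v2(db,
          "SELECT timediff('2024-01-06','2023-11-20'),"
          "       datetime('2023-11-20', timediff('2024-01-06','2023-11-20'))",
          -1, &pStmt, 0);
      if( pStmt && sqlite3_step(pStmt)==SQLITE_ROW ){
        /* first column should look like +0000-01-17 00:00:00.000 */
        printf("timediff  = %s\n", (const char*)sqlite3_column_text(pStmt, 0));
        /* applying it back to the second date should land on 2024-01-06 00:00:00 */
        printf("roundtrip = %s\n", (const char*)sqlite3_column_text(pStmt, 1));
      }
      sqlite3_finalize(pStmt);
      sqlite3_close(db);
      return 0;
    }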

@@ -24733,6 +25618,7 @@ PURE_DATE(date, -1, 0, 0, dateFunc ),

PURE_DATE(time, -1, 0, 0, timeFunc ), PURE_DATE(datetime, -1, 0, 0, datetimeFunc ), PURE_DATE(strftime, -1, 0, 0, strftimeFunc ), + PURE_DATE(timediff, 2, 0, 0, timediffFunc ), DFUNCTION(current_time, 0, 0, 0, ctimeFunc ), DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc), DFUNCTION(current_date, 0, 0, 0, cdateFunc ),

@@ -24886,7 +25772,7 @@ ){

/* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite ** is using a regular VFS, it is called after the corresponding ** transaction has been committed. Injecting a fault at this point - ** confuses the test scripts - the COMMIT comand returns SQLITE_NOMEM + ** confuses the test scripts - the COMMIT command returns SQLITE_NOMEM ** but the transaction is committed anyway. ** ** The core must call OsFileControl() though, not OsFileControlHint(),

@@ -25507,7 +26393,7 @@ ** Like free() but works for allocations obtained from sqlite3MemMalloc()

** or sqlite3MemRealloc(). ** ** For this low-level routine, we already know that pPrior!=0 since -** cases where pPrior==0 will have been intecepted and dealt with +** cases where pPrior==0 will have been intercepted and dealt with ** by higher-level routines. */ static void sqlite3MemFree(void *pPrior){

@@ -25595,7 +26481,7 @@ if( _sqliteZone_ ){

return SQLITE_OK; } len = sizeof(cpuCount); - /* One usually wants to use hw.acctivecpu for MT decisions, but not here */ + /* One usually wants to use hw.activecpu for MT decisions, but not here */ sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0); if( cpuCount>1 ){ /* defer MT decisions to system malloc */

@@ -27290,9 +28176,13 @@ if( n<=mem5.szAtom*2 ){

if( n<=mem5.szAtom ) return mem5.szAtom; return mem5.szAtom*2; } - if( n>0x40000000 ) return 0; + if( n>0x10000000 ){ + if( n>0x40000000 ) return 0; + if( n>0x20000000 ) return 0x40000000; + return 0x20000000; + } for(iFullSz=mem5.szAtom*8; iFullSz<n; iFullSz *= 4); - if( (iFullSz/2)>=n ) return iFullSz/2; + if( (iFullSz/2)>=(i64)n ) return iFullSz/2; return iFullSz; }

@@ -27583,7 +28473,7 @@ assert( SQLITE_MUTEX_RECURSIVE<2 );

assert( SQLITE_MUTEX_FAST<2 ); assert( SQLITE_MUTEX_WARNONCONTENTION<2 ); -#if SQLITE_ENABLE_API_ARMOR +#ifdef SQLITE_ENABLE_API_ARMOR if( ((CheckMutex*)p)->iType<2 ) #endif {

@@ -28058,7 +28948,7 @@ #include <pthread.h>

/* ** The sqlite3_mutex.id, sqlite3_mutex.nRef, and sqlite3_mutex.owner fields -** are necessary under two condidtions: (1) Debug builds and (2) using +** are necessary under two conditions: (1) Debug builds and (2) using ** home-grown mutexes. Encapsulate these conditions into a single #define. */ #if defined(SQLITE_DEBUG) || defined(SQLITE_HOMEGROWN_RECURSIVE_MUTEX)

@@ -28255,7 +29145,7 @@ ** mutex that it allocates.

*/ static void pthreadMutexFree(sqlite3_mutex *p){ assert( p->nRef==0 ); -#if SQLITE_ENABLE_API_ARMOR +#ifdef SQLITE_ENABLE_API_ARMOR if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ) #endif {

@@ -28559,7 +29449,7 @@ struct sqlite3_mutex {

CRITICAL_SECTION mutex; /* Mutex controlling the lock */ int id; /* Mutex type */ #ifdef SQLITE_DEBUG - volatile int nRef; /* Number of enterances */ + volatile int nRef; /* Number of entrances */ volatile DWORD owner; /* Thread holding this mutex */ volatile LONG trace; /* True to trace changes */ #endif

@@ -28608,7 +29498,7 @@ #if defined(SQLITE_MEMORY_BARRIER)

SQLITE_MEMORY_BARRIER; #elif defined(__GNUC__) __sync_synchronize(); -#elif MSVC_VERSION>=1300 +#elif MSVC_VERSION>=1400 _ReadWriteBarrier(); #elif defined(MemoryBarrier) MemoryBarrier();

@@ -29202,7 +30092,7 @@ **

** The upper bound is slightly less than 2GiB: 0x7ffffeff == 2,147,483,391 ** This provides a 256-byte safety margin for defense against 32-bit ** signed integer overflow bugs when computing memory allocation sizes. -** Parnoid applications might want to reduce the maximum allocation size +** Paranoid applications might want to reduce the maximum allocation size ** further for an even larger safety margin. 0x3fffffff or 0x0fffffff ** or even smaller would be reasonable upper bounds on the size of a memory ** allocations for most applications.

@@ -29716,9 +30606,14 @@ ** sqlite3DbMalloc(). Omit leading and trailing whitespace.

*/ SQLITE_PRIVATE char *sqlite3DbSpanDup(sqlite3 *db, const char *zStart, const char *zEnd){ int n; +#ifdef SQLITE_DEBUG + /* Because of the way the parser works, the span is guaranteed to contain + ** at least one non-space character */ + for(n=0; sqlite3Isspace(zStart[n]); n++){ assert( &zStart[n]<zEnd ); } +#endif while( sqlite3Isspace(zStart[0]) ) zStart++; n = (int)(zEnd - zStart); - while( ALWAYS(n>0) && sqlite3Isspace(zStart[n-1]) ) n--; + while( sqlite3Isspace(zStart[n-1]) ) n--; return sqlite3DbStrNDup(db, zStart, n); }

@@ -29814,7 +30709,7 @@ assert( sqlite3_mutex_held(db->mutex) );

if( db->mallocFailed || rc ){ return apiHandleError(db, rc); } - return rc & db->errMask; + return 0; } /************** End of malloc.c **********************************************/

@@ -29926,43 +30821,6 @@ ** %S Takes a pointer to SrcItem. Shows name or database.name

** %!S Like %S but prefer the zName over the zAlias */ -/* Floating point constants used for rounding */ -static const double arRound[] = { - 5.0e-01, 5.0e-02, 5.0e-03, 5.0e-04, 5.0e-05, - 5.0e-06, 5.0e-07, 5.0e-08, 5.0e-09, 5.0e-10, -}; - -/* -** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point -** conversions will work. -*/ -#ifndef SQLITE_OMIT_FLOATING_POINT -/* -** "*val" is a double such that 0.1 <= *val < 10.0 -** Return the ascii code for the leading digit of *val, then -** multiply "*val" by 10.0 to renormalize. -** -** Example: -** input: *val = 3.14159 -** output: *val = 1.4159 function return = '3' -** -** The counter *cnt is incremented each time. After counter exceeds -** 16 (the number of significant digits in a 64-bit float) '0' is -** always returned. -*/ -static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){ - int digit; - LONGDOUBLE_TYPE d; - if( (*cnt)<=0 ) return '0'; - (*cnt)--; - digit = (int)*val; - d = digit; - digit += '0'; - *val = (*val - d)*10.0; - return (char)digit; -} -#endif /* SQLITE_OMIT_FLOATING_POINT */ - /* ** Set the StrAccum object to an error mode. */

@@ -30054,18 +30912,15 @@ etByte xtype = etINVALID; /* Conversion paradigm */

u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */ char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */ sqlite_uint64 longvalue; /* Value for integer types */ - LONGDOUBLE_TYPE realvalue; /* Value for real types */ + double realvalue; /* Value for real types */ const et_info *infop; /* Pointer to the appropriate info structure */ char *zOut; /* Rendering buffer */ int nOut; /* Size of the rendering buffer */ char *zExtra = 0; /* Malloced memory used by some conversion */ -#ifndef SQLITE_OMIT_FLOATING_POINT - int exp, e2; /* exponent of real numbers */ - int nsd; /* Number of significant digits returned */ - double rounder; /* Used for rounding floating point values */ + int exp, e2; /* exponent of real numbers */ etByte flag_dp; /* True if decimal point should be shown */ etByte flag_rtz; /* True if trailing zeros should be removed */ -#endif + PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */ char buf[etBUFSIZE]; /* Conversion buffer */

@@ -30340,73 +31195,66 @@ length = (int)(&zOut[nOut-1]-bufpt);

break; case etFLOAT: case etEXP: - case etGENERIC: + case etGENERIC: { + FpDecode s; + int iRound; + int j; + if( bArgList ){ realvalue = getDoubleArg(pArgList); }else{ realvalue = va_arg(ap,double); } -#ifdef SQLITE_OMIT_FLOATING_POINT - length = 0; -#else if( precision<0 ) precision = 6; /* Set default precision */ #ifdef SQLITE_FP_PRECISION_LIMIT if( precision>SQLITE_FP_PRECISION_LIMIT ){ precision = SQLITE_FP_PRECISION_LIMIT; } #endif - if( realvalue<0.0 ){ - realvalue = -realvalue; - prefix = '-'; - }else{ - prefix = flag_prefix; - } - if( xtype==etGENERIC && precision>0 ) precision--; - testcase( precision>0xfff ); - idx = precision & 0xfff; - rounder = arRound[idx%10]; - while( idx>=10 ){ rounder *= 1.0e-10; idx -= 10; } if( xtype==etFLOAT ){ - double rx = (double)realvalue; - sqlite3_uint64 u; - int ex; - memcpy(&u, &rx, sizeof(u)); - ex = -1023 + (int)((u>>52)&0x7ff); - if( precision+(ex/3) < 15 ) rounder += realvalue*3e-16; - realvalue += rounder; - } - /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */ - exp = 0; - if( sqlite3IsNaN((double)realvalue) ){ - bufpt = "NaN"; - length = 3; - break; + iRound = -precision; + }else if( xtype==etGENERIC ){ + iRound = precision; + }else{ + iRound = precision+1; } - if( realvalue>0.0 ){ - LONGDOUBLE_TYPE scale = 1.0; - while( realvalue>=1e100*scale && exp<=350 ){ scale *= 1e100;exp+=100;} - while( realvalue>=1e10*scale && exp<=350 ){ scale *= 1e10; exp+=10; } - while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; } - realvalue /= scale; - while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; } - while( realvalue<1.0 ){ realvalue *= 10.0; exp--; } - if( exp>350 ){ + sqlite3FpDecode(&s, realvalue, iRound, flag_altform2 ? 26 : 16); + if( s.isSpecial ){ + if( s.isSpecial==2 ){ + bufpt = flag_zeropad ? "null" : "NaN"; + length = sqlite3Strlen30(bufpt); + break; + }else if( flag_zeropad ){ + s.z[0] = '9'; + s.iDP = 1000; + s.n = 1; + }else{ + memcpy(buf, "-Inf", 5); bufpt = buf; - buf[0] = prefix; - memcpy(buf+(prefix!=0),"Inf",4); - length = 3+(prefix!=0); + if( s.sign=='-' ){ + /* no-op */ + }else if( flag_prefix ){ + buf[0] = flag_prefix; + }else{ + bufpt++; + } + length = sqlite3Strlen30(bufpt); break; } } - bufpt = buf; + if( s.sign=='-' ){ + prefix = '-'; + }else{ + prefix = flag_prefix; + } + + exp = s.iDP-1; + if( xtype==etGENERIC && precision>0 ) precision--; + /* ** If the field type is etGENERIC, then convert to either etEXP ** or etFLOAT, as appropriate. */ - if( xtype!=etFLOAT ){ - realvalue += rounder; - if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; } - } if( xtype==etGENERIC ){ flag_rtz = !flag_alternateform; if( exp<-4 || exp>precision ){

@@ -30421,29 +31269,32 @@ }

if( xtype==etEXP ){ e2 = 0; }else{ - e2 = exp; + e2 = s.iDP - 1; } + bufpt = buf; { i64 szBufNeeded; /* Size of a temporary buffer needed */ szBufNeeded = MAX(e2,0)+(i64)precision+(i64)width+15; + if( cThousand && e2>0 ) szBufNeeded += (e2+2)/3; if( szBufNeeded > etBUFSIZE ){ bufpt = zExtra = printfTempBuf(pAccum, szBufNeeded); if( bufpt==0 ) return; } } zOut = bufpt; - nsd = 16 + flag_altform2*10; flag_dp = (precision>0 ?1:0) | flag_alternateform | flag_altform2; /* The sign in front of the number */ if( prefix ){ *(bufpt++) = prefix; } /* Digits prior to the decimal point */ + j = 0; if( e2<0 ){ *(bufpt++) = '0'; }else{ for(; e2>=0; e2--){ - *(bufpt++) = et_getdigit(&realvalue,&nsd); + *(bufpt++) = j<s.n ? s.z[j++] : '0'; + if( cThousand && (e2%3)==0 && e2>1 ) *(bufpt++) = ','; } } /* The decimal point */

@@ -30452,13 +31303,12 @@ *(bufpt++) = '.';

} /* "0" digits after the decimal point but before the first ** significant digit of the number */ - for(e2++; e2<0; precision--, e2++){ - assert( precision>0 ); + for(e2++; e2<0 && precision>0; precision--, e2++){ *(bufpt++) = '0'; } /* Significant digits after the decimal point */ while( (precision--)>0 ){ - *(bufpt++) = et_getdigit(&realvalue,&nsd); + *(bufpt++) = j<s.n ? s.z[j++] : '0'; } /* Remove trailing zeros and the "." if no digits follow the "." */ if( flag_rtz && flag_dp ){

@@ -30474,6 +31324,7 @@ }

} /* Add the "eNNN" suffix */ if( xtype==etEXP ){ + exp = s.iDP - 1; *(bufpt++) = aDigits[infop->charset]; if( exp<0 ){ *(bufpt++) = '-'; exp = -exp;

@@ -30507,8 +31358,8 @@ i = prefix!=0;

while( nPad-- ) bufpt[i++] = '0'; length = width; } -#endif /* !defined(SQLITE_OMIT_FLOATING_POINT) */ break; + } case etSIZE: if( !bArgList ){ *(va_arg(ap,int*)) = pAccum->nChar;

@@ -30557,13 +31408,26 @@ length = 4;

} } if( precision>1 ){ + i64 nPrior = 1; width -= precision-1; if( width>1 && !flag_leftjustify ){ sqlite3_str_appendchar(pAccum, width-1, ' '); width = 0; } - while( precision-- > 1 ){ - sqlite3_str_append(pAccum, buf, length); + sqlite3_str_append(pAccum, buf, length); + precision--; + while( precision > 1 ){ + i64 nCopyBytes; + if( nPrior > precision-1 ) nPrior = precision - 1; + nCopyBytes = length*nPrior; + if( nCopyBytes + pAccum->nChar >= pAccum->nAlloc ){ + sqlite3StrAccumEnlarge(pAccum, nCopyBytes); + } + if( pAccum->accError ) break; + sqlite3_str_append(pAccum, + &pAccum->zText[pAccum->nChar-nCopyBytes], nCopyBytes); + precision -= nPrior; + nPrior *= 2; } } bufpt = buf;
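
The new repeat-count loop above appends one copy of the converted value and then repeatedly re-appends the tail it has already written, doubling nPrior each round, so producing N copies costs O(log N) append calls instead of N. A standalone sketch of the same doubling idea (not SQLite code; names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Fill a freshly allocated buffer with n copies of src by repeatedly
** copying from what has already been written, doubling each time. */
static char *repeat_str(const char *src, size_t n){
  size_t len = strlen(src);
  char *dst = malloc(len*n + 1);
  size_t done;
  if( dst==0 || n==0 ){ if( dst ) dst[0] = 0; return dst; }
  memcpy(dst, src, len);                           /* first copy */
  for(done=1; done<n; ){
    size_t chunk = done < n-done ? done : n-done;  /* double, capped at remainder */
    memcpy(dst + done*len, dst, chunk*len);
    done += chunk;
  }
  dst[len*n] = 0;
  return dst;
}

int main(void){
  char *z = repeat_str("ab", 5);
  printf("%s\n", z);                               /* ababababab */
  free(z);
  return 0;
}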

@@ -30791,9 +31655,9 @@ **

** Return the number of bytes of text that StrAccum is able to accept ** after the attempted enlargement. The value returned might be zero. */ -SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, int N){ +SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, i64 N){ char *zNew; - assert( p->nChar+(i64)N >= p->nAlloc ); /* Only called if really needed */ + assert( p->nChar+N >= p->nAlloc ); /* Only called if really needed */ if( p->accError ){ testcase(p->accError==SQLITE_TOOBIG); testcase(p->accError==SQLITE_NOMEM);

@@ -30804,8 +31668,7 @@ sqlite3StrAccumSetError(p, SQLITE_TOOBIG);

return p->nAlloc - p->nChar - 1; }else{ char *zOld = isMalloced(p) ? p->zText : 0; - i64 szNew = p->nChar; - szNew += (sqlite3_int64)N + 1; + i64 szNew = p->nChar + N + 1; if( szNew+p->nChar<=p->mxAlloc ){ /* Force exponential buffer size growth as long as it does not overflow, ** to avoid having to call this routine too often */

@@ -30835,7 +31698,8 @@ sqlite3StrAccumSetError(p, SQLITE_NOMEM);

return 0; } } - return N; + assert( N>=0 && N<=0x7fffffff ); + return (int)N; } /*

@@ -31126,12 +31990,22 @@ zBuf[acc.nChar] = 0;

return zBuf; } SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ - char *z; + StrAccum acc; va_list ap; + if( n<=0 ) return zBuf; +#ifdef SQLITE_ENABLE_API_ARMOR + if( zBuf==0 || zFormat==0 ) { + (void)SQLITE_MISUSE_BKPT; + if( zBuf ) zBuf[0] = 0; + return zBuf; + } +#endif + sqlite3StrAccumInit(&acc, 0, zBuf, n, 0); va_start(ap,zFormat); - z = sqlite3_vsnprintf(n, zBuf, zFormat, ap); + sqlite3_str_vappendf(&acc, zFormat, ap); va_end(ap); - return z; + zBuf[acc.nChar] = 0; + return zBuf; } /*
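
sqlite3_snprintf() above is reworked to feed a stack StrAccum directly instead of bouncing through sqlite3_vsnprintf(). Its public contract is unchanged; for reference, a small usage sketch (note the buffer size comes first and the buffer itself is returned, always NUL-terminated when n is positive):

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  char zBuf[32];
  sqlite3_snprintf((int)sizeof(zBuf), zBuf, "rowid=%d name=%Q", 42, "O'Hara");
  printf("%s\n", zBuf);   /* rowid=42 name='O''Hara' */
  return 0;
}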

@@ -31207,6 +32081,75 @@ va_list ap;

va_start(ap,zFormat); sqlite3_str_vappendf(p, zFormat, ap); va_end(ap); +} + + +/***************************************************************************** +** Reference counted string storage +*****************************************************************************/ + +/* +** Increase the reference count of the string by one. +** +** The input parameter is returned. +*/ +SQLITE_PRIVATE char *sqlite3RCStrRef(char *z){ + RCStr *p = (RCStr*)z; + assert( p!=0 ); + p--; + p->nRCRef++; + return z; +} + +/* +** Decrease the reference count by one. Free the string when the +** reference count reaches zero. +*/ +SQLITE_PRIVATE void sqlite3RCStrUnref(void *z){ + RCStr *p = (RCStr*)z; + assert( p!=0 ); + p--; + assert( p->nRCRef>0 ); + if( p->nRCRef>=2 ){ + p->nRCRef--; + }else{ + sqlite3_free(p); + } +} + +/* +** Create a new string that is capable of holding N bytes of text, not counting +** the zero byte at the end. The string is uninitialized. +** +** The reference count is initially 1. Call sqlite3RCStrUnref() to free the +** newly allocated string. +** +** This routine returns 0 on an OOM. +*/ +SQLITE_PRIVATE char *sqlite3RCStrNew(u64 N){ + RCStr *p = sqlite3_malloc64( N + sizeof(*p) + 1 ); + if( p==0 ) return 0; + p->nRCRef = 1; + return (char*)&p[1]; +} + +/* +** Change the size of the string so that it is able to hold N bytes. +** The string might be reallocated, so return the new allocation. +*/ +SQLITE_PRIVATE char *sqlite3RCStrResize(char *z, u64 N){ + RCStr *p = (RCStr*)z; + RCStr *pNew; + assert( p!=0 ); + p--; + assert( p->nRCRef==1 ); + pNew = sqlite3_realloc64(p, N+sizeof(RCStr)+1); + if( pNew==0 ){ + sqlite3_free(p); + return 0; + }else{ + return (char*)&pNew[1]; + } } /************** End of printf.c **********************************************/
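
The RCStr helpers above prepend a small reference-count header to the allocation and hand out a pointer to the byte just past it, so the string can still be used as an ordinary char*. A standalone sketch of that layout (illustrative names, not SQLite's internal API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct RcHdr { unsigned nRef; } RcHdr;

static char *rcstr_new(const char *zSrc){
  size_t n = strlen(zSrc);
  RcHdr *p = malloc(sizeof(*p) + n + 1);
  if( p==0 ) return 0;
  p->nRef = 1;
  memcpy(p+1, zSrc, n+1);          /* string lives right after the header */
  return (char*)(p+1);
}
static char *rcstr_ref(char *z){   /* bump the count, return the same pointer */
  ((RcHdr*)z - 1)->nRef++;
  return z;
}
static void rcstr_unref(char *z){  /* free only when the last reference goes */
  RcHdr *p = (RcHdr*)z - 1;
  if( --p->nRef==0 ) free(p);
}

int main(void){
  char *a = rcstr_new("shared text");
  char *b = rcstr_ref(a);
  printf("%s / %s\n", a, b);
  rcstr_unref(a);
  rcstr_unref(b);                  /* second unref releases the memory */
  return 0;
}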

@@ -31431,6 +32374,13 @@ }

if( pItem->fg.isOn || (pItem->fg.isUsing==0 && pItem->u3.pOn!=0) ){ sqlite3_str_appendf(&x, " ON"); } + if( pItem->fg.isTabFunc ) sqlite3_str_appendf(&x, " isTabFunc"); + if( pItem->fg.isCorrelated ) sqlite3_str_appendf(&x, " isCorrelated"); + if( pItem->fg.isMaterialized ) sqlite3_str_appendf(&x, " isMaterialized"); + if( pItem->fg.viaCoroutine ) sqlite3_str_appendf(&x, " viaCoroutine"); + if( pItem->fg.notCte ) sqlite3_str_appendf(&x, " notCte"); + if( pItem->fg.isNestedFrom ) sqlite3_str_appendf(&x, " isNestedFrom"); + sqlite3StrAccumFinish(&x); sqlite3TreeViewItem(pView, zLine, i<pSrc->nSrc-1); n = 0;

@@ -31618,6 +32568,7 @@ if( pWin->pFilter ){

sqlite3TreeViewItem(pView, "FILTER", 1); sqlite3TreeViewExpr(pView, pWin->pFilter, 0); sqlite3TreeViewPop(&pView); + if( pWin->eFrmType==TK_FILTER ) return; } sqlite3TreeViewPush(&pView, more); if( pWin->zName ){

@@ -31627,7 +32578,7 @@ sqlite3TreeViewLine(pView, "OVER (%p)", pWin);

} if( pWin->zBase ) nElement++; if( pWin->pOrderBy ) nElement++; - if( pWin->eFrmType ) nElement++; + if( pWin->eFrmType!=0 && pWin->eFrmType!=TK_FILTER ) nElement++; if( pWin->eExclude ) nElement++; if( pWin->zBase ){ sqlite3TreeViewPush(&pView, (--nElement)>0);

@@ -31640,7 +32591,7 @@ }

if( pWin->pOrderBy ){ sqlite3TreeViewExprList(pView, pWin->pOrderBy, (--nElement)>0, "ORDER-BY"); } - if( pWin->eFrmType ){ + if( pWin->eFrmType!=0 && pWin->eFrmType!=TK_FILTER ){ char zBuf[30]; const char *zFrmType = "ROWS"; if( pWin->eFrmType==TK_RANGE ) zFrmType = "RANGE";

@@ -31700,7 +32651,7 @@ sqlite3TreeViewLine(pView, "nil");

sqlite3TreeViewPop(&pView); return; } - if( pExpr->flags || pExpr->affExpr || pExpr->vvaFlags ){ + if( pExpr->flags || pExpr->affExpr || pExpr->vvaFlags || pExpr->pAggInfo ){ StrAccum x; sqlite3StrAccumInit(&x, 0, zFlgs, sizeof(zFlgs), 0); sqlite3_str_appendf(&x, " fg.af=%x.%c",

@@ -31716,6 +32667,9 @@ sqlite3_str_appendf(&x, " DDL");

} if( ExprHasVVAProperty(pExpr, EP_Immutable) ){ sqlite3_str_appendf(&x, " IMMUTABLE"); + } + if( pExpr->pAggInfo!=0 ){ + sqlite3_str_appendf(&x, " agg-column[%d]", pExpr->iAgg); } sqlite3StrAccumFinish(&x); }else{

@@ -31846,7 +32800,8 @@ "IS-FALSE", "IS-TRUE", "IS-NOT-FALSE", "IS-NOT-TRUE"

}; assert( pExpr->op2==TK_IS || pExpr->op2==TK_ISNOT ); assert( pExpr->pRight ); - assert( sqlite3ExprSkipCollate(pExpr->pRight)->op==TK_TRUEFALSE ); + assert( sqlite3ExprSkipCollateAndLikely(pExpr->pRight)->op + == TK_TRUEFALSE ); x = (pExpr->op2==TK_ISNOT)*2 + sqlite3ExprTruthValue(pExpr->pRight); zUniOp = azOp[x]; break;

@@ -31884,7 +32839,7 @@ }else{

assert( ExprUseXList(pExpr) ); pFarg = pExpr->x.pList; #ifndef SQLITE_OMIT_WINDOWFUNC - pWin = ExprHasProperty(pExpr, EP_WinFunc) ? pExpr->y.pWin : 0; + pWin = IsWindowFunc(pExpr) ? pExpr->y.pWin : 0; #else pWin = 0; #endif

@@ -31910,13 +32865,23 @@ }else{

sqlite3TreeViewLine(pView, "FUNCTION %Q%s", pExpr->u.zToken, zFlgs); } if( pFarg ){ - sqlite3TreeViewExprList(pView, pFarg, pWin!=0, 0); + sqlite3TreeViewExprList(pView, pFarg, pWin!=0 || pExpr->pLeft, 0); + if( pExpr->pLeft ){ + Expr *pOB = pExpr->pLeft; + assert( pOB->op==TK_ORDER ); + assert( ExprUseXList(pOB) ); + sqlite3TreeViewExprList(pView, pOB->x.pList, pWin!=0, "ORDERBY"); + } } #ifndef SQLITE_OMIT_WINDOWFUNC if( pWin ){ sqlite3TreeViewWindow(pView, pWin, 0); } #endif + break; + } + case TK_ORDER: { + sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, "ORDERBY"); break; } #ifndef SQLITE_OMIT_SUBQUERY

@@ -33505,7 +34470,7 @@

/* ** Calls to sqlite3FaultSim() are used to simulate a failure during testing, ** or to bypass normal error detection during testing in order to let -** execute proceed futher downstream. +** execute proceed further downstream. ** ** In deployment, sqlite3FaultSim() *always* return SQLITE_OK (0). The ** sqlite3FaultSim() function only returns non-zero during testing.

@@ -33622,6 +34587,23 @@ ** to do based on the SQLite error code in rc.

*/ SQLITE_PRIVATE void sqlite3SystemError(sqlite3 *db, int rc){ if( rc==SQLITE_IOERR_NOMEM ) return; +#ifdef SQLITE_USE_SEH + if( rc==SQLITE_IOERR_IN_PAGE ){ + int ii; + int iErr; + sqlite3BtreeEnterAll(db); + for(ii=0; ii<db->nDb; ii++){ + if( db->aDb[ii].pBt ){ + iErr = sqlite3PagerWalSystemErrno(sqlite3BtreePager(db->aDb[ii].pBt)); + if( iErr ){ + db->iSysErrno = iErr; + } + } + } + sqlite3BtreeLeaveAll(db); + return; + } +#endif rc &= 0xff; if( rc==SQLITE_CANTOPEN || rc==SQLITE_IOERR ){ db->iSysErrno = sqlite3OsGetLastError(db->pVfs);

@@ -33657,6 +34639,30 @@ }

} /* +** Check for interrupts and invoke progress callback. +*/ +SQLITE_PRIVATE void sqlite3ProgressCheck(Parse *p){ + sqlite3 *db = p->db; + if( AtomicLoad(&db->u1.isInterrupted) ){ + p->nErr++; + p->rc = SQLITE_INTERRUPT; + } +#ifndef SQLITE_OMIT_PROGRESS_CALLBACK + if( db->xProgress ){ + if( p->rc==SQLITE_INTERRUPT ){ + p->nProgressSteps = 0; + }else if( (++p->nProgressSteps)>=db->nProgressOps ){ + if( db->xProgress(db->pProgressArg) ){ + p->nErr++; + p->rc = SQLITE_INTERRUPT; + } + p->nProgressSteps = 0; + } + } +#endif +} + +/* ** Add an error message to pParse->zErrMsg and increment pParse->nErr. ** ** This function should be used to report any error that occurs while

@@ -33847,43 +34853,40 @@ }

return h; } -/* -** Compute 10 to the E-th power. Examples: E==1 results in 10. -** E==2 results in 100. E==50 results in 1.0e50. +/* Double-Double multiplication. (x[0],x[1]) *= (y,yy) ** -** This routine only works for values of E between 1 and 341. +** Reference: +** T. J. Dekker, "A Floating-Point Technique for Extending the +** Available Precision". 1971-07-26. */ -static LONGDOUBLE_TYPE sqlite3Pow10(int E){ -#if defined(_MSC_VER) - static const LONGDOUBLE_TYPE x[] = { - 1.0e+001L, - 1.0e+002L, - 1.0e+004L, - 1.0e+008L, - 1.0e+016L, - 1.0e+032L, - 1.0e+064L, - 1.0e+128L, - 1.0e+256L - }; - LONGDOUBLE_TYPE r = 1.0; - int i; - assert( E>=0 && E<=307 ); - for(i=0; E!=0; i++, E >>=1){ - if( E & 1 ) r *= x[i]; - } - return r; -#else - LONGDOUBLE_TYPE x = 10.0; - LONGDOUBLE_TYPE r = 1.0; - while(1){ - if( E & 1 ) r *= x; - E >>= 1; - if( E==0 ) break; - x *= x; - } - return r; -#endif +static void dekkerMul2(volatile double *x, double y, double yy){ + /* + ** The "volatile" keywords on parameter x[] and on local variables + ** below are needed force intermediate results to be truncated to + ** binary64 rather than be carried around in an extended-precision + ** format. The truncation is necessary for the Dekker algorithm to + ** work. Intel x86 floating point might omit the truncation without + ** the use of volatile. + */ + volatile double tx, ty, p, q, c, cc; + double hx, hy; + u64 m; + memcpy(&m, (void*)&x[0], 8); + m &= 0xfffffffffc000000LL; + memcpy(&hx, &m, 8); + tx = x[0] - hx; + memcpy(&m, &y, 8); + m &= 0xfffffffffc000000LL; + memcpy(&hy, &m, 8); + ty = y - hy; + p = hx*hy; + q = hx*ty + tx*hy; + c = p+q; + cc = p - c + q + tx*ty; + cc = x[0]*yy + x[1]*y + cc; + x[0] = c + cc; + x[1] = c - x[0]; + x[1] += cc; } /*
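
dekkerMul2() above implements Dekker's double-double multiplication: each factor is split into a "high" part with the low mantissa bits masked off so the high-times-high product is exact, and the cross terms recover the rounding error. A standalone sketch of the splitting trick under the same masking assumption (on x87-style extended precision the intermediates would additionally need to be forced to binary64, which the hunk above does with volatile):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Clear the low 26 mantissa bits so the remaining "high" part has few enough
** significant bits for exact partial products. */
static double split_hi(double x){
  uint64_t m;
  memcpy(&m, &x, 8);
  m &= 0xfffffffffc000000ULL;
  memcpy(&x, &m, 8);
  return x;
}

/* Compute p = fl(x*y) and err such that x*y ~= p + err. */
static void two_product(double x, double y, double *p, double *err){
  double hx = split_hi(x), tx = x - hx;
  double hy = split_hi(y), ty = y - hy;
  *p = x*y;
  *err = ((hx*hy - *p) + hx*ty + tx*hy) + tx*ty;
}

int main(void){
  double p, e;
  two_product(0.1, 10.0, &p, &e);
  printf("p=%.17g err=%.3g\n", p, e);  /* err is tiny but nonzero: 0.1 is inexact */
  return 0;
}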

@@ -33924,12 +34927,11 @@ int incr;

const char *zEnd; /* sign * significand * (10 ^ (esign * exponent)) */ int sign = 1; /* sign of significand */ - i64 s = 0; /* significand */ + u64 s = 0; /* significand */ int d = 0; /* adjust exponent for shifting decimal point */ int esign = 1; /* sign of exponent */ int e = 0; /* exponent */ int eValid = 1; /* True exponent is either not used or is well-formed */ - double result; int nDigit = 0; /* Number of digits processed */ int eType = 1; /* 1: pure integer, 2+: fractional -1 or less: bad UTF16 */

@@ -33969,7 +34971,7 @@ /* copy max significant digits to significand */

while( z<zEnd && sqlite3Isdigit(*z) ){ s = s*10 + (*z - '0'); z+=incr; nDigit++; - if( s>=((LARGEST_INT64-9)/10) ){ + if( s>=((LARGEST_UINT64-9)/10) ){ /* skip non-significant significand digits ** (increase exponent by d to shift decimal left) */ while( z<zEnd && sqlite3Isdigit(*z) ){ z+=incr; d++; }

@@ -33984,7 +34986,7 @@ eType++;

/* copy digits from after decimal to significand ** (decrease exponent by d to shift decimal right) */ while( z<zEnd && sqlite3Isdigit(*z) ){ - if( s<((LARGEST_INT64-9)/10) ){ + if( s<((LARGEST_UINT64-9)/10) ){ s = s*10 + (*z - '0'); d--; nDigit++;

@@ -34024,79 +35026,89 @@ /* skip trailing spaces */

while( z<zEnd && sqlite3Isspace(*z) ) z+=incr; do_atof_calc: + /* Zero is a special case */ + if( s==0 ){ + *pResult = sign<0 ? -0.0 : +0.0; + goto atof_return; + } + /* adjust exponent by d, and update sign */ e = (e*esign) + d; - if( e<0 ) { - esign = -1; - e *= -1; - } else { - esign = 1; + + /* Try to adjust the exponent to make it smaller */ + while( e>0 && s<(LARGEST_UINT64/10) ){ + s *= 10; + e--; + } + while( e<0 && (s%10)==0 ){ + s /= 10; + e++; } - if( s==0 ) { - /* In the IEEE 754 standard, zero is signed. */ - result = sign<0 ? -(double)0 : (double)0; - } else { - /* Attempt to reduce exponent. - ** - ** Branches that are not required for the correct answer but which only - ** help to obtain the correct answer faster are marked with special - ** comments, as a hint to the mutation tester. - */ - while( e>0 ){ /*OPTIMIZATION-IF-TRUE*/ - if( esign>0 ){ - if( s>=(LARGEST_INT64/10) ) break; /*OPTIMIZATION-IF-FALSE*/ - s *= 10; - }else{ - if( s%10!=0 ) break; /*OPTIMIZATION-IF-FALSE*/ - s /= 10; - } - e--; + if( e==0 ){ + *pResult = s; + }else if( sqlite3Config.bUseLongDouble ){ + LONGDOUBLE_TYPE r = (LONGDOUBLE_TYPE)s; + if( e>0 ){ + while( e>=100 ){ e-=100; r *= 1.0e+100L; } + while( e>=10 ){ e-=10; r *= 1.0e+10L; } + while( e>=1 ){ e-=1; r *= 1.0e+01L; } + }else{ + while( e<=-100 ){ e+=100; r *= 1.0e-100L; } + while( e<=-10 ){ e+=10; r *= 1.0e-10L; } + while( e<=-1 ){ e+=1; r *= 1.0e-01L; } } - - /* adjust the sign of significand */ - s = sign<0 ? -s : s; - - if( e==0 ){ /*OPTIMIZATION-IF-TRUE*/ - result = (double)s; - }else{ - /* attempt to handle extremely small/large numbers better */ - if( e>307 ){ /*OPTIMIZATION-IF-TRUE*/ - if( e<342 ){ /*OPTIMIZATION-IF-TRUE*/ - LONGDOUBLE_TYPE scale = sqlite3Pow10(e-308); - if( esign<0 ){ - result = s / scale; - result /= 1.0e+308; - }else{ - result = s * scale; - result *= 1.0e+308; - } - }else{ assert( e>=342 ); - if( esign<0 ){ - result = 0.0*s; - }else{ + assert( r>=0.0 ); + if( r>+1.7976931348623157081452742373e+308L ){ #ifdef INFINITY - result = INFINITY*s; + *pResult = +INFINITY; #else - result = 1e308*1e308*s; /* Infinity */ + *pResult = 1.0e308*10.0; #endif - } - } - }else{ - LONGDOUBLE_TYPE scale = sqlite3Pow10(e); - if( esign<0 ){ - result = s / scale; - }else{ - result = s * scale; - } + }else{ + *pResult = (double)r; + } + }else{ + double rr[2]; + u64 s2; + rr[0] = (double)s; + s2 = (u64)rr[0]; + rr[1] = s>=s2 ? (double)(s - s2) : -(double)(s2 - s); + if( e>0 ){ + while( e>=100 ){ + e -= 100; + dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83); + } + while( e>=10 ){ + e -= 10; + dekkerMul2(rr, 1.0e+10, 0.0); + } + while( e>=1 ){ + e -= 1; + dekkerMul2(rr, 1.0e+01, 0.0); + } + }else{ + while( e<=-100 ){ + e += 100; + dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117); + } + while( e<=-10 ){ + e += 10; + dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27); + } + while( e<=-1 ){ + e += 1; + dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18); } } + *pResult = rr[0]+rr[1]; + if( sqlite3IsNaN(*pResult) ) *pResult = 1e300*1e300; } - - /* store the result */ - *pResult = result; + if( sign<0 ) *pResult = -*pResult; + assert( !sqlite3IsNaN(*pResult) ); - /* return true if number and no extra non-whitespace chracters after */ +atof_return: + /* return true if number and no extra non-whitespace characters after */ if( z==zEnd && nDigit>0 && eValid && eType>0 ){ return eType; }else if( eType>=2 && (eType==3 || eValid) && nDigit>0 ){

@@ -34113,11 +35125,14 @@ #pragma warning(default : 4756)

#endif /* -** Render an signed 64-bit integer as text. Store the result in zOut[]. +** Render an signed 64-bit integer as text. Store the result in zOut[] and +** return the length of the string that was stored, in bytes. The value +** returned does not include the zero terminator at the end of the output +** string. ** ** The caller must ensure that zOut[] is at least 21 bytes in size. */ -SQLITE_PRIVATE void sqlite3Int64ToText(i64 v, char *zOut){ +SQLITE_PRIVATE int sqlite3Int64ToText(i64 v, char *zOut){ int i; u64 x; char zTemp[22];

@@ -34128,12 +35143,15 @@ x = v;

} i = sizeof(zTemp)-2; zTemp[sizeof(zTemp)-1] = 0; - do{ - zTemp[i--] = (x%10) + '0'; + while( 1 /*exit-by-break*/ ){ + zTemp[i] = (x%10) + '0'; x = x/10; - }while( x ); - if( v<0 ) zTemp[i--] = '-'; - memcpy(zOut, &zTemp[i+1], sizeof(zTemp)-1-i); + if( x==0 ) break; + i--; + }; + if( v<0 ) zTemp[--i] = '-'; + memcpy(zOut, &zTemp[i], sizeof(zTemp)-i); + return sizeof(zTemp)-1-i; } /*
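
sqlite3Int64ToText() now also reports the rendered length. The technique itself is unchanged: digits are written backwards into the tail of a scratch buffer and then copied out. A standalone sketch of the same approach (illustrative, with an explicit guard for INT64_MIN):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int i64_to_text(int64_t v, char *zOut){   /* zOut must hold 21 bytes */
  char zTemp[22];
  int i = (int)sizeof(zTemp) - 2;
  uint64_t x = v<0 ? (uint64_t)-(v+1) + 1 : (uint64_t)v;  /* safe for INT64_MIN */
  zTemp[sizeof(zTemp)-1] = 0;
  while(1){
    zTemp[i] = (char)('0' + (x % 10));           /* fill digits right-to-left */
    x /= 10;
    if( x==0 ) break;
    i--;
  }
  if( v<0 ) zTemp[--i] = '-';
  memcpy(zOut, &zTemp[i], sizeof(zTemp) - i);    /* includes the NUL */
  return (int)(sizeof(zTemp) - 1 - i);
}

int main(void){
  char z[22];
  int n = i64_to_text(-9223372036854775807LL - 1, z);
  printf("%s (%d chars)\n", z, n);               /* -9223372036854775808 (20 chars) */
  return 0;
}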

@@ -34226,7 +35244,7 @@ if( u>LARGEST_INT64 ){

/* This test and assignment is needed only to suppress UB warnings ** from clang and -fsanitize=undefined. This test and assignment make ** the code a little larger and slower, and no harm comes from omitting - ** them, but we must appaise the undefined-behavior pharisees. */ + ** them, but we must appease the undefined-behavior pharisees. */ *pNum = neg ? SMALLEST_INT64 : LARGEST_INT64; }else if( neg ){ *pNum = -(i64)u;

@@ -34298,11 +35316,15 @@ for(k=i; sqlite3Isxdigit(z[k]); k++){

u = u*16 + sqlite3HexToInt(z[k]); } memcpy(pOut, &u, 8); - return (z[k]==0 && k-i<=16) ? 0 : 2; + if( k-i>16 ) return 2; + if( z[k]!=0 ) return 1; + return 0; }else #endif /* SQLITE_OMIT_HEX_INTEGER */ { - return sqlite3Atoi64(z, pOut, sqlite3Strlen30(z), SQLITE_UTF8); + int n = (int)(0x3fffffff&strspn(z,"+- \n\t0123456789")); + if( z[n] ) n++; + return sqlite3Atoi64(z, pOut, n, SQLITE_UTF8); } }

@@ -34334,7 +35356,7 @@ ){

u32 u = 0; zNum += 2; while( zNum[0]=='0' ) zNum++; - for(i=0; sqlite3Isxdigit(zNum[i]) && i<8; i++){ + for(i=0; i<8 && sqlite3Isxdigit(zNum[i]); i++){ u = u*16 + sqlite3HexToInt(zNum[i]); } if( (u&0x80000000)==0 && sqlite3Isxdigit(zNum[i])==0 ){

@@ -34382,6 +35404,153 @@ return x;

} /* +** Decode a floating-point value into an approximate decimal +** representation. +** +** Round the decimal representation to n significant digits if +** n is positive. Or round to -n signficant digits after the +** decimal point if n is negative. No rounding is performed if +** n is zero. +** +** The significant digits of the decimal representation are +** stored in p->z[] which is a often (but not always) a pointer +** into the middle of p->zBuf[]. There are p->n significant digits. +** The p->z[] array is *not* zero-terminated. +*/ +SQLITE_PRIVATE void sqlite3FpDecode(FpDecode *p, double r, int iRound, int mxRound){ + int i; + u64 v; + int e, exp = 0; + p->isSpecial = 0; + p->z = p->zBuf; + + /* Convert negative numbers to positive. Deal with Infinity, 0.0, and + ** NaN. */ + if( r<0.0 ){ + p->sign = '-'; + r = -r; + }else if( r==0.0 ){ + p->sign = '+'; + p->n = 1; + p->iDP = 1; + p->z = "0"; + return; + }else{ + p->sign = '+'; + } + memcpy(&v,&r,8); + e = v>>52; + if( (e&0x7ff)==0x7ff ){ + p->isSpecial = 1 + (v!=0x7ff0000000000000LL); + p->n = 0; + p->iDP = 0; + return; + } + + /* Multiply r by powers of ten until it lands somewhere in between + ** 1.0e+19 and 1.0e+17. + */ + if( sqlite3Config.bUseLongDouble ){ + LONGDOUBLE_TYPE rr = r; + if( rr>=1.0e+19 ){ + while( rr>=1.0e+119L ){ exp+=100; rr *= 1.0e-100L; } + while( rr>=1.0e+29L ){ exp+=10; rr *= 1.0e-10L; } + while( rr>=1.0e+19L ){ exp++; rr *= 1.0e-1L; } + }else{ + while( rr<1.0e-97L ){ exp-=100; rr *= 1.0e+100L; } + while( rr<1.0e+07L ){ exp-=10; rr *= 1.0e+10L; } + while( rr<1.0e+17L ){ exp--; rr *= 1.0e+1L; } + } + v = (u64)rr; + }else{ + /* If high-precision floating point is not available using "long double", + ** then use Dekker-style double-double computation to increase the + ** precision. + ** + ** The error terms on constants like 1.0e+100 computed using the + ** decimal extension, for example as follows: + ** + ** SELECT decimal_exp(decimal_sub('1.0e+100',decimal(1.0e+100))); + */ + double rr[2]; + rr[0] = r; + rr[1] = 0.0; + if( rr[0]>9.223372036854774784e+18 ){ + while( rr[0]>9.223372036854774784e+118 ){ + exp += 100; + dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117); + } + while( rr[0]>9.223372036854774784e+28 ){ + exp += 10; + dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27); + } + while( rr[0]>9.223372036854774784e+18 ){ + exp += 1; + dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18); + } + }else{ + while( rr[0]<9.223372036854774784e-83 ){ + exp -= 100; + dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83); + } + while( rr[0]<9.223372036854774784e+07 ){ + exp -= 10; + dekkerMul2(rr, 1.0e+10, 0.0); + } + while( rr[0]<9.22337203685477478e+17 ){ + exp -= 1; + dekkerMul2(rr, 1.0e+01, 0.0); + } + } + v = rr[1]<0.0 ? (u64)rr[0]-(u64)(-rr[1]) : (u64)rr[0]+(u64)rr[1]; + } + + + /* Extract significant digits. 
*/ + i = sizeof(p->zBuf)-1; + assert( v>0 ); + while( v ){ p->zBuf[i--] = (v%10) + '0'; v /= 10; } + assert( i>=0 && i<sizeof(p->zBuf)-1 ); + p->n = sizeof(p->zBuf) - 1 - i; + assert( p->n>0 ); + assert( p->n<sizeof(p->zBuf) ); + p->iDP = p->n + exp; + if( iRound<0 ){ + iRound = p->iDP - iRound; + if( iRound==0 && p->zBuf[i+1]>='5' ){ + iRound = 1; + p->zBuf[i--] = '0'; + p->n++; + p->iDP++; + } + } + if( iRound>0 && (iRound<p->n || p->n>mxRound) ){ + char *z = &p->zBuf[i+1]; + if( iRound>mxRound ) iRound = mxRound; + p->n = iRound; + if( z[iRound]>='5' ){ + int j = iRound-1; + while( 1 /*exit-by-break*/ ){ + z[j]++; + if( z[j]<='9' ) break; + z[j] = '0'; + if( j==0 ){ + p->z[i--] = '1'; + p->n++; + p->iDP++; + break; + }else{ + j--; + } + } + } + } + p->z = &p->zBuf[i+1]; + assert( i+p->n < sizeof(p->zBuf) ); + while( ALWAYS(p->n>0) && p->z[p->n-1]=='0' ){ p->n--; } +} + +/* ** Try to convert z into an unsigned 32-bit integer. Return true on ** success and false if there is an error. **

@@ -34644,121 +35813,32 @@ ** single-byte case. All code should use the MACRO version as

** this function assumes the single-byte case has already been handled. */ SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *p, u32 *v){ - u32 a,b; + u64 v64; + u8 n; - /* The 1-byte case. Overwhelmingly the most common. Handled inline - ** by the getVarin32() macro */ - a = *p; - /* a: p0 (unmasked) */ -#ifndef getVarint32 - if (!(a&0x80)) - { - /* Values between 0 and 127 */ - *v = a; - return 1; - } -#endif + /* Assume that the single-byte case has already been handled by + ** the getVarint32() macro */ + assert( (p[0] & 0x80)!=0 ); - /* The 2-byte case */ - p++; - b = *p; - /* b: p1 (unmasked) */ - if (!(b&0x80)) - { - /* Values between 128 and 16383 */ - a &= 0x7f; - a = a<<7; - *v = a | b; + if( (p[1] & 0x80)==0 ){ + /* This is the two-byte case */ + *v = ((p[0]&0x7f)<<7) | p[1]; return 2; } - - /* The 3-byte case */ - p++; - a = a<<14; - a |= *p; - /* a: p0<<14 | p2 (unmasked) */ - if (!(a&0x80)) - { - /* Values between 16384 and 2097151 */ - a &= (0x7f<<14)|(0x7f); - b &= 0x7f; - b = b<<7; - *v = a | b; + if( (p[2] & 0x80)==0 ){ + /* This is the three-byte case */ + *v = ((p[0]&0x7f)<<14) | ((p[1]&0x7f)<<7) | p[2]; return 3; } - - /* A 32-bit varint is used to store size information in btrees. - ** Objects are rarely larger than 2MiB limit of a 3-byte varint. - ** A 3-byte varint is sufficient, for example, to record the size - ** of a 1048569-byte BLOB or string. - ** - ** We only unroll the first 1-, 2-, and 3- byte cases. The very - ** rare larger cases can be handled by the slower 64-bit varint - ** routine. - */ -#if 1 - { - u64 v64; - u8 n; - - n = sqlite3GetVarint(p-2, &v64); - assert( n>3 && n<=9 ); - if( (v64 & SQLITE_MAX_U32)!=v64 ){ - *v = 0xffffffff; - }else{ - *v = (u32)v64; - } - return n; - } - -#else - /* For following code (kept for historical record only) shows an - ** unrolling for the 3- and 4-byte varint cases. This code is - ** slightly faster, but it is also larger and much harder to test. - */ - p++; - b = b<<14; - b |= *p; - /* b: p1<<14 | p3 (unmasked) */ - if (!(b&0x80)) - { - /* Values between 2097152 and 268435455 */ - b &= (0x7f<<14)|(0x7f); - a &= (0x7f<<14)|(0x7f); - a = a<<7; - *v = a | b; - return 4; - } - - p++; - a = a<<14; - a |= *p; - /* a: p0<<28 | p2<<14 | p4 (unmasked) */ - if (!(a&0x80)) - { - /* Values between 268435456 and 34359738367 */ - a &= SLOT_4_2_0; - b &= SLOT_4_2_0; - b = b<<7; - *v = a | b; - return 5; - } - - /* We can only reach this point when reading a corrupt database - ** file. In that case we are not in any hurry. Use the (relatively - ** slow) general-purpose sqlite3GetVarint() routine to extract the - ** value. */ - { - u64 v64; - u8 n; - - p -= 4; - n = sqlite3GetVarint(p, &v64); - assert( n>5 && n<=9 ); + /* four or more bytes */ + n = sqlite3GetVarint(p, &v64); + assert( n>3 && n<=9 ); + if( (v64 & SQLITE_MAX_U32)!=v64 ){ + *v = 0xffffffff; + }else{ *v = (u32)v64; - return n; } -#endif + return n; } /*
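
The rewritten sqlite3GetVarint32() above only unrolls the two- and three-byte cases and defers anything longer to the general 64-bit decoder. The encoding itself is big-endian base-128 with the high bit set on every byte except the last; a standalone sketch of the unrolled cases:

#include <stdio.h>
#include <stdint.h>

/* Decode up to a 3-byte varint; return the number of bytes consumed,
** or 0 if the value needs more than 3 bytes (caller falls back to a
** full 64-bit decoder, as the hunk above does). */
static int varint3_decode(const unsigned char *p, uint32_t *v){
  if( (p[0] & 0x80)==0 ){ *v = p[0]; return 1; }
  if( (p[1] & 0x80)==0 ){ *v = ((p[0]&0x7f)<<7) | p[1]; return 2; }
  if( (p[2] & 0x80)==0 ){ *v = ((p[0]&0x7f)<<14) | ((p[1]&0x7f)<<7) | p[2]; return 3; }
  return 0;
}

int main(void){
  const unsigned char enc[] = { 0x81, 0x00 };    /* encodes the value 128 */
  uint32_t v;
  int n = varint3_decode(enc, &v);
  printf("%u (%d bytes)\n", v, n);               /* 128 (2 bytes) */
  return 0;
}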

@@ -34909,7 +35989,7 @@ }

} /* -** Attempt to add, substract, or multiply the 64-bit signed value iB against +** Attempt to add, subtract, or multiply the 64-bit signed value iB against ** the other 64-bit signed integer at *pA and store the result in *pA. ** Return 0 on success. Or if the operation would have resulted in an ** overflow, leave *pA unchanged and return 1.

@@ -35195,6 +36275,104 @@ }while( i<mx );

return 0; } +/* +** High-resolution hardware timer used for debugging and testing only. +*/ +#if defined(VDBE_PROFILE) \ + || defined(SQLITE_PERFORMANCE_TRACE) \ + || defined(SQLITE_ENABLE_STMT_SCANSTATUS) +/************** Include hwtime.h in the middle of util.c *********************/ +/************** Begin file hwtime.h ******************************************/ +/* +** 2008 May 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains inline asm code for retrieving "high-performance" +** counters for x86 and x86_64 class CPUs. +*/ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H + +/* +** The following routine only works on Pentium-class (or newer) processors. +** It uses the RDTSC opcode to read the cycle count value out of the +** processor and returns that value. This can be used for high-res +** profiling. +*/ +#if !defined(__STRICT_ANSI__) && \ + (defined(__GNUC__) || defined(_MSC_VER)) && \ + (defined(i386) || defined(__i386__) || defined(_M_IX86)) + + #if defined(__GNUC__) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + + #elif defined(_MSC_VER) + + __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ + __asm { + rdtsc + ret ; return value at EDX:EAX + } + } + + #endif + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long long retval; + unsigned long junk; + __asm__ __volatile__ ("\n\ + 1: mftbu %1\n\ + mftb %L0\n\ + mftbu %0\n\ + cmpw %0,%1\n\ + bne 1b" + : "=r" (retval), "=r" (junk)); + return retval; + } + +#else + + /* + ** asm() is needed for hardware timing support. Without asm(), + ** disable the sqlite3Hwtime() routine. + ** + ** sqlite3Hwtime() is only used for some obscure debugging + ** and analysis configurations, not in any deliverable, so this + ** should not be a great loss. + */ +SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } + +#endif + +#endif /* !defined(SQLITE_HWTIME_H) */ + +/************** End of hwtime.h **********************************************/ +/************** Continuing where we left off in util.c ***********************/ +#endif + /************** End of util.c ************************************************/ /************** Begin file hash.c ********************************************/ /*
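
The hwtime.h block above reads the x86 cycle counter with inline RDTSC. A minimal usage sketch of the same idea, assuming a GCC/Clang x86 target that provides the __rdtsc() intrinsic (RDTSC is not serializing, so treat the result as a rough cycle count only):

#include <stdio.h>
#include <x86intrin.h>   /* __rdtsc() on GCC/Clang; MSVC provides it via <intrin.h> */

int main(void){
  unsigned long long t0 = __rdtsc();
  volatile double x = 0.0;                       /* volatile keeps the loop alive */
  for(int i=0; i<1000000; i++) x += i*0.5;
  unsigned long long t1 = __rdtsc();
  printf("~%llu cycles\n", t1 - t0);
  return 0;
}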

@@ -35296,7 +36474,7 @@ }

} -/* Resize the hash table so that it cantains "new_size" buckets. +/* Resize the hash table so that it contains "new_size" buckets. ** ** The hash table might fail to resize if sqlite3_malloc() fails or ** if the new size is the same as the prior size.

@@ -35365,12 +36543,13 @@ elem = pH->first;

count = pH->count; } if( pHash ) *pHash = h; - while( count-- ){ + while( count ){ assert( elem!=0 ); if( sqlite3StrICmp(elem->pKey,pKey)==0 ){ return elem; } elem = elem->next; + count--; } return &nullElement; }

@@ -35655,19 +36834,20 @@ /* 170 */ "VBegin" OpHelp(""),

/* 171 */ "VCreate" OpHelp(""), /* 172 */ "VDestroy" OpHelp(""), /* 173 */ "VOpen" OpHelp(""), - /* 174 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"), - /* 175 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), - /* 176 */ "VRename" OpHelp(""), - /* 177 */ "Pagecount" OpHelp(""), - /* 178 */ "MaxPgcnt" OpHelp(""), - /* 179 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"), - /* 180 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"), - /* 181 */ "Trace" OpHelp(""), - /* 182 */ "CursorHint" OpHelp(""), - /* 183 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), - /* 184 */ "Noop" OpHelp(""), - /* 185 */ "Explain" OpHelp(""), - /* 186 */ "Abortable" OpHelp(""), + /* 174 */ "VCheck" OpHelp(""), + /* 175 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"), + /* 176 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), + /* 177 */ "VRename" OpHelp(""), + /* 178 */ "Pagecount" OpHelp(""), + /* 179 */ "MaxPgcnt" OpHelp(""), + /* 180 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"), + /* 181 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"), + /* 182 */ "Trace" OpHelp(""), + /* 183 */ "CursorHint" OpHelp(""), + /* 184 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), + /* 185 */ "Noop" OpHelp(""), + /* 186 */ "Explain" OpHelp(""), + /* 187 */ "Abortable" OpHelp(""), }; return azName[i]; }

@@ -35729,7 +36909,9 @@ unsigned int nJrnl; /* Space allocated for aJrnl[] */

char *aJrnl; /* Journal content */ int szPage; /* Last known page size */ sqlite3_int64 szDb; /* Database file size. -1 means unknown */ + char *aData; /* Buffer to hold page data */ }; +#define SQLITE_KVOS_SZ 133073 /* ** Methods for KVVfsFile

@@ -36092,8 +37274,7 @@ }

if( j+n>nOut ) return -1; memset(&aOut[j], 0, n); j += n; - c = aIn[i]; - if( c==0 ) break; + if( c==0 || mult==1 ) break; /* progress stalled if mult==1 */ }else{ aOut[j] = c<<4; c = kvvfsHexValue[aIn[++i]];

@@ -36170,6 +37351,7 @@

SQLITE_KV_LOG(("xClose %s %s\n", pFile->zClass, pFile->isJournal ? "journal" : "db")); sqlite3_free(pFile->aJrnl); + sqlite3_free(pFile->aData); return SQLITE_OK; }

@@ -36218,7 +37400,7 @@ KVVfsFile *pFile = (KVVfsFile*)pProtoFile;

unsigned int pgno; int got, n; char zKey[30]; - char aData[133073]; + char *aData = pFile->aData; assert( iOfst>=0 ); assert( iAmt>=0 ); SQLITE_KV_LOG(("xRead('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst));

@@ -36235,7 +37417,8 @@ }else{

pgno = 1; } sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno); - got = sqlite3KvvfsMethods.xRead(pFile->zClass, zKey, aData, sizeof(aData)-1); + got = sqlite3KvvfsMethods.xRead(pFile->zClass, zKey, + aData, SQLITE_KVOS_SZ-1); if( got<0 ){ n = 0; }else{

@@ -36243,7 +37426,7 @@ aData[got] = 0;

if( iOfst+iAmt<512 ){ int k = iOfst+iAmt; aData[k*2] = 0; - n = kvvfsDecode(aData, &aData[2000], sizeof(aData)-2000); + n = kvvfsDecode(aData, &aData[2000], SQLITE_KVOS_SZ-2000); if( n>=iOfst+iAmt ){ memcpy(zBuf, &aData[2000+iOfst], iAmt); n = iAmt;

@@ -36302,7 +37485,7 @@ ){

KVVfsFile *pFile = (KVVfsFile*)pProtoFile; unsigned int pgno; char zKey[30]; - char aData[131073]; + char *aData = pFile->aData; SQLITE_KV_LOG(("xWrite('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst)); assert( iAmt>=512 && iAmt<=65536 ); assert( (iAmt & (iAmt-1))==0 );

@@ -36511,6 +37694,10 @@ pFile->zClass = "session";

}else{ pFile->zClass = "local"; } + pFile->aData = sqlite3_malloc64(SQLITE_KVOS_SZ); + if( pFile->aData==0 ){ + return SQLITE_NOMEM; + } pFile->aJrnl = 0; pFile->nJrnl = 0; pFile->szPage = -1;

@@ -36674,7 +37861,7 @@ **

** This source file is organized into divisions where the logic for various ** subfunctions is contained within the appropriate division. PLEASE ** KEEP THE STRUCTURE OF THIS FILE INTACT. New code should be placed -** in the correct division and should be clearly labeled. +** in the correct division and should be clearly labelled. ** ** The layout of divisions is as follows: **

@@ -36724,7 +37911,7 @@ # endif

#endif /* Use pread() and pwrite() if they are available */ -#if defined(__APPLE__) +#if defined(__APPLE__) || defined(__linux__) # define HAVE_PREAD 1 # define HAVE_PWRITE 1 #endif

@@ -36747,7 +37934,8 @@ #include <unistd.h> /* amalgamator: keep */

/* #include <time.h> */ #include <sys/time.h> /* amalgamator: keep */ #include <errno.h> -#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 +#if (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) \ + && !defined(SQLITE_WASI) # include <sys/mman.h> #endif

@@ -36835,9 +38023,46 @@ ** Maximum supported symbolic links

*/ #define SQLITE_MAX_SYMLINKS 100 +/* +** Remove and stub certain info for WASI (WebAssembly System +** Interface) builds. +*/ +#ifdef SQLITE_WASI +# undef HAVE_FCHMOD +# undef HAVE_FCHOWN +# undef HAVE_MREMAP +# define HAVE_MREMAP 0 +# ifndef SQLITE_DEFAULT_UNIX_VFS +# define SQLITE_DEFAULT_UNIX_VFS "unix-dotfile" + /* ^^^ should SQLITE_DEFAULT_UNIX_VFS be "unix-none"? */ +# endif +# ifndef F_RDLCK +# define F_RDLCK 0 +# define F_WRLCK 1 +# define F_UNLCK 2 +# if __LONG_MAX == 0x7fffffffL +# define F_GETLK 12 +# define F_SETLK 13 +# define F_SETLKW 14 +# else +# define F_GETLK 5 +# define F_SETLK 6 +# define F_SETLKW 7 +# endif +# endif +#else /* !SQLITE_WASI */ +# ifndef HAVE_FCHMOD +# define HAVE_FCHMOD +# endif +#endif /* SQLITE_WASI */ + +#ifdef SQLITE_WASI +# define osGetpid(X) (pid_t)1 +#else /* Always cast the getpid() return type for compatibility with ** kernel modules in VxWorks. */ -#define osGetpid(X) (pid_t)getpid() +# define osGetpid(X) (pid_t)getpid() +#endif /* ** Only set the lastErrno if the error code is a real error and not

@@ -37109,7 +38334,11 @@ #endif

#define osPwrite64 ((ssize_t(*)(int,const void*,size_t,off64_t))\ aSyscall[13].pCurrent) +#if defined(HAVE_FCHMOD) { "fchmod", (sqlite3_syscall_ptr)fchmod, 0 }, +#else + { "fchmod", (sqlite3_syscall_ptr)0, 0 }, +#endif #define osFchmod ((int(*)(int,mode_t))aSyscall[14].pCurrent) #if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE

@@ -37145,14 +38374,16 @@ { "geteuid", (sqlite3_syscall_ptr)0, 0 },

#endif #define osGeteuid ((uid_t(*)(void))aSyscall[21].pCurrent) -#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 +#if (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) \ + && !defined(SQLITE_WASI) { "mmap", (sqlite3_syscall_ptr)mmap, 0 }, #else { "mmap", (sqlite3_syscall_ptr)0, 0 }, #endif #define osMmap ((void*(*)(void*,size_t,int,int,int,off_t))aSyscall[22].pCurrent) -#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 +#if (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) \ + && !defined(SQLITE_WASI) { "munmap", (sqlite3_syscall_ptr)munmap, 0 }, #else { "munmap", (sqlite3_syscall_ptr)0, 0 },

@@ -37217,7 +38448,7 @@ }

/* ** This is the xSetSystemCall() method of sqlite3_vfs for all of the -** "unix" VFSes. Return SQLITE_OK opon successfully updating the +** "unix" VFSes. Return SQLITE_OK upon successfully updating the ** system call pointer, or SQLITE_NOTFOUND if there is no configurable ** system call named zName. */

@@ -37338,6 +38569,9 @@ if( errno==EINTR ) continue;

break; } if( fd>=SQLITE_MINIMUM_FILE_DESCRIPTOR ) break; + if( (f & (O_EXCL|O_CREAT))==(O_EXCL|O_CREAT) ){ + (void)osUnlink(z); + } osClose(fd); sqlite3_log(SQLITE_WARNING, "attempt to open \"%s\" as file descriptor %d", z, fd);

@@ -37736,7 +38970,7 @@ **

** If you close a file descriptor that points to a file that has locks, ** all locks on that file that are owned by the current process are ** released. To work around this problem, each unixInodeInfo object -** maintains a count of the number of pending locks on tha inode. +** maintains a count of the number of pending locks on the inode. ** When an attempt is made to close an unixFile, if there are ** other unixFile open on the same inode that are holding locks, the call ** to close() the file descriptor is deferred until all of the locks clear.

@@ -37750,7 +38984,7 @@ ** Many older versions of linux use the LinuxThreads library which is

** not posix compliant. Under LinuxThreads, a lock created by thread ** A cannot be modified or overridden by a different thread B. ** Only thread A can modify the lock. Locking behavior is correct -** if the appliation uses the newer Native Posix Thread Library (NPTL) +** if the application uses the newer Native Posix Thread Library (NPTL) ** on linux - with NPTL a lock created by thread A can override locks ** in thread B. But there is no way to know at compile-time which ** threading library is being used. So there is no way to know at

@@ -37952,7 +39186,7 @@ pFile->lastErrno = error;

} /* -** Close all file descriptors accumuated in the unixInodeInfo->pUnused list. +** Close all file descriptors accumulated in the unixInodeInfo->pUnused list. */ static void closePendingFds(unixFile *pFile){ unixInodeInfo *pInode = pFile->pInode;

@@ -38300,7 +39534,7 @@ ** transitions and the inserted intermediate states:

** ** UNLOCKED -> SHARED ** SHARED -> RESERVED -** SHARED -> (PENDING) -> EXCLUSIVE +** SHARED -> EXCLUSIVE ** RESERVED -> (PENDING) -> EXCLUSIVE ** PENDING -> EXCLUSIVE **

@@ -38315,7 +39549,7 @@ ** confusion with SQLite lock names). The algorithms are complicated

** slightly in order to be compatible with Windows95 systems simultaneously ** accessing the same database file, in case that is ever required. ** - ** Symbols defined in os.h indentify the 'pending byte' and the 'reserved + ** Symbols defined in os.h identify the 'pending byte' and the 'reserved ** byte', each single bytes at well known offsets, and the 'shared byte ** range', a range of 510 bytes at a well known offset. **

@@ -38323,7 +39557,7 @@ ** To obtain a SHARED lock, a read-lock is obtained on the 'pending

** byte'. If this is successful, 'shared byte range' is read-locked ** and the lock on the 'pending byte' released. (Legacy note: When ** SQLite was first developed, Windows95 systems were still very common, - ** and Widnows95 lacks a shared-lock capability. So on Windows95, a + ** and Windows95 lacks a shared-lock capability. So on Windows95, a ** single randomly selected by from the 'shared byte range' is locked. ** Windows95 is now pretty much extinct, but this work-around for the ** lack of shared-locks on Windows95 lives on, for backwards

@@ -38333,19 +39567,20 @@ ** A process may only obtain a RESERVED lock after it has a SHARED lock.

** A RESERVED lock is implemented by grabbing a write-lock on the ** 'reserved byte'. ** - ** A process may only obtain a PENDING lock after it has obtained a - ** SHARED lock. A PENDING lock is implemented by obtaining a write-lock - ** on the 'pending byte'. This ensures that no new SHARED locks can be - ** obtained, but existing SHARED locks are allowed to persist. A process - ** does not have to obtain a RESERVED lock on the way to a PENDING lock. - ** This property is used by the algorithm for rolling back a journal file - ** after a crash. + ** An EXCLUSIVE lock may only be requested after either a SHARED or + ** RESERVED lock is held. An EXCLUSIVE lock is implemented by obtaining + ** a write-lock on the entire 'shared byte range'. Since all other locks + ** require a read-lock on one of the bytes within this range, this ensures + ** that no other locks are held on the database. ** - ** An EXCLUSIVE lock, obtained after a PENDING lock is held, is - ** implemented by obtaining a write-lock on the entire 'shared byte - ** range'. Since all other locks require a read-lock on one of the bytes - ** within this range, this ensures that no other locks are held on the - ** database. + ** If a process that holds a RESERVED lock requests an EXCLUSIVE, then + ** a PENDING lock is obtained first. A PENDING lock is implemented by + ** obtaining a write-lock on the 'pending byte'. This ensures that no new + ** SHARED locks can be obtained, but existing SHARED locks are allowed to + ** persist. If the call to this function fails to obtain the EXCLUSIVE + ** lock in this case, it holds the PENDING lock instead. The client may + ** then re-attempt the EXCLUSIVE lock later on, after existing SHARED + ** locks have cleared. */ int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id;
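
The revised comment above describes the byte-range protocol: a SHARED lock is taken by read-locking the "pending byte", read-locking the "shared byte range", and then releasing the pending byte; PENDING is now only taken on the RESERVED-to-EXCLUSIVE path. An illustrative POSIX sketch of the SHARED step using fcntl() advisory locks; the offsets are assumptions based on SQLite's conventional defaults (pending byte at 0x40000000, a 510-byte shared range two bytes later), not values taken from this diff:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#define PENDING_BYTE_OFS  0x40000000L
#define SHARED_FIRST_OFS  (PENDING_BYTE_OFS + 2)
#define SHARED_SIZE_LEN   510L

static int take_shared_lock(int fd){
  struct flock lk;
  memset(&lk, 0, sizeof(lk));
  /* 1. Read-lock the pending byte: fails if a writer is pending. */
  lk.l_type = F_RDLCK; lk.l_whence = SEEK_SET;
  lk.l_start = PENDING_BYTE_OFS; lk.l_len = 1;
  if( fcntl(fd, F_SETLK, &lk)==-1 ) return -1;
  /* 2. Read-lock the shared byte range. */
  lk.l_start = SHARED_FIRST_OFS; lk.l_len = SHARED_SIZE_LEN;
  if( fcntl(fd, F_SETLK, &lk)==-1 ) return -1;   /* a real impl also drops lock (1) here */
  /* 3. Release the lock on the pending byte. */
  lk.l_type = F_UNLCK; lk.l_start = PENDING_BYTE_OFS; lk.l_len = 1;
  fcntl(fd, F_SETLK, &lk);
  return 0;
}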

@@ -38371,7 +39606,7 @@ }

/* Make sure the locking sequence is correct. ** (1) We never move from unlocked to anything higher than shared lock. - ** (2) SQLite never explicitly requests a pendig lock. + ** (2) SQLite never explicitly requests a pending lock. ** (3) A shared lock is always held when a reserve lock is requested. */ assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK );

@@ -38416,7 +39651,7 @@ */

lock.l_len = 1L; lock.l_whence = SEEK_SET; if( eFileLock==SHARED_LOCK - || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLock<PENDING_LOCK) + || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLock==RESERVED_LOCK) ){ lock.l_type = (eFileLock==SHARED_LOCK?F_RDLCK:F_WRLCK); lock.l_start = PENDING_BYTE;

@@ -38427,6 +39662,9 @@ if( rc!=SQLITE_BUSY ){

storeLastErrno(pFile, tErrno); } goto end_lock; + }else if( eFileLock==EXCLUSIVE_LOCK ){ + pFile->eFileLock = PENDING_LOCK; + pInode->eFileLock = PENDING_LOCK; } }

@@ -38514,13 +39752,9 @@ pFile->inNormalWrite = 1;

} #endif - if( rc==SQLITE_OK ){ pFile->eFileLock = eFileLock; pInode->eFileLock = eFileLock; - }else if( eFileLock==EXCLUSIVE_LOCK ){ - pFile->eFileLock = PENDING_LOCK; - pInode->eFileLock = PENDING_LOCK; } end_lock:

@@ -39590,7 +40824,7 @@ }

/* Make sure the locking sequence is correct ** (1) We never move from unlocked to anything higher than shared lock. - ** (2) SQLite never explicitly requests a pendig lock. + ** (2) SQLite never explicitly requests a pending lock. ** (3) A shared lock is always held when a reserve lock is requested. */ assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK );

@@ -39706,7 +40940,7 @@ */

if( !(failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST + pInode->sharedByte, 1, 0)) ){ int failed2 = SQLITE_OK; - /* now attemmpt to get the exclusive lock range */ + /* now attempt to get the exclusive lock range */ failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST, SHARED_SIZE, 1); if( failed && (failed2 = afpSetLock(context->dbPath, pFile,

@@ -39755,9 +40989,6 @@ unixFile *pFile = (unixFile*)id;

unixInodeInfo *pInode; afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; int skipShared = 0; -#ifdef SQLITE_TEST - int h = pFile->h; -#endif assert( pFile ); OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (afp)\n", pFile->h, eFileLock,

@@ -39773,9 +41004,6 @@ sqlite3_mutex_enter(pInode->pLockMutex);

assert( pInode->nShared!=0 ); if( pFile->eFileLock>SHARED_LOCK ){ assert( pInode->eFileLock==pFile->eFileLock ); - SimulateIOErrorBenign(1); - SimulateIOError( h=(-1) ) - SimulateIOErrorBenign(0); #ifdef SQLITE_DEBUG /* When reducing a lock such that other processes can start

@@ -39824,9 +41052,6 @@ */

unsigned long long sharedLockByte = SHARED_FIRST+pInode->sharedByte; pInode->nShared--; if( pInode->nShared==0 ){ - SimulateIOErrorBenign(1); - SimulateIOError( h=(-1) ) - SimulateIOErrorBenign(0); if( !skipShared ){ rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 0); }

@@ -39927,12 +41152,6 @@ /*

** Seek to the offset passed as the second argument, then read cnt ** bytes into pBuf. Return the number of bytes actually read. ** -** NB: If you define USE_PREAD or USE_PREAD64, then it might also -** be necessary to define _XOPEN_SOURCE to be 500. This varies from -** one system to another. Since SQLite does not define USE_PREAD -** in any form by default, we will not attempt to define _XOPEN_SOURCE. -** See tickets #2741 and #2681. -** ** To avoid stomping the errno value on a failed read the lastErrno value ** is set before returning. */

@@ -40007,7 +41226,7 @@ );

#endif #if SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this read request as possible by transfering + /* Deal with as much of this read request as possible by transferring ** data from the memory mapping using memcpy(). */ if( offset<pFile->mmapSize ){ if( offset+amt <= pFile->mmapSize ){

@@ -40159,7 +41378,7 @@ }

#endif #if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this write request as possible by transfering + /* Deal with as much of this write request as possible by transferring ** data from the memory mapping using memcpy(). */ if( offset<pFile->mmapSize ){ if( offset+amt <= pFile->mmapSize ){

@@ -40281,7 +41500,7 @@

/* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a ** no-op. But go ahead and call fstat() to validate the file ** descriptor as we need a method to provoke a failure during - ** coverate testing. + ** coverage testing. */ #ifdef SQLITE_NO_SYNC {

@@ -43111,12 +44330,10 @@ assert( zName!=0 );

if( zName[0]=='.' ){ if( nName==1 ) return; if( zName[1]=='.' && nName==2 ){ - if( pPath->nUsed<=1 ){ - pPath->rc = SQLITE_ERROR; - return; + if( pPath->nUsed>1 ){ + assert( pPath->zOut[0]=='/' ); + while( pPath->zOut[--pPath->nUsed]!='/' ){} } - assert( pPath->zOut[0]=='/' ); - while( pPath->zOut[--pPath->nUsed]!='/' ){} return; } }

@@ -43328,12 +44545,17 @@ ** might be greater than or equal to the argument, but not less

** than the argument. */ static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){ -#if OS_VXWORKS +#if !defined(HAVE_NANOSLEEP) || HAVE_NANOSLEEP+0 struct timespec sp; - sp.tv_sec = microseconds / 1000000; sp.tv_nsec = (microseconds % 1000000) * 1000; + + /* Almost all modern unix systems support nanosleep(). But if you are + ** compiling for one of the rare exceptions, you can use + ** -DHAVE_NANOSLEEP=0 (perhaps in conjuction with -DHAVE_USLEEP if + ** usleep() is available) in order to bypass the use of nanosleep() */ nanosleep(&sp, NULL); + UNUSED_PARAMETER(NotUsed); return microseconds; #elif defined(HAVE_USLEEP) && HAVE_USLEEP

@@ -45923,7 +47145,7 @@ }; /* End of the overrideable system calls */

/* ** This is the xSetSystemCall() method of sqlite3_vfs for all of the -** "win32" VFSes. Return SQLITE_OK opon successfully updating the +** "win32" VFSes. Return SQLITE_OK upon successfully updating the ** system call pointer, or SQLITE_NOTFOUND if there is no configurable ** system call named zName. */

@@ -47503,7 +48725,7 @@ "offset=%lld, lock=%d\n", osGetCurrentProcessId(), pFile,

pFile->h, pBuf, amt, offset, pFile->locktype)); #if SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this read request as possible by transfering + /* Deal with as much of this read request as possible by transferring ** data from the memory mapping using memcpy(). */ if( offset<pFile->mmapSize ){ if( offset+amt <= pFile->mmapSize ){

@@ -47581,7 +48803,7 @@ "offset=%lld, lock=%d\n", osGetCurrentProcessId(), pFile,

pFile->h, pBuf, amt, offset, pFile->locktype)); #if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this write request as possible by transfering + /* Deal with as much of this write request as possible by transferring ** data from the memory mapping using memcpy(). */ if( offset<pFile->mmapSize ){ if( offset+amt <= pFile->mmapSize ){

@@ -47691,7 +48913,7 @@ ** The only feasible work-around is to defer the truncation until after

** all references to memory-mapped content are closed. That is doable, ** but involves adding a few branches in the common write code path which ** could slow down normal operations slightly. Hence, we have decided for - ** now to simply make trancations a no-op if there are pending reads. We + ** now to simply make transactions a no-op if there are pending reads. We ** can maybe revisit this decision in the future. */ return SQLITE_OK;

@@ -47750,7 +48972,7 @@

#ifdef SQLITE_TEST /* ** Count the number of fullsyncs and normal syncs. This is used to test -** that syncs and fullsyncs are occuring at the right times. +** that syncs and fullsyncs are occurring at the right times. */ SQLITE_API int sqlite3_sync_count = 0; SQLITE_API int sqlite3_fullsync_count = 0;

@@ -48107,7 +49329,7 @@ /* Acquire an EXCLUSIVE lock

*/ if( locktype==EXCLUSIVE_LOCK && res ){ assert( pFile->locktype>=SHARED_LOCK ); - res = winUnlockReadLock(pFile); + (void)winUnlockReadLock(pFile); res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, SHARED_FIRST, 0, SHARED_SIZE, 0); if( res ){

@@ -49511,6 +50733,7 @@ "abcdefghijklmnopqrstuvwxyz"

"ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789"; size_t i, j; + DWORD pid; int nPre = sqlite3Strlen30(SQLITE_TEMP_FILE_PREFIX); int nMax, nBuf, nDir, nLen; char *zBuf;

@@ -49723,7 +50946,10 @@ sqlite3_snprintf(nBuf-16-nLen, zBuf+nLen, SQLITE_TEMP_FILE_PREFIX);

j = sqlite3Strlen30(zBuf); sqlite3_randomness(15, &zBuf[j]); + pid = osGetCurrentProcessId(); for(i=0; i<15; i++, j++){ + zBuf[j] += pid & 0xff; + pid >>= 8; zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; } zBuf[j] = 0;

@@ -49961,7 +51187,7 @@ if( h!=INVALID_HANDLE_VALUE ) break;

if( isReadWrite ){ int rc2, isRO = 0; sqlite3BeginBenignMalloc(); - rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO); + rc2 = winAccess(pVfs, zUtf8Name, SQLITE_ACCESS_READ, &isRO); sqlite3EndBenignMalloc(); if( rc2==SQLITE_OK && isRO ) break; }

@@ -49978,7 +51204,7 @@ if( h!=INVALID_HANDLE_VALUE ) break;

if( isReadWrite ){ int rc2, isRO = 0; sqlite3BeginBenignMalloc(); - rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO); + rc2 = winAccess(pVfs, zUtf8Name, SQLITE_ACCESS_READ, &isRO); sqlite3EndBenignMalloc(); if( rc2==SQLITE_OK && isRO ) break; }

@@ -49998,7 +51224,7 @@ if( h!=INVALID_HANDLE_VALUE ) break;

if( isReadWrite ){ int rc2, isRO = 0; sqlite3BeginBenignMalloc(); - rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO); + rc2 = winAccess(pVfs, zUtf8Name, SQLITE_ACCESS_READ, &isRO); sqlite3EndBenignMalloc(); if( rc2==SQLITE_OK && isRO ) break; }

@@ -50221,6 +51447,13 @@ SimulateIOError( return SQLITE_IOERR_ACCESS; );

OSTRACE(("ACCESS name=%s, flags=%x, pResOut=%p\n", zFilename, flags, pResOut)); + if( zFilename==0 ){ + *pResOut = 0; + OSTRACE(("ACCESS name=%s, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n", + zFilename, pResOut, *pResOut)); + return SQLITE_OK; + } + zConverted = winConvertFromUtf8Filename(zFilename); if( zConverted==0 ){ OSTRACE(("ACCESS name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename));

@@ -51071,6 +52304,7 @@ static int memdbTruncate(sqlite3_file*, sqlite3_int64 size);

static int memdbSync(sqlite3_file*, int flags); static int memdbFileSize(sqlite3_file*, sqlite3_int64 *pSize); static int memdbLock(sqlite3_file*, int); +static int memdbUnlock(sqlite3_file*, int); /* static int memdbCheckReservedLock(sqlite3_file*, int *pResOut);// not used */ static int memdbFileControl(sqlite3_file*, int op, void *pArg); /* static int memdbSectorSize(sqlite3_file*); // not used */

@@ -51129,7 +52363,7 @@ memdbTruncate, /* xTruncate */

memdbSync, /* xSync */ memdbFileSize, /* xFileSize */ memdbLock, /* xLock */ - memdbLock, /* xUnlock - same as xLock in this case */ + memdbUnlock, /* xUnlock */ 0, /* memdbCheckReservedLock, */ /* xCheckReservedLock */ memdbFileControl, /* xFileControl */ 0, /* memdbSectorSize,*/ /* xSectorSize */

@@ -51330,39 +52564,81 @@ static int memdbLock(sqlite3_file *pFile, int eLock){

MemFile *pThis = (MemFile*)pFile; MemStore *p = pThis->pStore; int rc = SQLITE_OK; - if( eLock==pThis->eLock ) return SQLITE_OK; + if( eLock<=pThis->eLock ) return SQLITE_OK; memdbEnter(p); - if( eLock>SQLITE_LOCK_SHARED ){ - if( p->mFlags & SQLITE_DESERIALIZE_READONLY ){ - rc = SQLITE_READONLY; - }else if( pThis->eLock<=SQLITE_LOCK_SHARED ){ - if( p->nWrLock ){ - rc = SQLITE_BUSY; - }else{ - p->nWrLock = 1; + + assert( p->nWrLock==0 || p->nWrLock==1 ); + assert( pThis->eLock<=SQLITE_LOCK_SHARED || p->nWrLock==1 ); + assert( pThis->eLock==SQLITE_LOCK_NONE || p->nRdLock>=1 ); + + if( eLock>SQLITE_LOCK_SHARED && (p->mFlags & SQLITE_DESERIALIZE_READONLY) ){ + rc = SQLITE_READONLY; + }else{ + switch( eLock ){ + case SQLITE_LOCK_SHARED: { + assert( pThis->eLock==SQLITE_LOCK_NONE ); + if( p->nWrLock>0 ){ + rc = SQLITE_BUSY; + }else{ + p->nRdLock++; + } + break; + }; + + case SQLITE_LOCK_RESERVED: + case SQLITE_LOCK_PENDING: { + assert( pThis->eLock>=SQLITE_LOCK_SHARED ); + if( ALWAYS(pThis->eLock==SQLITE_LOCK_SHARED) ){ + if( p->nWrLock>0 ){ + rc = SQLITE_BUSY; + }else{ + p->nWrLock = 1; + } + } + break; + } + + default: { + assert( eLock==SQLITE_LOCK_EXCLUSIVE ); + assert( pThis->eLock>=SQLITE_LOCK_SHARED ); + if( p->nRdLock>1 ){ + rc = SQLITE_BUSY; + }else if( pThis->eLock==SQLITE_LOCK_SHARED ){ + p->nWrLock = 1; + } + break; } } - }else if( eLock==SQLITE_LOCK_SHARED ){ - if( pThis->eLock > SQLITE_LOCK_SHARED ){ - assert( p->nWrLock==1 ); - p->nWrLock = 0; - }else if( p->nWrLock ){ - rc = SQLITE_BUSY; - }else{ - p->nRdLock++; + } + if( rc==SQLITE_OK ) pThis->eLock = eLock; + memdbLeave(p); + return rc; +} + +/* +** Unlock an memdb-file. +*/ +static int memdbUnlock(sqlite3_file *pFile, int eLock){ + MemFile *pThis = (MemFile*)pFile; + MemStore *p = pThis->pStore; + if( eLock>=pThis->eLock ) return SQLITE_OK; + memdbEnter(p); + + assert( eLock==SQLITE_LOCK_SHARED || eLock==SQLITE_LOCK_NONE ); + if( eLock==SQLITE_LOCK_SHARED ){ + if( ALWAYS(pThis->eLock>SQLITE_LOCK_SHARED) ){ + p->nWrLock--; } }else{ - assert( eLock==SQLITE_LOCK_NONE ); if( pThis->eLock>SQLITE_LOCK_SHARED ){ - assert( p->nWrLock==1 ); - p->nWrLock = 0; + p->nWrLock--; } - assert( p->nRdLock>0 ); p->nRdLock--; } - if( rc==SQLITE_OK ) pThis->eLock = eLock; + + pThis->eLock = eLock; memdbLeave(p); - return rc; + return SQLITE_OK; } #if 0

@@ -51472,7 +52748,7 @@ UNUSED_PARAMETER(pVfs);

memset(pFile, 0, sizeof(*pFile)); szName = sqlite3Strlen30(zName); - if( szName>1 && zName[0]=='/' ){ + if( szName>1 && (zName[0]=='/' || zName[0]=='\\') ){ int i; #ifndef SQLITE_MUTEX_OMIT sqlite3_mutex *pVfsMutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1);

@@ -51817,6 +53093,13 @@ sqlite3_free(pData);

} sqlite3_mutex_leave(db->mutex); return rc; +} + +/* +** Return true if the VFS is the memvfs. +*/ +SQLITE_PRIVATE int sqlite3IsMemdb(const sqlite3_vfs *pVfs){ + return pVfs==&memdb_vfs; } /*

@@ -52031,7 +53314,7 @@ }

h = BITVEC_HASH(i++); /* if there wasn't a hash collision, and this doesn't */ /* completely fill the hash, then just add it without */ - /* worring about sub-dividing and re-hashing. */ + /* worrying about sub-dividing and re-hashing. */ if( !p->u.aHash[h] ){ if (p->nSet<(BITVEC_NINT-1)) { goto bitvec_set_end;

@@ -52298,7 +53581,7 @@ */

struct PCache { PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ PgHdr *pSynced; /* Last synced page in dirty page list */ - int nRefSum; /* Sum of ref counts over all pages */ + i64 nRefSum; /* Sum of ref counts over all pages */ int szCache; /* Configured cache size */ int szSpill; /* Size before spilling occurs */ int szPage; /* Size of every page in this cache */

@@ -52327,11 +53610,15 @@ static void pcachePageTrace(int i, sqlite3_pcache_page *pLower){

PgHdr *pPg; unsigned char *a; int j; - pPg = (PgHdr*)pLower->pExtra; - printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags); - a = (unsigned char *)pLower->pBuf; - for(j=0; j<12; j++) printf("%02x", a[j]); - printf(" ptr %p\n", pPg); + if( pLower==0 ){ + printf("%3d: NULL\n", i); + }else{ + pPg = (PgHdr*)pLower->pExtra; + printf("%3d: nRef %2lld flgs %02x data ", i, pPg->nRef, pPg->flags); + a = (unsigned char *)pLower->pBuf; + for(j=0; j<12; j++) printf("%02x", a[j]); + printf(" ptr %p\n", pPg); + } } static void pcacheDump(PCache *pCache){ int N;

@@ -52344,9 +53631,8 @@ N = sqlite3PcachePagecount(pCache);

if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump; for(i=1; i<=N; i++){ pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0); - if( pLower==0 ) continue; pcachePageTrace(i, pLower); - if( ((PgHdr*)pLower)->pPage==0 ){ + if( pLower && ((PgHdr*)pLower)->pPage==0 ){ sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0); } }

@@ -52361,7 +53647,7 @@ /*

** Return 1 if pPg is on the dirty list for pCache. Return 0 if not. ** This routine runs inside of assert() statements only. */ -#ifdef SQLITE_DEBUG +#if defined(SQLITE_ENABLE_EXPENSIVE_ASSERT) static int pageOnDirtyList(PCache *pCache, PgHdr *pPg){ PgHdr *p; for(p=pCache->pDirty; p; p=p->pDirtyNext){

@@ -52369,6 +53655,16 @@ if( p==pPg ) return 1;

} return 0; } +static int pageNotOnDirtyList(PCache *pCache, PgHdr *pPg){ + PgHdr *p; + for(p=pCache->pDirty; p; p=p->pDirtyNext){ + if( p==pPg ) return 0; + } + return 1; +} +#else +# define pageOnDirtyList(A,B) 1 +# define pageNotOnDirtyList(A,B) 1 #endif /*

@@ -52389,7 +53685,7 @@ pCache = pPg->pCache;

assert( pCache!=0 ); /* Every page has an associated PCache */ if( pPg->flags & PGHDR_CLEAN ){ assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */ - assert( !pageOnDirtyList(pCache, pPg) );/* CLEAN pages not on dirty list */ + assert( pageNotOnDirtyList(pCache, pPg) );/* CLEAN pages not on dirtylist */ }else{ assert( (pPg->flags & PGHDR_DIRTY)!=0 );/* If not CLEAN must be DIRTY */ assert( pPg->pDirtyNext==0 || pPg->pDirtyNext->pDirtyPrev==pPg );

@@ -52525,7 +53821,7 @@ ** suggested cache size is set to N. */

return p->szCache; }else{ i64 n; - /* IMPLEMANTATION-OF: R-59858-46238 If the argument N is negative, then the + /* IMPLEMENTATION-OF: R-59858-46238 If the argument N is negative, then the ** number of cache pages is adjusted to be a number of pages that would ** use approximately abs(N*1024) bytes of memory based on the current ** page size. */
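
For the negative-N branch described by requirement R-59858-46238, the conversion from a memory budget to a page count is roughly the following (a sketch only; the real code also accounts for per-page overhead):

#include <stdint.h>

/* Negative cache_size values are a memory budget in KiB; positive values
** are already a page count. */
static int64_t suggested_cache_pages(int64_t szCache, int64_t szPage){
  if( szCache>=0 ) return szCache;
  return (-1024*szCache)/szPage;
}

/* Example: PRAGMA cache_size=-2000 with a 4096-byte page size yields
** (2000*1024)/4096 = 500 cache pages. */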

@@ -53013,7 +54309,7 @@ return result.pDirty;

} /* -** Sort the list of pages in accending order by pgno. Pages are +** Sort the list of pages in ascending order by pgno. Pages are ** connected by pDirty pointers. The pDirtyPrev pointers are ** corrupted by this sort. **

@@ -53072,14 +54368,14 @@ **

** This is not the total number of pages referenced, but the sum of the ** reference count for all pages. */ -SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache *pCache){ +SQLITE_PRIVATE i64 sqlite3PcacheRefCount(PCache *pCache){ return pCache->nRefSum; } /* ** Return the number of references to the page supplied as an argument. */ -SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr *p){ +SQLITE_PRIVATE i64 sqlite3PcachePageRefcount(PgHdr *p){ return p->nRef; }

@@ -53253,7 +54549,7 @@ **

** If N is positive, then N pages worth of memory are allocated using a single ** sqlite3Malloc() call and that memory is used for the first N pages allocated. ** Or if N is negative, then -1024*N bytes of memory are allocated and used -** for as many pages as can be accomodated. +** for as many pages as can be accommodated. ** ** Only one of (2) or (3) can be used. Once the memory available to (2) or ** (3) is exhausted, subsequent allocations fail over to the general-purpose

@@ -53287,7 +54583,7 @@ ** pointers must be either 4 or 8-byte aligned). As this structure is located

** in memory directly after the associated page data, if the database is ** corrupt, code at the b-tree layer may overread the page buffer and ** read part of this structure before the corruption is detected. This -** can cause a valgrind error if the unitialized gap is accessed. Using u16 +** can cause a valgrind error if the uninitialized gap is accessed. Using u16 ** ensures there is no such gap, and therefore no bytes of uninitialized ** memory in the structure. **

@@ -54507,7 +55803,7 @@ **

** The TEST primitive includes a "batch" number. The TEST primitive ** will only see elements that were inserted before the last change ** in the batch number. In other words, if an INSERT occurs between -** two TESTs where the TESTs have the same batch nubmer, then the +** two TESTs where the TESTs have the same batch number, then the ** value added by the INSERT will not be visible to the second TEST. ** The initial batch number is zero, so if the very first TEST contains ** a non-zero batch number, it will see all prior INSERTs.

@@ -55039,6 +56335,7 @@ # define sqlite3WalHeapMemory(z) 0

# define sqlite3WalFramesize(z) 0 # define sqlite3WalFindFrame(x,y,z) 0 # define sqlite3WalFile(x) 0 +# undef SQLITE_USE_SEH #else #define WAL_SAVEPOINT_NDATA 4

@@ -55145,6 +56442,10 @@ SQLITE_PRIVATE int sqlite3WalWriteLock(Wal *pWal, int bLock);

SQLITE_PRIVATE void sqlite3WalDb(Wal *pWal, sqlite3 *db); #endif +#ifdef SQLITE_USE_SEH +SQLITE_PRIVATE int sqlite3WalSystemErrno(Wal*); +#endif + #endif /* ifndef SQLITE_OMIT_WAL */ #endif /* SQLITE_WAL_H */

@@ -55430,7 +56731,7 @@ ** to read or write data returns an error. Eventually, once all

** outstanding transactions have been abandoned, the pager is able to ** transition back to OPEN state, discarding the contents of the ** page-cache and any other in-memory state at the same time. Everything -** is reloaded from disk (and, if necessary, hot-journal rollback peformed) +** is reloaded from disk (and, if necessary, hot-journal rollback performed) ** when a read-transaction is next opened on the pager (transitioning ** the pager into READER state). At that point the system has recovered ** from the error.

@@ -56621,9 +57922,32 @@ }else{

memset(zHeader, 0, sizeof(aJournalMagic)+4); } + + /* The random check-hash initializer */ - sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit); + if( pPager->journalMode!=PAGER_JOURNALMODE_MEMORY ){ + sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit); + } +#ifdef SQLITE_DEBUG + else{ + /* The Pager.cksumInit variable is usually randomized above to protect + ** against there being existing records in the journal file. This is + ** dangerous, as following a crash they may be mistaken for records + ** written by the current transaction and rolled back into the database + ** file, causing corruption. The following assert statements verify + ** that this is not required in "journal_mode=memory" mode, as in that + ** case the journal file is always 0 bytes in size at this point. + ** It is advantageous to avoid the sqlite3_randomness() call if possible + ** as it takes the global PRNG mutex. */ + i64 sz = 0; + sqlite3OsFileSize(pPager->jfd, &sz); + assert( sz==0 ); + assert( pPager->journalOff==journalHdrOffset(pPager) ); + assert( sqlite3JournalIsInMemory(pPager->jfd) ); + } +#endif put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit); + /* The initial database size */ put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbOrigSize); /* The assumed sector size for this process */
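
For reference, the header fields written in this hunk sit at fixed offsets after the 8-byte magic. The sketch below lays them out as a struct purely for illustration; the real code writes each field with put32bits() because the on-disk format is big-endian, and the trailing page-size field is not visible in this hunk (an assumption based on the surrounding source):

typedef struct JournalHdrSketch {
  unsigned char aMagic[8];  /* aJournalMagic[] (or zeros for no-sync)      */
  unsigned int  nRec;       /* record count, 0xffffffff while journaling   */
  unsigned int  cksumInit;  /* random checksum seed, unless MEMORY journal */
  unsigned int  dbOrigSize; /* database size in pages at transaction start */
  unsigned int  sectorSize; /* assumed sector size for this process        */
  unsigned int  pageSize;   /* database page size (assumed field)          */
} JournalHdrSketch;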

@@ -56803,7 +58127,7 @@ ** + 4 bytes: N (length of super-journal name in bytes, no nul-terminator).

** + 4 bytes: super-journal name checksum. ** + 8 bytes: aJournalMagic[]. ** -** The super-journal page checksum is the sum of the bytes in thesuper-journal +** The super-journal page checksum is the sum of the bytes in the super-journal ** name, where each byte is interpreted as a signed 8-bit integer. ** ** If zSuper is a NULL pointer (occurs for a single database transaction),

@@ -56856,7 +58180,7 @@ return rc;

} pPager->journalOff += (nSuper+20); - /* If the pager is in peristent-journal mode, then the physical + /* If the pager is in persistent-journal mode, then the physical ** journal-file may extend past the end of the super-journal name ** and 8 bytes of magic data just written to the file. This is ** dangerous because the code to rollback a hot-journal file

@@ -57026,7 +58350,7 @@ }

/* ** This function is called whenever an IOERR or FULL error that requires -** the pager to transition into the ERROR state may ahve occurred. +** the pager to transition into the ERROR state may have occurred. ** The first argument is a pointer to the pager structure, the second ** the error-code about to be returned by a pager API function. The ** value returned is a copy of the second argument to this function.

@@ -57266,6 +58590,9 @@ pPager->setSuper = 0;

return (rc==SQLITE_OK?rc2:rc); } + +/* Forward reference */ +static int pager_playback(Pager *pPager, int isHot); /* ** Execute a rollback if a transaction is active and unlock the

@@ -57295,13 +58622,28 @@ }else if( !pPager->exclusiveMode ){

assert( pPager->eState==PAGER_READER ); pager_end_transaction(pPager, 0, 0); } + }else if( pPager->eState==PAGER_ERROR + && pPager->journalMode==PAGER_JOURNALMODE_MEMORY + && isOpen(pPager->jfd) + ){ + /* Special case for a ROLLBACK due to I/O error with an in-memory + ** journal: We have to rollback immediately, before the journal is + ** closed, because once it is closed, all content is forgotten. */ + int errCode = pPager->errCode; + u8 eLock = pPager->eLock; + pPager->eState = PAGER_OPEN; + pPager->errCode = SQLITE_OK; + pPager->eLock = EXCLUSIVE_LOCK; + pager_playback(pPager, 1); + pPager->errCode = errCode; + pPager->eLock = eLock; } pager_unlock(pPager); } /* ** Parameter aData must point to a buffer of pPager->pageSize bytes -** of data. Compute and return a checksum based ont the contents of the +** of data. Compute and return a checksum based on the contents of the ** page of data and the current value of pPager->cksumInit. ** ** This is not a real checksum. It is really just the sum of the
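
The checksum mentioned in the corrected comment at the end of this hunk is deliberately cheap. A hedged sketch of its shape, the per-journal seed plus a sparse sum over the page bytes (the exact sampling stride here is an assumption, not taken from the hunk):

#include <stdint.h>

static uint32_t pager_cksum_sketch(uint32_t cksumInit,
                                   const unsigned char *aData, int pageSize){
  uint32_t cksum = cksumInit;          /* per-journal random seed */
  int i;
  for(i=pageSize-200; i>0; i-=200){    /* sample the page sparsely */
    cksum += aData[i];
  }
  return cksum;
}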

@@ -57734,6 +59076,8 @@ static int pager_truncate(Pager *pPager, Pgno nPage){

int rc = SQLITE_OK; assert( pPager->eState!=PAGER_ERROR ); assert( pPager->eState!=PAGER_READER ); + PAGERTRACE(("Truncate %d npage %u\n", PAGERID(pPager), nPage)); + if( isOpen(pPager->fd) && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN)

@@ -58064,7 +59408,7 @@ /* If there was a super-journal and this routine will return success,

** see if it is possible to delete the super-journal. */ assert( zSuper==&pPager->pTmpSpace[4] ); - memset(&zSuper[-4], 0, 4); + memset(pPager->pTmpSpace, 0, 4); rc = pager_delsuper(pPager, zSuper); testcase( rc!=SQLITE_OK ); }

@@ -58265,7 +59609,7 @@

assert( pPager->pWal ); assert( pList ); #ifdef SQLITE_DEBUG - /* Verify that the page list is in accending order */ + /* Verify that the page list is in ascending order */ for(p=pList; p && p->pDirty; p=p->pDirty){ assert( p->pgno < p->pDirty->pgno ); }

@@ -58396,7 +59740,7 @@

#ifndef SQLITE_OMIT_WAL /* ** Check if the *-wal file that corresponds to the database opened by pPager -** exists if the database is not empy, or verify that the *-wal file does +** exists if the database is not empty, or verify that the *-wal file does ** not exist (by deleting it) if the database file is empty. ** ** If the database is not empty and the *-wal file exists, open the pager

@@ -58685,7 +60029,6 @@ **

** Numeric values associated with these states are OFF==1, NORMAL=2, ** and FULL=3. */ -#ifndef SQLITE_OMIT_PAGER_PRAGMAS SQLITE_PRIVATE void sqlite3PagerSetFlags( Pager *pPager, /* The pager to set safety level for */ unsigned pgFlags /* Various flags */

@@ -58720,7 +60063,6 @@ }else{

pPager->doNotSpill |= SPILLFLAG_OFF; } } -#endif /* ** The following global variable is incremented whenever the library

@@ -59808,11 +61150,7 @@ Pager *pPager = 0; /* Pager object to allocate and return */

int rc = SQLITE_OK; /* Return code */ int tempFile = 0; /* True for temp files (incl. in-memory files) */ int memDb = 0; /* True if this is an in-memory file */ -#ifndef SQLITE_OMIT_DESERIALIZE int memJM = 0; /* Memory journal mode */ -#else -# define memJM 0 -#endif int readOnly = 0; /* True if this is a read-only file */ int journalFileSize; /* Bytes to allocate for each journal fd */ char *zPathname = 0; /* Full path to database file */

@@ -59822,7 +61160,6 @@ int pcacheSize = sqlite3PcacheSize(); /* Bytes to allocate for PCache */

u32 szPageDflt = SQLITE_DEFAULT_PAGE_SIZE; /* Default page size */ const char *zUri = 0; /* URI args to copy */ int nUriByte = 1; /* Number of bytes of URI args at *zUri */ - int nUri = 0; /* Number of URI parameters */ /* Figure out how much space is required for each journal file-handle ** (there are two of them, the main journal and the sub-journal). */

@@ -59870,7 +61207,6 @@ z = zUri = &zFilename[sqlite3Strlen30(zFilename)+1];

while( *z ){ z += strlen(z)+1; z += strlen(z)+1; - nUri++; } nUriByte = (int)(&z[1] - zUri); assert( nUriByte>=1 );

@@ -59933,12 +61269,13 @@ ** that is used by sqlite3_filename_database() and kin also depend on the

** specific formatting and order of the various filenames, so if the format ** changes here, be sure to change it there as well. */ + assert( SQLITE_PTRSIZE==sizeof(Pager*) ); pPtr = (u8 *)sqlite3MallocZero( ROUND8(sizeof(*pPager)) + /* Pager structure */ ROUND8(pcacheSize) + /* PCache object */ ROUND8(pVfs->szOsFile) + /* The main db file */ journalFileSize * 2 + /* The two journal files */ - sizeof(pPager) + /* Space to hold a pointer */ + SQLITE_PTRSIZE + /* Space to hold a pointer */ 4 + /* Database prefix */ nPathname + 1 + /* database filename */ nUriByte + /* query parameters */

@@ -59959,7 +61296,7 @@ pPager->fd = (sqlite3_file*)pPtr; pPtr += ROUND8(pVfs->szOsFile);

pPager->sjfd = (sqlite3_file*)pPtr; pPtr += journalFileSize; pPager->jfd = (sqlite3_file*)pPtr; pPtr += journalFileSize; assert( EIGHT_BYTE_ALIGNMENT(pPager->jfd) ); - memcpy(pPtr, &pPager, sizeof(pPager)); pPtr += sizeof(pPager); + memcpy(pPtr, &pPager, SQLITE_PTRSIZE); pPtr += SQLITE_PTRSIZE; /* Fill in the Pager.zFilename and pPager.zQueryParam fields */ pPtr += 4; /* Skip zero prefix */

@@ -60013,9 +61350,7 @@ if( zFilename && zFilename[0] ){

int fout = 0; /* VFS flags returned by xOpen() */ rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout); assert( !memDb ); -#ifndef SQLITE_OMIT_DESERIALIZE pPager->memVfs = memJM = (fout&SQLITE_OPEN_MEMORY)!=0; -#endif readOnly = (fout&SQLITE_OPEN_READONLY)!=0; /* If the file was successfully opened for read/write access,

@@ -60126,18 +61461,7 @@ pPager->changeCountDone = pPager->tempFile;

pPager->memDb = (u8)memDb; pPager->readOnly = (u8)readOnly; assert( useJournal || pPager->tempFile ); - pPager->noSync = pPager->tempFile; - if( pPager->noSync ){ - assert( pPager->fullSync==0 ); - assert( pPager->extraSync==0 ); - assert( pPager->syncFlags==0 ); - assert( pPager->walSyncFlags==0 ); - }else{ - pPager->fullSync = 1; - pPager->extraSync = 0; - pPager->syncFlags = SQLITE_SYNC_NORMAL; - pPager->walSyncFlags = SQLITE_SYNC_NORMAL | (SQLITE_SYNC_NORMAL<<2); - } + sqlite3PagerSetFlags(pPager, (SQLITE_DEFAULT_SYNCHRONOUS+1)|PAGER_CACHESPILL); /* pPager->pFirst = 0; */ /* pPager->pFirstSynced = 0; */ /* pPager->pLast = 0; */

@@ -60163,15 +61487,18 @@ }

/* ** Return the sqlite3_file for the main database given the name -** of the corresonding WAL or Journal name as passed into +** of the corresponding WAL or Journal name as passed into ** xOpen. */ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char *zName){ Pager *pPager; + const char *p; while( zName[-1]!=0 || zName[-2]!=0 || zName[-3]!=0 || zName[-4]!=0 ){ zName--; } - pPager = *(Pager**)(zName - 4 - sizeof(Pager*)); + p = zName - 4 - sizeof(Pager*); + assert( EIGHT_BYTE_ALIGNMENT(p) ); + pPager = *(Pager**)p; return pPager->fd; }
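
This lookup works because of how the pager allocation a few hunks earlier lays out its single buffer: the Pager pointer is stored (8-byte aligned, SQLITE_PTRSIZE bytes) immediately before a four-byte zero prefix, which in turn precedes the database filename and the journal/WAL names. A schematic sketch with illustrative names only:

/* ... | Pager* | 00 00 00 00 | "file.db\0" | uri params | "file.db-journal\0" | "file.db-wal\0"
**
** Walking any of the trailing names backwards until four consecutive zero
** bytes are found lands on the prefix; the owner pointer sits just before
** it: */
static void *owner_from_name(const char *zName, int szPtr){
  while( zName[-1]!=0 || zName[-2]!=0 || zName[-3]!=0 || zName[-4]!=0 ){
    zName--;
  }
  return *(void**)(zName - 4 - szPtr);
}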

@@ -60666,6 +61993,10 @@ assert( !isOpen(pPager->fd) || !MEMDB );

if( !isOpen(pPager->fd) || pPager->dbSize<pgno || noContent ){ if( pgno>pPager->mxPgno ){ rc = SQLITE_FULL; + if( pgno<=pPager->dbSize ){ + sqlite3PcacheRelease(pPg); + pPg = 0; + } goto pager_acquire_err; } if( noContent ){

@@ -60801,8 +62132,20 @@ Pgno pgno, /* Page number to fetch */

DbPage **ppPage, /* Write a pointer to the page here */ int flags /* PAGER_GET_XXX flags */ ){ - /* printf("PAGE %u\n", pgno); fflush(stdout); */ +#if 0 /* Trace page fetch by setting to 1 */ + int rc; + printf("PAGE %u\n", pgno); + fflush(stdout); + rc = pPager->xGet(pPager, pgno, ppPage, flags); + if( rc ){ + printf("PAGE %u failed with 0x%02x\n", pgno, rc); + fflush(stdout); + } + return rc; +#else + /* Normal, high-speed version of sqlite3PagerGet() */ return pPager->xGet(pPager, pgno, ppPage, flags); +#endif } /*

@@ -60830,10 +62173,12 @@

/* ** Release a page reference. ** -** The sqlite3PagerUnref() and sqlite3PagerUnrefNotNull() may only be -** used if we know that the page being released is not the last page. +** The sqlite3PagerUnref() and sqlite3PagerUnrefNotNull() may only be used +** if we know that the page being released is not the last reference to page1. ** The btree layer always holds page1 open until the end, so these first -** to routines can be used to release any page other than BtShared.pPage1. +** two routines can be used to release any page other than BtShared.pPage1. +** The assert() at tag-20230419-2 proves that this constraint is always +** honored. ** ** Use sqlite3PagerUnrefPageOne() to release page1. This latter routine ** checks the total number of outstanding pages and if the number of

@@ -60849,7 +62194,7 @@ }else{

sqlite3PcacheRelease(pPg); } /* Do not use this routine to release the last reference to page1 */ - assert( sqlite3PcacheRefCount(pPager->pPCache)>0 ); + assert( sqlite3PcacheRefCount(pPager->pPCache)>0 ); /* tag-20230419-2 */ } SQLITE_PRIVATE void sqlite3PagerUnref(DbPage *pPg){ if( pPg ) sqlite3PagerUnrefNotNull(pPg);

@@ -61398,7 +62743,7 @@ #else

# define DIRECT_MODE isDirectMode #endif - if( !pPager->changeCountDone && ALWAYS(pPager->dbSize>0) ){ + if( !pPager->changeCountDone && pPager->dbSize>0 ){ PgHdr *pPgHdr; /* Reference to page 1 */ assert( !pPager->tempFile && isOpen(pPager->fd) );

@@ -61676,6 +63021,13 @@ if( bBatch ){

rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_BEGIN_ATOMIC_WRITE, 0); if( rc==SQLITE_OK ){ rc = pager_write_pagelist(pPager, pList); + if( rc==SQLITE_OK && pPager->dbSize>pPager->dbFileSize ){ + char *pTmp = pPager->pTmpSpace; + int szPage = (int)pPager->pageSize; + memset(pTmp, 0, szPage); + rc = sqlite3OsWrite(pPager->fd, pTmp, szPage, + ((i64)pPager->dbSize*pPager->pageSize)-szPage); + } if( rc==SQLITE_OK ){ rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_COMMIT_ATOMIC_WRITE, 0); }

@@ -62138,7 +63490,11 @@ ** sqlite3_uri_parameter() and sqlite3_filename_database() and friends.

*/ SQLITE_PRIVATE const char *sqlite3PagerFilename(const Pager *pPager, int nullIfMemDb){ static const char zFake[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; - return (nullIfMemDb && pPager->memDb) ? &zFake[4] : pPager->zFilename; + if( nullIfMemDb && (pPager->memDb || sqlite3IsMemdb(pPager->pVfs)) ){ + return &zFake[4]; + }else{ + return pPager->zFilename; + } } /*

@@ -62438,7 +63794,7 @@ /* Change the journal mode. */

assert( pPager->eState!=PAGER_ERROR ); pPager->journalMode = (u8)eMode; - /* When transistioning from TRUNCATE or PERSIST to any other journal + /* When transitioning from TRUNCATE or PERSIST to any other journal ** mode except WAL, unless the pager is in locking_mode=exclusive mode, ** delete the journal file. */

@@ -62483,7 +63839,7 @@ pager_unlock(pPager);

} assert( state==pPager->eState ); } - }else if( eMode==PAGER_JOURNALMODE_OFF ){ + }else if( eMode==PAGER_JOURNALMODE_OFF || eMode==PAGER_JOURNALMODE_MEMORY ){ sqlite3OsClose(pPager->jfd); } }

@@ -62605,13 +63961,15 @@ ** is obtained instead, immediately release it.

*/ static int pagerExclusiveLock(Pager *pPager){ int rc; /* Return code */ + u8 eOrigLock; /* Original lock */ - assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK ); + assert( pPager->eLock>=SHARED_LOCK ); + eOrigLock = pPager->eLock; rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); if( rc!=SQLITE_OK ){ /* If the attempt to grab the exclusive lock failed, release the ** pending lock that may have been obtained instead. */ - pagerUnlockDb(pPager, SHARED_LOCK); + pagerUnlockDb(pPager, eOrigLock); } return rc;

@@ -62864,6 +64222,12 @@ return sqlite3WalFramesize(pPager->pWal);

} #endif +#ifdef SQLITE_USE_SEH +SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager *pPager){ + return sqlite3WalSystemErrno(pPager->pWal); +} +#endif + #endif /* SQLITE_OMIT_DISKIO */ /************** End of pager.c ***********************************************/

@@ -63154,7 +64518,7 @@ ** is SQLITE_SHM_NLOCK==8 and WAL_NREADER==5.

** ** Technically, the various VFSes are free to implement these locks however ** they see fit. However, compatibility is encouraged so that VFSes can -** interoperate. The standard implemention used on both unix and windows +** interoperate. The standard implementation used on both unix and windows ** is for the index number to indicate a byte offset into the ** WalCkptInfo.aLock[] array in the wal-index header. In other words, all ** locks are on the shm file. The WALINDEX_LOCK_OFFSET constant (which

@@ -63230,7 +64594,7 @@ ** holds read-lock K, then the value in aReadMark[K] is no greater than

** the mxFrame for that reader. The value READMARK_NOT_USED (0xffffffff) ** for any aReadMark[] means that entry is unused. aReadMark[0] is ** a special case; its value is never used and it exists as a place-holder -** to avoid having to offset aReadMark[] indexs by one. Readers holding +** to avoid having to offset aReadMark[] indexes by one. Readers holding ** WAL_READ_LOCK(0) always ignore the entire WAL and read all content ** directly from the database. **

@@ -63398,7 +64762,15 @@ u32 minFrame; /* Ignore wal frames before this one */

u32 iReCksum; /* On commit, recalculate checksums from here */ const char *zWalName; /* Name of WAL file */ u32 nCkpt; /* Checkpoint sequence counter in the wal-header */ +#ifdef SQLITE_USE_SEH + u32 lockMask; /* Mask of locks held */ + void *pFree; /* Pointer to sqlite3_free() if exception thrown */ + u32 *pWiValue; /* Value to write into apWiData[iWiPg] */ + int iWiPg; /* Write pWiValue into apWiData[iWiPg] */ + int iSysErrno; /* System error code following exception */ +#endif #ifdef SQLITE_DEBUG + int nSehTry; /* Number of nested SEH_TRY{} blocks */ u8 lockError; /* True if a locking error has occurred */ #endif #ifdef SQLITE_ENABLE_SNAPSHOT

@@ -63481,6 +64853,113 @@ sizeof(ht_slot)*HASHTABLE_NSLOT + HASHTABLE_NPAGE*sizeof(u32) \

) /* +** Structured Exception Handling (SEH) is a Windows-specific technique +** for catching exceptions raised while accessing memory-mapped files. +** +** The -DSQLITE_USE_SEH compile-time option means to use SEH to catch and +** deal with system-level errors that arise during WAL -shm file processing. +** Without this compile-time option, any system-level faults that appear +** while accessing the memory-mapped -shm file will cause a process-wide +** signal to be deliver, which will more than likely cause the entire +** process to exit. +*/ +#ifdef SQLITE_USE_SEH +#include <Windows.h> + +/* Beginning of a block of code in which an exception might occur */ +# define SEH_TRY __try { \ + assert( walAssertLockmask(pWal) && pWal->nSehTry==0 ); \ + VVA_ONLY(pWal->nSehTry++); + +/* The end of a block of code in which an exception might occur */ +# define SEH_EXCEPT(X) \ + VVA_ONLY(pWal->nSehTry--); \ + assert( pWal->nSehTry==0 ); \ + } __except( sehExceptionFilter(pWal, GetExceptionCode(), GetExceptionInformation() ) ){ X } + +/* Simulate a memory-mapping fault in the -shm file for testing purposes */ +# define SEH_INJECT_FAULT sehInjectFault(pWal) + +/* +** The second argument is the return value of GetExceptionCode() for the +** current exception. Return EXCEPTION_EXECUTE_HANDLER if the exception code +** indicates that the exception may have been caused by accessing the *-shm +** file mapping. Or EXCEPTION_CONTINUE_SEARCH otherwise. +*/ +static int sehExceptionFilter(Wal *pWal, int eCode, EXCEPTION_POINTERS *p){ + VVA_ONLY(pWal->nSehTry--); + if( eCode==EXCEPTION_IN_PAGE_ERROR ){ + if( p && p->ExceptionRecord && p->ExceptionRecord->NumberParameters>=3 ){ + /* From MSDN: For this type of exception, the first element of the + ** ExceptionInformation[] array is a read-write flag - 0 if the exception + ** was thrown while reading, 1 if while writing. The second element is + ** the virtual address being accessed. The "third array element specifies + ** the underlying NTSTATUS code that resulted in the exception". */ + pWal->iSysErrno = (int)p->ExceptionRecord->ExceptionInformation[2]; + } + return EXCEPTION_EXECUTE_HANDLER; + } + return EXCEPTION_CONTINUE_SEARCH; +} + +/* +** If one is configured, invoke the xTestCallback callback with 650 as +** the argument. If it returns true, throw the same exception that is +** thrown by the system if the *-shm file mapping is accessed after it +** has been invalidated. +*/ +static void sehInjectFault(Wal *pWal){ + int res; + assert( pWal->nSehTry>0 ); + + res = sqlite3FaultSim(650); + if( res!=0 ){ + ULONG_PTR aArg[3]; + aArg[0] = 0; + aArg[1] = 0; + aArg[2] = (ULONG_PTR)res; + RaiseException(EXCEPTION_IN_PAGE_ERROR, 0, 3, (const ULONG_PTR*)aArg); + } +} + +/* +** There are two ways to use this macro. To set a pointer to be freed +** if an exception is thrown: +** +** SEH_FREE_ON_ERROR(0, pPtr); +** +** and to cancel the same: +** +** SEH_FREE_ON_ERROR(pPtr, 0); +** +** In the first case, there must not already be a pointer registered to +** be freed. In the second case, pPtr must be the registered pointer. +*/ +#define SEH_FREE_ON_ERROR(X,Y) \ + assert( (X==0 || Y==0) && pWal->pFree==X ); pWal->pFree = Y + +/* +** There are two ways to use this macro. 
To arrange for pWal->apWiData[iPg] +** to be set to pValue if an exception is thrown: +** +** SEH_SET_ON_ERROR(iPg, pValue); +** +** and to cancel the same: +** +** SEH_SET_ON_ERROR(0, 0); +*/ +#define SEH_SET_ON_ERROR(X,Y) pWal->iWiPg = X; pWal->pWiValue = Y + +#else +# define SEH_TRY VVA_ONLY(pWal->nSehTry++); +# define SEH_EXCEPT(X) VVA_ONLY(pWal->nSehTry--); assert( pWal->nSehTry==0 ); +# define SEH_INJECT_FAULT assert( pWal->nSehTry>0 ); +# define SEH_FREE_ON_ERROR(X,Y) +# define SEH_SET_ON_ERROR(X,Y) +#endif /* ifdef SQLITE_USE_SEH */ + + +/* ** Obtain a pointer to the iPage'th page of the wal-index. The wal-index ** is broken into pages of WALINDEX_PGSZ bytes. Wal-index pages are ** numbered from zero.
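
Outside of the macros above, the underlying Windows mechanism looks like the following standalone sketch (MSVC-specific, illustrative only, and not part of this diff): a read through a file mapping is wrapped in __try/__except so that an EXCEPTION_IN_PAGE_ERROR becomes an error code instead of terminating the process.

#include <windows.h>
#include <stddef.h>

/* Returns 0 on success, -1 if the mapped read faulted. */
static int read_mapped_byte(const volatile unsigned char *pMap, size_t iOff,
                            unsigned char *pOut){
  __try {
    *pOut = pMap[iOff];                /* may raise EXCEPTION_IN_PAGE_ERROR */
    return 0;
  }
  __except( GetExceptionCode()==EXCEPTION_IN_PAGE_ERROR
              ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH ){
    return -1;                         /* surface it as an I/O error */
  }
}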

@@ -63552,6 +65031,7 @@ Wal *pWal, /* The WAL context */

int iPage, /* The page we seek */ volatile u32 **ppPage /* Write the page pointer here */ ){ + SEH_INJECT_FAULT; if( pWal->nWiData<=iPage || (*ppPage = pWal->apWiData[iPage])==0 ){ return walIndexPageRealloc(pWal, iPage, ppPage); }

@@ -63563,6 +65043,7 @@ ** Return a pointer to the WalCkptInfo structure in the wal-index.

*/ static volatile WalCkptInfo *walCkptInfo(Wal *pWal){ assert( pWal->nWiData>0 && pWal->apWiData[0] ); + SEH_INJECT_FAULT; return (volatile WalCkptInfo*)&(pWal->apWiData[0][sizeof(WalIndexHdr)/2]); }

@@ -63571,6 +65052,7 @@ ** Return a pointer to the WalIndexHdr structure in the wal-index.

*/ static volatile WalIndexHdr *walIndexHdr(Wal *pWal){ assert( pWal->nWiData>0 && pWal->apWiData[0] ); + SEH_INJECT_FAULT; return (volatile WalIndexHdr*)pWal->apWiData[0]; }

@@ -63616,19 +65098,40 @@

assert( nByte>=8 ); assert( (nByte&0x00000007)==0 ); assert( nByte<=65536 ); + assert( nByte%4==0 ); - if( nativeCksum ){ + if( !nativeCksum ){ + do { + s1 += BYTESWAP32(aData[0]) + s2; + s2 += BYTESWAP32(aData[1]) + s1; + aData += 2; + }while( aData<aEnd ); + }else if( nByte%64==0 ){ do { s1 += *aData++ + s2; s2 += *aData++ + s1; + s1 += *aData++ + s2; + s2 += *aData++ + s1; + s1 += *aData++ + s2; + s2 += *aData++ + s1; + s1 += *aData++ + s2; + s2 += *aData++ + s1; + s1 += *aData++ + s2; + s2 += *aData++ + s1; + s1 += *aData++ + s2; + s2 += *aData++ + s1; + s1 += *aData++ + s2; + s2 += *aData++ + s1; + s1 += *aData++ + s2; + s2 += *aData++ + s1; }while( aData<aEnd ); }else{ do { - s1 += BYTESWAP32(aData[0]) + s2; - s2 += BYTESWAP32(aData[1]) + s1; - aData += 2; + s1 += *aData++ + s2; + s2 += *aData++ + s1; }while( aData<aEnd ); } + assert( aData==aEnd ); aOut[0] = s1; aOut[1] = s2;
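
The unrolled loops above compute the same running two-word checksum as before; the change only processes 64-byte chunks when the length allows it. A compact sketch of the native-byte-order case (simplified; the real routine also handles byte-swapped input):

#include <stdint.h>
#include <stddef.h>

static void wal_cksum_sketch(const uint32_t *aData, size_t nWord,
                             uint32_t aOut[2]){
  uint32_t s1 = aOut[0], s2 = aOut[1];   /* running values carried forward */
  size_t i;
  for(i=0; i+1<nWord; i+=2){
    s1 += aData[i]   + s2;
    s2 += aData[i+1] + s1;
  }
  aOut[0] = s1;
  aOut[1] = s2;
}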

@@ -63739,7 +65242,7 @@ if( memcmp(&pWal->hdr.aSalt, &aFrame[8], 8)!=0 ){

return 0; } - /* A frame is only valid if the page number is creater than zero. + /* A frame is only valid if the page number is greater than zero. */ pgno = sqlite3Get4byte(&aFrame[0]); if( pgno==0 ){

@@ -63747,7 +65250,7 @@ return 0;

} /* A frame is only valid if a checksum of the WAL header, - ** all prior frams, the first 16 bytes of this frame-header, + ** all prior frames, the first 16 bytes of this frame-header, ** and the frame-data matches the checksum in the last 8 ** bytes of this frame-header. */

@@ -63807,12 +65310,18 @@ SQLITE_SHM_LOCK | SQLITE_SHM_SHARED);

WALTRACE(("WAL%p: acquire SHARED-%s %s\n", pWal, walLockName(lockIdx), rc ? "failed" : "ok")); VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && (rc&0xFF)!=SQLITE_BUSY); ) +#ifdef SQLITE_USE_SEH + if( rc==SQLITE_OK ) pWal->lockMask |= (1 << lockIdx); +#endif return rc; } static void walUnlockShared(Wal *pWal, int lockIdx){ if( pWal->exclusiveMode ) return; (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1, SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED); +#ifdef SQLITE_USE_SEH + pWal->lockMask &= ~(1 << lockIdx); +#endif WALTRACE(("WAL%p: release SHARED-%s\n", pWal, walLockName(lockIdx))); } static int walLockExclusive(Wal *pWal, int lockIdx, int n){

@@ -63823,12 +65332,20 @@ SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE);

WALTRACE(("WAL%p: acquire EXCLUSIVE-%s cnt=%d %s\n", pWal, walLockName(lockIdx), n, rc ? "failed" : "ok")); VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && (rc&0xFF)!=SQLITE_BUSY); ) +#ifdef SQLITE_USE_SEH + if( rc==SQLITE_OK ){ + pWal->lockMask |= (((1<<n)-1) << (SQLITE_SHM_NLOCK+lockIdx)); + } +#endif return rc; } static void walUnlockExclusive(Wal *pWal, int lockIdx, int n){ if( pWal->exclusiveMode ) return; (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, n, SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE); +#ifdef SQLITE_USE_SEH + pWal->lockMask &= ~(((1<<n)-1) << (SQLITE_SHM_NLOCK+lockIdx)); +#endif WALTRACE(("WAL%p: release EXCLUSIVE-%s cnt=%d\n", pWal, walLockName(lockIdx), n)); }

@@ -63920,6 +65437,7 @@ ** Return the page number associated with frame iFrame in this WAL.

*/ static u32 walFramePgno(Wal *pWal, u32 iFrame){ int iHash = walFramePage(iFrame); + SEH_INJECT_FAULT; if( iHash==0 ){ return pWal->apWiData[0][WALINDEX_HDR_SIZE/sizeof(u32) + iFrame - 1]; }

@@ -64179,6 +65697,7 @@

/* Malloc a buffer to read frames into. */ szFrame = szPage + WAL_FRAME_HDRSIZE; aFrame = (u8 *)sqlite3_malloc64(szFrame + WALINDEX_PGSZ); + SEH_FREE_ON_ERROR(0, aFrame); if( !aFrame ){ rc = SQLITE_NOMEM_BKPT; goto recovery_error;

@@ -64197,6 +65716,7 @@ u32 nHdr, nHdr32;

rc = walIndexPage(pWal, iPg, (volatile u32**)&aShare); assert( aShare!=0 || rc!=SQLITE_OK ); if( aShare==0 ) break; + SEH_SET_ON_ERROR(iPg, aShare); pWal->apWiData[iPg] = aPrivate; for(iFrame=iFirst; iFrame<=iLast; iFrame++){

@@ -64224,6 +65744,7 @@ aFrameCksum[1] = pWal->hdr.aFrameCksum[1];

} } pWal->apWiData[iPg] = aShare; + SEH_SET_ON_ERROR(0,0); nHdr = (iPg==0 ? WALINDEX_HDR_SIZE : 0); nHdr32 = nHdr / sizeof(u32); #ifndef SQLITE_SAFER_WALINDEX_RECOVERY

@@ -64254,9 +65775,11 @@ }

} } #endif + SEH_INJECT_FAULT; if( iFrame<=iLast ) break; } + SEH_FREE_ON_ERROR(aFrame, 0); sqlite3_free(aFrame); }

@@ -64284,6 +65807,7 @@ pInfo->aReadMark[i] = pWal->hdr.mxFrame;

}else{ pInfo->aReadMark[i] = READMARK_NOT_USED; } + SEH_INJECT_FAULT; walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); }else if( rc!=SQLITE_BUSY ){ goto recovery_error;

@@ -64441,7 +65965,7 @@ return rc;

} /* -** Change the size to which the WAL file is trucated on each reset. +** Change the size to which the WAL file is truncated on each reset. */ SQLITE_PRIVATE void sqlite3WalLimit(Wal *pWal, i64 iLimit){ if( pWal ) pWal->mxWalSize = iLimit;

@@ -64667,23 +66191,16 @@ nSegment = walFramePage(iLast) + 1;

nByte = sizeof(WalIterator) + (nSegment-1)*sizeof(struct WalSegment) + iLast*sizeof(ht_slot); - p = (WalIterator *)sqlite3_malloc64(nByte); + p = (WalIterator *)sqlite3_malloc64(nByte + + sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast) + ); if( !p ){ return SQLITE_NOMEM_BKPT; } memset(p, 0, nByte); p->nSegment = nSegment; - - /* Allocate temporary space used by the merge-sort routine. This block - ** of memory will be freed before this function returns. - */ - aTmp = (ht_slot *)sqlite3_malloc64( - sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast) - ); - if( !aTmp ){ - rc = SQLITE_NOMEM_BKPT; - } - + aTmp = (ht_slot*)&(((u8*)p)[nByte]); + SEH_FREE_ON_ERROR(0, p); for(i=walFramePage(nBackfill+1); rc==SQLITE_OK && i<nSegment; i++){ WalHashLoc sLoc;
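
The change above folds the merge-sort scratch buffer into the same allocation as the WalIterator, so a single pointer can be registered with SEH_FREE_ON_ERROR and a single sqlite3_free() releases everything. The general pattern, with illustrative names:

#include <stdlib.h>
#include <string.h>

/* Allocate a body of nBody bytes plus nScratch bytes of trailing scratch
** space in one shot; *ppScratch points just past the body. */
static void *alloc_with_scratch(size_t nBody, size_t nScratch, void **ppScratch){
  unsigned char *p = malloc(nBody + nScratch);
  if( p ){
    memset(p, 0, nBody);
    *ppScratch = &p[nBody];
  }
  return p;            /* one free(p) releases both regions */
}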

@@ -64711,9 +66228,8 @@ p->aSegment[i].aIndex = aIndex;

p->aSegment[i].aPgno = (u32 *)sLoc.aPgno; } } - sqlite3_free(aTmp); - if( rc!=SQLITE_OK ){ + SEH_FREE_ON_ERROR(p, 0); walIteratorFree(p); p = 0; }

@@ -64939,13 +66455,13 @@ */

mxSafeFrame = pWal->hdr.mxFrame; mxPage = pWal->hdr.nPage; for(i=1; i<WAL_NREADER; i++){ - u32 y = AtomicLoad(pInfo->aReadMark+i); + u32 y = AtomicLoad(pInfo->aReadMark+i); SEH_INJECT_FAULT; if( mxSafeFrame>y ){ assert( y<=pWal->hdr.mxFrame ); rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1); if( rc==SQLITE_OK ){ u32 iMark = (i==1 ? mxSafeFrame : READMARK_NOT_USED); - AtomicStore(pInfo->aReadMark+i, iMark); + AtomicStore(pInfo->aReadMark+i, iMark); SEH_INJECT_FAULT; walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); }else if( rc==SQLITE_BUSY ){ mxSafeFrame = y;

@@ -64966,8 +66482,7 @@ if( pIter

&& (rc = walBusyLock(pWal,xBusy,pBusyArg,WAL_READ_LOCK(0),1))==SQLITE_OK ){ u32 nBackfill = pInfo->nBackfill; - - pInfo->nBackfillAttempted = mxSafeFrame; + pInfo->nBackfillAttempted = mxSafeFrame; SEH_INJECT_FAULT; /* Sync the WAL to disk */ rc = sqlite3OsSync(pWal->pWalFd, CKPT_SYNC_FLAGS(sync_flags));

@@ -64998,6 +66513,7 @@ /* Iterate through the contents of the WAL, copying data to the db file */

while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){ i64 iOffset; assert( walFramePgno(pWal, iFrame)==iDbpage ); + SEH_INJECT_FAULT; if( AtomicLoad(&db->u1.isInterrupted) ){ rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT; break;

@@ -65027,7 +66543,7 @@ rc = sqlite3OsSync(pWal->pDbFd, CKPT_SYNC_FLAGS(sync_flags));

} } if( rc==SQLITE_OK ){ - AtomicStore(&pInfo->nBackfill, mxSafeFrame); + AtomicStore(&pInfo->nBackfill, mxSafeFrame); SEH_INJECT_FAULT; } }

@@ -65049,6 +66565,7 @@ ** the next process to write to the database restarts the wal file.

*/ if( rc==SQLITE_OK && eMode!=SQLITE_CHECKPOINT_PASSIVE ){ assert( pWal->writeLock ); + SEH_INJECT_FAULT; if( pInfo->nBackfill<pWal->hdr.mxFrame ){ rc = SQLITE_BUSY; }else if( eMode>=SQLITE_CHECKPOINT_RESTART ){

@@ -65080,6 +66597,7 @@ }

} walcheckpoint_out: + SEH_FREE_ON_ERROR(pIter, 0); walIteratorFree(pIter); return rc; }

@@ -65102,6 +66620,93 @@ sqlite3_log(rx, "cannot limit WAL size: %s", pWal->zWalName);

} } +#ifdef SQLITE_USE_SEH +/* +** This is the "standard" exception handler used in a few places to handle +** an exception thrown by reading from the *-shm mapping after it has become +** invalid in SQLITE_USE_SEH builds. It is used as follows: +** +** SEH_TRY { ... } +** SEH_EXCEPT( rc = walHandleException(pWal); ) +** +** This function does three things: +** +** 1) Determines the locks that should be held, based on the contents of +** the Wal.readLock, Wal.writeLock and Wal.ckptLock variables. All other +** held locks are assumed to be transient locks that would have been +** released had the exception not been thrown and are dropped. +** +** 2) Frees the pointer at Wal.pFree, if any, using sqlite3_free(). +** +** 3) Set pWal->apWiData[pWal->iWiPg] to pWal->pWiValue if not NULL +** +** 4) Returns SQLITE_IOERR. +*/ +static int walHandleException(Wal *pWal){ + if( pWal->exclusiveMode==0 ){ + static const int S = 1; + static const int E = (1<<SQLITE_SHM_NLOCK); + int ii; + u32 mUnlock = pWal->lockMask & ~( + (pWal->readLock<0 ? 0 : (S << WAL_READ_LOCK(pWal->readLock))) + | (pWal->writeLock ? (E << WAL_WRITE_LOCK) : 0) + | (pWal->ckptLock ? (E << WAL_CKPT_LOCK) : 0) + ); + for(ii=0; ii<SQLITE_SHM_NLOCK; ii++){ + if( (S<<ii) & mUnlock ) walUnlockShared(pWal, ii); + if( (E<<ii) & mUnlock ) walUnlockExclusive(pWal, ii, 1); + } + } + sqlite3_free(pWal->pFree); + pWal->pFree = 0; + if( pWal->pWiValue ){ + pWal->apWiData[pWal->iWiPg] = pWal->pWiValue; + pWal->pWiValue = 0; + } + return SQLITE_IOERR_IN_PAGE; +} + +/* +** Assert that the Wal.lockMask mask, which indicates the locks held +** by the connenction, is consistent with the Wal.readLock, Wal.writeLock +** and Wal.ckptLock variables. To be used as: +** +** assert( walAssertLockmask(pWal) ); +*/ +static int walAssertLockmask(Wal *pWal){ + if( pWal->exclusiveMode==0 ){ + static const int S = 1; + static const int E = (1<<SQLITE_SHM_NLOCK); + u32 mExpect = ( + (pWal->readLock<0 ? 0 : (S << WAL_READ_LOCK(pWal->readLock))) + | (pWal->writeLock ? (E << WAL_WRITE_LOCK) : 0) + | (pWal->ckptLock ? (E << WAL_CKPT_LOCK) : 0) +#ifdef SQLITE_ENABLE_SNAPSHOT + | (pWal->pSnapshot ? (pWal->lockMask & (1 << WAL_CKPT_LOCK)) : 0) +#endif + ); + assert( mExpect==pWal->lockMask ); + } + return 1; +} + +/* +** Return and zero the "system error" field set when an +** EXCEPTION_IN_PAGE_ERROR exception is caught. +*/ +SQLITE_PRIVATE int sqlite3WalSystemErrno(Wal *pWal){ + int iRet = 0; + if( pWal ){ + iRet = pWal->iSysErrno; + pWal->iSysErrno = 0; + } + return iRet; +} + +#else +# define walAssertLockmask(x) 1 +#endif /* ifdef SQLITE_USE_SEH */ + /* ** Close a connection to a log file. */
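
The S/E constants give lockMask a simple layout: bit i records a shared lock on xShmLock slot i, and bit SQLITE_SHM_NLOCK+i records an exclusive lock on the same slot. A sketch of that encoding (slot assignments in the example are arbitrary, not taken from this hunk):

#define SHM_NLOCK            8                                /* SQLITE_SHM_NLOCK */
#define SHARED_BIT(i)        (1u << (i))                      /* "S << i" above   */
#define EXCLUSIVE_BITS(i,n)  (((1u<<(n))-1) << (SHM_NLOCK+(i)))  /* "E" block     */

/* Example: a connection holding a shared lock on slot 5 and an exclusive
** lock on slot 0 carries lockMask == SHARED_BIT(5) | EXCLUSIVE_BITS(0,1). */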

@@ -65115,6 +66720,8 @@ ){

int rc = SQLITE_OK; if( pWal ){ int isDelete = 0; /* True to unlink wal and wal-index files */ + + assert( walAssertLockmask(pWal) ); /* If an EXCLUSIVE lock can be obtained on the database file (using the ** ordinary, rollback-mode locking methods, this guarantees that the

@@ -65140,7 +66747,7 @@ pWal->pDbFd, SQLITE_FCNTL_PERSIST_WAL, &bPersist

); if( bPersist!=1 ){ /* Try to delete the WAL file if the checkpoint completed and - ** fsyned (rc==SQLITE_OK) and if we are not in persistent-wal + ** fsynced (rc==SQLITE_OK) and if we are not in persistent-wal ** mode (!bPersist) */ isDelete = 1; }else if( pWal->mxWalSize>=0 ){

@@ -65207,7 +66814,7 @@ ** reordering the reads and writes. TSAN and similar tools can sometimes

** give false-positive warnings about these accesses because the tools do not ** account for the double-read and the memory barrier. The use of mutexes ** here would be problematic as the memory being accessed is potentially - ** shared among multiple processes and not all mutex implementions work + ** shared among multiple processes and not all mutex implementations work ** reliably in that environment. */ aHdr = walIndexHdr(pWal);

@@ -65658,6 +67265,7 @@

assert( pWal->nWiData>0 ); assert( pWal->apWiData[0]!=0 ); pInfo = walCkptInfo(pWal); + SEH_INJECT_FAULT; if( !useWal && AtomicLoad(&pInfo->nBackfill)==pWal->hdr.mxFrame #ifdef SQLITE_ENABLE_SNAPSHOT && (pWal->pSnapshot==0 || pWal->hdr.mxFrame==0)

@@ -65707,7 +67315,7 @@ mxFrame = pWal->pSnapshot->mxFrame;

} #endif for(i=1; i<WAL_NREADER; i++){ - u32 thisMark = AtomicLoad(pInfo->aReadMark+i); + u32 thisMark = AtomicLoad(pInfo->aReadMark+i); SEH_INJECT_FAULT; if( mxReadMark<=thisMark && thisMark<=mxFrame ){ assert( thisMark!=READMARK_NOT_USED ); mxReadMark = thisMark;

@@ -65773,7 +67381,7 @@ ** that it can read version A from the database file. However, since

** we can guarantee that the checkpointer that set nBackfill could not ** see any pages past pWal->hdr.mxFrame, this problem does not come up. */ - pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; + pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; SEH_INJECT_FAULT; walShmBarrier(pWal); if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr))

@@ -65789,6 +67397,54 @@ }

#ifdef SQLITE_ENABLE_SNAPSHOT /* +** This function does the work of sqlite3WalSnapshotRecover(). +*/ +static int walSnapshotRecover( + Wal *pWal, /* WAL handle */ + void *pBuf1, /* Temp buffer pWal->szPage bytes in size */ + void *pBuf2 /* Temp buffer pWal->szPage bytes in size */ +){ + int szPage = (int)pWal->szPage; + int rc; + i64 szDb; /* Size of db file in bytes */ + + rc = sqlite3OsFileSize(pWal->pDbFd, &szDb); + if( rc==SQLITE_OK ){ + volatile WalCkptInfo *pInfo = walCkptInfo(pWal); + u32 i = pInfo->nBackfillAttempted; + for(i=pInfo->nBackfillAttempted; i>AtomicLoad(&pInfo->nBackfill); i--){ + WalHashLoc sLoc; /* Hash table location */ + u32 pgno; /* Page number in db file */ + i64 iDbOff; /* Offset of db file entry */ + i64 iWalOff; /* Offset of wal file entry */ + + rc = walHashGet(pWal, walFramePage(i), &sLoc); + if( rc!=SQLITE_OK ) break; + assert( i - sLoc.iZero - 1 >=0 ); + pgno = sLoc.aPgno[i-sLoc.iZero-1]; + iDbOff = (i64)(pgno-1) * szPage; + + if( iDbOff+szPage<=szDb ){ + iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE; + rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff); + + if( rc==SQLITE_OK ){ + rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff); + } + + if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){ + break; + } + } + + pInfo->nBackfillAttempted = i-1; + } + } + + return rc; +} + +/* ** Attempt to reduce the value of the WalCkptInfo.nBackfillAttempted ** variable so that older snapshots can be accessed. To do this, loop ** through all wal frames from nBackfillAttempted to (nBackfill+1),

@@ -65813,50 +67469,21 @@

assert( pWal->readLock>=0 ); rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1); if( rc==SQLITE_OK ){ - volatile WalCkptInfo *pInfo = walCkptInfo(pWal); - int szPage = (int)pWal->szPage; - i64 szDb; /* Size of db file in bytes */ - - rc = sqlite3OsFileSize(pWal->pDbFd, &szDb); - if( rc==SQLITE_OK ){ - void *pBuf1 = sqlite3_malloc(szPage); - void *pBuf2 = sqlite3_malloc(szPage); - if( pBuf1==0 || pBuf2==0 ){ - rc = SQLITE_NOMEM; - }else{ - u32 i = pInfo->nBackfillAttempted; - for(i=pInfo->nBackfillAttempted; i>AtomicLoad(&pInfo->nBackfill); i--){ - WalHashLoc sLoc; /* Hash table location */ - u32 pgno; /* Page number in db file */ - i64 iDbOff; /* Offset of db file entry */ - i64 iWalOff; /* Offset of wal file entry */ - - rc = walHashGet(pWal, walFramePage(i), &sLoc); - if( rc!=SQLITE_OK ) break; - assert( i - sLoc.iZero - 1 >=0 ); - pgno = sLoc.aPgno[i-sLoc.iZero-1]; - iDbOff = (i64)(pgno-1) * szPage; - - if( iDbOff+szPage<=szDb ){ - iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE; - rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff); - - if( rc==SQLITE_OK ){ - rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff); - } - - if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){ - break; - } - } - - pInfo->nBackfillAttempted = i-1; - } + void *pBuf1 = sqlite3_malloc(pWal->szPage); + void *pBuf2 = sqlite3_malloc(pWal->szPage); + if( pBuf1==0 || pBuf2==0 ){ + rc = SQLITE_NOMEM; + }else{ + pWal->ckptLock = 1; + SEH_TRY { + rc = walSnapshotRecover(pWal, pBuf1, pBuf2); } + SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) + pWal->ckptLock = 0; + } - sqlite3_free(pBuf1); - sqlite3_free(pBuf2); - } + sqlite3_free(pBuf1); + sqlite3_free(pBuf2); walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1); }

@@ -65865,28 +67492,20 @@ }

#endif /* SQLITE_ENABLE_SNAPSHOT */ /* -** Begin a read transaction on the database. -** -** This routine used to be called sqlite3OpenSnapshot() and with good reason: -** it takes a snapshot of the state of the WAL and wal-index for the current -** instant in time. The current thread will continue to use this snapshot. -** Other threads might append new content to the WAL and wal-index but -** that extra content is ignored by the current thread. -** -** If the database contents have changes since the previous read -** transaction, then *pChanged is set to 1 before returning. The -** Pager layer will use this to know that its cache is stale and -** needs to be flushed. +** This function does the work of sqlite3WalBeginReadTransaction() (see +** below). That function simply calls this one inside an SEH_TRY{...} block. */ -SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ +static int walBeginReadTransaction(Wal *pWal, int *pChanged){ int rc; /* Return code */ int cnt = 0; /* Number of TryBeginRead attempts */ #ifdef SQLITE_ENABLE_SNAPSHOT + int ckptLock = 0; int bChanged = 0; WalIndexHdr *pSnapshot = pWal->pSnapshot; #endif assert( pWal->ckptLock==0 ); + assert( pWal->nSehTry>0 ); #ifdef SQLITE_ENABLE_SNAPSHOT if( pSnapshot ){

@@ -65909,7 +67528,7 @@

if( rc!=SQLITE_OK ){ return rc; } - pWal->ckptLock = 1; + ckptLock = 1; } #endif

@@ -65973,16 +67592,38 @@ }

} /* Release the shared CKPT lock obtained above. */ - if( pWal->ckptLock ){ + if( ckptLock ){ assert( pSnapshot ); walUnlockShared(pWal, WAL_CKPT_LOCK); - pWal->ckptLock = 0; } #endif return rc; } /* +** Begin a read transaction on the database. +** +** This routine used to be called sqlite3OpenSnapshot() and with good reason: +** it takes a snapshot of the state of the WAL and wal-index for the current +** instant in time. The current thread will continue to use this snapshot. +** Other threads might append new content to the WAL and wal-index but +** that extra content is ignored by the current thread. +** +** If the database contents have changes since the previous read +** transaction, then *pChanged is set to 1 before returning. The +** Pager layer will use this to know that its cache is stale and +** needs to be flushed. +*/ +SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ + int rc; + SEH_TRY { + rc = walBeginReadTransaction(pWal, pChanged); + } + SEH_EXCEPT( rc = walHandleException(pWal); ) + return rc; +} + +/* ** Finish with a read transaction. All this does is release the ** read-lock. */

@@ -66002,7 +67643,7 @@ **

** Return SQLITE_OK if successful, or an error code if an error occurs. If an ** error does occur, the final value of *piRead is undefined. */ -SQLITE_PRIVATE int sqlite3WalFindFrame( +static int walFindFrame( Wal *pWal, /* WAL handle */ Pgno pgno, /* Database page number to read data for */ u32 *piRead /* OUT: Frame number (or zero) */

@@ -66065,6 +67706,7 @@ return rc;

} nCollide = HASHTABLE_NSLOT; iKey = walHash(pgno); + SEH_INJECT_FAULT; while( (iH = AtomicLoad(&sLoc.aHash[iKey]))!=0 ){ u32 iFrame = iH + sLoc.iZero; if( iFrame<=iLast && iFrame>=pWal->minFrame && sLoc.aPgno[iH-1]==pgno ){

@@ -66102,6 +67744,30 @@ return SQLITE_OK;

} /* +** Search the wal file for page pgno. If found, set *piRead to the frame that +** contains the page. Otherwise, if pgno is not in the wal file, set *piRead +** to zero. +** +** Return SQLITE_OK if successful, or an error code if an error occurs. If an +** error does occur, the final value of *piRead is undefined. +** +** The difference between this function and walFindFrame() is that this +** function wraps walFindFrame() in an SEH_TRY{...} block. +*/ +SQLITE_PRIVATE int sqlite3WalFindFrame( + Wal *pWal, /* WAL handle */ + Pgno pgno, /* Database page number to read data for */ + u32 *piRead /* OUT: Frame number (or zero) */ +){ + int rc; + SEH_TRY { + rc = walFindFrame(pWal, pgno, piRead); + } + SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) + return rc; +} + +/* ** Read the contents of frame iRead from the wal file into buffer pOut ** (which is nOut bytes in size). Return SQLITE_OK if successful, or an ** error code otherwise.

@@ -66182,12 +67848,17 @@ /* If another connection has written to the database file since the

** time the read transaction on this connection was started, then ** the write is disallowed. */ - if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){ + SEH_TRY { + if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){ + rc = SQLITE_BUSY_SNAPSHOT; + } + } + SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) + + if( rc!=SQLITE_OK ){ walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); pWal->writeLock = 0; - rc = SQLITE_BUSY_SNAPSHOT; } - return rc; }

@@ -66223,30 +67894,33 @@ if( ALWAYS(pWal->writeLock) ){

Pgno iMax = pWal->hdr.mxFrame; Pgno iFrame; - /* Restore the clients cache of the wal-index header to the state it - ** was in before the client began writing to the database. - */ - memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr)); - - for(iFrame=pWal->hdr.mxFrame+1; - ALWAYS(rc==SQLITE_OK) && iFrame<=iMax; - iFrame++ - ){ - /* This call cannot fail. Unless the page for which the page number - ** is passed as the second argument is (a) in the cache and - ** (b) has an outstanding reference, then xUndo is either a no-op - ** (if (a) is false) or simply expels the page from the cache (if (b) - ** is false). - ** - ** If the upper layer is doing a rollback, it is guaranteed that there - ** are no outstanding references to any page other than page 1. And - ** page 1 is never written to the log until the transaction is - ** committed. As a result, the call to xUndo may not fail. + SEH_TRY { + /* Restore the clients cache of the wal-index header to the state it + ** was in before the client began writing to the database. */ - assert( walFramePgno(pWal, iFrame)!=1 ); - rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame)); + memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr)); + + for(iFrame=pWal->hdr.mxFrame+1; + ALWAYS(rc==SQLITE_OK) && iFrame<=iMax; + iFrame++ + ){ + /* This call cannot fail. Unless the page for which the page number + ** is passed as the second argument is (a) in the cache and + ** (b) has an outstanding reference, then xUndo is either a no-op + ** (if (a) is false) or simply expels the page from the cache (if (b) + ** is false). + ** + ** If the upper layer is doing a rollback, it is guaranteed that there + ** are no outstanding references to any page other than page 1. And + ** page 1 is never written to the log until the transaction is + ** committed. As a result, the call to xUndo may not fail. + */ + assert( walFramePgno(pWal, iFrame)!=1 ); + rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame)); + } + if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal); } - if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal); + SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) } return rc; }

@@ -66290,7 +67964,10 @@ if( aWalData[0]<pWal->hdr.mxFrame ){

pWal->hdr.mxFrame = aWalData[0]; pWal->hdr.aFrameCksum[0] = aWalData[1]; pWal->hdr.aFrameCksum[1] = aWalData[2]; - walCleanupHash(pWal); + SEH_TRY { + walCleanupHash(pWal); + } + SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) } return rc;

@@ -66471,7 +68148,7 @@ /*

** Write a set of frames to the log. The caller must hold the write-lock ** on the log file (obtained using sqlite3WalBeginWriteTransaction()). */ -SQLITE_PRIVATE int sqlite3WalFrames( +static int walFrames( Wal *pWal, /* Wal handle to write to */ int szPage, /* Database page-size in bytes */ PgHdr *pList, /* List of dirty pages to write */

@@ -66559,7 +68236,9 @@ rc = sqlite3OsSync(pWal->pWalFd, CKPT_SYNC_FLAGS(sync_flags));

if( rc ) return rc; } } - assert( (int)pWal->szPage==szPage ); + if( (int)pWal->szPage!=szPage ){ + return SQLITE_CORRUPT_BKPT; /* TH3 test case: cov1/corrupt155.test */ + } /* Setup information needed to write frames into the WAL */ w.pWal = pWal;

@@ -66580,7 +68259,7 @@ ** set Wal.writeLock to WAL_WRITELOCK_RECKSUM - indicating that

** checksums must be recomputed when the transaction is committed. */ if( iFirst && (p->pDirty || isCommit==0) ){ u32 iWrite = 0; - VVA_ONLY(rc =) sqlite3WalFindFrame(pWal, p->pgno, &iWrite); + VVA_ONLY(rc =) walFindFrame(pWal, p->pgno, &iWrite); assert( rc==SQLITE_OK || iWrite==0 ); if( iWrite>=iFirst ){ i64 iOff = walFrameOffset(iWrite, szPage) + WAL_FRAME_HDRSIZE;

@@ -66700,6 +68379,29 @@ return rc;

} /* +** Write a set of frames to the log. The caller must hold the write-lock +** on the log file (obtained using sqlite3WalBeginWriteTransaction()). +** +** The difference between this function and walFrames() is that this +** function wraps walFrames() in an SEH_TRY{...} block. +*/ +SQLITE_PRIVATE int sqlite3WalFrames( + Wal *pWal, /* Wal handle to write to */ + int szPage, /* Database page-size in bytes */ + PgHdr *pList, /* List of dirty pages to write */ + Pgno nTruncate, /* Database size after this commit */ + int isCommit, /* True if this is a commit */ + int sync_flags /* Flags to pass to OsSync() (or 0) */ +){ + int rc; + SEH_TRY { + rc = walFrames(pWal, szPage, pList, nTruncate, isCommit, sync_flags); + } + SEH_EXCEPT( rc = walHandleException(pWal); ) + return rc; +} + +/* ** This routine is called to implement sqlite3_wal_checkpoint() and ** related interfaces. **

@@ -66778,30 +68480,33 @@ }

/* Read the wal-index header. */ - if( rc==SQLITE_OK ){ - walDisableBlocking(pWal); - rc = walIndexReadHdr(pWal, &isChanged); - (void)walEnableBlocking(pWal); - if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){ - sqlite3OsUnfetch(pWal->pDbFd, 0, 0); + SEH_TRY { + if( rc==SQLITE_OK ){ + walDisableBlocking(pWal); + rc = walIndexReadHdr(pWal, &isChanged); + (void)walEnableBlocking(pWal); + if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){ + sqlite3OsUnfetch(pWal->pDbFd, 0, 0); + } } - } - /* Copy data from the log to the database file. */ - if( rc==SQLITE_OK ){ - - if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){ - rc = SQLITE_CORRUPT_BKPT; - }else{ - rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf); - } + /* Copy data from the log to the database file. */ + if( rc==SQLITE_OK ){ + if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){ + rc = SQLITE_CORRUPT_BKPT; + }else{ + rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags,zBuf); + } - /* If no error occurred, set the output variables. */ - if( rc==SQLITE_OK || rc==SQLITE_BUSY ){ - if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame; - if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill); + /* If no error occurred, set the output variables. */ + if( rc==SQLITE_OK || rc==SQLITE_BUSY ){ + if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame; + SEH_INJECT_FAULT; + if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill); + } } } + SEH_EXCEPT( rc = walHandleException(pWal); ) if( isChanged ){ /* If a new wal-index header was loaded before the checkpoint was

@@ -66878,7 +68583,9 @@ ** happen if the connection is actually in exclusive mode (as no xShmLock

** locks are taken in this case). Nor should the pager attempt to ** upgrade to exclusive-mode following such an error. */ +#ifndef SQLITE_USE_SEH assert( pWal->readLock>=0 || pWal->lockError ); +#endif assert( pWal->readLock>=0 || (op<=0 && pWal->exclusiveMode==0) ); if( op==0 ){

@@ -66979,16 +68686,19 @@ ** lock is released before returning.

*/ SQLITE_PRIVATE int sqlite3WalSnapshotCheck(Wal *pWal, sqlite3_snapshot *pSnapshot){ int rc; - rc = walLockShared(pWal, WAL_CKPT_LOCK); - if( rc==SQLITE_OK ){ - WalIndexHdr *pNew = (WalIndexHdr*)pSnapshot; - if( memcmp(pNew->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt)) - || pNew->mxFrame<walCkptInfo(pWal)->nBackfillAttempted - ){ - rc = SQLITE_ERROR_SNAPSHOT; - walUnlockShared(pWal, WAL_CKPT_LOCK); + SEH_TRY { + rc = walLockShared(pWal, WAL_CKPT_LOCK); + if( rc==SQLITE_OK ){ + WalIndexHdr *pNew = (WalIndexHdr*)pSnapshot; + if( memcmp(pNew->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt)) + || pNew->mxFrame<walCkptInfo(pWal)->nBackfillAttempted + ){ + rc = SQLITE_ERROR_SNAPSHOT; + walUnlockShared(pWal, WAL_CKPT_LOCK); + } } } + SEH_EXCEPT( rc = walHandleException(pWal); ) return rc; }

@@ -67219,7 +68929,7 @@ ** length integer is 1 to 9 bytes where the lower 7 bits of each

** byte are used. The integer consists of all bytes that have bit 8 set and ** the first byte with bit 8 clear. The most significant byte of the integer ** appears first. A variable-length integer may not be more than 9 bytes long. -** As a special case, all 8 bytes of the 9th byte are used as data. This +** As a special case, all 8 bits of the 9th byte are used as data. This ** allows a 64-bit integer to be encoded in 9 bytes. ** ** 0x00 becomes 0x00000000

@@ -67227,7 +68937,7 @@ ** 0x7f becomes 0x0000007f

** 0x81 0x00 becomes 0x00000080 ** 0x82 0x00 becomes 0x00000100 ** 0x80 0x7f becomes 0x0000007f -** 0x8a 0x91 0xd1 0xac 0x78 becomes 0x12345678 +** 0x81 0x91 0xd1 0xac 0x78 becomes 0x12345678 ** 0x81 0x81 0x81 0x81 0x01 becomes 0x10204081 ** ** Variable length integers are used for rowids and to hold the number of
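
A small decoder matching the corrected description and examples above (a sketch, not the library's own varint routine):

#include <stdint.h>

/* Decode a big-endian 7-bits-per-byte varint; the 9th byte, if present,
** contributes all 8 of its bits. Returns the number of bytes consumed. */
static int varint_get(const unsigned char *p, uint64_t *pVal){
  uint64_t v = 0;
  int i;
  for(i=0; i<8; i++){
    v = (v<<7) | (p[i] & 0x7f);
    if( (p[i] & 0x80)==0 ){ *pVal = v; return i+1; }
  }
  *pVal = (v<<8) | p[8];               /* special 9-byte case */
  return 9;
}

/* E.g. decoding {0x81,0x91,0xd1,0xac,0x78} yields 0x12345678 in 5 bytes,
** matching the corrected example in the comment above. */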

@@ -67310,7 +69020,7 @@ ** An instance of this object stores information about each a single database

** page that has been loaded into memory. The information in this object ** is derived from the raw on-disk page content. ** -** As each database page is loaded into memory, the pager allocats an +** As each database page is loaded into memory, the pager allocates an ** instance of this object and zeros the first 8 bytes. (This is the ** "extra" information associated with each page of the pager.) **

@@ -67603,7 +69313,7 @@ */

#define BTCF_WriteFlag 0x01 /* True if a write cursor */ #define BTCF_ValidNKey 0x02 /* True if info.nKey is valid */ #define BTCF_ValidOvfl 0x04 /* True if aOverflow is valid */ -#define BTCF_AtLast 0x08 /* Cursor is pointing ot the last entry */ +#define BTCF_AtLast 0x08 /* Cursor is pointing to the last entry */ #define BTCF_Incrblob 0x10 /* True if an incremental I/O handle */ #define BTCF_Multiple 0x20 /* Maybe another cursor on the same btree */ #define BTCF_Pinned 0x40 /* Cursor is busy and cannot be moved */

@@ -67721,15 +69431,15 @@ ** (sqliteMallocRaw), it is not possible to use conditional compilation.

** So, this macro is defined instead. */ #ifndef SQLITE_OMIT_AUTOVACUUM -#define ISAUTOVACUUM (pBt->autoVacuum) +#define ISAUTOVACUUM(pBt) (pBt->autoVacuum) #else -#define ISAUTOVACUUM 0 +#define ISAUTOVACUUM(pBt) 0 #endif /* -** This structure is passed around through all the sanity checking routines -** in order to keep track of some global state information. +** This structure is passed around through all the PRAGMA integrity_check +** checking routines in order to keep track of some global state information. ** ** The aRef[] array is allocated so that there is 1 bit for each page in ** the database. As the integrity-check proceeds, for each page used in
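Making the BtShared pointer an explicit macro argument means call sites no longer depend on a local variable that happens to be named pBt. A minimal sketch of the idea, with invented names:

    /* Sketch only: the parameterized form works in any function, regardless
    ** of what the caller named its BtShared pointer. */
    typedef struct BtSharedSketch { int autoVacuum; } BtSharedSketch;

    #ifndef OMIT_AUTOVACUUM_SKETCH
    # define IS_AUTOVACUUM_SKETCH(pBt) ((pBt)->autoVacuum)
    #else
    # define IS_AUTOVACUUM_SKETCH(pBt) 0
    #endif

    static int usesAutoVacuum(BtSharedSketch *pShared){  /* not named pBt */
      return IS_AUTOVACUUM_SKETCH(pShared);
    }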

@@ -67742,13 +69452,15 @@ struct IntegrityCk {

BtShared *pBt; /* The tree being checked out */ Pager *pPager; /* The associated pager. Also accessible by pBt->pPager */ u8 *aPgRef; /* 1 bit per page in the db (see above) */ - Pgno nPage; /* Number of pages in the database */ + Pgno nCkPage; /* Pages in the database. 0 for partial check */ int mxErr; /* Stop accumulating errors when this reaches zero */ int nErr; /* Number of messages written to zErrMsg so far */ - int bOomFault; /* A memory allocation error has occurred */ + int rc; /* SQLITE_OK, SQLITE_NOMEM, or SQLITE_INTERRUPT */ + u32 nStep; /* Number of steps into the integrity_check process */ const char *zPfx; /* Error message prefix */ - Pgno v1; /* Value for first %u substitution in zPfx */ - int v2; /* Value for second %d substitution in zPfx */ + Pgno v0; /* Value for first %u substitution in zPfx (root page) */ + Pgno v1; /* Value for second %u substitution in zPfx (current pg) */ + int v2; /* Value for third %d substitution in zPfx */ StrAccum errMsg; /* Accumulate the error message text here */ u32 *heap; /* Min-heap used for analyzing cell coverage */ sqlite3 *db; /* Database connection running the check */

@@ -67764,7 +69476,7 @@ #define put4byte sqlite3Put4byte

/* ** get2byteAligned(), unlike get2byte(), requires that its argument point to a -** two-byte aligned address. get2bytea() is only used for accessing the +** two-byte aligned address. get2byteAligned() is only used for accessing the ** cell addresses in a btree header. */ #if SQLITE_BYTEORDER==4321

@@ -67941,7 +69653,7 @@ ** reset out from under us.

** ** There is a corresponding leave-all procedures. ** -** Enter the mutexes in accending order by BtShared pointer address +** Enter the mutexes in ascending order by BtShared pointer address ** to avoid the possibility of deadlock when two threads with ** two or more btrees in common both try to lock all their btrees ** at the same instant.

@@ -68211,8 +69923,8 @@ #ifdef SQLITE_DEBUG

int corruptPageError(int lineno, MemPage *p){ char *zMsg; sqlite3BeginBenignMalloc(); - zMsg = sqlite3_mprintf("database corruption page %d of %s", - (int)p->pgno, sqlite3PagerFilename(p->pBt->pPager, 0) + zMsg = sqlite3_mprintf("database corruption page %u of %s", + p->pgno, sqlite3PagerFilename(p->pBt->pPager, 0) ); sqlite3EndBenignMalloc(); if( zMsg ){

@@ -69021,8 +70733,25 @@ ** parameter. See the definitions of the BTREE_HINT_* macros for details.

*/ SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor *pCur, int eHintType, ...){ /* Used only by system that substitute their own storage engine */ +#ifdef SQLITE_DEBUG + if( ALWAYS(eHintType==BTREE_HINT_RANGE) ){ + va_list ap; + Expr *pExpr; + Walker w; + memset(&w, 0, sizeof(w)); + w.xExprCallback = sqlite3CursorRangeHintExprCheck; + va_start(ap, eHintType); + pExpr = va_arg(ap, Expr*); + w.u.aMem = va_arg(ap, Mem*); + va_end(ap); + assert( pExpr!=0 ); + assert( w.u.aMem!=0 ); + sqlite3WalkExpr(&w, pExpr); + } +#endif /* SQLITE_DEBUG */ } -#endif +#endif /* SQLITE_ENABLE_CURSOR_HINTS */ + /* ** Provide flag hints to the cursor.

@@ -69107,7 +70836,7 @@ assert( offset <= (int)pBt->usableSize-5 );

pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); if( eType!=pPtrmap[offset] || get4byte(&pPtrmap[offset+1])!=parent ){ - TRACE(("PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent)); + TRACE(("PTRMAP_UPDATE: %u->(%u,%u)\n", key, eType, parent)); *pRC= rc = sqlite3PagerWrite(pDbPage); if( rc==SQLITE_OK ){ pPtrmap[offset] = eType;

@@ -69306,27 +71035,31 @@ */

iKey = *pIter; if( iKey>=0x80 ){ u8 x; - iKey = ((iKey&0x7f)<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x =*++pIter) & 0x7f); + iKey = (iKey<<7) ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x10204000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<8) | (*++pIter); + iKey = (iKey<<8) ^ 0x8000 ^ (*++pIter); } } } } } + }else{ + iKey ^= 0x204000; } + }else{ + iKey ^= 0x4000; } } pIter++;
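The rewritten key decoder drops the per-byte & 0x7f masks: every continuation byte has bit 7 set, so (iKey<<7) ^ x differs from the masked form only in fixed bit positions, which the added ^ 0x4000, ^ 0x204000, ... constants cancel once the final byte is known. A standalone check of the two-byte case (an illustration, not the upstream parser):

    #include <assert.h>
    #include <stdint.h>

    int main(void){
      /* Two-byte varints: byte b0 has bit 7 set, final byte b1 does not. */
      for(unsigned b0=0x80; b0<=0xff; b0++){
        for(unsigned b1=0x00; b1<=0x7f; b1++){
          uint64_t masked = (uint64_t)((b0 & 0x7f)<<7) | b1;     /* old form */
          uint64_t xored  = (((uint64_t)b0<<7) ^ b1) ^ 0x4000;   /* new form */
          assert( masked==xored );
        }
      }
      return 0;
    }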

@@ -69403,10 +71136,11 @@ ** the space used by the cell pointer.

** ** cellSizePtrNoPayload() => table internal nodes ** cellSizePtrTableLeaf() => table leaf nodes -** cellSizePtr() => all index nodes & table leaf nodes +** cellSizePtr() => index internal nodes +** cellSizeIdxLeaf() => index leaf nodes */ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){ - u8 *pIter = pCell + pPage->childPtrSize; /* For looping over bytes of pCell */ + u8 *pIter = pCell + 4; /* For looping over bytes of pCell */ u8 *pEnd; /* End mark for a varint */ u32 nSize; /* Size value to return */

@@ -69419,6 +71153,49 @@ CellInfo debuginfo;

pPage->xParseCell(pPage, pCell, &debuginfo); #endif + assert( pPage->childPtrSize==4 ); + nSize = *pIter; + if( nSize>=0x80 ){ + pEnd = &pIter[8]; + nSize &= 0x7f; + do{ + nSize = (nSize<<7) | (*++pIter & 0x7f); + }while( *(pIter)>=0x80 && pIter<pEnd ); + } + pIter++; + testcase( nSize==pPage->maxLocal ); + testcase( nSize==(u32)pPage->maxLocal+1 ); + if( nSize<=pPage->maxLocal ){ + nSize += (u32)(pIter - pCell); + assert( nSize>4 ); + }else{ + int minLocal = pPage->minLocal; + nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4); + testcase( nSize==pPage->maxLocal ); + testcase( nSize==(u32)pPage->maxLocal+1 ); + if( nSize>pPage->maxLocal ){ + nSize = minLocal; + } + nSize += 4 + (u16)(pIter - pCell); + } + assert( nSize==debuginfo.nSize || CORRUPT_DB ); + return (u16)nSize; +} +static u16 cellSizePtrIdxLeaf(MemPage *pPage, u8 *pCell){ + u8 *pIter = pCell; /* For looping over bytes of pCell */ + u8 *pEnd; /* End mark for a varint */ + u32 nSize; /* Size value to return */ + +#ifdef SQLITE_DEBUG + /* The value returned by this function should always be the same as + ** the (CellInfo.nSize) value found by doing a full parse of the + ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of + ** this function verifies that this invariant is not violated. */ + CellInfo debuginfo; + pPage->xParseCell(pPage, pCell, &debuginfo); +#endif + + assert( pPage->childPtrSize==0 ); nSize = *pIter; if( nSize>=0x80 ){ pEnd = &pIter[8];
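The new cellSizePtrIdxLeaf applies the same local-payload rule as the other size functions: a payload of nSize bytes stays entirely on the page if nSize <= maxLocal, otherwise the local portion becomes minLocal + (nSize - minLocal) % (usableSize - 4), falling back to minLocal if that lands above maxLocal. A worked computation with made-up parameters (the real minLocal/maxLocal values are derived from the page size, not the numbers used here):

    #include <stdio.h>

    /* Local-payload rule paraphrased from the hunk above; numbers in main()
    ** are illustrative only. */
    static unsigned localPayloadSketch(unsigned nPayload, unsigned minLocal,
                                       unsigned maxLocal, unsigned usableSize){
      if( nPayload<=maxLocal ) return nPayload;        /* fits on the page */
      unsigned n = minLocal + (nPayload - minLocal) % (usableSize - 4);
      return n>maxLocal ? minLocal : n;
    }

    int main(void){
      /* e.g. usableSize=4096, minLocal=32, maxLocal=1000 (made up) */
      printf("%u\n", localPayloadSketch(500,  32, 1000, 4096));  /* 500: all local */
      printf("%u\n", localPayloadSketch(5000, 32, 1000, 4096));  /* spills: 908    */
      return 0;
    }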

@@ -69543,7 +71320,7 @@ assert( pCell!=0 );

pPage->xParseCell(pPage, pCell, &info); if( info.nLocal<info.nPayload ){ Pgno ovfl; - if( SQLITE_WITHIN(pSrc->aDataEnd, pCell, pCell+info.nLocal) ){ + if( SQLITE_OVERFLOW(pSrc->aDataEnd, pCell, pCell+info.nLocal) ){ testcase( pSrc!=pPage ); *pRC = SQLITE_CORRUPT_BKPT; return;

@@ -69644,7 +71421,7 @@ iCellLast = usableSize - 4;

iCellStart = get2byte(&data[hdr+5]); if( nCell>0 ){ temp = sqlite3PagerTempSpace(pPage->pBt->pPager); - memcpy(&temp[iCellStart], &data[iCellStart], usableSize - iCellStart); + memcpy(temp, data, usableSize); src = temp; for(i=0; i<nCell; i++){ u8 *pAddr; /* The i-th cell pointer */

@@ -69655,10 +71432,10 @@ testcase( pc==iCellLast );

/* These conditions have already been verified in btreeInitPage() ** if PRAGMA cell_size_check=ON. */ - if( pc<iCellStart || pc>iCellLast ){ + if( pc>iCellLast ){ return SQLITE_CORRUPT_PAGE(pPage); } - assert( pc>=iCellStart && pc<=iCellLast ); + assert( pc>=0 && pc<=iCellLast ); size = pPage->xCellSize(pPage, &src[pc]); cbrk -= size; if( cbrk<iCellStart || pc+size>usableSize ){

@@ -69773,7 +71550,7 @@ ** the first two bytes past the cell pointer area since presumably this

** allocation is being made in order to insert a new cell, so we will ** also end up needing a new cell pointer. */ -static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ +static SQLITE_INLINE int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ const int hdr = pPage->hdrOffset; /* Local cache of pPage->hdrOffset */ u8 * const data = pPage->aData; /* Local cache of pPage->aData */ int top; /* First byte of cell content area */

@@ -69799,13 +71576,14 @@ ** However, that integer is too large to be stored in a 2-byte unsigned

** integer, so a value of 0 is used in its place. */ pTmp = &data[hdr+5]; top = get2byte(pTmp); - assert( top<=(int)pPage->pBt->usableSize ); /* by btreeComputeFreeSpace() */ if( gap>top ){ if( top==0 && pPage->pBt->usableSize==65536 ){ top = 65536; }else{ return SQLITE_CORRUPT_PAGE(pPage); } + }else if( top>(int)pPage->pBt->usableSize ){ + return SQLITE_CORRUPT_PAGE(pPage); } /* If there is enough space between gap and top for one more cell pointer,

@@ -69867,7 +71645,7 @@ ** Adjacent freeblocks are coalesced.

** ** Even though the freeblock list was checked by btreeComputeFreeSpace(), ** that routine will not detect overlap between cells or freeblocks. Nor -** does it detect cells or freeblocks that encrouch into the reserved bytes +** does it detect cells or freeblocks that encroach into the reserved bytes ** at the end of the page. So do additional corruption checks inside this ** routine and return SQLITE_CORRUPT if any problems are found. */

@@ -69888,7 +71666,7 @@ assert( CORRUPT_DB || iStart>=pPage->hdrOffset+6+pPage->childPtrSize );

assert( CORRUPT_DB || iEnd <= pPage->pBt->usableSize ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( iSize>=4 ); /* Minimum cell size is 4 */ - assert( iStart<=pPage->pBt->usableSize-4 ); + assert( CORRUPT_DB || iStart<=pPage->pBt->usableSize-4 ); /* The list of freeblocks must be in ascending order. Find the ** spot on the list where iStart should be inserted.

@@ -69945,6 +71723,11 @@ data[hdr+7] -= nFrag;

} pTmp = &data[hdr+5]; x = get2byte(pTmp); + if( pPage->pBt->btsFlags & BTS_FAST_SECURE ){ + /* Overwrite deleted information with zeros when the secure_delete + ** option is enabled */ + memset(&data[iStart], 0, iSize); + } if( iStart<=x ){ /* The new freeblock is at the beginning of the cell content area, ** so just extend the cell content area rather than create another

@@ -69956,14 +71739,9 @@ put2byte(&data[hdr+5], iEnd);

}else{ /* Insert the new freeblock into the freelist */ put2byte(&data[iPtr], iStart); - } - if( pPage->pBt->btsFlags & BTS_FAST_SECURE ){ - /* Overwrite deleted information with zeros when the secure_delete - ** option is enabled */ - memset(&data[iStart], 0, iSize); + put2byte(&data[iStart], iFreeBlk); + put2byte(&data[iStart+2], iSize); } - put2byte(&data[iStart], iFreeBlk); - put2byte(&data[iStart+2], iSize); pPage->nFree += iOrigSize; return SQLITE_OK; }

@@ -69975,62 +71753,67 @@ **

** Only the following combinations are supported. Anything different ** indicates a corrupt database files: ** -** PTF_ZERODATA -** PTF_ZERODATA | PTF_LEAF -** PTF_LEAFDATA | PTF_INTKEY -** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF +** PTF_ZERODATA (0x02, 2) +** PTF_LEAFDATA | PTF_INTKEY (0x05, 5) +** PTF_ZERODATA | PTF_LEAF (0x0a, 10) +** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF (0x0d, 13) */ static int decodeFlags(MemPage *pPage, int flagByte){ BtShared *pBt; /* A copy of pPage->pBt */ assert( pPage->hdrOffset==(pPage->pgno==1 ? 100 : 0) ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - pPage->leaf = (u8)(flagByte>>3); assert( PTF_LEAF == 1<<3 ); - flagByte &= ~PTF_LEAF; - pPage->childPtrSize = 4-4*pPage->leaf; pBt = pPage->pBt; - if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ - /* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an - ** interior table b-tree page. */ - assert( (PTF_LEAFDATA|PTF_INTKEY)==5 ); - /* EVIDENCE-OF: R-26900-09176 A value of 13 (0x0d) means the page is a - ** leaf table b-tree page. */ - assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 ); - pPage->intKey = 1; - if( pPage->leaf ){ + pPage->max1bytePayload = pBt->max1bytePayload; + if( flagByte>=(PTF_ZERODATA | PTF_LEAF) ){ + pPage->childPtrSize = 0; + pPage->leaf = 1; + if( flagByte==(PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF) ){ pPage->intKeyLeaf = 1; pPage->xCellSize = cellSizePtrTableLeaf; pPage->xParseCell = btreeParseCellPtr; + pPage->intKey = 1; + pPage->maxLocal = pBt->maxLeaf; + pPage->minLocal = pBt->minLeaf; + }else if( flagByte==(PTF_ZERODATA | PTF_LEAF) ){ + pPage->intKey = 0; + pPage->intKeyLeaf = 0; + pPage->xCellSize = cellSizePtrIdxLeaf; + pPage->xParseCell = btreeParseCellPtrIndex; + pPage->maxLocal = pBt->maxLocal; + pPage->minLocal = pBt->minLocal; }else{ + pPage->intKey = 0; + pPage->intKeyLeaf = 0; + pPage->xCellSize = cellSizePtrIdxLeaf; + pPage->xParseCell = btreeParseCellPtrIndex; + return SQLITE_CORRUPT_PAGE(pPage); + } + }else{ + pPage->childPtrSize = 4; + pPage->leaf = 0; + if( flagByte==(PTF_ZERODATA) ){ + pPage->intKey = 0; + pPage->intKeyLeaf = 0; + pPage->xCellSize = cellSizePtr; + pPage->xParseCell = btreeParseCellPtrIndex; + pPage->maxLocal = pBt->maxLocal; + pPage->minLocal = pBt->minLocal; + }else if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ pPage->intKeyLeaf = 0; pPage->xCellSize = cellSizePtrNoPayload; pPage->xParseCell = btreeParseCellPtrNoPayload; + pPage->intKey = 1; + pPage->maxLocal = pBt->maxLeaf; + pPage->minLocal = pBt->minLeaf; + }else{ + pPage->intKey = 0; + pPage->intKeyLeaf = 0; + pPage->xCellSize = cellSizePtr; + pPage->xParseCell = btreeParseCellPtrIndex; + return SQLITE_CORRUPT_PAGE(pPage); } - pPage->maxLocal = pBt->maxLeaf; - pPage->minLocal = pBt->minLeaf; - }else if( flagByte==PTF_ZERODATA ){ - /* EVIDENCE-OF: R-43316-37308 A value of 2 (0x02) means the page is an - ** interior index b-tree page. */ - assert( (PTF_ZERODATA)==2 ); - /* EVIDENCE-OF: R-59615-42828 A value of 10 (0x0a) means the page is a - ** leaf index b-tree page. */ - assert( (PTF_ZERODATA|PTF_LEAF)==10 ); - pPage->intKey = 0; - pPage->intKeyLeaf = 0; - pPage->xCellSize = cellSizePtr; - pPage->xParseCell = btreeParseCellPtrIndex; - pPage->maxLocal = pBt->maxLocal; - pPage->minLocal = pBt->minLocal; - }else{ - /* EVIDENCE-OF: R-47608-56469 Any other value for the b-tree page type is - ** an error. 
*/ - pPage->intKey = 0; - pPage->intKeyLeaf = 0; - pPage->xCellSize = cellSizePtr; - pPage->xParseCell = btreeParseCellPtrIndex; - return SQLITE_CORRUPT_PAGE(pPage); } - pPage->max1bytePayload = pBt->max1bytePayload; return SQLITE_OK; }
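The rewritten decodeFlags accepts exactly the four flag bytes listed in the comment (2, 5, 10, 13) and treats anything else as corruption. A tiny standalone classifier, just to make the mapping concrete (names are illustrative):

    #include <stdio.h>

    /* Flag-byte values from the comment above: ZERODATA=0x02,
    ** LEAFDATA|INTKEY=0x05, ZERODATA|LEAF=0x0a, LEAFDATA|INTKEY|LEAF=0x0d. */
    static const char *pageTypeSketch(int flagByte){
      switch( flagByte ){
        case 0x02: return "interior index page";
        case 0x05: return "interior table page";
        case 0x0a: return "leaf index page";
        case 0x0d: return "leaf table page";
        default:   return "corrupt";
      }
    }

    int main(void){
      for(int f=0; f<16; f++) printf("0x%02x -> %s\n", f, pageTypeSketch(f));
      return 0;
    }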

@@ -70321,68 +72104,41 @@ }

/* ** Get a page from the pager and initialize it. -** -** If pCur!=0 then the page is being fetched as part of a moveToChild() -** call. Do additional sanity checking on the page in this case. -** And if the fetch fails, this routine must decrement pCur->iPage. -** -** The page is fetched as read-write unless pCur is not NULL and is -** a read-only cursor. -** -** If an error occurs, then *ppPage is undefined. It -** may remain unchanged, or it may be set to an invalid value. */ static int getAndInitPage( BtShared *pBt, /* The database file */ Pgno pgno, /* Number of the page to get */ MemPage **ppPage, /* Write the page pointer here */ - BtCursor *pCur, /* Cursor to receive the page, or NULL */ int bReadOnly /* True for a read-only page */ ){ int rc; DbPage *pDbPage; + MemPage *pPage; assert( sqlite3_mutex_held(pBt->mutex) ); - assert( pCur==0 || ppPage==&pCur->pPage ); - assert( pCur==0 || bReadOnly==pCur->curPagerFlags ); - assert( pCur==0 || pCur->iPage>0 ); if( pgno>btreePagecount(pBt) ){ - rc = SQLITE_CORRUPT_BKPT; - goto getAndInitPage_error1; + *ppPage = 0; + return SQLITE_CORRUPT_BKPT; } rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, bReadOnly); if( rc ){ - goto getAndInitPage_error1; + *ppPage = 0; + return rc; } - *ppPage = (MemPage*)sqlite3PagerGetExtra(pDbPage); - if( (*ppPage)->isInit==0 ){ + pPage = (MemPage*)sqlite3PagerGetExtra(pDbPage); + if( pPage->isInit==0 ){ btreePageFromDbPage(pDbPage, pgno, pBt); - rc = btreeInitPage(*ppPage); + rc = btreeInitPage(pPage); if( rc!=SQLITE_OK ){ - goto getAndInitPage_error2; + releasePage(pPage); + *ppPage = 0; + return rc; } } - assert( (*ppPage)->pgno==pgno || CORRUPT_DB ); - assert( (*ppPage)->aData==sqlite3PagerGetData(pDbPage) ); - - /* If obtaining a child page for a cursor, we must verify that the page is - ** compatible with the root page. */ - if( pCur && ((*ppPage)->nCell<1 || (*ppPage)->intKey!=pCur->curIntKey) ){ - rc = SQLITE_CORRUPT_PGNO(pgno); - goto getAndInitPage_error2; - } + assert( pPage->pgno==pgno || CORRUPT_DB ); + assert( pPage->aData==sqlite3PagerGetData(pDbPage) ); + *ppPage = pPage; return SQLITE_OK; - -getAndInitPage_error2: - releasePage(*ppPage); -getAndInitPage_error1: - if( pCur ){ - pCur->iPage--; - pCur->pPage = pCur->apPage[pCur->iPage]; - } - testcase( pgno==0 ); - assert( pgno!=0 || rc!=SQLITE_OK ); - return rc; } /*

@@ -70465,7 +72221,7 @@ ** or ptrmap page or a free page. In those cases, the following

** call to btreeInitPage() will likely return SQLITE_CORRUPT. ** But no harm is done by this. And it is very important that ** btreeInitPage() be called on every btree page so we make - ** the call for every page that comes in for re-initing. */ + ** the call for every page that comes in for re-initializing. */ btreeInitPage(pPage); } }

@@ -70644,6 +72400,9 @@ assert( sizeof(u32)==4 );

assert( sizeof(u16)==2 ); assert( sizeof(Pgno)==4 ); + /* Suppress false-positive compiler warning from PVS-Studio */ + memset(&zDbHeader[16], 0, 8); + pBt = sqlite3MallocZero( sizeof(*pBt) ); if( pBt==0 ){ rc = SQLITE_NOMEM_BKPT;

@@ -70860,7 +72619,7 @@ ** by the various routines that manipulate binary cells. Which

** can mean that fillInCell() only initializes the first 2 or 3 ** bytes of pTmpSpace, but that the first 4 bytes are copied from ** it into a database page. This is not actually a problem, but it - ** does cause a valgrind error when the 1 or 2 bytes of unitialized + ** does cause a valgrind error when the 1 or 2 bytes of uninitialized ** data is passed to system call write(). So to avoid this error, ** zero the first 4 bytes of temp space here. **

@@ -71095,7 +72854,7 @@ }

/* ** Return the number of bytes of space at the end of every page that -** are intentually left unused. This is the "reserved" space that is +** are intentionally left unused. This is the "reserved" space that is ** sometimes used by extensions. ** ** The value returned is the larger of the current reserve size and

@@ -71342,7 +73101,6 @@ || pageSize<=256

){ goto page1_init_failed; } - pBt->btsFlags |= BTS_PAGESIZE_FIXED; assert( (pageSize & 7)==0 ); /* EVIDENCE-OF: R-59310-51205 The "reserved space" size in the 1-byte ** integer at offset 20 is the number of bytes of space at the end of

@@ -71362,6 +73120,7 @@ */

releasePageOne(pPage1); pBt->usableSize = usableSize; pBt->pageSize = pageSize; + pBt->btsFlags |= BTS_PAGESIZE_FIXED; freeTempSpace(pBt); rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, pageSize-usableSize);

@@ -71381,6 +73140,7 @@ ** reserved space size cannot exceed 32. */

if( usableSize<480 ){ goto page1_init_failed; } + pBt->btsFlags |= BTS_PAGESIZE_FIXED; pBt->pageSize = pageSize; pBt->usableSize = usableSize; #ifndef SQLITE_OMIT_AUTOVACUUM

@@ -71559,7 +73319,11 @@ ** no progress. By returning SQLITE_BUSY and not invoking the busy callback

** when A already has a read lock, we encourage A to give up and let B ** proceed. */ -SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){ +static SQLITE_NOINLINE int btreeBeginTrans( + Btree *p, /* The btree in which to start the transaction */ + int wrflag, /* True to start a write transaction */ + int *pSchemaVersion /* Put schema version number here, if not NULL */ +){ BtShared *pBt = p->pBt; Pager *pPager = pBt->pPager; int rc = SQLITE_OK;

@@ -71731,6 +73495,28 @@ btreeIntegrity(p);

sqlite3BtreeLeave(p); return rc; } +SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){ + BtShared *pBt; + if( p->sharable + || p->inTrans==TRANS_NONE + || (p->inTrans==TRANS_READ && wrflag!=0) + ){ + return btreeBeginTrans(p,wrflag,pSchemaVersion); + } + pBt = p->pBt; + if( pSchemaVersion ){ + *pSchemaVersion = get4byte(&pBt->pPage1->aData[40]); + } + if( wrflag ){ + /* This call makes sure that the pager has the correct number of + ** open savepoints. If the second parameter is greater than 0 and + ** the sub-journal is not already open, then it will be opened here. + */ + return sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint); + }else{ + return SQLITE_OK; + } +} #ifndef SQLITE_OMIT_AUTOVACUUM
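The new sqlite3BtreeBeginTrans is a thin wrapper: when the handle is not sharable and a sufficient transaction is already open, it skips the full (now NOINLINE) btreeBeginTrans and only reads the schema cookie and, for a write, tops up the pager's open savepoints. The decision restated as a standalone predicate (names and enum values are illustrative):

    #include <assert.h>

    enum { TRANS_NONE_S, TRANS_READ_S, TRANS_WRITE_S };  /* sketch of p->inTrans */

    /* Mirrors the condition in the hunk: take the slow path when the handle is
    ** sharable, no transaction is open, or a read txn must be upgraded. */
    static int needsFullBeginTrans(int sharable, int inTrans, int wrflag){
      return sharable
          || inTrans==TRANS_NONE_S
          || (inTrans==TRANS_READ_S && wrflag!=0);
    }

    int main(void){
      assert(  needsFullBeginTrans(0, TRANS_NONE_S, 0) );   /* nothing open yet   */
      assert(  needsFullBeginTrans(0, TRANS_READ_S, 1) );   /* read -> write      */
      assert( !needsFullBeginTrans(0, TRANS_READ_S, 0) );   /* read already open  */
      assert( !needsFullBeginTrans(0, TRANS_WRITE_S, 1) );  /* write already open */
      return 0;
    }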

@@ -71868,7 +73654,7 @@ assert( pDbPage->pBt==pBt );

if( iDbPage<3 ) return SQLITE_CORRUPT_BKPT; /* Move page iDbPage from its current location to page number iFreePage */ - TRACE(("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n", + TRACE(("AUTOVACUUM: Moving %u to free page %u (ptr page %u type %u)\n", iDbPage, iFreePage, iPtrPage, eType)); rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage, isCommit); if( rc!=SQLITE_OK ){

@@ -72826,7 +74612,6 @@ assert( (pCur->curFlags & BTCF_Pinned)!=0 );

pCur->curFlags &= ~BTCF_Pinned; } -#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC /* ** Return the offset into the database file for the start of the ** payload to which the cursor is pointing.

@@ -72838,7 +74623,6 @@ getCellInfo(pCur);

return (i64)pCur->pBt->pageSize*((i64)pCur->pPage->pgno - 1) + (i64)(pCur->info.pPayload - pCur->pPage->aData); } -#endif /* SQLITE_ENABLE_OFFSET_SQL_FUNC */ /* ** Return the number of bytes of payload for the entry that pCur is

@@ -72864,7 +74648,7 @@ ** This is an optimization. Everything will still work if this

** routine always returns 2147483647 (which is the largest record ** that SQLite can handle) or more. But returning a smaller value might ** prevent large memory allocations when trying to interpret a -** corrupt datrabase. +** corrupt database. ** ** The current implementation merely returns the size of the underlying ** database file.

@@ -73326,6 +75110,7 @@ ** if an intkey page appears to be the parent of a non-intkey page, or

** vice-versa). */ static int moveToChild(BtCursor *pCur, u32 newPgno){ + int rc; assert( cursorOwnsBtShared(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPage<BTCURSOR_MAX_DEPTH );

@@ -73339,8 +75124,18 @@ pCur->aiIdx[pCur->iPage] = pCur->ix;

pCur->apPage[pCur->iPage] = pCur->pPage; pCur->ix = 0; pCur->iPage++; - return getAndInitPage(pCur->pBt, newPgno, &pCur->pPage, pCur, - pCur->curPagerFlags); + rc = getAndInitPage(pCur->pBt, newPgno, &pCur->pPage, pCur->curPagerFlags); + assert( pCur->pPage!=0 || rc!=SQLITE_OK ); + if( rc==SQLITE_OK + && (pCur->pPage->nCell<1 || pCur->pPage->intKey!=pCur->curIntKey) + ){ + releasePage(pCur->pPage); + rc = SQLITE_CORRUPT_PGNO(newPgno); + } + if( rc ){ + pCur->pPage = pCur->apPage[--pCur->iPage]; + } + return rc; } #ifdef SQLITE_DEBUG

@@ -73447,7 +75242,7 @@ }

sqlite3BtreeClearCursor(pCur); } rc = getAndInitPage(pCur->pBt, pCur->pgnoRoot, &pCur->pPage, - 0, pCur->curPagerFlags); + pCur->curPagerFlags); if( rc!=SQLITE_OK ){ pCur->eState = CURSOR_INVALID; return rc;

@@ -73559,7 +75354,7 @@ assert( pCur->pPage->nCell>0 );

*pRes = 0; rc = moveToLeftmost(pCur); }else if( rc==SQLITE_EMPTY ){ - assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 ); + assert( pCur->pgnoRoot==0 || (pCur->pPage!=0 && pCur->pPage->nCell==0) ); *pRes = 1; rc = SQLITE_OK; }

@@ -73570,9 +75365,25 @@ /* Move the cursor to the last entry in the table. Return SQLITE_OK

** on success. Set *pRes to 0 if the cursor actually points to something ** or set *pRes to 1 if the table is empty. */ +static SQLITE_NOINLINE int btreeLast(BtCursor *pCur, int *pRes){ + int rc = moveToRoot(pCur); + if( rc==SQLITE_OK ){ + assert( pCur->eState==CURSOR_VALID ); + *pRes = 0; + rc = moveToRightmost(pCur); + if( rc==SQLITE_OK ){ + pCur->curFlags |= BTCF_AtLast; + }else{ + pCur->curFlags &= ~BTCF_AtLast; + } + }else if( rc==SQLITE_EMPTY ){ + assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 ); + *pRes = 1; + rc = SQLITE_OK; + } + return rc; +} SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ - int rc; - assert( cursorOwnsBtShared(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );

@@ -73593,23 +75404,7 @@ #endif

*pRes = 0; return SQLITE_OK; } - - rc = moveToRoot(pCur); - if( rc==SQLITE_OK ){ - assert( pCur->eState==CURSOR_VALID ); - *pRes = 0; - rc = moveToRightmost(pCur); - if( rc==SQLITE_OK ){ - pCur->curFlags |= BTCF_AtLast; - }else{ - pCur->curFlags &= ~BTCF_AtLast; - } - }else if( rc==SQLITE_EMPTY ){ - assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 ); - *pRes = 1; - rc = SQLITE_OK; - } - return rc; + return btreeLast(pCur, pRes); } /* Move the cursor so that it points to an entry in a table (a.k.a INTKEY)

@@ -73664,7 +75459,7 @@ }

/* If the requested key is one more than the previous key, then ** try to get there using sqlite3BtreeNext() rather than a full ** binary search. This is an optimization only. The correct answer - ** is still obtained without this case, only a little more slowely */ + ** is still obtained without this case, only a little more slowly. */ if( pCur->info.nKey+1==intKey ){ *pRes = 0; rc = sqlite3BtreeNext(pCur, 0);

@@ -74060,10 +75855,36 @@ chldPg = get4byte(&pPage->aData[pPage->hdrOffset+8]);

}else{ chldPg = get4byte(findCell(pPage, lwr)); } - pCur->ix = (u16)lwr; - rc = moveToChild(pCur, chldPg); - if( rc ) break; - } + + /* This block is similar to an in-lined version of: + ** + ** pCur->ix = (u16)lwr; + ** rc = moveToChild(pCur, chldPg); + ** if( rc ) break; + */ + pCur->info.nSize = 0; + pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); + if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){ + return SQLITE_CORRUPT_BKPT; + } + pCur->aiIdx[pCur->iPage] = (u16)lwr; + pCur->apPage[pCur->iPage] = pCur->pPage; + pCur->ix = 0; + pCur->iPage++; + rc = getAndInitPage(pCur->pBt, chldPg, &pCur->pPage, pCur->curPagerFlags); + if( rc==SQLITE_OK + && (pCur->pPage->nCell<1 || pCur->pPage->intKey!=pCur->curIntKey) + ){ + releasePage(pCur->pPage); + rc = SQLITE_CORRUPT_PGNO(chldPg); + } + if( rc ){ + pCur->pPage = pCur->apPage[--pCur->iPage]; + break; + } + /* + ***** End of in-lined moveToChild() call */ + } moveto_index_finish: pCur->info.nSize = 0; assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );

@@ -74154,7 +75975,8 @@ }

pPage = pCur->pPage; idx = ++pCur->ix; - if( NEVER(!pPage->isInit) || sqlite3FaultSim(412) ){ + if( sqlite3FaultSim(412) ) pPage->isInit = 0; + if( !pPage->isInit ){ return SQLITE_CORRUPT_BKPT; }

@@ -74417,7 +76239,7 @@ *pPgno = iTrunk;

memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4); *ppPage = pTrunk; pTrunk = 0; - TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); + TRACE(("ALLOCATE: %u trunk - %u free pages left\n", *pPgno, n-1)); }else if( k>(u32)(pBt->usableSize/4 - 2) ){ /* Value of k is out of range. Database corruption */ rc = SQLITE_CORRUPT_PGNO(iTrunk);

@@ -74483,7 +76305,7 @@ put4byte(&pPrevTrunk->aData[0], iNewTrunk);

} } pTrunk = 0; - TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); + TRACE(("ALLOCATE: %u trunk - %u free pages left\n", *pPgno, n-1)); #endif }else if( k>0 ){ /* Extract a leaf from the trunk */

@@ -74528,8 +76350,8 @@ || (iPage==nearby || (iPage<nearby && eMode==BTALLOC_LE))

){ int noContent; *pPgno = iPage; - TRACE(("ALLOCATE: %d was leaf %d of %d on trunk %d" - ": %d more free pages\n", + TRACE(("ALLOCATE: %u was leaf %u of %u on trunk %u" + ": %u more free pages\n", *pPgno, closest+1, k, pTrunk->pgno, n-1)); rc = sqlite3PagerWrite(pTrunk->pDbPage); if( rc ) goto end_allocate_page;

@@ -74585,7 +76407,7 @@ ** at the end of the file instead of one. The first allocated page

** becomes a new pointer-map page, the second is used by the caller. */ MemPage *pPg = 0; - TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage)); + TRACE(("ALLOCATE: %u from end of file (pointer-map page)\n", pBt->nPage)); assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) ); rc = btreeGetUnusedPage(pBt, pBt->nPage, &pPg, bNoContent); if( rc==SQLITE_OK ){

@@ -74608,7 +76430,7 @@ if( rc!=SQLITE_OK ){

releasePage(*ppPage); *ppPage = 0; } - TRACE(("ALLOCATE: %d from end of file\n", *pPgno)); + TRACE(("ALLOCATE: %u from end of file\n", *pPgno)); } assert( CORRUPT_DB || *pPgno!=PENDING_BYTE_PAGE(pBt) );

@@ -74676,7 +76498,7 @@

/* If the database supports auto-vacuum, write an entry in the pointer-map ** to indicate that the page is free. */ - if( ISAUTOVACUUM ){ + if( ISAUTOVACUUM(pBt) ){ ptrmapPut(pBt, iPage, PTRMAP_FREEPAGE, 0, &rc); if( rc ) goto freepage_out; }

@@ -74736,7 +76558,7 @@ sqlite3PagerDontWrite(pPage->pDbPage);

} rc = btreeSetHasContent(pBt, iPage); } - TRACE(("FREE-PAGE: %d leaf on trunk page %d\n",pPage->pgno,pTrunk->pgno)); + TRACE(("FREE-PAGE: %u leaf on trunk page %u\n",pPage->pgno,pTrunk->pgno)); goto freepage_out; } }

@@ -74757,7 +76579,7 @@ }

put4byte(pPage->aData, iTrunk); put4byte(&pPage->aData[4], 0); put4byte(&pPage1->aData[32], iPage); - TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", pPage->pgno, iTrunk)); + TRACE(("FREE-PAGE: %u new trunk page replacing %u\n", pPage->pgno, iTrunk)); freepage_out: if( pPage ){

@@ -74846,7 +76668,7 @@ }

/* Call xParseCell to compute the size of a cell. If the cell contains ** overflow, then invoke cellClearOverflow to clear out that overflow. -** STore the result code (SQLITE_OK or some error code) in rc. +** Store the result code (SQLITE_OK or some error code) in rc. ** ** Implemented as macro to force inlining for performance. */

@@ -75117,23 +76939,27 @@ ** in pTemp or the original pCell) and also record its index.

** Allocating a new entry in pPage->aCell[] implies that ** pPage->nOverflow is incremented. ** -** *pRC must be SQLITE_OK when this routine is called. +** The insertCellFast() routine below works exactly the same as +** insertCell() except that it lacks the pTemp and iChild parameters +** which are assumed zero. Other than that, the two routines are the +** same. +** +** Fixes or enhancements to this routine should be reflected in +** insertCellFast()! */ -static void insertCell( +static int insertCell( MemPage *pPage, /* Page into which we are copying */ int i, /* New cell becomes the i-th cell of the page */ u8 *pCell, /* Content of the new cell */ int sz, /* Bytes of content in pCell */ u8 *pTemp, /* Temp storage space for pCell, if needed */ - Pgno iChild, /* If non-zero, replace first 4 bytes with this value */ - int *pRC /* Read and write return code from here */ + Pgno iChild /* If non-zero, replace first 4 bytes with this value */ ){ int idx = 0; /* Where to write new cell content in data[] */ int j; /* Loop counter */ u8 *data; /* The content of the whole page */ u8 *pIns; /* The point in pPage->aCellIdx[] where no cell inserted */ - assert( *pRC==SQLITE_OK ); assert( i>=0 && i<=pPage->nCell+pPage->nOverflow ); assert( MX_CELL(pPage->pBt)<=10921 ); assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB );

@@ -75142,14 +76968,103 @@ assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) );

assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( sz==pPage->xCellSize(pPage, pCell) || CORRUPT_DB ); assert( pPage->nFree>=0 ); + assert( iChild>0 ); if( pPage->nOverflow || sz+2>pPage->nFree ){ if( pTemp ){ memcpy(pTemp, pCell, sz); pCell = pTemp; } - if( iChild ){ - put4byte(pCell, iChild); + put4byte(pCell, iChild); + j = pPage->nOverflow++; + /* Comparison against ArraySize-1 since we hold back one extra slot + ** as a contingency. In other words, never need more than 3 overflow + ** slots but 4 are allocated, just to be safe. */ + assert( j < ArraySize(pPage->apOvfl)-1 ); + pPage->apOvfl[j] = pCell; + pPage->aiOvfl[j] = (u16)i; + + /* When multiple overflows occur, they are always sequential and in + ** sorted order. This invariants arise because multiple overflows can + ** only occur when inserting divider cells into the parent page during + ** balancing, and the dividers are adjacent and sorted. + */ + assert( j==0 || pPage->aiOvfl[j-1]<(u16)i ); /* Overflows in sorted order */ + assert( j==0 || i==pPage->aiOvfl[j-1]+1 ); /* Overflows are sequential */ + }else{ + int rc = sqlite3PagerWrite(pPage->pDbPage); + if( NEVER(rc!=SQLITE_OK) ){ + return rc; } + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + data = pPage->aData; + assert( &data[pPage->cellOffset]==pPage->aCellIdx ); + rc = allocateSpace(pPage, sz, &idx); + if( rc ){ return rc; } + /* The allocateSpace() routine guarantees the following properties + ** if it returns successfully */ + assert( idx >= 0 ); + assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB ); + assert( idx+sz <= (int)pPage->pBt->usableSize ); + pPage->nFree -= (u16)(2 + sz); + /* In a corrupt database where an entry in the cell index section of + ** a btree page has a value of 3 or less, the pCell value might point + ** as many as 4 bytes in front of the start of the aData buffer for + ** the source page. Make sure this does not cause problems by not + ** reading the first 4 bytes */ + memcpy(&data[idx+4], pCell+4, sz-4); + put4byte(&data[idx], iChild); + pIns = pPage->aCellIdx + i*2; + memmove(pIns+2, pIns, 2*(pPage->nCell - i)); + put2byte(pIns, idx); + pPage->nCell++; + /* increment the cell count */ + if( (++data[pPage->hdrOffset+4])==0 ) data[pPage->hdrOffset+3]++; + assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell || CORRUPT_DB ); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pPage->pBt->autoVacuum ){ + int rc2 = SQLITE_OK; + /* The cell may contain a pointer to an overflow page. If so, write + ** the entry for the overflow page into the pointer map. + */ + ptrmapPutOvflPtr(pPage, pPage, pCell, &rc2); + if( rc2 ) return rc2; + } +#endif + } + return SQLITE_OK; +} + +/* +** This variant of insertCell() assumes that the pTemp and iChild +** parameters are both zero. Use this variant in sqlite3BtreeInsert() +** for performance improvement, and also so that this variant is only +** called from that one place, and is thus inlined, and thus runs must +** faster. +** +** Fixes or enhancements to this routine should be reflected into +** the insertCell() routine. 
+*/ +static int insertCellFast( + MemPage *pPage, /* Page into which we are copying */ + int i, /* New cell becomes the i-th cell of the page */ + u8 *pCell, /* Content of the new cell */ + int sz /* Bytes of content in pCell */ +){ + int idx = 0; /* Where to write new cell content in data[] */ + int j; /* Loop counter */ + u8 *data; /* The content of the whole page */ + u8 *pIns; /* The point in pPage->aCellIdx[] where no cell inserted */ + + assert( i>=0 && i<=pPage->nCell+pPage->nOverflow ); + assert( MX_CELL(pPage->pBt)<=10921 ); + assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB ); + assert( pPage->nOverflow<=ArraySize(pPage->apOvfl) ); + assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + assert( sz==pPage->xCellSize(pPage, pCell) || CORRUPT_DB ); + assert( pPage->nFree>=0 ); + assert( pPage->nOverflow==0 ); + if( sz+2>pPage->nFree ){ j = pPage->nOverflow++; /* Comparison against ArraySize-1 since we hold back one extra slot ** as a contingency. In other words, never need more than 3 overflow

@@ -75168,31 +77083,20 @@ assert( j==0 || i==pPage->aiOvfl[j-1]+1 ); /* Overflows are sequential */

}else{ int rc = sqlite3PagerWrite(pPage->pDbPage); if( rc!=SQLITE_OK ){ - *pRC = rc; - return; + return rc; } assert( sqlite3PagerIswriteable(pPage->pDbPage) ); data = pPage->aData; assert( &data[pPage->cellOffset]==pPage->aCellIdx ); rc = allocateSpace(pPage, sz, &idx); - if( rc ){ *pRC = rc; return; } + if( rc ){ return rc; } /* The allocateSpace() routine guarantees the following properties ** if it returns successfully */ assert( idx >= 0 ); assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB ); assert( idx+sz <= (int)pPage->pBt->usableSize ); pPage->nFree -= (u16)(2 + sz); - if( iChild ){ - /* In a corrupt database where an entry in the cell index section of - ** a btree page has a value of 3 or less, the pCell value might point - ** as many as 4 bytes in front of the start of the aData buffer for - ** the source page. Make sure this does not cause problems by not - ** reading the first 4 bytes */ - memcpy(&data[idx+4], pCell+4, sz-4); - put4byte(&data[idx], iChild); - }else{ - memcpy(&data[idx], pCell, sz); - } + memcpy(&data[idx], pCell, sz); pIns = pPage->aCellIdx + i*2; memmove(pIns+2, pIns, 2*(pPage->nCell - i)); put2byte(pIns, idx);

@@ -75202,13 +77106,16 @@ if( (++data[pPage->hdrOffset+4])==0 ) data[pPage->hdrOffset+3]++;

assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell || CORRUPT_DB ); #ifndef SQLITE_OMIT_AUTOVACUUM if( pPage->pBt->autoVacuum ){ + int rc2 = SQLITE_OK; /* The cell may contain a pointer to an overflow page. If so, write ** the entry for the overflow page into the pointer map. */ - ptrmapPutOvflPtr(pPage, pPage, pCell, pRC); + ptrmapPutOvflPtr(pPage, pPage, pCell, &rc2); + if( rc2 ) return rc2; } #endif } + return SQLITE_OK; } /*

@@ -75309,14 +77216,16 @@ ** Make sure the cell sizes at idx, idx+1, ..., idx+N-1 have been

** computed. */ static void populateCellCache(CellArray *p, int idx, int N){ + MemPage *pRef = p->pRef; + u16 *szCell = p->szCell; assert( idx>=0 && idx+N<=p->nCell ); while( N>0 ){ assert( p->apCell[idx]!=0 ); - if( p->szCell[idx]==0 ){ - p->szCell[idx] = p->pRef->xCellSize(p->pRef, p->apCell[idx]); + if( szCell[idx]==0 ){ + szCell[idx] = pRef->xCellSize(pRef, p->apCell[idx]); }else{ assert( CORRUPT_DB || - p->szCell[idx]==p->pRef->xCellSize(p->pRef, p->apCell[idx]) ); + szCell[idx]==pRef->xCellSize(pRef, p->apCell[idx]) ); } idx++; N--;

@@ -75370,12 +77279,13 @@ u8 *pData;

int k; /* Current slot in pCArray->apEnd[] */ u8 *pSrcEnd; /* Current pCArray->apEnd[k] value */ + assert( nCell>0 ); assert( i<iEnd ); j = get2byte(&aData[hdr+5]); if( j>(u32)usableSize ){ j = 0; } memcpy(&pTmp[j], &aData[j], usableSize - j); - for(k=0; pCArray->ixNx[k]<=i && ALWAYS(k<NB*2); k++){} + for(k=0; ALWAYS(k<NB*2) && pCArray->ixNx[k]<=i; k++){} pSrcEnd = pCArray->apEnd[k]; pData = pEnd;

@@ -75438,7 +77348,7 @@ **

** Finally, argument pBegin points to the byte immediately following the ** end of the space required by this page for the cell-pointer area (for ** all cells - not just those inserted by the current call). If the content -** area must be extended to before this point in order to accomodate all +** area must be extended to before this point in order to accommodate all ** cells in apCell[], then the cells do not fit and non-zero is returned. */ static int pageInsertArray(

@@ -75458,7 +77368,7 @@ int k; /* Current slot in pCArray->apEnd[] */

u8 *pEnd; /* Maximum extent of cell data */ assert( CORRUPT_DB || pPg->hdrOffset==0 ); /* Never called on page 1 */ if( iEnd<=iFirst ) return 0; - for(k=0; pCArray->ixNx[k]<=i && ALWAYS(k<NB*2); k++){} + for(k=0; ALWAYS(k<NB*2) && pCArray->ixNx[k]<=i ; k++){} pEnd = pCArray->apEnd[k]; while( 1 /*Exit by break*/ ){ int sz, rc;

@@ -75516,39 +77426,50 @@ u8 * const aData = pPg->aData;

u8 * const pEnd = &aData[pPg->pBt->usableSize]; u8 * const pStart = &aData[pPg->hdrOffset + 8 + pPg->childPtrSize]; int nRet = 0; - int i; + int i, j; int iEnd = iFirst + nCell; - u8 *pFree = 0; - int szFree = 0; + int nFree = 0; + int aOfst[10]; + int aAfter[10]; for(i=iFirst; i<iEnd; i++){ u8 *pCell = pCArray->apCell[i]; if( SQLITE_WITHIN(pCell, pStart, pEnd) ){ int sz; + int iAfter; + int iOfst; /* No need to use cachedCellSize() here. The sizes of all cells that ** are to be freed have already been computing while deciding which ** cells need freeing */ sz = pCArray->szCell[i]; assert( sz>0 ); - if( pFree!=(pCell + sz) ){ - if( pFree ){ - assert( pFree>aData && (pFree - aData)<65536 ); - freeSpace(pPg, (u16)(pFree - aData), szFree); + iOfst = (u16)(pCell - aData); + iAfter = iOfst+sz; + for(j=0; j<nFree; j++){ + if( aOfst[j]==iAfter ){ + aOfst[j] = iOfst; + break; + }else if( aAfter[j]==iOfst ){ + aAfter[j] = iAfter; + break; } - pFree = pCell; - szFree = sz; - if( pFree+sz>pEnd ){ - return 0; + } + if( j>=nFree ){ + if( nFree>=(int)(sizeof(aOfst)/sizeof(aOfst[0])) ){ + for(j=0; j<nFree; j++){ + freeSpace(pPg, aOfst[j], aAfter[j]-aOfst[j]); + } + nFree = 0; } - }else{ - pFree = pCell; - szFree += sz; + aOfst[nFree] = iOfst; + aAfter[nFree] = iAfter; + if( &aData[iAfter]>pEnd ) return 0; + nFree++; } nRet++; } } - if( pFree ){ - assert( pFree>aData && (pFree - aData)<65536 ); - freeSpace(pPg, (u16)(pFree - aData), szFree); + for(j=0; j<nFree; j++){ + freeSpace(pPg, aOfst[j], aAfter[j]-aOfst[j]); } return nRet; }
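Rather than coalescing freed cells into a single running (pFree, szFree) pair, the rewritten pageFreeArray batches up to ten [offset, end) ranges in aOfst[]/aAfter[], gluing a new range onto an existing one when the two touch and flushing everything through freeSpace() when the batch fills or the loop ends. The merging step in isolation (a sketch, not the upstream routine):

    #include <stdio.h>

    /* Add [iOfst, iAfter) to a small batch of ranges, extending an existing
    ** range when adjacent -- the same idea as aOfst[]/aAfter[] above. */
    static int addRangeSketch(int *aOfst, int *aAfter, int nFree,
                              int iOfst, int iAfter){
      for(int j=0; j<nFree; j++){
        if( aOfst[j]==iAfter ){ aOfst[j] = iOfst;  return nFree; }  /* extend down */
        if( aAfter[j]==iOfst ){ aAfter[j] = iAfter; return nFree; } /* extend up   */
      }
      aOfst[nFree] = iOfst; aAfter[nFree] = iAfter;                 /* new range   */
      return nFree+1;
    }

    int main(void){
      int aOfst[10], aAfter[10], n = 0;
      n = addRangeSketch(aOfst, aAfter, n, 100, 120);
      n = addRangeSketch(aOfst, aAfter, n, 120, 150);  /* merges with the first */
      n = addRangeSketch(aOfst, aAfter, n, 200, 230);
      for(int j=0; j<n; j++) printf("[%d,%d)\n", aOfst[j], aAfter[j]);
      return 0;   /* prints [100,150) and [200,230) */
    }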

@@ -75601,9 +77522,9 @@ assert( nCell>=nTail );

nCell -= nTail; } - pData = &aData[get2byteNotZero(&aData[hdr+5])]; + pData = &aData[get2byte(&aData[hdr+5])]; if( pData<pBegin ) goto editpage_fail; - if( pData>pPg->aDataEnd ) goto editpage_fail; + if( NEVER(pData>pPg->aDataEnd) ) goto editpage_fail; /* Add cells to the start of the page */ if( iNew<iOld ){

@@ -75665,6 +77586,7 @@

return SQLITE_OK; editpage_fail: /* Unable to edit this page. Rebuild it from scratch instead. */ + if( nNew<1 ) return SQLITE_CORRUPT_BKPT; populateCellCache(pCArray, iNew, nNew); return rebuildPage(pCArray, iNew, nNew, pPg); }

@@ -75742,12 +77664,12 @@ /* If this is an auto-vacuum database, update the pointer map

** with entries for the new page, and any pointer from the ** cell on the page to an overflow page. If either of these ** operations fails, the return code is set, but the contents - ** of the parent page are still manipulated by thh code below. + ** of the parent page are still manipulated by the code below. ** That is Ok, at this point the parent page is guaranteed to ** be marked as dirty. Returning an error code will cause a ** rollback, undoing any changes made to the parent page. */ - if( ISAUTOVACUUM ){ + if( ISAUTOVACUUM(pBt) ){ ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno, &rc); if( szCell>pNew->minLocal ){ ptrmapPutOvflPtr(pNew, pNew, pCell, &rc);

@@ -75775,8 +77697,8 @@ while( ((*(pOut++) = *(pCell++))&0x80) && pCell<pStop );

/* Insert the new divider cell into pParent. */ if( rc==SQLITE_OK ){ - insertCell(pParent, pParent->nCell, pSpace, (int)(pOut-pSpace), - 0, pPage->pgno, &rc); + rc = insertCell(pParent, pParent->nCell, pSpace, (int)(pOut-pSpace), + 0, pPage->pgno); } /* Set the right-child pointer of pParent to point to the new page. */

@@ -75885,7 +77807,7 @@

/* If this is an auto-vacuum database, update the pointer-map entries ** for any b-tree or overflow pages that pTo now contains the pointers to. */ - if( ISAUTOVACUUM ){ + if( ISAUTOVACUUM(pBt) ){ *pRC = setChildPtrmaps(pTo); } }

@@ -76018,7 +77940,7 @@ }

pgno = get4byte(pRight); while( 1 ){ if( rc==SQLITE_OK ){ - rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0); + rc = getAndInitPage(pBt, pgno, &apOld[i], 0); } if( rc ){ memset(apOld, 0, (i+1)*sizeof(MemPage*));

@@ -76309,15 +78231,17 @@ r = cntNew[i-1] - 1;

d = r + 1 - leafData; (void)cachedCellSize(&b, d); do{ + int szR, szD; assert( d<nMaxCells ); assert( r<nMaxCells ); - (void)cachedCellSize(&b, r); + szR = cachedCellSize(&b, r); + szD = b.szCell[d]; if( szRight!=0 - && (bBulk || szRight+b.szCell[d]+2 > szLeft-(b.szCell[r]+(i==k-1?0:2)))){ + && (bBulk || szRight+szD+2 > szLeft-(szR+(i==k-1?0:2)))){ break; } - szRight += b.szCell[d] + 2; - szLeft -= b.szCell[r] + 2; + szRight += szD + 2; + szLeft -= szR + 2; cntNew[i-1] = r; r--; d--;

@@ -76330,7 +78254,7 @@ goto balance_cleanup;

} } - /* Sanity check: For a non-corrupt database file one of the follwing + /* Sanity check: For a non-corrupt database file one of the following ** must be true: ** (1) We found one or more cells (cntNew[0])>0), or ** (2) pPage is a virtual root page. A virtual root page is when

@@ -76338,7 +78262,7 @@ ** the real root page is page 1 and we are the only child of

** that page. */ assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) || CORRUPT_DB); - TRACE(("BALANCE: old: %d(nc=%d) %d(nc=%d) %d(nc=%d)\n", + TRACE(("BALANCE: old: %u(nc=%u) %u(nc=%u) %u(nc=%u)\n", apOld[0]->pgno, apOld[0]->nCell, nOld>=2 ? apOld[1]->pgno : 0, nOld>=2 ? apOld[1]->nCell : 0, nOld>=3 ? apOld[2]->pgno : 0, nOld>=3 ? apOld[2]->nCell : 0

@@ -76371,7 +78295,7 @@ nNew++;

cntOld[i] = b.nCell; /* Set the pointer-map entry for the new sibling page. */ - if( ISAUTOVACUUM ){ + if( ISAUTOVACUUM(pBt) ){ ptrmapPut(pBt, pNew->pgno, PTRMAP_BTREE, pParent->pgno, &rc); if( rc!=SQLITE_OK ){ goto balance_cleanup;

@@ -76422,8 +78346,8 @@ apNew[iB]->pgno = pgnoA;

} } - TRACE(("BALANCE: new: %d(%d nc=%d) %d(%d nc=%d) %d(%d nc=%d) " - "%d(%d nc=%d) %d(%d nc=%d)\n", + TRACE(("BALANCE: new: %u(%u nc=%u) %u(%u nc=%u) %u(%u nc=%u) " + "%u(%u nc=%u) %u(%u nc=%u)\n", apNew[0]->pgno, szNew[0], cntNew[0], nNew>=2 ? apNew[1]->pgno : 0, nNew>=2 ? szNew[1] : 0, nNew>=2 ? cntNew[1] - cntNew[0] - !leafData : 0,

@@ -76464,7 +78388,7 @@ ** associated with the right-child of each sibling may also need to be

** updated. This happens below, after the sibling pages have been ** populated, not here. */ - if( ISAUTOVACUUM ){ + if( ISAUTOVACUUM(pBt) ){ MemPage *pOld; MemPage *pNew = pOld = apNew[0]; int cntOldNext = pNew->nCell + pNew->nOverflow;

@@ -76555,13 +78479,13 @@ }

iOvflSpace += sz; assert( sz<=pBt->maxLocal+23 ); assert( iOvflSpace <= (int)pBt->pageSize ); - for(k=0; b.ixNx[k]<=j && ALWAYS(k<NB*2); k++){} + for(k=0; ALWAYS(k<NB*2) && b.ixNx[k]<=j; k++){} pSrcEnd = b.apEnd[k]; - if( SQLITE_WITHIN(pSrcEnd, pCell, pCell+sz) ){ + if( SQLITE_OVERFLOW(pSrcEnd, pCell, pCell+sz) ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } - insertCell(pParent, nxDiv+i, pCell, sz, pTemp, pNew->pgno, &rc); + rc = insertCell(pParent, nxDiv+i, pCell, sz, pTemp, pNew->pgno); if( rc!=SQLITE_OK ) goto balance_cleanup; assert( sqlite3PagerIswriteable(pParent->pDbPage) ); }

@@ -76591,6 +78515,8 @@ */

for(i=1-nNew; i<nNew; i++){ int iPg = i<0 ? -i : i; assert( iPg>=0 && iPg<nNew ); + assert( iPg>=1 || i>=0 ); + assert( iPg<ArraySize(cntOld) ); if( abDone[iPg] ) continue; /* Skip pages already processed */ if( i>=0 /* On the upwards pass, or... */ || cntOld[iPg-1]>=cntNew[iPg-1] /* Condition (1) is true */

@@ -76657,7 +78583,7 @@ || rc!=SQLITE_OK

); copyNodeContent(apNew[0], pParent, &rc); freePage(apNew[0], &rc); - }else if( ISAUTOVACUUM && !leafCorrection ){ + }else if( ISAUTOVACUUM(pBt) && !leafCorrection ){ /* Fix the pointer map entries associated with the right-child of each ** sibling page. All other pointer map entries have already been taken ** care of. */

@@ -76668,7 +78594,7 @@ }

} assert( pParent->isInit ); - TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n", + TRACE(("BALANCE: finished: old=%u new=%u cells=%u\n", nOld, nNew, b.nCell)); /* Free any old pages that were not reused as new pages.

@@ -76678,7 +78604,7 @@ freePage(apOld[i], &rc);

} #if 0 - if( ISAUTOVACUUM && rc==SQLITE_OK && apNew[0]->isInit ){ + if( ISAUTOVACUUM(pBt) && rc==SQLITE_OK && apNew[0]->isInit ){ /* The ptrmapCheckPages() contains assert() statements that verify that ** all pointer map pages are set correctly. This is helpful while ** debugging. This is usually disabled because a corrupt database may

@@ -76740,7 +78666,7 @@ rc = sqlite3PagerWrite(pRoot->pDbPage);

if( rc==SQLITE_OK ){ rc = allocateBtreePage(pBt,&pChild,&pgnoChild,pRoot->pgno,0); copyNodeContent(pRoot, pChild, &rc); - if( ISAUTOVACUUM ){ + if( ISAUTOVACUUM(pBt) ){ ptrmapPut(pBt, pgnoChild, PTRMAP_BTREE, pRoot->pgno, &rc); } }

@@ -76753,7 +78679,7 @@ assert( sqlite3PagerIswriteable(pChild->pDbPage) );

assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); assert( pChild->nCell==pRoot->nCell || CORRUPT_DB ); - TRACE(("BALANCE: copy root %d into %d\n", pRoot->pgno, pChild->pgno)); + TRACE(("BALANCE: copy root %u into %u\n", pRoot->pgno, pChild->pgno)); /* Copy the overflow cells from pRoot to pChild */ memcpy(pChild->aiOvfl, pRoot->aiOvfl,

@@ -76947,7 +78873,7 @@ int iAmt /* Number of bytes to be written */

){ int nData = pX->nData - iOffset; if( nData<=0 ){ - /* Overwritting with zeros */ + /* Overwriting with zeros */ int i; for(i=0; i<iAmt && pDest[i]==0; i++){} if( i<iAmt ){

@@ -76979,9 +78905,13 @@ }

/* ** Overwrite the cell that cursor pCur is pointing to with fresh content -** contained in pX. +** contained in pX. In this variant, pCur is pointing to an overflow +** cell. */ -static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){ +static SQLITE_NOINLINE int btreeOverwriteOverflowCell( + BtCursor *pCur, /* Cursor pointing to cell to overwrite */ + const BtreePayload *pX /* Content to write into the cell */ +){ int iOffset; /* Next byte of pX->pData to write */ int nTotal = pX->nData + pX->nZero; /* Total bytes of to write */ int rc; /* Return code */

@@ -76990,16 +78920,12 @@ BtShared *pBt; /* Btree */

Pgno ovflPgno; /* Next overflow page to write */ u32 ovflPageSize; /* Size to write on overflow page */ - if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd - || pCur->info.pPayload < pPage->aData + pPage->cellOffset - ){ - return SQLITE_CORRUPT_BKPT; - } + assert( pCur->info.nLocal<nTotal ); /* pCur is an overflow cell */ + /* Overwrite the local portion first */ rc = btreeOverwriteContent(pPage, pCur->info.pPayload, pX, 0, pCur->info.nLocal); if( rc ) return rc; - if( pCur->info.nLocal==nTotal ) return SQLITE_OK; /* Now overwrite the overflow pages */ iOffset = pCur->info.nLocal;

@@ -77029,6 +78955,29 @@ }while( iOffset<nTotal );

return SQLITE_OK; } +/* +** Overwrite the cell that cursor pCur is pointing to with fresh content +** contained in pX. +*/ +static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){ + int nTotal = pX->nData + pX->nZero; /* Total bytes of to write */ + MemPage *pPage = pCur->pPage; /* Page being written */ + + if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd + || pCur->info.pPayload < pPage->aData + pPage->cellOffset + ){ + return SQLITE_CORRUPT_BKPT; + } + if( pCur->info.nLocal==nTotal ){ + /* The entire cell is local */ + return btreeOverwriteContent(pPage, pCur->info.pPayload, pX, + 0, pCur->info.nLocal); + }else{ + /* The cell contains overflow content */ + return btreeOverwriteOverflowCell(pCur, pX); + } +} + /* ** Insert a new record into the BTree. The content of the new record

@@ -77072,7 +79021,6 @@ int szNew = 0;

int idx; MemPage *pPage; Btree *p = pCur->pBtree; - BtShared *pBt = p->pBt; unsigned char *oldCell; unsigned char *newCell = 0;

@@ -77091,7 +79039,7 @@ ** doing any work. To avoid thwarting these optimizations, it is important

** not to clear the cursor here. */ if( pCur->curFlags & BTCF_Multiple ){ - rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); + rc = saveAllCursors(p->pBt, pCur->pgnoRoot, pCur); if( rc ) return rc; if( loc && pCur->iPage<0 ){ /* This can only happen if the schema is corrupt such that there is more

@@ -77115,8 +79063,8 @@ }

assert( cursorOwnsBtShared(pCur) ); assert( (pCur->curFlags & BTCF_WriteFlag)!=0 - && pBt->inTransaction==TRANS_WRITE - && (pBt->btsFlags & BTS_READ_ONLY)==0 ); + && p->pBt->inTransaction==TRANS_WRITE + && (p->pBt->btsFlags & BTS_READ_ONLY)==0 ); assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); /* Assert that the caller has been consistent. If this cursor was opened

@@ -77214,7 +79162,7 @@ }

} } assert( pCur->eState==CURSOR_VALID - || (pCur->eState==CURSOR_INVALID && loc) ); + || (pCur->eState==CURSOR_INVALID && loc) || CORRUPT_DB ); pPage = pCur->pPage; assert( pPage->intKey || pX->nKey>=0 || (flags & BTREE_PREFORMAT) );

@@ -77229,31 +79177,34 @@ }

if( rc ) return rc; } - TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n", + TRACE(("INSERT: table=%u nkey=%lld ndata=%u page=%u %s\n", pCur->pgnoRoot, pX->nKey, pX->nData, pPage->pgno, loc==0 ? "overwrite" : "new entry")); assert( pPage->isInit || CORRUPT_DB ); - newCell = pBt->pTmpSpace; + newCell = p->pBt->pTmpSpace; assert( newCell!=0 ); + assert( BTREE_PREFORMAT==OPFLAG_PREFORMAT ); if( flags & BTREE_PREFORMAT ){ rc = SQLITE_OK; - szNew = pBt->nPreformatSize; + szNew = p->pBt->nPreformatSize; if( szNew<4 ) szNew = 4; - if( ISAUTOVACUUM && szNew>pPage->maxLocal ){ + if( ISAUTOVACUUM(p->pBt) && szNew>pPage->maxLocal ){ CellInfo info; pPage->xParseCell(pPage, newCell, &info); if( info.nPayload!=info.nLocal ){ Pgno ovfl = get4byte(&newCell[szNew-4]); - ptrmapPut(pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, &rc); + ptrmapPut(p->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, &rc); + if( NEVER(rc) ) goto end_insert; } } }else{ rc = fillInCell(pPage, newCell, pX, &szNew); + if( rc ) goto end_insert; } - if( rc ) goto end_insert; assert( szNew==pPage->xCellSize(pPage, newCell) ); - assert( szNew <= MX_CELL_SIZE(pBt) ); + assert( szNew <= MX_CELL_SIZE(p->pBt) ); idx = pCur->ix; + pCur->info.nSize = 0; if( loc==0 ){ CellInfo info; assert( idx>=0 );

@@ -77272,7 +79223,7 @@ BTREE_CLEAR_CELL(rc, pPage, oldCell, info);

testcase( pCur->curFlags & BTCF_ValidOvfl ); invalidateOverflowCache(pCur); if( info.nSize==szNew && info.nLocal==info.nPayload - && (!ISAUTOVACUUM || szNew<pPage->minLocal) + && (!ISAUTOVACUUM(p->pBt) || szNew<pPage->minLocal) ){ /* Overwrite the old cell with the new if they are the same size. ** We could also try to do this if the old cell is smaller, then add

@@ -77302,7 +79253,7 @@ pCur->curFlags &= ~BTCF_ValidNKey;

}else{ assert( pPage->leaf ); } - insertCell(pPage, idx, newCell, szNew, 0, 0, &rc); + rc = insertCellFast(pPage, idx, newCell, szNew); assert( pPage->nOverflow==0 || rc==SQLITE_OK ); assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 );

@@ -77326,7 +79277,6 @@ ** entry in the table, and the next row inserted has an integer key

** larger than the largest existing key, it is possible to insert the ** row without seeking the cursor. This can be a big performance boost. */ - pCur->info.nSize = 0; if( pPage->nOverflow ){ assert( rc==SQLITE_OK ); pCur->curFlags &= ~(BTCF_ValidNKey);

@@ -77375,7 +79325,6 @@ **

** SQLITE_OK is returned if successful, or an SQLite error code otherwise. */ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 iKey){ - int rc = SQLITE_OK; BtShared *pBt = pDest->pBt; u8 *aOut = pBt->pTmpSpace; /* Pointer to next output buffer */ const u8 *aIn; /* Pointer to next input buffer */

@@ -77398,7 +79347,9 @@ nRem = pSrc->info.nPayload;

if( nIn==nRem && nIn<pDest->pPage->maxLocal ){ memcpy(aOut, aIn, nIn); pBt->nPreformatSize = nIn + (aOut - pBt->pTmpSpace); + return SQLITE_OK; }else{ + int rc = SQLITE_OK; Pager *pSrcPager = pSrc->pBt->pPager; u8 *pPgnoOut = 0; Pgno ovflIn = 0;

@@ -77450,7 +79401,7 @@ Pgno pgnoNew;

MemPage *pNew = 0; rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0); put4byte(pPgnoOut, pgnoNew); - if( ISAUTOVACUUM && pPageOut ){ + if( ISAUTOVACUUM(pBt) && pPageOut ){ ptrmapPut(pBt, pgnoNew, PTRMAP_OVERFLOW2, pPageOut->pgno, &rc); } releasePage(pPageOut);

@@ -77466,9 +79417,8 @@ }while( nRem>0 && rc==SQLITE_OK );

releasePage(pPageOut); sqlite3PagerUnref(pPageIn); + return rc; } - - return rc; } /*

@@ -77527,6 +79477,9 @@ pCell = findCell(pPage, iCellIdx);

if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ){ return SQLITE_CORRUPT_BKPT; } + if( pCell<&pPage->aCellIdx[pPage->nCell] ){ + return SQLITE_CORRUPT_BKPT; + } /* If the BTREE_SAVEPOSITION bit is on, then the cursor position must ** be preserved following this delete operation. If the current delete

@@ -77623,7 +79576,7 @@ pTmp = pBt->pTmpSpace;

assert( pTmp!=0 ); rc = sqlite3PagerWrite(pLeaf->pDbPage); if( rc==SQLITE_OK ){ - insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc); + rc = insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n); } dropCell(pLeaf, pLeaf->nCell-1, nCell, &rc); if( rc ) return rc;

@@ -77703,7 +79656,7 @@ BtShared *pBt = p->pBt;

MemPage *pRoot; Pgno pgnoRoot; int rc; - int ptfFlags; /* Page-type flage for the root page of new table */ + int ptfFlags; /* Page-type flags for the root page of new table */ assert( sqlite3BtreeHoldsMutex(p) ); assert( pBt->inTransaction==TRANS_WRITE );

@@ -77872,7 +79825,7 @@ assert( sqlite3_mutex_held(pBt->mutex) );

if( pgno>btreePagecount(pBt) ){ return SQLITE_CORRUPT_BKPT; } - rc = getAndInitPage(pBt, pgno, &pPage, 0, 0); + rc = getAndInitPage(pBt, pgno, &pPage, 0); if( rc ) return rc; if( (pBt->openFlags & BTREE_SINGLE)==0 && sqlite3PagerPageRefcount(pPage->pDbPage) != (1 + (pgno==1))

@@ -78223,6 +80176,41 @@ }

#ifndef SQLITE_OMIT_INTEGRITY_CHECK /* +** Record an OOM error during integrity_check +*/ +static void checkOom(IntegrityCk *pCheck){ + pCheck->rc = SQLITE_NOMEM; + pCheck->mxErr = 0; /* Causes integrity_check processing to stop */ + if( pCheck->nErr==0 ) pCheck->nErr++; +} + +/* +** Invoke the progress handler, if appropriate. Also check for an +** interrupt. +*/ +static void checkProgress(IntegrityCk *pCheck){ + sqlite3 *db = pCheck->db; + if( AtomicLoad(&db->u1.isInterrupted) ){ + pCheck->rc = SQLITE_INTERRUPT; + pCheck->nErr++; + pCheck->mxErr = 0; + } +#ifndef SQLITE_OMIT_PROGRESS_CALLBACK + if( db->xProgress ){ + assert( db->nProgressOps>0 ); + pCheck->nStep++; + if( (pCheck->nStep % db->nProgressOps)==0 + && db->xProgress(db->pProgressArg) + ){ + pCheck->rc = SQLITE_INTERRUPT; + pCheck->nErr++; + pCheck->mxErr = 0; + } + } +#endif +} + +/* ** Append a message to the error message string. */ static void checkAppendMsg(
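
The two helpers added above let the integrity checker record OOM faults and honor the interrupt/progress machinery while it runs. A minimal sketch, not part of this diff and using only the public SQLite API, of how an application benefits: a long "PRAGMA integrity_check" can now be cut short from a progress handler.

    #include <stdio.h>
    #include "sqlite3.h"

    static int progress_cb(void *pArg){
      int *pBudget = (int*)pArg;
      return (--(*pBudget)) <= 0;   /* non-zero aborts the running statement */
    }

    static int bounded_integrity_check(sqlite3 *db){
      int budget = 1000;            /* give up after ~1000 callback invocations */
      char *zErr = 0;
      int rc;
      sqlite3_progress_handler(db, 500, progress_cb, &budget);
      rc = sqlite3_exec(db, "PRAGMA integrity_check;", 0, 0, &zErr);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "check stopped: %s\n", zErr ? zErr : sqlite3_errmsg(db));
      }
      sqlite3_free(zErr);
      sqlite3_progress_handler(db, 0, 0, 0);   /* remove the handler */
      return rc;
    }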

@@ -78231,6 +80219,7 @@ const char *zFormat,

... ){ va_list ap; + checkProgress(pCheck); if( !pCheck->mxErr ) return; pCheck->mxErr--; pCheck->nErr++;

@@ -78239,12 +80228,13 @@ if( pCheck->errMsg.nChar ){

sqlite3_str_append(&pCheck->errMsg, "\n", 1); } if( pCheck->zPfx ){ - sqlite3_str_appendf(&pCheck->errMsg, pCheck->zPfx, pCheck->v1, pCheck->v2); + sqlite3_str_appendf(&pCheck->errMsg, pCheck->zPfx, + pCheck->v0, pCheck->v1, pCheck->v2); } sqlite3_str_vappendf(&pCheck->errMsg, zFormat, ap); va_end(ap); if( pCheck->errMsg.accError==SQLITE_NOMEM ){ - pCheck->bOomFault = 1; + checkOom(pCheck); } } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */

@@ -78256,7 +80246,8 @@ ** Return non-zero if the bit in the IntegrityCk.aPgRef[] array that

** corresponds to page iPg is already set. */ static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){ - assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 ); + assert( pCheck->aPgRef!=0 ); + assert( iPg<=pCheck->nCkPage && sizeof(pCheck->aPgRef[0])==1 ); return (pCheck->aPgRef[iPg/8] & (1 << (iPg & 0x07))); }

@@ -78264,7 +80255,8 @@ /*

** Set the bit in the IntegrityCk.aPgRef[] array that corresponds to page iPg. */ static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){ - assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 ); + assert( pCheck->aPgRef!=0 ); + assert( iPg<=pCheck->nCkPage && sizeof(pCheck->aPgRef[0])==1 ); pCheck->aPgRef[iPg/8] |= (1 << (iPg & 0x07)); }

@@ -78278,15 +80270,14 @@ **

** Also check that the page number is in bounds. */ static int checkRef(IntegrityCk *pCheck, Pgno iPage){ - if( iPage>pCheck->nPage || iPage==0 ){ - checkAppendMsg(pCheck, "invalid page number %d", iPage); + if( iPage>pCheck->nCkPage || iPage==0 ){ + checkAppendMsg(pCheck, "invalid page number %u", iPage); return 1; } if( getPageReferenced(pCheck, iPage) ){ - checkAppendMsg(pCheck, "2nd reference to page %d", iPage); + checkAppendMsg(pCheck, "2nd reference to page %u", iPage); return 1; } - if( AtomicLoad(&pCheck->db->u1.isInterrupted) ) return 1; setPageReferenced(pCheck, iPage); return 0; }

@@ -78309,14 +80300,14 @@ Pgno iPtrmapParent;

rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent); if( rc!=SQLITE_OK ){ - if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->bOomFault = 1; - checkAppendMsg(pCheck, "Failed to read ptrmap key=%d", iChild); + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) checkOom(pCheck); + checkAppendMsg(pCheck, "Failed to read ptrmap key=%u", iChild); return; } if( ePtrmapType!=eType || iPtrmapParent!=iParent ){ checkAppendMsg(pCheck, - "Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)", + "Bad ptr map entry key=%u expected=(%u,%u) got=(%u,%u)", iChild, eType, iParent, ePtrmapType, iPtrmapParent); } }

@@ -78341,7 +80332,7 @@ unsigned char *pOvflData;

if( checkRef(pCheck, iPage) ) break; N--; if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage, 0) ){ - checkAppendMsg(pCheck, "failed to get page %d", iPage); + checkAppendMsg(pCheck, "failed to get page %u", iPage); break; } pOvflData = (unsigned char *)sqlite3PagerGetData(pOvflPage);

@@ -78354,7 +80345,7 @@ }

#endif if( n>pCheck->pBt->usableSize/4-2 ){ checkAppendMsg(pCheck, - "freelist leaf count too big on page %d", iPage); + "freelist leaf count too big on page %u", iPage); N--; }else{ for(i=0; i<(int)n; i++){

@@ -78386,7 +80377,7 @@ sqlite3PagerUnref(pOvflPage);

} if( N && nErrAtStart==pCheck->nErr ){ checkAppendMsg(pCheck, - "%s is %d but should be %d", + "%s is %u but should be %u", isFreeList ? "size" : "overflow list length", expected-N, expected); }

@@ -78416,7 +80407,9 @@ ** The upper 16 bits are the index of the first byte of a range and the

** lower 16 bits are the index of the last byte of that range. */ static void btreeHeapInsert(u32 *aHeap, u32 x){ - u32 j, i = ++aHeap[0]; + u32 j, i; + assert( aHeap!=0 ); + i = ++aHeap[0]; aHeap[i] = x; while( (j = i/2)>0 && aHeap[j]>aHeap[i] ){ x = aHeap[j];

@@ -78493,15 +80486,18 @@ u8 savedIsInit = 0;

/* Check that the page exists */ + checkProgress(pCheck); + if( pCheck->mxErr==0 ) goto end_of_check; pBt = pCheck->pBt; usableSize = pBt->usableSize; if( iPage==0 ) return 0; if( checkRef(pCheck, iPage) ) return 0; - pCheck->zPfx = "Page %u: "; + pCheck->zPfx = "Tree %u page %u: "; pCheck->v1 = iPage; if( (rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0 ){ checkAppendMsg(pCheck, "unable to get the page. error code=%d", rc); + if( rc==SQLITE_IOERR_NOMEM ) pCheck->rc = SQLITE_NOMEM; goto end_of_check; }

@@ -78524,7 +80520,7 @@ data = pPage->aData;

hdr = pPage->hdrOffset; /* Set up for cell analysis */ - pCheck->zPfx = "On tree page %u cell %d: "; + pCheck->zPfx = "Tree %u page %u cell %u: "; contentOffset = get2byteNotZero(&data[hdr+5]); assert( contentOffset<=usableSize ); /* Enforced by btreeInitPage() */

@@ -78544,7 +80540,7 @@ /* Analyze the right-child page of internal pages */

pgno = get4byte(&data[hdr+8]); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ - pCheck->zPfx = "On page %u at right child: "; + pCheck->zPfx = "Tree %u page %u right child: "; checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage); } #endif

@@ -78568,7 +80564,7 @@ assert( pCellIdx==&data[cellStart + i*2] );

pc = get2byteAligned(pCellIdx); pCellIdx -= 2; if( pc<contentOffset || pc>usableSize-4 ){ - checkAppendMsg(pCheck, "Offset %d out of range %d..%d", + checkAppendMsg(pCheck, "Offset %u out of range %u..%u", pc, contentOffset, usableSize-4); doCoverageCheck = 0; continue;

@@ -78700,7 +80696,7 @@ ** number of fragmented free bytes within the cell content area.

*/ if( heap[0]==0 && nFrag!=data[hdr+7] ){ checkAppendMsg(pCheck, - "Fragmentation of %d bytes reported as %d on page %u", + "Fragmentation of %u bytes reported as %u on page %u", nFrag, data[hdr+7], iPage); } }

@@ -78738,13 +80734,14 @@ ** since obviously it is not possible to know which pages are covered by

** the unverified btrees. Except, if aRoot[1] is 1, then the freelist ** checks are still performed. */ -SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck( +SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( sqlite3 *db, /* Database connection that is running the check */ Btree *p, /* The btree to be checked */ Pgno *aRoot, /* An array of root pages numbers for individual trees */ int nRoot, /* Number of entries in aRoot[] */ int mxErr, /* Stop reporting errors after this many */ - int *pnErr /* Write number of errors seen to this variable */ + int *pnErr, /* OUT: Write number of errors seen to this variable */ + char **pzOut /* OUT: Write the error message string here */ ){ Pgno i; IntegrityCk sCheck;

@@ -78767,42 +80764,36 @@ sqlite3BtreeEnter(p);

assert( p->inTrans>TRANS_NONE && pBt->inTransaction>TRANS_NONE ); VVA_ONLY( nRef = sqlite3PagerRefcount(pBt->pPager) ); assert( nRef>=0 ); + memset(&sCheck, 0, sizeof(sCheck)); sCheck.db = db; sCheck.pBt = pBt; sCheck.pPager = pBt->pPager; - sCheck.nPage = btreePagecount(sCheck.pBt); + sCheck.nCkPage = btreePagecount(sCheck.pBt); sCheck.mxErr = mxErr; - sCheck.nErr = 0; - sCheck.bOomFault = 0; - sCheck.zPfx = 0; - sCheck.v1 = 0; - sCheck.v2 = 0; - sCheck.aPgRef = 0; - sCheck.heap = 0; sqlite3StrAccumInit(&sCheck.errMsg, 0, zErr, sizeof(zErr), SQLITE_MAX_LENGTH); sCheck.errMsg.printfFlags = SQLITE_PRINTF_INTERNAL; - if( sCheck.nPage==0 ){ + if( sCheck.nCkPage==0 ){ goto integrity_ck_cleanup; } - sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1); + sCheck.aPgRef = sqlite3MallocZero((sCheck.nCkPage / 8)+ 1); if( !sCheck.aPgRef ){ - sCheck.bOomFault = 1; + checkOom(&sCheck); goto integrity_ck_cleanup; } sCheck.heap = (u32*)sqlite3PageMalloc( pBt->pageSize ); if( sCheck.heap==0 ){ - sCheck.bOomFault = 1; + checkOom(&sCheck); goto integrity_ck_cleanup; } i = PENDING_BYTE_PAGE(pBt); - if( i<=sCheck.nPage ) setPageReferenced(&sCheck, i); + if( i<=sCheck.nCkPage ) setPageReferenced(&sCheck, i); /* Check the integrity of the freelist */ if( bCkFreelist ){ - sCheck.zPfx = "Main freelist: "; + sCheck.zPfx = "Freelist: "; checkList(&sCheck, 1, get4byte(&pBt->pPage1->aData[32]), get4byte(&pBt->pPage1->aData[36])); sCheck.zPfx = 0;

@@ -78819,7 +80810,7 @@ for(i=0; (int)i<nRoot; i++) if( mx<aRoot[i] ) mx = aRoot[i];

mxInHdr = get4byte(&pBt->pPage1->aData[52]); if( mx!=mxInHdr ){ checkAppendMsg(&sCheck, - "max rootpage (%d) disagrees with header (%d)", + "max rootpage (%u) disagrees with header (%u)", mx, mxInHdr ); }

@@ -78840,6 +80831,7 @@ if( pBt->autoVacuum && aRoot[i]>1 && !bPartial ){

checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0); } #endif + sCheck.v0 = aRoot[i]; checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64); } pBt->db->flags = savedDbFlags;

@@ -78847,10 +80839,10 @@

/* Make sure every page in the file is referenced */ if( !bPartial ){ - for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){ + for(i=1; i<=sCheck.nCkPage && sCheck.mxErr; i++){ #ifdef SQLITE_OMIT_AUTOVACUUM if( getPageReferenced(&sCheck, i)==0 ){ - checkAppendMsg(&sCheck, "Page %d is never used", i); + checkAppendMsg(&sCheck, "Page %u: never used", i); } #else /* If the database supports auto-vacuum, make sure no tables contain

@@ -78858,11 +80850,11 @@ ** references to pointer-map pages.

*/ if( getPageReferenced(&sCheck, i)==0 && (PTRMAP_PAGENO(pBt, i)!=i || !pBt->autoVacuum) ){ - checkAppendMsg(&sCheck, "Page %d is never used", i); + checkAppendMsg(&sCheck, "Page %u: never used", i); } if( getPageReferenced(&sCheck, i)!=0 && (PTRMAP_PAGENO(pBt, i)==i && pBt->autoVacuum) ){ - checkAppendMsg(&sCheck, "Pointer map page %d is referenced", i); + checkAppendMsg(&sCheck, "Page %u: pointer map referenced", i); } #endif }

@@ -78873,16 +80865,17 @@ */

integrity_ck_cleanup: sqlite3PageFree(sCheck.heap); sqlite3_free(sCheck.aPgRef); - if( sCheck.bOomFault ){ + *pnErr = sCheck.nErr; + if( sCheck.nErr==0 ){ sqlite3_str_reset(&sCheck.errMsg); - sCheck.nErr++; + *pzOut = 0; + }else{ + *pzOut = sqlite3StrAccumFinish(&sCheck.errMsg); } - *pnErr = sCheck.nErr; - if( sCheck.nErr==0 ) sqlite3_str_reset(&sCheck.errMsg); /* Make sure this analysis did not leave any unref() pages. */ assert( nRef==sqlite3PagerRefcount(pBt->pPager) ); sqlite3BtreeLeave(p); - return sqlite3StrAccumFinish(&sCheck.errMsg); + return sCheck.rc; } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */

@@ -79147,6 +81140,17 @@ ** Return the size of the header added to each page by this module.

*/ SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage)); } +/* +** If no transaction is active and the database is not a temp-db, clear +** the in-memory pager cache. +*/ +SQLITE_PRIVATE void sqlite3BtreeClearCache(Btree *p){ + BtShared *pBt = p->pBt; + if( pBt->inTransaction==TRANS_NONE ){ + sqlite3PagerClearCache(pBt->pPager); + } +} + #if !defined(SQLITE_OMIT_SHARED_CACHE) /* ** Return true if the Btree passed as the only argument is sharable.

@@ -79412,13 +81416,7 @@ assert( p->bDestLocked );

assert( !isFatalError(p->rc) ); assert( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ); assert( zSrcData ); - - /* Catch the case where the destination is an in-memory database and the - ** page sizes of the source and destination differ. - */ - if( nSrcPgsz!=nDestPgsz && sqlite3PagerIsMemdb(pDestPager) ){ - rc = SQLITE_READONLY; - } + assert( nSrcPgsz==nDestPgsz || sqlite3PagerIsMemdb(pDestPager)==0 ); /* This loop runs once for each destination page spanned by the source ** page. For each iteration, variable iOff is set to the byte offset

@@ -79551,7 +81549,10 @@ ** and the page sizes are different between source and destination */

pgszSrc = sqlite3BtreeGetPageSize(p->pSrc); pgszDest = sqlite3BtreeGetPageSize(p->pDest); destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest)); - if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){ + if( SQLITE_OK==rc + && (destMode==PAGER_JOURNALMODE_WAL || sqlite3PagerIsMemdb(pDestPager)) + && pgszSrc!=pgszDest + ){ rc = SQLITE_READONLY; }
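
This hunk moves the incompatible-page-size case up front: a backup now fails immediately with SQLITE_READONLY when the destination is in WAL mode or an in-memory database and the two page sizes differ. A minimal sketch, not part of this diff, of the public backup API that the check applies to:

    #include "sqlite3.h"

    /* Copy the "main" database of pSrc into pDest using the online backup API. */
    static int copy_db(sqlite3 *pDest, sqlite3 *pSrc){
      int rc;
      sqlite3_backup *pBk = sqlite3_backup_init(pDest, "main", pSrc, "main");
      if( pBk==0 ) return sqlite3_errcode(pDest);
      do{
        rc = sqlite3_backup_step(pBk, 64);     /* copy up to 64 pages per step */
      }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
      sqlite3_backup_finish(pBk);
      return rc==SQLITE_DONE ? SQLITE_OK : rc;
    }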

@@ -80057,9 +82058,9 @@ ** https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96270 */

i64 x; assert( (p->flags&MEM_Int)*2==sizeof(x) ); memcpy(&x, (char*)&p->u, (p->flags&MEM_Int)*2); - sqlite3Int64ToText(x, zBuf); + p->n = sqlite3Int64ToText(x, zBuf); #else - sqlite3Int64ToText(p->u.i, zBuf); + p->n = sqlite3Int64ToText(p->u.i, zBuf); #endif }else{ sqlite3StrAccumInit(&acc, 0, zBuf, sz, 0);

@@ -80067,6 +82068,7 @@ sqlite3_str_appendf(&acc, "%!.15g",

(p->flags & MEM_IntReal)!=0 ? (double)p->u.i : p->u.r); assert( acc.zText==zBuf && acc.mxAlloc<=0 ); zBuf[acc.nChar] = 0; /* Fast version of sqlite3StrAccumFinish(&acc) */ + p->n = acc.nChar; } }

@@ -80094,10 +82096,12 @@ **

** This routine is for use inside of assert() statements only. */ SQLITE_PRIVATE int sqlite3VdbeMemValidStrRep(Mem *p){ + Mem tmp; char zBuf[100]; char *z; int i, j, incr; if( (p->flags & MEM_Str)==0 ) return 1; + if( p->db && p->db->mallocFailed ) return 1; if( p->flags & MEM_Term ){ /* Insure that the string is properly zero-terminated. Pay particular ** attention to the case where p->n is odd */

@@ -80110,7 +82114,8 @@ assert( p->enc==SQLITE_UTF8 || p->z[(p->n+1)&~1]==0 );

assert( p->enc==SQLITE_UTF8 || p->z[((p->n+1)&~1)+1]==0 ); } if( (p->flags & (MEM_Int|MEM_Real|MEM_IntReal))==0 ) return 1; - vdbeMemRenderNum(sizeof(zBuf), zBuf, p); + memcpy(&tmp, p, sizeof(tmp)); + vdbeMemRenderNum(sizeof(zBuf), zBuf, &tmp); z = p->z; i = j = 0; incr = 1;

@@ -80254,6 +82259,40 @@ return SQLITE_OK;

} /* +** If pMem is already a string, detect if it is a zero-terminated +** string, or make it into one if possible, and mark it as such. +** +** This is an optimization. Correct operation continues even if +** this routine is a no-op. +*/ +SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem *pMem){ + if( (pMem->flags & (MEM_Str|MEM_Term|MEM_Ephem|MEM_Static))!=MEM_Str ){ + /* pMem must be a string, and it cannot be an ephemeral or static string */ + return; + } + if( pMem->enc!=SQLITE_UTF8 ) return; + if( NEVER(pMem->z==0) ) return; + if( pMem->flags & MEM_Dyn ){ + if( pMem->xDel==sqlite3_free + && sqlite3_msize(pMem->z) >= (u64)(pMem->n+1) + ){ + pMem->z[pMem->n] = 0; + pMem->flags |= MEM_Term; + return; + } + if( pMem->xDel==sqlite3RCStrUnref ){ + /* Blindly assume that all RCStr objects are zero-terminated */ + pMem->flags |= MEM_Term; + return; + } + }else if( pMem->szMalloc >= pMem->n+1 ){ + pMem->z[pMem->n] = 0; + pMem->flags |= MEM_Term; + return; + } +} + +/* ** It is already known that pMem contains an unterminated string. ** Add the zero terminator. **

@@ -80379,7 +82418,7 @@ }

vdbeMemRenderNum(nByte, pMem->z, pMem); assert( pMem->z!=0 ); - pMem->n = sqlite3Strlen30NN(pMem->z); + assert( pMem->n==(int)sqlite3Strlen30NN(pMem->z) ); pMem->enc = SQLITE_UTF8; pMem->flags |= MEM_Str|MEM_Term; if( bForce ) pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal);

@@ -80515,36 +82554,6 @@ if( p->szMalloc ) vdbeMemClear(p);

} /* -** Convert a 64-bit IEEE double into a 64-bit signed integer. -** If the double is out of range of a 64-bit signed integer then -** return the closest available 64-bit signed integer. -*/ -static SQLITE_NOINLINE i64 doubleToInt64(double r){ -#ifdef SQLITE_OMIT_FLOATING_POINT - /* When floating-point is omitted, double and int64 are the same thing */ - return r; -#else - /* - ** Many compilers we encounter do not define constants for the - ** minimum and maximum 64-bit integers, or they define them - ** inconsistently. And many do not understand the "LL" notation. - ** So we define our own static constants here using nothing - ** larger than a 32-bit integer constant. - */ - static const i64 maxInt = LARGEST_INT64; - static const i64 minInt = SMALLEST_INT64; - - if( r<=(double)minInt ){ - return minInt; - }else if( r>=(double)maxInt ){ - return maxInt; - }else{ - return (i64)r; - } -#endif -} - -/* ** Return some kind of integer value which is the best we can do ** at representing the value that *pMem describes as an integer. ** If pMem is an integer, then the value is exact. If pMem is

@@ -80570,7 +82579,7 @@ if( flags & (MEM_Int|MEM_IntReal) ){

testcase( flags & MEM_IntReal ); return pMem->u.i; }else if( flags & MEM_Real ){ - return doubleToInt64(pMem->u.r); + return sqlite3RealToI64(pMem->u.r); }else if( (flags & (MEM_Str|MEM_Blob))!=0 && pMem->z!=0 ){ return memIntValue(pMem); }else{

@@ -80619,32 +82628,35 @@ return sqlite3VdbeRealValue(pMem)!=0.0;

} /* -** The MEM structure is already a MEM_Real. Try to also make it a -** MEM_Int if we can. +** The MEM structure is already a MEM_Real or MEM_IntReal. Try to +** make it a MEM_Int if we can. */ SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){ - i64 ix; assert( pMem!=0 ); - assert( pMem->flags & MEM_Real ); + assert( pMem->flags & (MEM_Real|MEM_IntReal) ); assert( !sqlite3VdbeMemIsRowSet(pMem) ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); - ix = doubleToInt64(pMem->u.r); + if( pMem->flags & MEM_IntReal ){ + MemSetTypeFlag(pMem, MEM_Int); + }else{ + i64 ix = sqlite3RealToI64(pMem->u.r); - /* Only mark the value as an integer if - ** - ** (1) the round-trip conversion real->int->real is a no-op, and - ** (2) The integer is neither the largest nor the smallest - ** possible integer (ticket #3922) - ** - ** The second and third terms in the following conditional enforces - ** the second condition under the assumption that addition overflow causes - ** values to wrap around. - */ - if( pMem->u.r==ix && ix>SMALLEST_INT64 && ix<LARGEST_INT64 ){ - pMem->u.i = ix; - MemSetTypeFlag(pMem, MEM_Int); + /* Only mark the value as an integer if + ** + ** (1) the round-trip conversion real->int->real is a no-op, and + ** (2) The integer is neither the largest nor the smallest + ** possible integer (ticket #3922) + ** + ** The second and third terms in the following conditional enforces + ** the second condition under the assumption that addition overflow causes + ** values to wrap around. + */ + if( pMem->u.r==ix && ix>SMALLEST_INT64 && ix<LARGEST_INT64 ){ + pMem->u.i = ix; + MemSetTypeFlag(pMem, MEM_Int); + } } }

@@ -80697,8 +82709,8 @@ ** a way that avoids 'outside the range of representable values' warnings

** from UBSAN. */ SQLITE_PRIVATE i64 sqlite3RealToI64(double r){ - if( r<=(double)SMALLEST_INT64 ) return SMALLEST_INT64; - if( r>=(double)LARGEST_INT64) return LARGEST_INT64; + if( r<-9223372036854774784.0 ) return SMALLEST_INT64; + if( r>+9223372036854774784.0 ) return LARGEST_INT64; return (i64)r; }
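
The literal bounds replace casts of SMALLEST_INT64/LARGEST_INT64: 9223372036854774784.0 is the largest IEEE-754 double strictly below 2^63, so any value outside that magnitude cannot be safely converted to a signed 64-bit integer, and comparing against these literals sidesteps the UBSAN warnings mentioned in the comment above. A standalone sketch of the same clamping idea, not SQLite's internal code:

    #include <stdint.h>
    #include <stdio.h>

    /* 9223372036854774784.0 is the largest IEEE double below 2^63, so any
    ** double beyond these bounds cannot round-trip through a signed 64-bit int. */
    static int64_t clamp_to_i64(double r){
      if( r < -9223372036854774784.0 ) return INT64_MIN;
      if( r > +9223372036854774784.0 ) return INT64_MAX;
      return (int64_t)r;
    }

    int main(void){
      printf("%lld\n", (long long)clamp_to_i64(1e19));    /* clamps to INT64_MAX */
      printf("%lld\n", (long long)clamp_to_i64(-1e19));   /* clamps to INT64_MIN */
      printf("%lld\n", (long long)clamp_to_i64(12345.9)); /* truncates to 12345 */
      return 0;
    }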

@@ -80769,6 +82781,7 @@ sqlite3VdbeMemRealify(pMem);

break; } default: { + int rc; assert( aff==SQLITE_AFF_TEXT ); assert( MEM_Str==(MEM_Blob>>3) ); pMem->flags |= (pMem->flags&MEM_Blob)>>3;

@@ -80776,7 +82789,9 @@ sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding);

assert( pMem->flags & MEM_Str || pMem->db->mallocFailed ); pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal|MEM_Blob|MEM_Zero); if( encoding!=SQLITE_UTF8 ) pMem->n &= ~1; - return sqlite3VdbeChangeEncoding(pMem, encoding); + rc = sqlite3VdbeChangeEncoding(pMem, encoding); + if( rc ) return rc; + sqlite3VdbeMemZeroTerminateIfAble(pMem); } } return SQLITE_OK;

@@ -81298,6 +83313,24 @@ if( pVal->flags&MEM_Null ){

return 0; } return valueToText(pVal, enc); +} + +/* Return true if sqlit3_value object pVal is a string or blob value +** that uses the destructor specified in the second argument. +** +** TODO: Maybe someday promote this interface into a published API so +** that third-party extensions can get access to it? +*/ +SQLITE_PRIVATE int sqlite3ValueIsOfClass(const sqlite3_value *pVal, void(*xFree)(void*)){ + if( ALWAYS(pVal!=0) + && ALWAYS((pVal->flags & (MEM_Str|MEM_Blob))!=0) + && (pVal->flags & MEM_Dyn)!=0 + && pVal->xDel==xFree + ){ + return 1; + }else{ + return 0; + } } /*

@@ -81367,6 +83400,7 @@ p->ppRec[0] = pRec;

} pRec->nField = p->iVal+1; + sqlite3VdbeMemSetNull(&pRec->aMem[p->iVal]); return &pRec->aMem[p->iVal]; } #else

@@ -81420,9 +83454,12 @@ pList = p->x.pList;

if( pList ) nVal = pList->nExpr; assert( !ExprHasProperty(p, EP_IntValue) ); pFunc = sqlite3FindFunction(db, p->u.zToken, nVal, enc, 0); +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + if( pFunc==0 ) return SQLITE_OK; +#endif assert( pFunc ); if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0 - || (pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL) + || (pFunc->funcFlags & (SQLITE_FUNC_NEEDCOLL|SQLITE_FUNC_RUNONLY))!=0 ){ return SQLITE_OK; }

@@ -81445,8 +83482,6 @@ rc = SQLITE_NOMEM_BKPT;

goto value_from_function_out; } - testcase( pCtx->pParse->rc==SQLITE_ERROR ); - testcase( pCtx->pParse->rc==SQLITE_OK ); memset(&ctx, 0, sizeof(ctx)); ctx.pOut = pVal; ctx.pFunc = pFunc;

@@ -81459,16 +83494,16 @@ }else{

sqlite3ValueApplyAffinity(pVal, aff, SQLITE_UTF8); assert( rc==SQLITE_OK ); rc = sqlite3VdbeChangeEncoding(pVal, enc); - if( rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal) ){ + if( NEVER(rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal)) ){ rc = SQLITE_TOOBIG; pCtx->pParse->nErr++; } } - pCtx->pParse->rc = rc; value_from_function_out: if( rc!=SQLITE_OK ){ pVal = 0; + pCtx->pParse->rc = rc; } if( apVal ){ for(i=0; i<nVal; i++){

@@ -81526,6 +83561,13 @@ aff = sqlite3AffinityType(pExpr->u.zToken,0);

rc = valueFromExpr(db, pExpr->pLeft, enc, aff, ppVal, pCtx); testcase( rc!=SQLITE_OK ); if( *ppVal ){ +#ifdef SQLITE_ENABLE_STAT4 + rc = ExpandBlob(*ppVal); +#else + /* zero-blobs only come from functions, not literal values. And + ** functions are only processed under STAT4 */ + assert( (ppVal[0][0].flags & MEM_Zero)==0 ); +#endif sqlite3VdbeMemCast(*ppVal, aff, enc); sqlite3ValueApplyAffinity(*ppVal, affinity, enc); }

@@ -81618,6 +83660,7 @@ pVal = valueNew(db, pCtx);

if( pVal ){ pVal->flags = MEM_Int; pVal->u.i = pExpr->u.zToken[4]==0; + sqlite3ValueApplyAffinity(pVal, affinity, enc); } }

@@ -82140,12 +84183,44 @@ ** sqlite3MisuseError(lineno)

** sqlite3CantopenError(lineno) */ static void test_addop_breakpoint(int pc, Op *pOp){ - static int n = 0; + static u64 n = 0; + (void)pc; + (void)pOp; n++; + if( n==LARGEST_UINT64 ) abort(); /* so that n is used, preventing a warning */ } #endif /* +** Slow paths for sqlite3VdbeAddOp3() and sqlite3VdbeAddOp4Int() for the +** unusual case when we need to increase the size of the Vdbe.aOp[] array +** before adding the new opcode. +*/ +static SQLITE_NOINLINE int growOp3(Vdbe *p, int op, int p1, int p2, int p3){ + assert( p->nOpAlloc<=p->nOp ); + if( growOpArray(p, 1) ) return 1; + assert( p->nOpAlloc>p->nOp ); + return sqlite3VdbeAddOp3(p, op, p1, p2, p3); +} +static SQLITE_NOINLINE int addOp4IntSlow( + Vdbe *p, /* Add the opcode to this VM */ + int op, /* The new opcode */ + int p1, /* The P1 operand */ + int p2, /* The P2 operand */ + int p3, /* The P3 operand */ + int p4 /* The P4 operand as an integer */ +){ + int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3); + if( p->db->mallocFailed==0 ){ + VdbeOp *pOp = &p->aOp[addr]; + pOp->p4type = P4_INT32; + pOp->p4.i = p4; + } + return addr; +} + + +/* ** Add a new instruction to the list of instructions current in the ** VDBE. Return the address of the new instruction. **

@@ -82155,17 +84230,16 @@ ** p Pointer to the VDBE

** ** op The opcode for this instruction ** -** p1, p2, p3 Operands -** -** Use the sqlite3VdbeResolveLabel() function to fix an address and -** the sqlite3VdbeChangeP4() function to change the value of the P4 -** operand. +** p1, p2, p3, p4 Operands */ -static SQLITE_NOINLINE int growOp3(Vdbe *p, int op, int p1, int p2, int p3){ - assert( p->nOpAlloc<=p->nOp ); - if( growOpArray(p, 1) ) return 1; - assert( p->nOpAlloc>p->nOp ); - return sqlite3VdbeAddOp3(p, op, p1, p2, p3); +SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){ + return sqlite3VdbeAddOp3(p, op, 0, 0, 0); +} +SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){ + return sqlite3VdbeAddOp3(p, op, p1, 0, 0); +} +SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){ + return sqlite3VdbeAddOp3(p, op, p1, p2, 0); } SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){ int i;

@@ -82188,8 +84262,15 @@ pOp->p2 = p2;

pOp->p3 = p3; pOp->p4.p = 0; pOp->p4type = P4_NOTUSED; + + /* Replicate this logic in sqlite3VdbeAddOp4Int() + ** vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */ #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS pOp->zComment = 0; +#endif +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE) + pOp->nExec = 0; + pOp->nCycle = 0; #endif #ifdef SQLITE_DEBUG if( p->db->flags & SQLITE_VdbeAddopTrace ){

@@ -82197,23 +84278,62 @@ sqlite3VdbePrintOp(0, i, &p->aOp[i]);

test_addop_breakpoint(i, &p->aOp[i]); } #endif -#ifdef VDBE_PROFILE - pOp->cycles = 0; - pOp->cnt = 0; +#ifdef SQLITE_VDBE_COVERAGE + pOp->iSrcLine = 0; +#endif + /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + ** Replicate in sqlite3VdbeAddOp4Int() */ + + return i; +} +SQLITE_PRIVATE int sqlite3VdbeAddOp4Int( + Vdbe *p, /* Add the opcode to this VM */ + int op, /* The new opcode */ + int p1, /* The P1 operand */ + int p2, /* The P2 operand */ + int p3, /* The P3 operand */ + int p4 /* The P4 operand as an integer */ +){ + int i; + VdbeOp *pOp; + + i = p->nOp; + if( p->nOpAlloc<=i ){ + return addOp4IntSlow(p, op, p1, p2, p3, p4); + } + p->nOp++; + pOp = &p->aOp[i]; + assert( pOp!=0 ); + pOp->opcode = (u8)op; + pOp->p5 = 0; + pOp->p1 = p1; + pOp->p2 = p2; + pOp->p3 = p3; + pOp->p4.i = p4; + pOp->p4type = P4_INT32; + + /* Replicate this logic in sqlite3VdbeAddOp3() + ** vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */ +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + pOp->zComment = 0; +#endif +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE) + pOp->nExec = 0; + pOp->nCycle = 0; +#endif +#ifdef SQLITE_DEBUG + if( p->db->flags & SQLITE_VdbeAddopTrace ){ + sqlite3VdbePrintOp(0, i, &p->aOp[i]); + test_addop_breakpoint(i, &p->aOp[i]); + } #endif #ifdef SQLITE_VDBE_COVERAGE pOp->iSrcLine = 0; #endif + /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + ** Replicate in sqlite3VdbeAddOp3() */ + return i; -} -SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){ - return sqlite3VdbeAddOp3(p, op, 0, 0, 0); -} -SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){ - return sqlite3VdbeAddOp3(p, op, p1, 0, 0); -} -SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){ - return sqlite3VdbeAddOp3(p, op, p1, p2, 0); } /* Generate code for an unconditional jump to instruction iDest

@@ -82368,11 +84488,12 @@ **

** If the bPush flag is true, then make this opcode the parent for ** subsequent Explains until sqlite3VdbeExplainPop() is called. */ -SQLITE_PRIVATE void sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt, ...){ -#ifndef SQLITE_DEBUG +SQLITE_PRIVATE int sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt, ...){ + int addr = 0; +#if !defined(SQLITE_DEBUG) /* Always include the OP_Explain opcodes if SQLITE_DEBUG is defined. ** But omit them (for performance) during production builds */ - if( pParse->explain==2 ) + if( pParse->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) #endif { char *zMsg;

@@ -82384,13 +84505,15 @@ zMsg = sqlite3VMPrintf(pParse->db, zFmt, ap);

va_end(ap); v = pParse->pVdbe; iThis = v->nOp; - sqlite3VdbeAddOp4(v, OP_Explain, iThis, pParse->addrExplain, 0, + addr = sqlite3VdbeAddOp4(v, OP_Explain, iThis, pParse->addrExplain, 0, zMsg, P4_DYNAMIC); sqlite3ExplainBreakpoint(bPush?"PUSH":"", sqlite3VdbeGetLastOp(v)->p4.z); if( bPush){ pParse->addrExplain = iThis; } + sqlite3VdbeScanStatus(v, iThis, -1, -1, 0, 0); } + return addr; } /*

@@ -82418,26 +84541,6 @@ for(j=0; j<p->db->nDb; j++) sqlite3VdbeUsesBtree(p, j);

sqlite3MayAbort(p->pParse); } -/* -** Add an opcode that includes the p4 value as an integer. -*/ -SQLITE_PRIVATE int sqlite3VdbeAddOp4Int( - Vdbe *p, /* Add the opcode to this VM */ - int op, /* The new opcode */ - int p1, /* The P1 operand */ - int p2, /* The P2 operand */ - int p3, /* The P3 operand */ - int p4 /* The P4 operand as an integer */ -){ - int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3); - if( p->db->mallocFailed==0 ){ - VdbeOp *pOp = &p->aOp[addr]; - pOp->p4type = P4_INT32; - pOp->p4.i = p4; - } - return addr; -} - /* Insert the end of a co-routine */ SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe *v, int regYield){

@@ -82498,6 +84601,9 @@ #ifdef SQLITE_DEBUG

int i; for(i=p->nLabelAlloc; i<nNewSize; i++) p->aLabel[i] = -1; #endif + if( nNewSize>=100 && (nNewSize/100)>(p->nLabelAlloc/100) ){ + sqlite3ProgressCheck(p); + } p->nLabelAlloc = nNewSize; p->aLabel[j] = v->nOp; }

@@ -82741,11 +84847,13 @@ int nMaxArgs = *pMaxFuncArgs;

Op *pOp; Parse *pParse = p->pParse; int *aLabel = pParse->aLabel; + + assert( pParse->db->mallocFailed==0 ); /* tag-20230419-1 */ p->readOnly = 1; p->bIsReader = 0; pOp = &p->aOp[p->nOp-1]; assert( p->aOp[0].opcode==OP_Init ); - while( 1 /* Loop termates when it reaches the OP_Init opcode */ ){ + while( 1 /* Loop terminates when it reaches the OP_Init opcode */ ){ /* Only JUMP opcodes and the short list of special opcodes in the switch ** below need to be considered. The mkopcodeh.tcl generator script groups ** all these opcodes together near the front of the opcode list. Skip

@@ -82800,6 +84908,7 @@ ** non-jump opcodes less than SQLITE_MX_JUMP_CODE are guaranteed to

** have non-negative values for P2. */ assert( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)!=0 ); assert( ADDR(pOp->p2)<-pParse->nLabel ); + assert( aLabel!=0 ); /* True because of tag-20230419-1 */ pOp->p2 = aLabel[ADDR(pOp->p2)]; } break;

@@ -82866,6 +84975,10 @@ if( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)!=0 ){

int iDest = pOp->p2; /* Jump destination */ if( iDest==0 ) continue; if( pOp->opcode==OP_Gosub ) continue; + if( pOp->p3==20230325 && pOp->opcode==OP_NotNull ){ + /* This is a deliberately taken illegal branch. tag-20230325-2 */ + continue; + } if( iDest<0 ){ int j = ADDR(iDest); assert( j>=0 );

@@ -83043,20 +85156,83 @@ int addrVisit, /* Address of rows visited counter */

LogEst nEst, /* Estimated number of output rows */ const char *zName /* Name of table or index being scanned */ ){ - sqlite3_int64 nByte = (p->nScan+1) * sizeof(ScanStatus); - ScanStatus *aNew; - aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte); - if( aNew ){ - ScanStatus *pNew = &aNew[p->nScan++]; - pNew->addrExplain = addrExplain; - pNew->addrLoop = addrLoop; - pNew->addrVisit = addrVisit; - pNew->nEst = nEst; - pNew->zName = sqlite3DbStrDup(p->db, zName); - p->aScan = aNew; + if( IS_STMT_SCANSTATUS(p->db) ){ + sqlite3_int64 nByte = (p->nScan+1) * sizeof(ScanStatus); + ScanStatus *aNew; + aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte); + if( aNew ){ + ScanStatus *pNew = &aNew[p->nScan++]; + memset(pNew, 0, sizeof(ScanStatus)); + pNew->addrExplain = addrExplain; + pNew->addrLoop = addrLoop; + pNew->addrVisit = addrVisit; + pNew->nEst = nEst; + pNew->zName = sqlite3DbStrDup(p->db, zName); + p->aScan = aNew; + } } } -#endif + +/* +** Add the range of instructions from addrStart to addrEnd (inclusive) to +** the set of those corresponding to the sqlite3_stmt_scanstatus() counters +** associated with the OP_Explain instruction at addrExplain. The +** sum of the sqlite3Hwtime() values for each of these instructions +** will be returned for SQLITE_SCANSTAT_NCYCLE requests. +*/ +SQLITE_PRIVATE void sqlite3VdbeScanStatusRange( + Vdbe *p, + int addrExplain, + int addrStart, + int addrEnd +){ + if( IS_STMT_SCANSTATUS(p->db) ){ + ScanStatus *pScan = 0; + int ii; + for(ii=p->nScan-1; ii>=0; ii--){ + pScan = &p->aScan[ii]; + if( pScan->addrExplain==addrExplain ) break; + pScan = 0; + } + if( pScan ){ + if( addrEnd<0 ) addrEnd = sqlite3VdbeCurrentAddr(p)-1; + for(ii=0; ii<ArraySize(pScan->aAddrRange); ii+=2){ + if( pScan->aAddrRange[ii]==0 ){ + pScan->aAddrRange[ii] = addrStart; + pScan->aAddrRange[ii+1] = addrEnd; + break; + } + } + } + } +} + +/* +** Set the addresses for the SQLITE_SCANSTAT_NLOOP and SQLITE_SCANSTAT_NROW +** counters for the query element associated with the OP_Explain at +** addrExplain. +*/ +SQLITE_PRIVATE void sqlite3VdbeScanStatusCounters( + Vdbe *p, + int addrExplain, + int addrLoop, + int addrVisit +){ + if( IS_STMT_SCANSTATUS(p->db) ){ + ScanStatus *pScan = 0; + int ii; + for(ii=p->nScan-1; ii>=0; ii--){ + pScan = &p->aScan[ii]; + if( pScan->addrExplain==addrExplain ) break; + pScan = 0; + } + if( pScan ){ + if( addrLoop>0 ) pScan->addrLoop = addrLoop; + if( addrVisit>0 ) pScan->addrVisit = addrVisit; + } + } +} +#endif /* defined(SQLITE_ENABLE_STMT_SCANSTATUS) */ /*
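
These new helpers feed the SQLITE_SCANSTAT_NCYCLE, NLOOP and NROW counters that builds with -DSQLITE_ENABLE_STMT_SCANSTATUS expose through sqlite3_stmt_scanstatus_v2(). A minimal sketch, not part of this diff, of reading them after a statement has run:

    #include <stdio.h>
    #include "sqlite3.h"

    /* Print the EXPLAIN-style description and cycle count of every query element. */
    static void dump_scanstatus(sqlite3_stmt *pStmt){
      int i;
      for(i=0; ; i++){
        const char *zDesc = 0;
        sqlite3_int64 nCycle = 0;
        if( sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_EXPLAIN,
                                       SQLITE_SCANSTAT_COMPLEX, (void*)&zDesc) ){
          break;                       /* non-zero return: no more query elements */
        }
        sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_NCYCLE,
                                   SQLITE_SCANSTAT_COMPLEX, (void*)&nCycle);
        printf("%-60s ncycle=%lld\n", zDesc ? zDesc : "?", (long long)nCycle);
      }
    }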

@@ -83135,7 +85311,7 @@

/* ** If the input FuncDef structure is ephemeral, then free it. If -** the FuncDef is not ephermal, then do nothing. +** the FuncDef is not ephemeral, then do nothing. */ static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){ assert( db!=0 );

@@ -83298,7 +85474,6 @@ if( bUndefine ) sqlite3VdbeChangeP5(pParse->pVdbe, 1);

} } #endif /* SQLITE_DEBUG */ - /* ** Change the value of the P4 operand for a specific instruction.

@@ -83386,7 +85561,7 @@ assert( n<=0 );

if( p->db->mallocFailed ){ freeP4(p->db, n, pP4); }else{ - assert( pP4!=0 ); + assert( pP4!=0 || n==P4_DYNAMIC ); assert( p->nOp>0 ); pOp = &p->aOp[p->nOp-1]; assert( pOp->p4type==P4_NOTUSED );

@@ -83480,7 +85655,7 @@ }

/* Return the most recently added opcode */ -VdbeOp * sqlite3VdbeGetLastOp(Vdbe *p){ +SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetLastOp(Vdbe *p){ return sqlite3VdbeGetOp(p, p->nOp - 1); }

@@ -84185,7 +86360,6 @@ ** the result, result columns may become dynamic if the user calls

** sqlite3_column_text16(), causing a translation to UTF-16 encoding. */ releaseMemArray(pMem, 8); - p->pResultSet = 0; if( p->rc==SQLITE_NOMEM ){ /* This happens if a malloc() inside a call to sqlite3_column_text() or

@@ -84221,7 +86395,7 @@ sqlite3VdbeMemSetInt64(pMem, pOp->p1);

sqlite3VdbeMemSetInt64(pMem+1, pOp->p2); sqlite3VdbeMemSetInt64(pMem+2, pOp->p3); sqlite3VdbeMemSetStr(pMem+3, zP4, -1, SQLITE_UTF8, sqlite3_free); - p->nResColumn = 4; + assert( p->nResColumn==4 ); }else{ sqlite3VdbeMemSetInt64(pMem+0, i); sqlite3VdbeMemSetStr(pMem+1, (char*)sqlite3OpcodeName(pOp->opcode),

@@ -84240,9 +86414,9 @@ #else

sqlite3VdbeMemSetNull(pMem+7); #endif sqlite3VdbeMemSetStr(pMem+5, zP4, -1, SQLITE_UTF8, sqlite3_free); - p->nResColumn = 8; + assert( p->nResColumn==8 ); } - p->pResultSet = pMem; + p->pResultRow = pMem; if( db->mallocFailed ){ p->rc = SQLITE_NOMEM; rc = SQLITE_ERROR;

@@ -84353,7 +86527,7 @@ ** Rewind the VDBE back to the beginning in preparation for

** running it. */ SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){ -#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) +#if defined(SQLITE_DEBUG) int i; #endif assert( p!=0 );

@@ -84382,8 +86556,8 @@ p->iStatement = 0;

p->nFkConstraint = 0; #ifdef VDBE_PROFILE for(i=0; i<p->nOp; i++){ - p->aOp[i].cnt = 0; - p->aOp[i].cycles = 0; + p->aOp[i].nExec = 0; + p->aOp[i].nCycle = 0; } #endif }

@@ -84454,26 +86628,9 @@

resolveP2Values(p, &nArg); p->usesStmtJournal = (u8)(pParse->isMultiWrite && pParse->mayAbort); if( pParse->explain ){ - static const char * const azColName[] = { - "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment", - "id", "parent", "notused", "detail" - }; - int iFirst, mx, i; if( nMem<10 ) nMem = 10; p->explain = pParse->explain; - if( pParse->explain==2 ){ - sqlite3VdbeSetNumCols(p, 4); - iFirst = 8; - mx = 12; - }else{ - sqlite3VdbeSetNumCols(p, 8); - iFirst = 0; - mx = 8; - } - for(i=iFirst; i<mx; i++){ - sqlite3VdbeSetColName(p, i-iFirst, COLNAME_NAME, - azColName[i], SQLITE_STATIC); - } + p->nResColumn = 12 - 4*p->explain; } p->expired = 0;

@@ -84492,9 +86649,6 @@ p->aMem = allocSpace(&x, 0, nMem*sizeof(Mem));

p->aVar = allocSpace(&x, 0, nVar*sizeof(Mem)); p->apArg = allocSpace(&x, 0, nArg*sizeof(Mem*)); p->apCsr = allocSpace(&x, 0, nCursor*sizeof(VdbeCursor*)); -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - p->anExec = allocSpace(&x, 0, p->nOp*sizeof(i64)); -#endif if( x.nNeeded ){ x.pSpace = p->pFree = sqlite3DbMallocRawNN(db, x.nNeeded); x.nFree = x.nNeeded;

@@ -84503,9 +86657,6 @@ p->aMem = allocSpace(&x, p->aMem, nMem*sizeof(Mem));

p->aVar = allocSpace(&x, p->aVar, nVar*sizeof(Mem)); p->apArg = allocSpace(&x, p->apArg, nArg*sizeof(Mem*)); p->apCsr = allocSpace(&x, p->apCsr, nCursor*sizeof(VdbeCursor*)); -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - p->anExec = allocSpace(&x, p->anExec, p->nOp*sizeof(i64)); -#endif } }

@@ -84520,9 +86671,6 @@ initMemArray(p->aVar, nVar, db, MEM_Null);

p->nMem = nMem; initMemArray(p->aMem, nMem, db, MEM_Undefined); memset(p->apCsr, 0, nCursor*sizeof(VdbeCursor*)); -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - memset(p->anExec, 0, p->nOp*sizeof(i64)); -#endif } sqlite3VdbeRewind(p); }

@@ -84534,7 +86682,23 @@ */

SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ if( pCx ) sqlite3VdbeFreeCursorNN(p,pCx); } +static SQLITE_NOINLINE void freeCursorWithCache(Vdbe *p, VdbeCursor *pCx){ + VdbeTxtBlbCache *pCache = pCx->pCache; + assert( pCx->colCache ); + pCx->colCache = 0; + pCx->pCache = 0; + if( pCache->pCValue ){ + sqlite3RCStrUnref(pCache->pCValue); + pCache->pCValue = 0; + } + sqlite3DbFree(p->db, pCache); + sqlite3VdbeFreeCursorNN(p, pCx); +} SQLITE_PRIVATE void sqlite3VdbeFreeCursorNN(Vdbe *p, VdbeCursor *pCx){ + if( pCx->colCache ){ + freeCursorWithCache(p, pCx); + return; + } switch( pCx->eCurType ){ case CURTYPE_SORTER: { sqlite3VdbeSorterClose(p->db, pCx);

@@ -84580,9 +86744,6 @@ */

SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *pFrame){ Vdbe *v = pFrame->v; closeCursorsInFrame(v); -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - v->anExec = pFrame->anExec; -#endif v->aOp = pFrame->aOp; v->nOp = pFrame->nOp; v->aMem = pFrame->aMem;

@@ -84638,12 +86799,12 @@ SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){

int n; sqlite3 *db = p->db; - if( p->nResColumn ){ - releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); + if( p->nResAlloc ){ + releaseMemArray(p->aColName, p->nResAlloc*COLNAME_N); sqlite3DbFree(db, p->aColName); } n = nResColumn*COLNAME_N; - p->nResColumn = (u16)nResColumn; + p->nResColumn = p->nResAlloc = (u16)nResColumn; p->aColName = (Mem*)sqlite3DbMallocRawNN(db, sizeof(Mem)*n ); if( p->aColName==0 ) return; initMemArray(p->aColName, n, db, MEM_Null);

@@ -84668,14 +86829,14 @@ void (*xDel)(void*) /* Memory management strategy for zName */

){ int rc; Mem *pColName; - assert( idx<p->nResColumn ); + assert( idx<p->nResAlloc ); assert( var<COLNAME_N ); if( p->db->mallocFailed ){ assert( !zName || xDel!=SQLITE_DYNAMIC ); return SQLITE_NOMEM_BKPT; } assert( p->aColName!=0 ); - pColName = &(p->aColName[idx+var*p->nResColumn]); + pColName = &(p->aColName[idx+var*p->nResAlloc]); rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, xDel); assert( rc!=0 || !zName || (pColName->flags&MEM_Term)!=0 ); return rc;

@@ -85188,6 +87349,7 @@ if( rc==SQLITE_BUSY && p->readOnly ){

sqlite3VdbeLeave(p); return SQLITE_BUSY; }else if( rc!=SQLITE_OK ){ + sqlite3SystemError(db, rc); p->rc = rc; sqlite3RollbackAll(db, SQLITE_OK); p->nChange = 0;

@@ -85197,6 +87359,8 @@ db->nDeferredImmCons = 0;

db->flags &= ~(u64)SQLITE_DeferFKs; sqlite3CommitInternalChanges(db); } + }else if( p->rc==SQLITE_SCHEMA && db->nVdbeActive>1 ){ + p->nChange = 0; }else{ sqlite3RollbackAll(db, SQLITE_OK); p->nChange = 0;

@@ -85386,7 +87550,7 @@ if( p->zErrMsg ){

sqlite3DbFree(db, p->zErrMsg); p->zErrMsg = 0; } - p->pResultSet = 0; + p->pResultRow = 0; #ifdef SQLITE_DEBUG p->nWrite = 0; #endif

@@ -85414,10 +87578,12 @@ if( pc!='\n' ) fprintf(out, "\n");

} for(i=0; i<p->nOp; i++){ char zHdr[100]; + i64 cnt = p->aOp[i].nExec; + i64 cycles = p->aOp[i].nCycle; sqlite3_snprintf(sizeof(zHdr), zHdr, "%6u %12llu %8llu ", - p->aOp[i].cnt, - p->aOp[i].cycles, - p->aOp[i].cnt>0 ? p->aOp[i].cycles/p->aOp[i].cnt : 0 + cnt, + cycles, + cnt>0 ? cycles/cnt : 0 ); fprintf(out, "%s", zHdr); sqlite3VdbePrintOp(out, i, &p->aOp[i]);

@@ -85495,7 +87661,7 @@ SubProgram *pSub, *pNext;

assert( db!=0 ); assert( p->db==0 || p->db==db ); if( p->aColName ){ - releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); + releaseMemArray(p->aColName, p->nResAlloc*COLNAME_N); sqlite3DbNNFreeNN(db, p->aColName); } for(pSub=p->pProgram; pSub; pSub=pNext){

@@ -85513,9 +87679,9 @@ if( p->zSql ) sqlite3DbNNFreeNN(db, p->zSql);

#ifdef SQLITE_ENABLE_NORMALIZE sqlite3DbFree(db, p->zNormSql); { - DblquoteStr *pThis, *pNext; - for(pThis=p->pDblStr; pThis; pThis=pNext){ - pNext = pThis->pNextStr; + DblquoteStr *pThis, *pNxt; + for(pThis=p->pDblStr; pThis; pThis=pNxt){ + pNxt = pThis->pNextStr; sqlite3DbFree(db, pThis); } }

@@ -86095,6 +88261,15 @@ */

if( d1+(u64)serial_type1+2>(u64)nKey1 && d1+(u64)sqlite3VdbeSerialTypeLen(serial_type1)>(u64)nKey1 ){ + if( serial_type1>=1 + && serial_type1<=7 + && d1+(u64)sqlite3VdbeSerialTypeLen(serial_type1)<=(u64)nKey1+8 + && CORRUPT_DB + ){ + return 1; /* corrupt record not detected by + ** sqlite3VdbeRecordCompareWithSkip(). Return true + ** to avoid firing the assert() */ + } break; }

@@ -86263,20 +88438,33 @@ if( c ) return c;

return n1 - n2; } +/* The following two functions are used only within testcase() to prove +** test coverage. These functions do no exist for production builds. +** We must use separate SQLITE_NOINLINE functions here, since otherwise +** optimizer code movement causes gcov to become very confused. +*/ +#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_DEBUG) +static int SQLITE_NOINLINE doubleLt(double a, double b){ return a<b; } +static int SQLITE_NOINLINE doubleEq(double a, double b){ return a==b; } +#endif + /* ** Do a comparison between a 64-bit signed integer and a 64-bit floating-point ** number. Return negative, zero, or positive if the first (i64) is less than, ** equal to, or greater than the second (double). */ SQLITE_PRIVATE int sqlite3IntFloatCompare(i64 i, double r){ - if( sizeof(LONGDOUBLE_TYPE)>8 ){ + if( sqlite3IsNaN(r) ){ + /* SQLite considers NaN to be a NULL. And all integer values are greater + ** than NULL */ + return 1; + } + if( sqlite3Config.bUseLongDouble ){ LONGDOUBLE_TYPE x = (LONGDOUBLE_TYPE)i; testcase( x<r ); testcase( x>r ); testcase( x==r ); - if( x<r ) return -1; - if( x>r ) return +1; /*NO_TEST*/ /* work around bugs in gcov */ - return 0; /*NO_TEST*/ /* work around bugs in gcov */ + return (x<r) ? -1 : (x>r); }else{ i64 y; double s;

@@ -86286,9 +88474,10 @@ y = (i64)r;

if( i<y ) return -1; if( i>y ) return +1; s = (double)i; - if( s<r ) return -1; - if( s>r ) return +1; - return 0; + testcase( doubleLt(s,r) ); + testcase( doubleLt(r,s) ); + testcase( doubleEq(r,s) ); + return (s<r) ? -1 : (s>r); } }
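
The rewritten comparison treats NaN as NULL (smaller than any integer) and otherwise compares in integer space before falling back to doubles, because a plain cast loses precision above 2^53. A standalone illustration of that pitfall, assuming IEEE-754 doubles and not taken from SQLite itself:

    #include <stdint.h>
    #include <stdio.h>

    int main(void){
      int64_t i = (1LL<<53) + 1;         /* 9007199254740993 */
      double  r = (double)(1LL<<53);     /* 9007199254740992.0 */
      printf("naive: %d\n", (double)i > r);   /* 0: the cast rounds i down to r */
      printf("exact: %d\n", i > (int64_t)r);  /* 1: comparing in integer space */
      return 0;
    }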

@@ -86538,7 +88727,7 @@ if( serial_type>=10 ){

/* Serial types 12 or greater are strings and blobs (greater than ** numbers). Types 10 and 11 are currently "reserved for future ** use", so it doesn't really matter what the results of comparing - ** them to numberic values are. */ + ** them to numeric values are. */ rc = serial_type==10 ? -1 : +1; }else if( serial_type==0 ){ rc = -1;

@@ -87141,6 +89330,20 @@ return 0;

} return 1; } + +#if defined(SQLITE_ENABLE_CURSOR_HINTS) && defined(SQLITE_DEBUG) +/* +** This Walker callback is used to help verify that calls to +** sqlite3BtreeCursorHint() with opcode BTREE_HINT_RANGE have +** byte-code register values correctly initialized. +*/ +SQLITE_PRIVATE int sqlite3CursorRangeHintExprCheck(Walker *pWalker, Expr *pExpr){ + if( pExpr->op==TK_REGISTER ){ + assert( (pWalker->u.aMem[pExpr->iTable].flags & MEM_Undefined)==0 ); + } + return WRC_Continue; +} +#endif /* SQLITE_ENABLE_CURSOR_HINTS && SQLITE_DEBUG */ #ifndef SQLITE_OMIT_VIRTUALTABLE /*

@@ -87204,6 +89407,16 @@ i64 iKey2;

PreUpdate preupdate; const char *zTbl = pTab->zName; static const u8 fakeSortOrder = 0; +#ifdef SQLITE_DEBUG + int nRealCol; + if( pTab->tabFlags & TF_WithoutRowid ){ + nRealCol = sqlite3PrimaryKeyIndex(pTab)->nColumn; + }else if( pTab->tabFlags & TF_HasVirtual ){ + nRealCol = pTab->nNVCol; + }else{ + nRealCol = pTab->nCol; + } +#endif assert( db->pPreUpdate==0 ); memset(&preupdate, 0, sizeof(PreUpdate));

@@ -87220,8 +89433,8 @@ }

assert( pCsr!=0 ); assert( pCsr->eCurType==CURTYPE_BTREE ); - assert( pCsr->nField==pTab->nCol - || (pCsr->nField==pTab->nCol+1 && op==SQLITE_DELETE && iReg==-1) + assert( pCsr->nField==nRealCol + || (pCsr->nField==nRealCol+1 && op==SQLITE_DELETE && iReg==-1) ); preupdate.v = v;

@@ -87272,6 +89485,7 @@ ** VDBE.

*/ /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ +/* #include "opcodes.h" */ #ifndef SQLITE_OMIT_DEPRECATED /*

@@ -87527,7 +89741,7 @@ SQLITE_INTEGER, /* 0x1e (not possible) */

SQLITE_NULL, /* 0x1f (not possible) */ SQLITE_FLOAT, /* 0x20 INTREAL */ SQLITE_NULL, /* 0x21 (not possible) */ - SQLITE_TEXT, /* 0x22 INTREAL + TEXT */ + SQLITE_FLOAT, /* 0x22 INTREAL + TEXT */ SQLITE_NULL, /* 0x23 (not possible) */ SQLITE_FLOAT, /* 0x24 (not possible) */ SQLITE_NULL, /* 0x25 (not possible) */

@@ -87631,7 +89845,7 @@ ** result as a string or blob. Appropriate errors are set if the string/blob

** is too big or if an OOM occurs. ** ** The invokeValueDestructor(P,X) routine invokes destructor function X() -** on value P is not going to be used and need to be destroyed. +** on value P if P is not going to be used and need to be destroyed. */ static void setResultStrOrError( sqlite3_context *pCtx, /* Function context */

@@ -87661,7 +89875,7 @@ }

static int invokeValueDestructor( const void *p, /* Value to destroy */ void (*xDel)(void*), /* The destructor */ - sqlite3_context *pCtx /* Set a SQLITE_TOOBIG error if no NULL */ + sqlite3_context *pCtx /* Set a SQLITE_TOOBIG error if not NULL */ ){ assert( xDel!=SQLITE_DYNAMIC ); if( xDel==0 ){

@@ -87671,7 +89885,14 @@ /* noop */

}else{ xDel((void*)p); } +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx!=0 ){ + sqlite3_result_error_toobig(pCtx); + } +#else + assert( pCtx!=0 ); sqlite3_result_error_toobig(pCtx); +#endif return SQLITE_TOOBIG; } SQLITE_API void sqlite3_result_blob(

@@ -87680,6 +89901,12 @@ const void *z,

int n, void (*xDel)(void *) ){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 || n<0 ){ + invokeValueDestructor(z, xDel, pCtx); + return; + } +#endif assert( n>=0 ); assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, 0, xDel);

@@ -87690,8 +89917,14 @@ const void *z,

sqlite3_uint64 n, void (*xDel)(void *) ){ + assert( xDel!=SQLITE_DYNAMIC ); +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ){ + invokeValueDestructor(z, xDel, 0); + return; + } +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); - assert( xDel!=SQLITE_DYNAMIC ); if( n>0x7fffffff ){ (void)invokeValueDestructor(z, xDel, pCtx); }else{

@@ -87699,30 +89932,48 @@ setResultStrOrError(pCtx, z, (int)n, 0, xDel);

} } SQLITE_API void sqlite3_result_double(sqlite3_context *pCtx, double rVal){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetDouble(pCtx->pOut, rVal); } SQLITE_API void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_ERROR; sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT); } #ifndef SQLITE_OMIT_UTF16 SQLITE_API void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_ERROR; sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT); } #endif SQLITE_API void sqlite3_result_int(sqlite3_context *pCtx, int iVal){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetInt64(pCtx->pOut, (i64)iVal); } SQLITE_API void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetInt64(pCtx->pOut, iVal); } SQLITE_API void sqlite3_result_null(sqlite3_context *pCtx){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetNull(pCtx->pOut); }

@@ -87732,14 +89983,37 @@ void *pPtr,

const char *zPType, void (*xDestructor)(void*) ){ - Mem *pOut = pCtx->pOut; + Mem *pOut; +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ){ + invokeValueDestructor(pPtr, xDestructor, 0); + return; + } +#endif + pOut = pCtx->pOut; assert( sqlite3_mutex_held(pOut->db->mutex) ); sqlite3VdbeMemRelease(pOut); pOut->flags = MEM_Null; sqlite3VdbeMemSetPointer(pOut, pPtr, zPType, xDestructor); } SQLITE_API void sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubtype){ - Mem *pOut = pCtx->pOut; + Mem *pOut; +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif +#if defined(SQLITE_STRICT_SUBTYPE) && SQLITE_STRICT_SUBTYPE+0!=0 + if( pCtx->pFunc!=0 + && (pCtx->pFunc->funcFlags & SQLITE_RESULT_SUBTYPE)==0 + ){ + char zErr[200]; + sqlite3_snprintf(sizeof(zErr), zErr, + "misuse of sqlite3_result_subtype() by %s()", + pCtx->pFunc->zName); + sqlite3_result_error(pCtx, zErr, -1); + return; + } +#endif /* SQLITE_STRICT_SUBTYPE */ + pOut = pCtx->pOut; assert( sqlite3_mutex_held(pOut->db->mutex) ); pOut->eSubtype = eSubtype & 0xff; pOut->flags |= MEM_Subtype;
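
With SQLITE_STRICT_SUBTYPE defined, calling sqlite3_result_subtype() from a function that was not registered with the SQLITE_RESULT_SUBTYPE flag now reports a misuse error instead of silently setting the subtype. A hedged sketch of a conforming registration; the function name "tagged" and the subtype value are illustrative (74 is what the built-in JSON functions use):

    #include "sqlite3.h"

    static void tagged_func(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc; (void)argv;
      sqlite3_result_text(ctx, "payload", -1, SQLITE_STATIC);
      sqlite3_result_subtype(ctx, 74);   /* allowed: registered with the flag below */
    }

    static int register_tagged(sqlite3 *db){
      return sqlite3_create_function_v2(db, "tagged", 0,
                SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_RESULT_SUBTYPE,
                0, tagged_func, 0, 0, 0);
    }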

@@ -87750,6 +90024,12 @@ const char *z,

int n, void (*xDel)(void *) ){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ){ + invokeValueDestructor(z, xDel, 0); + return; + } +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel); }

@@ -87760,13 +90040,23 @@ sqlite3_uint64 n,

void (*xDel)(void *), unsigned char enc ){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ){ + invokeValueDestructor(z, xDel, 0); + return; + } +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); assert( xDel!=SQLITE_DYNAMIC ); - if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; + if( enc!=SQLITE_UTF8 ){ + if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; + n &= ~(u64)1; + } if( n>0x7fffffff ){ (void)invokeValueDestructor(z, xDel, pCtx); }else{ setResultStrOrError(pCtx, z, (int)n, enc, xDel); + sqlite3VdbeMemZeroTerminateIfAble(pCtx->pOut); } } #ifndef SQLITE_OMIT_UTF16

@@ -87777,7 +90067,7 @@ int n,

void (*xDel)(void *) ){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); - setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel); + setResultStrOrError(pCtx, z, n & ~(u64)1, SQLITE_UTF16NATIVE, xDel); } SQLITE_API void sqlite3_result_text16be( sqlite3_context *pCtx,
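
The UTF-16 result setters take a byte count, and per the changes above an odd count is now rounded down rather than leaving a half code unit. A small sketch, not part of this diff, of a UDF returning a native-order UTF-16 string; the 16-bit width of unsigned short is assumed:

    #include "sqlite3.h"

    static void hello16(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      static const unsigned short z[] = { 'h', 'i', 0 };
      (void)argc; (void)argv;
      sqlite3_result_text16(ctx, z, 4, SQLITE_STATIC);   /* 2 characters = 4 bytes */
    }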

@@ -87786,7 +90076,7 @@ int n,

void (*xDel)(void *) ){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); - setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel); + setResultStrOrError(pCtx, z, n & ~(u64)1, SQLITE_UTF16BE, xDel); } SQLITE_API void sqlite3_result_text16le( sqlite3_context *pCtx,

@@ -87795,11 +90085,20 @@ int n,

void (*xDel)(void *) ){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); - setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel); + setResultStrOrError(pCtx, z, n & ~(u64)1, SQLITE_UTF16LE, xDel); } #endif /* SQLITE_OMIT_UTF16 */ SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){ - Mem *pOut = pCtx->pOut; + Mem *pOut; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; + if( pValue==0 ){ + sqlite3_result_null(pCtx); + return; + } +#endif + pOut = pCtx->pOut; assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemCopy(pOut, pValue); sqlite3VdbeChangeEncoding(pOut, pCtx->enc);

@@ -87811,7 +90110,12 @@ SQLITE_API void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){

sqlite3_result_zeroblob64(pCtx, n>0 ? n : 0); } SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){ - Mem *pOut = pCtx->pOut; + Mem *pOut; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return SQLITE_MISUSE_BKPT; +#endif + pOut = pCtx->pOut; assert( sqlite3_mutex_held(pOut->db->mutex) ); if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){ sqlite3_result_error_toobig(pCtx);

@@ -87825,6 +90129,9 @@ return sqlite3VdbeMemSetZeroBlob(pCtx->pOut, (int)n);

#endif } SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif pCtx->isError = errCode ? errCode : -1; #ifdef SQLITE_DEBUG if( pCtx->pVdbe ) pCtx->pVdbe->rcApp = errCode;

@@ -87837,6 +90144,9 @@ }

/* Force an SQLITE_TOOBIG error. */ SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_TOOBIG; sqlite3VdbeMemSetStr(pCtx->pOut, "string or blob too big", -1,

@@ -87845,6 +90155,9 @@ }

/* An SQLITE_NOMEM error. */ SQLITE_API void sqlite3_result_error_nomem(sqlite3_context *pCtx){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetNull(pCtx->pOut); pCtx->isError = SQLITE_NOMEM_BKPT;

@@ -88006,7 +90319,7 @@ #ifndef SQLITE_OMIT_TRACE

/* If the statement completed successfully, invoke the profile callback */ checkProfileCallback(db, p); #endif - + p->pResultRow = 0; if( rc==SQLITE_DONE && db->autoCommit ){ assert( p->rc==SQLITE_OK ); p->rc = doWalCallbacks(db);

@@ -88097,7 +90410,11 @@ ** Extract the user data from a sqlite3_context structure and return a

** pointer to it. */ SQLITE_API void *sqlite3_user_data(sqlite3_context *p){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( p==0 ) return 0; +#else assert( p && p->pFunc ); +#endif return p->pFunc->pUserData; }

@@ -88112,7 +90429,11 @@ ** sqlite3_create_function16() routines that originally registered the

** application defined function. */ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( p==0 ) return 0; +#else assert( p && p->pOut ); +#endif return p->pOut->db; }

@@ -88131,11 +90452,26 @@ ** performance by substituting a NULL result, or some other light-weight

** value, as a signal to the xUpdate routine that the column is unchanged. */ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context *p){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( p==0 ) return 0; +#else assert( p ); +#endif return sqlite3_value_nochange(p->pOut); } /* +** The destructor function for a ValueList object. This needs to be +** a separate function, unknowable to the application, to ensure that +** calls to sqlite3_vtab_in_first()/sqlite3_vtab_in_next() that are not +** preceded by activation of IN processing via sqlite3_vtab_int() do not +** try to access a fake ValueList object inserted by a hostile extension. +*/ +SQLITE_PRIVATE void sqlite3VdbeValueListFree(void *pToDelete){ + sqlite3_free(pToDelete); +} + +/* ** Implementation of sqlite3_vtab_in_first() (if bNext==0) and ** sqlite3_vtab_in_next() (if bNext!=0). */

@@ -88148,9 +90484,16 @@ int rc;

ValueList *pRhs; *ppOut = 0; - if( pVal==0 ) return SQLITE_MISUSE; - pRhs = (ValueList*)sqlite3_value_pointer(pVal, "ValueList"); - if( pRhs==0 ) return SQLITE_MISUSE; + if( pVal==0 ) return SQLITE_MISUSE_BKPT; + if( (pVal->flags & MEM_Dyn)==0 || pVal->xDel!=sqlite3VdbeValueListFree ){ + return SQLITE_ERROR; + }else{ + assert( (pVal->flags&(MEM_TypeMask|MEM_Term|MEM_Subtype)) == + (MEM_Null|MEM_Term|MEM_Subtype) ); + assert( pVal->eSubtype=='p' ); + assert( pVal->u.zPType!=0 && strcmp(pVal->u.zPType,"ValueList")==0 ); + pRhs = (ValueList*)pVal->z; + } if( bNext ){ rc = sqlite3BtreeNext(pRhs->pCsr, 0); }else{
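
The hardened check refuses sqlite3_value objects that were not created by the IN-operator machinery itself, so a forged ValueList handed over by an extension is rejected. For context, a hedged sketch of the intended consumer inside a virtual table's xFilter, following the documented sqlite3_vtab_in_first()/sqlite3_vtab_in_next() iteration pattern; the helper name is made up and the value passed in must be the argv slot that xBestIndex marked as IN-handled:

    #include "sqlite3.h"

    static int filter_in_list(sqlite3_value *pList){
      sqlite3_value *pItem = 0;
      int rc;
      for(rc = sqlite3_vtab_in_first(pList, &pItem);
          rc==SQLITE_OK && pItem!=0;
          rc = sqlite3_vtab_in_next(pList, &pItem)){
        /* ... use one right-hand-side value of the IN operator ... */
      }
      return (rc==SQLITE_OK || rc==SQLITE_DONE) ? SQLITE_OK : rc;
    }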

@@ -88272,6 +90615,9 @@ */

SQLITE_API void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){ AuxData *pAuxData; +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return 0; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); #if SQLITE_ENABLE_STAT4 if( pCtx->pVdbe==0 ) return 0;

@@ -88304,8 +90650,12 @@ void *pAux,

void (*xDelete)(void*) ){ AuxData *pAuxData; - Vdbe *pVdbe = pCtx->pVdbe; + Vdbe *pVdbe; +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif + pVdbe= pCtx->pVdbe; assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); #ifdef SQLITE_ENABLE_STAT4 if( pVdbe==0 ) goto failed;

@@ -88361,7 +90711,8 @@ ** Return the number of columns in the result set for the statement pStmt.

*/ SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){ Vdbe *pVm = (Vdbe *)pStmt; - return pVm ? pVm->nResColumn : 0; + if( pVm==0 ) return 0; + return pVm->nResColumn; } /*

@@ -88370,7 +90721,7 @@ ** currently executing statement pStmt.

*/ SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt){ Vdbe *pVm = (Vdbe *)pStmt; - if( pVm==0 || pVm->pResultSet==0 ) return 0; + if( pVm==0 || pVm->pResultRow==0 ) return 0; return pVm->nResColumn; }
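The data-count change above follows the pResultSet-to-pResultRow rename. As a reminder of the difference between the two counters, a small sketch; the documents/id/data schema is an illustrative assumption:

#include <sqlite3.h>
#include <stdio.h>

static void printAllRows(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, "SELECT id, data FROM documents", -1, &pStmt, 0)==SQLITE_OK ){
    printf("%d result columns\n", sqlite3_column_count(pStmt)); /* known before stepping */
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      int i, nData = sqlite3_data_count(pStmt);   /* non-zero only while on a row */
      for(i=0; i<nData; i++){
        const char *z = (const char*)sqlite3_column_text(pStmt, i);
        printf("%s%s", i ? "|" : "", z ? z : "NULL");
      }
      printf("\n");
    }
  }
  sqlite3_finalize(pStmt);   /* harmless if pStmt is NULL */
}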

@@ -88425,8 +90776,8 @@ pVm = (Vdbe *)pStmt;

if( pVm==0 ) return (Mem*)columnNullValue(); assert( pVm->db ); sqlite3_mutex_enter(pVm->db->mutex); - if( pVm->pResultSet!=0 && i<pVm->nResColumn && i>=0 ){ - pOut = &pVm->pResultSet[i]; + if( pVm->pResultRow!=0 && i<pVm->nResColumn && i>=0 ){ + pOut = &pVm->pResultRow[i]; }else{ sqlite3Error(pVm->db, SQLITE_RANGE); pOut = (Mem*)columnNullValue();

@@ -88450,7 +90801,7 @@ ** sqlite3_column_text16()

** sqlite3_column_real() ** sqlite3_column_bytes() ** sqlite3_column_bytes16() -** sqiite3_column_blob() +** sqlite3_column_blob() */ static void columnMallocFailure(sqlite3_stmt *pStmt) {

@@ -88535,6 +90886,32 @@ return iType;

} /* +** Column names appropriate for EXPLAIN or EXPLAIN QUERY PLAN. +*/ +static const char * const azExplainColNames8[] = { + "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment", /* EXPLAIN */ + "id", "parent", "notused", "detail" /* EQP */ +}; +static const u16 azExplainColNames16data[] = { + /* 0 */ 'a', 'd', 'd', 'r', 0, + /* 5 */ 'o', 'p', 'c', 'o', 'd', 'e', 0, + /* 12 */ 'p', '1', 0, + /* 15 */ 'p', '2', 0, + /* 18 */ 'p', '3', 0, + /* 21 */ 'p', '4', 0, + /* 24 */ 'p', '5', 0, + /* 27 */ 'c', 'o', 'm', 'm', 'e', 'n', 't', 0, + /* 35 */ 'i', 'd', 0, + /* 38 */ 'p', 'a', 'r', 'e', 'n', 't', 0, + /* 45 */ 'n', 'o', 't', 'u', 's', 'e', 'd', 0, + /* 53 */ 'd', 'e', 't', 'a', 'i', 'l', 0 +}; +static const u8 iExplainColNames16[] = { + 0, 5, 12, 15, 18, 21, 24, 27, + 35, 38, 45, 53 +}; + +/* ** Convert the N-th element of pStmt->pColName[] into a string using ** xFunc() then return that string. If N is out of range, return 0. **

@@ -88566,15 +90943,29 @@ (void)SQLITE_MISUSE_BKPT;

return 0; } #endif + if( N<0 ) return 0; ret = 0; p = (Vdbe *)pStmt; db = p->db; assert( db!=0 ); - n = sqlite3_column_count(pStmt); - if( N<n && N>=0 ){ + sqlite3_mutex_enter(db->mutex); + + if( p->explain ){ + if( useType>0 ) goto columnName_end; + n = p->explain==1 ? 8 : 4; + if( N>=n ) goto columnName_end; + if( useUtf16 ){ + int i = iExplainColNames16[N + 8*p->explain - 8]; + ret = (void*)&azExplainColNames16data[i]; + }else{ + ret = (void*)azExplainColNames8[N + 8*p->explain - 8]; + } + goto columnName_end; + } + n = p->nResColumn; + if( N<n ){ + u8 prior_mallocFailed = db->mallocFailed; N += useType*n; - sqlite3_mutex_enter(db->mutex); - assert( db->mallocFailed==0 ); #ifndef SQLITE_OMIT_UTF16 if( useUtf16 ){ ret = sqlite3_value_text16((sqlite3_value*)&p->aColName[N]);

@@ -88586,12 +90977,14 @@ }

/* A malloc may have failed inside of the _text() call. If this ** is the case, clear the mallocFailed flag and return NULL. */ - if( db->mallocFailed ){ + assert( db->mallocFailed==0 || db->mallocFailed==1 ); + if( db->mallocFailed > prior_mallocFailed ){ sqlite3OomClear(db); ret = 0; } - sqlite3_mutex_leave(db->mutex); } +columnName_end: + sqlite3_mutex_leave(db->mutex); return ret; }

@@ -88684,7 +91077,7 @@ */

/* ** Unbind the value bound to variable i in virtual machine p. This is the ** the same as binding a NULL value to the column. If the "i" parameter is -** out of range, then SQLITE_RANGE is returned. Othewise SQLITE_OK. +** out of range, then SQLITE_RANGE is returned. Otherwise SQLITE_OK. ** ** A successful evaluation of this routine acquires the mutex on p. ** the mutex is released if any kind of error occurs.

@@ -88699,7 +91092,7 @@ return SQLITE_MISUSE_BKPT;

} || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + sqlite3Error(p->db, SQLITE_MISUSE_BKPT); sqlite3_mutex_leave(p->db->mutex); sqlite3_log(SQLITE_MISUSE, || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -88860,7 +91253,10 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

unsigned char enc ){ assert( xDel!=SQLITE_DYNAMIC ); - if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; + if( enc!=SQLITE_UTF8 ){ + if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; + nData &= ~(u16)1; + } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } #ifndef SQLITE_OMIT_UTF16

@@ -88868,10 +91264,10 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + int n, || sqlite3GetInt32(pToken->z, &iValue)==0 ){ ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + return bindText(pStmt, i, zData, n & ~(u64)1, xDel, SQLITE_UTF16NATIVE); } #endif /* SQLITE_OMIT_UTF16 */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -88925,6 +91321,9 @@ }

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ int rc; Vdbe *p = (Vdbe *)pStmt; +#ifdef SQLITE_ENABLE_API_ARMOR + if( p==0 ) return SQLITE_MISUSE_BKPT; +#endif || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -89046,6 +91445,42 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

} /* +** Set the explain mode for a statement. +*/ +SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode){ + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + int rc; +#ifdef SQLITE_ENABLE_API_ARMOR + if( pStmt==0 ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(v->db->mutex); + if( ((int)v->explain)==eMode ){ + rc = SQLITE_OK; + }else if( eMode<0 || eMode>2 ){ + rc = SQLITE_ERROR; + }else if( (v->prepFlags & SQLITE_PREPARE_SAVESQL)==0 ){ + rc = SQLITE_ERROR; + }else if( v->eVdbeState!=VDBE_READY_STATE ){ + rc = SQLITE_BUSY; + }else if( v->nMem>=10 && (eMode!=2 || v->haveEqpOps) ){ + /* No reprepare necessary */ + v->explain = eMode; + rc = SQLITE_OK; + }else{ + v->explain = eMode; + rc = sqlite3Reprepare(v); + v->haveEqpOps = eMode==2; + } + if( v->explain ){ + v->nResColumn = 12 - 4*v->explain; + }else{ + v->nResColumn = v->nResAlloc; + } + sqlite3_mutex_leave(v->db->mutex); + return rc; +} + +/* || sqlite3GetInt32(pToken->z, &iValue)==0 ){ */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){
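sqlite3_stmt_explain() is a new public interface in this drop of the amalgamation: it flips an already-prepared statement into EXPLAIN (1) or EXPLAIN QUERY PLAN (2) mode, re-preparing if necessary, and the fixed column names it reports come from the azExplainColNames8 table added a few hunks earlier. A usage sketch, assuming the statement was prepared with sqlite3_prepare_v2() so that the SQL text was retained (the SQLITE_PREPARE_SAVESQL requirement visible above):

#include <sqlite3.h>
#include <stdio.h>

/* Dump the VDBE program of pStmt, then restore normal execution mode. */
static int dumpProgram(sqlite3_stmt *pStmt){
  int rc = sqlite3_stmt_explain(pStmt, 1);            /* 1 = EXPLAIN */
  if( rc!=SQLITE_OK ) return rc;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%4d %-14s p1=%-4d p2=%-4d p3=%-4d\n",
           sqlite3_column_int(pStmt, 0),                /* addr   */
           (const char*)sqlite3_column_text(pStmt, 1),  /* opcode */
           sqlite3_column_int(pStmt, 2),
           sqlite3_column_int(pStmt, 3),
           sqlite3_column_int(pStmt, 4));
  }
  sqlite3_reset(pStmt);                  /* statement must be back in the READY state */
  return sqlite3_stmt_explain(pStmt, 0); /* before switching modes again */
}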

@@ -89184,10 +91619,16 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + PreUpdate *p; || sqlite3GetInt32(pToken->z, &iValue)==0 ){ int rc = SQLITE_OK; +#ifdef SQLITE_ENABLE_API_ARMOR + if( db==0 || ppValue==0 ){ + return SQLITE_MISUSE_BKPT; + } +#endif + p = db->pPreUpdate; || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -89248,7 +91689,12 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + PreUpdate *p; +#ifdef SQLITE_ENABLE_API_ARMOR + p = db!=0 ? db->pPreUpdate : 0; +#else + p = db->pPreUpdate; +#endif || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */

@@ -89266,7 +91712,12 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + PreUpdate *p; +#ifdef SQLITE_ENABLE_API_ARMOR + p = db!=0 ? db->pPreUpdate : 0; +#else + p = db->pPreUpdate; +#endif || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */

@@ -89277,7 +91728,12 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + PreUpdate *p; +#ifdef SQLITE_ENABLE_API_ARMOR + p = db!=0 ? db->pPreUpdate : 0; +#else + p = db->pPreUpdate; +#endif || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } #endif

@@ -89288,10 +91744,16 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + PreUpdate *p; int rc = SQLITE_OK; || sqlite3GetInt32(pToken->z, &iValue)==0 ){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( db==0 || ppValue==0 ){ + return SQLITE_MISUSE_BKPT; + } +#endif + p = db->pPreUpdate; || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){
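The garbled hunks in this stretch appear to be the pre-update hook accessors (sqlite3_preupdate_old(), sqlite3_preupdate_count(), sqlite3_preupdate_depth(), sqlite3_preupdate_new()) gaining the same SQLITE_ENABLE_API_ARMOR checks on their db/ppValue arguments. For orientation, a sketch of that hook family, available only when the library is built with SQLITE_ENABLE_PREUPDATE_HOOK; the auditing comment is a placeholder:

#include <sqlite3.h>

static void onPreUpdate(void *pArg, sqlite3 *db, int op,
                        const char *zDb, const char *zTab,
                        sqlite3_int64 iKeyOld, sqlite3_int64 iKeyNew){
  (void)pArg; (void)zDb; (void)zTab; (void)iKeyOld; (void)iKeyNew;
  if( op==SQLITE_UPDATE || op==SQLITE_DELETE ){
    int i, n = sqlite3_preupdate_count(db);   /* columns in the target table */
    for(i=0; i<n; i++){
      sqlite3_value *pOld = 0;
      if( sqlite3_preupdate_old(db, i, &pOld)==SQLITE_OK ){
        /* inspect the about-to-be-replaced value here, e.g. for an audit log */
      }
    }
  }
}
/* Registered with: sqlite3_preupdate_hook(db, onPreUpdate, 0); */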

@@ -89362,23 +91824,78 @@ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS

/* || sqlite3GetInt32(pToken->z, &iValue)==0 ){ */ -SQLITE_API int sqlite3_stmt_scanstatus( +SQLITE_API int sqlite3_stmt_scanstatus_v2( || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + int iScan, /* Index of loop to report on */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + int flags, || sqlite3GetInt32(pToken->z, &iValue)==0 ){ ){ Vdbe *p = (Vdbe*)pStmt; - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + VdbeOp *aOp; + int nOp; + ScanStatus *pScan = 0; + int idx; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( p==0 || pOut==0 + || iScanStatusOp<SQLITE_SCANSTAT_NLOOP + || iScanStatusOp>SQLITE_SCANSTAT_NCYCLE ){ + return 1; + } +#endif + aOp = p->aOp; + nOp = p->nOp; + if( p->pFrame ){ + VdbeFrame *pFrame; + for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent); + aOp = pFrame->aOp; + nOp = pFrame->nOp; + } + + if( iScan<0 ){ + int ii; + if( iScanStatusOp==SQLITE_SCANSTAT_NCYCLE ){ + i64 res = 0; + for(ii=0; ii<nOp; ii++){ + res += aOp[ii].nCycle; + } + *(i64*)pOut = res; + return 0; + } + return 1; + } + if( flags & SQLITE_SCANSTAT_COMPLEX ){ + idx = iScan; + pScan = &p->aScan[idx]; + }else{ + /* If the COMPLEX flag is clear, then this function must ignore any + ** ScanStatus structures with ScanStatus.addrLoop set to 0. */ + for(idx=0; idx<p->nScan; idx++){ + pScan = &p->aScan[idx]; + if( pScan->zName ){ + iScan--; + if( iScan<0 ) break; + } + } + } + if( idx>=p->nScan ) return 1; + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + if( pScan->addrLoop>0 ){ + *(sqlite3_int64*)pOut = aOp[pScan->addrLoop].nExec; + }else{ + *(sqlite3_int64*)pOut = -1; + } break; } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + if( pScan->addrVisit>0 ){ + *(sqlite3_int64*)pOut = aOp[pScan->addrVisit].nExec; + }else{ + *(sqlite3_int64*)pOut = -1; + } break; } || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -89397,7 +91914,7 @@ break;

} || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + *(const char**)pOut = aOp[ pScan->addrExplain ].p4.z; }else{ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ }

@@ -89405,12 +91922,51 @@ break;

} || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + *(int*)pOut = aOp[ pScan->addrExplain ].p1; }else{ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } break; } + case SQLITE_SCANSTAT_PARENTID: { + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + *(int*)pOut = aOp[ pScan->addrExplain ].p2; + }else{ + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + } + break; + } + case SQLITE_SCANSTAT_NCYCLE: { + i64 res = 0; + if( pScan->aAddrRange[0]==0 ){ + res = -1; + }else{ + int ii; + for(ii=0; ii<ArraySize(pScan->aAddrRange); ii+=2){ + int iIns = pScan->aAddrRange[ii]; + int iEnd = pScan->aAddrRange[ii+1]; + if( iIns==0 ) break; + if( iIns>0 ){ + while( iIns<=iEnd ){ + res += aOp[iIns].nCycle; + iIns++; + } + }else{ + int iOp; + for(iOp=0; iOp<nOp; iOp++){ + Op *pOp = &aOp[iOp]; + if( pOp->p1!=iEnd ) continue; + if( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_NCYCLE)==0 ){ + continue; + } + res += aOp[iOp].nCycle; + } + } + } + } + *(i64*)pOut = res; + break; + } default: { return 1; }

@@ -89419,11 +91975,28 @@ return 0;

} /* + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ +*/ +SQLITE_API int sqlite3_stmt_scanstatus( + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + int iScan, /* Index of loop to report on */ + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ +){ + return sqlite3_stmt_scanstatus_v2(pStmt, iScan, iScanStatusOp, 0, pOut); +} + +/* || sqlite3GetInt32(pToken->z, &iValue)==0 ){ */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ Vdbe *p = (Vdbe*)pStmt; - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + int ii; + for(ii=0; p!=0 && ii<p->nOp; ii++){ + Op *pOp = &p->aOp[ii]; + pOp->nExec = 0; + pOp->nCycle = 0; + } } || sqlite3GetInt32(pToken->z, &iValue)==0 ){
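sqlite3_stmt_scanstatus() is now a thin wrapper over the new sqlite3_stmt_scanstatus_v2(), and sqlite3_stmt_scanstatus_reset() clears the per-opcode nExec/nCycle counters. A sketch of querying per-loop statistics after a statement has been run; it needs a build with SQLITE_ENABLE_STMT_SCANSTATUS, and the SQLITE_SCANSTAT_COMPLEX flag requests the full (EXPLAIN QUERY PLAN style) node set:

#include <sqlite3.h>
#include <stdio.h>

static void reportScans(sqlite3_stmt *pStmt){
  int i;
  for(i=0; ; i++){
    const char *zExplain = 0;
    sqlite3_int64 nLoop = 0, nVisit = 0, nCycle = 0;
    if( sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_EXPLAIN,
                                   SQLITE_SCANSTAT_COMPLEX, &zExplain) ) break;
    sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_NLOOP,
                               SQLITE_SCANSTAT_COMPLEX, &nLoop);
    sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_NVISIT,
                               SQLITE_SCANSTAT_COMPLEX, &nVisit);
    sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_NCYCLE,
                               SQLITE_SCANSTAT_COMPLEX, &nCycle);
    printf("%-40s nLoop=%lld nVisit=%lld nCycle=%lld\n",
           zExplain ? zExplain : "(loop)",
           (long long)nLoop, (long long)nVisit, (long long)nCycle);
  }
  sqlite3_stmt_scanstatus_reset(pStmt);
}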

@@ -89758,8 +92331,12 @@ ** sqlite3MisuseError(lineno)

** sqlite3CantopenError(lineno) */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - static int n = 0; + static u64 n = 0; + (void)pc; + (void)pOp; + (void)v; n++; + if( n==LARGEST_UINT64 ) abort(); /* So that n is used, preventing a warning */ } #endif

@@ -89997,6 +92574,10 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ ** +** SQLITE_AFF_FLEXNUM: +** If the value is text, then try to convert it into a number of +** some kind (integer or real) but do not make any other changes. +** || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ **

@@ -90011,11 +92592,11 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + || affinity==SQLITE_AFF_NUMERIC || affinity==SQLITE_AFF_FLEXNUM ); || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + if( (pRec->flags & (MEM_Real|MEM_IntReal))==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - }else{ + }else if( affinity<=SQLITE_AFF_REAL ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } }

@@ -90175,6 +92756,9 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + if( f & MEM_Term ){ + sqlite3_str_appendf(pStr, "(0-term)"); + } } } #endif

@@ -90243,17 +92827,6 @@ #else

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ #endif - -#ifdef VDBE_PROFILE - -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. -*/ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - -#endif - #ifndef NDEBUG /* || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -90313,13 +92886,102 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + /* All strings have the same hash and all blobs have the same hash, + ** though, at least, those hashes are different from each other and + ** from NULL. */ + h += 4093 + (p->flags & (MEM_Str|MEM_Blob)); } } return h; } + +/* +** For OP_Column, factor out the case where content is loaded from +** overflow pages, so that the code to implement this case is separate +** the common case where all content fits on the page. Factoring out +** the code reduces register pressure and helps the common case +** to run faster. +*/ +static SQLITE_NOINLINE int vdbeColumnFromOverflow( + VdbeCursor *pC, /* The BTree cursor from which we are reading */ + int iCol, /* The column to read */ + int t, /* The serial-type code for the column value */ + i64 iOffset, /* Offset to the start of the content value */ + u32 cacheStatus, /* Current Vdbe.cacheCtr value */ + u32 colCacheCtr, /* Current value of the column cache counter */ + Mem *pDest /* Store the value into this register. */ +){ + int rc; + sqlite3 *db = pDest->db; + int encoding = pDest->enc; + int len = sqlite3VdbeSerialTypeLen(t); + assert( pC->eCurType==CURTYPE_BTREE ); + if( len>db->aLimit[SQLITE_LIMIT_LENGTH] ) return SQLITE_TOOBIG; + if( len > 4000 && pC->pKeyInfo==0 ){ + /* Cache large column values that are on overflow pages using + ** an RCStr (reference counted string) so that if they are reloaded, + ** that do not have to be copied a second time. The overhead of + ** creating and managing the cache is such that this is only + ** profitable for larger TEXT and BLOB values. + ** + ** Only do this on table-btrees so that writes to index-btrees do not + ** need to clear the cache. This buys performance in the common case + ** in exchange for generality. + */ + VdbeTxtBlbCache *pCache; + char *pBuf; + if( pC->colCache==0 ){ + pC->pCache = sqlite3DbMallocZero(db, sizeof(VdbeTxtBlbCache) ); + if( pC->pCache==0 ) return SQLITE_NOMEM; + pC->colCache = 1; + } + pCache = pC->pCache; + if( pCache->pCValue==0 + || pCache->iCol!=iCol + || pCache->cacheStatus!=cacheStatus + || pCache->colCacheCtr!=colCacheCtr + || pCache->iOffset!=sqlite3BtreeOffset(pC->uc.pCursor) + ){ + if( pCache->pCValue ) sqlite3RCStrUnref(pCache->pCValue); + pBuf = pCache->pCValue = sqlite3RCStrNew( len+3 ); + if( pBuf==0 ) return SQLITE_NOMEM; + rc = sqlite3BtreePayload(pC->uc.pCursor, iOffset, len, pBuf); + if( rc ) return rc; + pBuf[len] = 0; + pBuf[len+1] = 0; + pBuf[len+2] = 0; + pCache->iCol = iCol; + pCache->cacheStatus = cacheStatus; + pCache->colCacheCtr = colCacheCtr; + pCache->iOffset = sqlite3BtreeOffset(pC->uc.pCursor); + }else{ + pBuf = pCache->pCValue; + } + assert( t>=12 ); + sqlite3RCStrRef(pBuf); + if( t&1 ){ + rc = sqlite3VdbeMemSetStr(pDest, pBuf, len, encoding, + sqlite3RCStrUnref); + pDest->flags |= MEM_Term; + }else{ + rc = sqlite3VdbeMemSetStr(pDest, pBuf, len, 0, + sqlite3RCStrUnref); + } + }else{ + rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, iOffset, len, pDest); + if( rc ) return rc; + sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest); + if( (t&1)!=0 && encoding==SQLITE_UTF8 ){ + pDest->z[len] = 0; + pDest->flags |= MEM_Term; + } + } + pDest->flags &= ~MEM_Ephem; + return rc; +} + + /* || sqlite3GetInt32(pToken->z, &iValue)==0 ){ */

@@ -90343,11 +93005,10 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ -#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ -#endif #ifdef SQLITE_DEBUG || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + u8 iCompareIsInit = 0; /* iCompare is initialized */ #endif || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -90363,13 +93024,17 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ -#ifdef VDBE_PROFILE - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + u32 colCacheCtr = 0; /* Column cache counter */ +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE) + u64 *pnCycle = 0; + int bStmtScanStatus = IS_STMT_SCANSTATUS(db)!=0; #endif || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + if( DbMaskNonZero(p->lockMask) ){ + sqlite3VdbeEnter(p); + } #ifndef SQLITE_OMIT_PROGRESS_CALLBACK || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -90390,7 +93055,6 @@ p->rc = SQLITE_OK;

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - p->pResultSet = 0; || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -90427,12 +93091,18 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

assert( rc==SQLITE_OK ); || sqlite3GetInt32(pToken->z, &iValue)==0 ){ -#ifdef VDBE_PROFILE - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ -#endif || sqlite3GetInt32(pToken->z, &iValue)==0 ){ -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + +#if defined(VDBE_PROFILE) + pOp->nExec++; + pnCycle = &pOp->nCycle; + if( sqlite3NProfileCnt==0 ) *pnCycle -= sqlite3Hwtime(); +#elif defined(SQLITE_ENABLE_STMT_SCANSTATUS) + if( bStmtScanStatus ){ + pOp->nExec++; + pnCycle = &pOp->nCycle; + *pnCycle -= sqlite3Hwtime(); + } #endif || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -90494,7 +93164,7 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

} } #endif -#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) +#ifdef SQLITE_DEBUG || sqlite3GetInt32(pToken->z, &iValue)==0 ){ #endif

@@ -90550,8 +93220,8 @@ */

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ #ifdef SQLITE_DEBUG - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + /* In debugging mode, when the p5 flags is set on an OP_Goto, that + ** means we should really jump back to the preceding OP_ReleaseReg || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -90759,7 +93429,7 @@ **

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ ** || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ +** 1: NOT NULL constraint failed: P4 || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -90778,6 +93448,12 @@

#ifdef SQLITE_DEBUG || sqlite3GetInt32(pToken->z, &iValue)==0 ){ #endif + + /* A deliberately coded "OP_Halt SQLITE_INTERNAL * * * *" opcode indicates + ** something is wrong with the code generator. Raise an assertion in order + ** to bring this to the attention of fuzzers and other testing tools. */ + assert( pOp->p1!=SQLITE_INTERNAL ); + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -91219,10 +93895,10 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + p->pResultRow = &aMem[pOp->p1]; #ifdef SQLITE_DEBUG { - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + Mem *pMem = p->pResultRow; int i; || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -91752,7 +94428,6 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){


@@ -91760,18 +94435,21 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + VVA_ONLY( iCompareIsInit = 1; ) || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + VVA_ONLY( iCompareIsInit = 1; ) }else{ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + VVA_ONLY( iCompareIsInit = 1; ) } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -91803,6 +94481,7 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + VVA_ONLY( iCompareIsInit = 1; ) break; } }else{

@@ -91813,14 +94492,14 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + assert( flags3==pIn3->flags || CORRUPT_DB ); || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } } - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + }else if( affinity==SQLITE_AFF_TEXT && ((flags1 | flags3) & MEM_Str)!=0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -91828,7 +94507,7 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + if( NEVER(pIn1==pIn3) ) flags3 = flags1 | MEM_Str; } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -91859,6 +94538,7 @@ }else{

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + VVA_ONLY( iCompareIsInit = 1; ) || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -91880,10 +94560,10 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ ** - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ +** If the result of an OP_Eq comparison on the same two operands as +** the prior OP_Lt or OP_Gt would have been true, then jump to P2. If +** the result of an OP_Eq comparison on the two previous operands +** would have been false or NULL, then fall through. */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -91897,6 +94577,7 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ } #endif /* SQLITE_DEBUG */ + assert( iCompareIsInit ); || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -91991,6 +94672,7 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + VVA_ONLY( iCompareIsInit = 1; ) || sqlite3GetInt32(pToken->z, &iValue)==0 ){ if( (pKeyInfo->aSortFlags[i] & KEYINFO_ORDER_BIGNULL) || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -92008,13 +94690,14 @@

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ ** || sqlite3GetInt32(pToken->z, &iValue)==0 ){ - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ +** in the most recent OP_Compare instruction the P1 vector was less than, || sqlite3GetInt32(pToken->z, &iValue)==0 ){ ** || sqlite3GetInt32(pToken->z, &iValue)==0 ){ */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + assert( iCompareIsInit ); || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -92234,6 +94917,12 @@ ** P5 is a bitmask of data types. SQLITE_INTEGER is the least significant

** (0x01) bit. SQLITE_FLOAT is the 0x02 bit. SQLITE_TEXT is 0x04. ** SQLITE_BLOB is 0x08. SQLITE_NULL is 0x10. ** +** WARNING: This opcode does not reliably distinguish between NULL and REAL +** when P1>=0. If the database contains a NaN value, this opcode will think +** that the datatype is REAL when it should be NULL. When P1<0 and the value +** is already stored in register P3, then this opcode does reliably +** distinguish between NULL and REAL. The problem only arises then P1>=0. +** ** Take the jump to address P2 if and only if the datatype of the ** value determined by P1 and P3 corresponds to one of the bits in the ** P5 bitmask.

@@ -92304,7 +94993,7 @@

/* Opcode: ZeroOrNull P1 P2 P3 * * ** Synopsis: r[P2] = 0 OR NULL ** -** If all both registers P1 and P3 are NOT NULL, then store a zero in +** If both registers P1 and P3 are NOT NULL, then store a zero in ** register P2. If either registers P1 or P3 are NULL then put ** a NULL in register P2. */

@@ -92347,7 +95036,7 @@ case OP_IfNullRow: { /* jump */

VdbeCursor *pC; assert( pOp->p1>=0 && pOp->p1<p->nCursor ); pC = p->apCsr[pOp->p1]; - if( ALWAYS(pC) && pC->nullRow ){ + if( pC && pC->nullRow ){ sqlite3VdbeMemSetNull(aMem + pOp->p3); || sqlite3GetInt32(pToken->z, &iValue)==0 ){ }

@@ -92414,7 +95103,7 @@ ** OPFLAG_TYPEOFARG bit is set then the result will only be used by the

** typeof() function or the IS NULL or IS NOT NULL operators or the ** equivalent. In this case, all content loading can be omitted. */ -case OP_Column: { +case OP_Column: { /* ncycle */ u32 p2; /* column number to retrieve */ VdbeCursor *pC; /* The VDBE cursor */ BtCursor *pCrsr; /* The B-Tree cursor corresponding to pC */

@@ -92658,11 +95347,16 @@ pDest->z[len+1] = 0;

pDest->flags = aFlag[t&1]; } }else{ + u8 p5; pDest->enc = encoding; + assert( pDest->db==db ); /* This branch happens only when content is on overflow pages */ - if( ((pOp->p5 & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG))!=0 - && ((t>=12 && (t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0)) - || (len = sqlite3VdbeSerialTypeLen(t))==0 + if( ((p5 = (pOp->p5 & OPFLAG_BYTELENARG))!=0 + && (p5==OPFLAG_TYPEOFARG + || (t>=12 && ((t&1)==0 || p5==OPFLAG_BYTELENARG)) + ) + ) + || sqlite3VdbeSerialTypeLen(t)==0 ){ /* Content is irrelevant for ** 1. the typeof() function,
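The rewritten test above lets OP_Column skip materializing overflow content entirely when the caller only needs the value's type or byte length (the OPFLAG_BYTELENARG case). At the SQL level this is presumably what allows a query like the one below to report the size of large TEXT/BLOB values without reading their overflow pages; the notes/body schema is an illustrative assumption:

#include <sqlite3.h>
#include <stdio.h>

static void printBodySizes(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, "SELECT octet_length(body) FROM notes",
                         -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%lld bytes\n", (long long)sqlite3_column_int64(pStmt, 0));
    }
  }
  sqlite3_finalize(pStmt);
}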

@@ -92679,11 +95373,13 @@ ** and it begins with a bunch of zeros.

*/ sqlite3VdbeSerialGet((u8*)sqlite3CtypeMap, t, pDest); }else{ - if( len>db->aLimit[SQLITE_LIMIT_LENGTH] ) goto too_big; - rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, aOffset[p2], len, pDest); - if( rc!=SQLITE_OK ) goto abort_due_to_error; - sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest); - pDest->flags &= ~MEM_Ephem; + rc = vdbeColumnFromOverflow(pC, p2, t, aOffset[p2], + p->cacheCtr, colCacheCtr, pDest); + if( rc ){ + if( rc==SQLITE_NOMEM ) goto no_mem; + if( rc==SQLITE_TOOBIG ) goto too_big; + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + } } }

@@ -92763,7 +95459,7 @@ break;

} case COLTYPE_REAL: { testcase( (pIn1->flags & (MEM_Real|MEM_IntReal))==MEM_Real ); - testcase( (pIn1->flags & (MEM_Real|MEM_IntReal))==MEM_IntReal ); + assert( (pIn1->flags & MEM_IntReal)==0 ); if( pIn1->flags & MEM_Int ){ /* When applying REAL affinity, if the result is still an MEM_Int ** that will fit in 6 bytes, then change the type to MEM_IntReal

@@ -92842,7 +95538,7 @@ pIn1->flags &= ~MEM_Int;

}else{ pIn1->u.r = (double)pIn1->u.i; pIn1->flags |= MEM_Real; - pIn1->flags &= ~MEM_Int; + pIn1->flags &= ~(MEM_Int|MEM_Str); } } REGISTER_TRACE((int)(pIn1-aMem), pIn1);

@@ -93145,7 +95841,6 @@ if( serial_type==0 ){

/* NULL value. No change in zPayload */ }else{ u64 v; - u32 i; if( serial_type==7 ){ assert( sizeof(v)==sizeof(pRec->u.r) ); memcpy(&v, &pRec->u.r, sizeof(v));

@@ -93153,12 +95848,17 @@ swapMixedEndianFloat(v);

}else{ v = pRec->u.i; } - len = i = sqlite3SmallTypeSizes[serial_type]; - assert( i>0 ); - while( 1 /*exit-by-break*/ ){ - zPayload[--i] = (u8)(v&0xFF); - if( i==0 ) break; - v >>= 8; + len = sqlite3SmallTypeSizes[serial_type]; + assert( len>=1 && len<=8 && len!=5 && len!=7 ); + switch( len ){ + default: zPayload[7] = (u8)(v&0xff); v >>= 8; + zPayload[6] = (u8)(v&0xff); v >>= 8; + case 6: zPayload[5] = (u8)(v&0xff); v >>= 8; + zPayload[4] = (u8)(v&0xff); v >>= 8; + case 4: zPayload[3] = (u8)(v&0xff); v >>= 8; + case 3: zPayload[2] = (u8)(v&0xff); v >>= 8; + case 2: zPayload[1] = (u8)(v&0xff); v >>= 8; + case 1: zPayload[0] = (u8)(v&0xff); } zPayload += len; }

@@ -93766,7 +96466,7 @@ ** in read/write mode.

** ** See also: OP_OpenRead, OP_ReopenIdx */ -case OP_ReopenIdx: { +case OP_ReopenIdx: { /* ncycle */ int nField; KeyInfo *pKeyInfo; u32 p2;

@@ -93787,7 +96487,7 @@ goto open_cursor_set_hints;

} /* If the cursor is not currently open or is open on a different ** index, then fall through into OP_OpenRead to force a reopen */ -case OP_OpenRead: +case OP_OpenRead: /* ncycle */ case OP_OpenWrite: assert( pOp->opcode==OP_OpenWrite || pOp->p5==0 || pOp->p5==OPFLAG_SEEKEQ );

@@ -93881,7 +96581,7 @@ ** opcode. Only ephemeral cursors may be duplicated.

** ** Duplicate ephemeral cursors are used for self-joins of materialized views. */ -case OP_OpenDup: { +case OP_OpenDup: { /* ncycle */ VdbeCursor *pOrig; /* The original cursor to be duplicated */ VdbeCursor *pCx; /* The new cursor */

@@ -93943,8 +96643,8 @@ ** different name to distinguish its use. Tables created using

** by this opcode will be used for automatically created transient ** indices in joins. */ -case OP_OpenAutoindex: -case OP_OpenEphemeral: { +case OP_OpenAutoindex: /* ncycle */ +case OP_OpenEphemeral: { /* ncycle */ VdbeCursor *pCx; KeyInfo *pKeyInfo;

@@ -93967,7 +96667,7 @@ aMem[pOp->p3].z = "";

} pCx = p->apCsr[pOp->p1]; if( pCx && !pCx->noReuse && ALWAYS(pOp->p2<=pCx->nField) ){ - /* If the ephermeral table is already open and has no duplicates from + /* If the ephemeral table is already open and has no duplicates from ** OP_OpenDup, then erase all existing content so that the table is ** empty again, rather than creating a new table. */ assert( pCx->isEphemeral );

@@ -94102,7 +96802,7 @@ **

** Close a cursor previously opened as P1. If P1 is not ** currently open, this instruction is a no-op. */ -case OP_Close: { +case OP_Close: { /* ncycle */ assert( pOp->p1>=0 && pOp->p1<p->nCursor ); sqlite3VdbeFreeCursor(p, p->apCsr[pOp->p1]); p->apCsr[pOp->p1] = 0;

@@ -94219,10 +96919,10 @@ ** is an equality search.

** ** See also: Found, NotFound, SeekGt, SeekGe, SeekLt */ -case OP_SeekLT: /* jump, in3, group */ -case OP_SeekLE: /* jump, in3, group */ -case OP_SeekGE: /* jump, in3, group */ -case OP_SeekGT: { /* jump, in3, group */ +case OP_SeekLT: /* jump, in3, group, ncycle */ +case OP_SeekLE: /* jump, in3, group, ncycle */ +case OP_SeekGE: /* jump, in3, group, ncycle */ +case OP_SeekGT: { /* jump, in3, group, ncycle */ int res; /* Comparison result */ int oc; /* Opcode */ VdbeCursor *pC; /* The cursor to seek */

@@ -94458,7 +97158,7 @@ ** cursor ends up pointing at a valid row that is past the target

** row. If This.P5 is false (0) then a jump is made to SeekGE.P2. If ** This.P5 is true (non-zero) then a jump is made to This.P2. The P5==0 ** case occurs when there are no inequality constraints to the right of -** the IN constraing. The jump to SeekGE.P2 ends the loop. The P5!=0 case +** the IN constraint. The jump to SeekGE.P2 ends the loop. The P5!=0 case ** occurs when there are inequality constraints to the right of the IN ** operator. In that case, the This.P2 will point either directly to or ** to setup code prior to the OP_IdxGT or OP_IdxGE opcode that checks for

@@ -94466,7 +97166,7 @@ ** loop terminate.

** ** Possible outcomes from this opcode:<ol> ** -** <li> If the cursor is initally not pointed to any valid row, then +** <li> If the cursor is initially not pointed to any valid row, then ** fall through into the subsequent OP_SeekGE opcode. ** ** <li> If the cursor is left pointing to a row that is before the target

@@ -94488,7 +97188,7 @@ ** (indicating that the target row does not exist in the btree) then

** jump to SeekOP.P2 if This.P5==0 or to This.P2 if This.P5>0. ** </ol> */ -case OP_SeekScan: { +case OP_SeekScan: { /* ncycle */ VdbeCursor *pC; int res; int nStep;

@@ -94581,6 +97281,7 @@ VdbeBranchTaken(0,3);

break; } nStep--; + pC->cacheStatus = CACHE_STALE; rc = sqlite3BtreeNext(pC->uc.pCursor, 0); if( rc ){ if( rc==SQLITE_DONE ){

@@ -94610,7 +97311,7 @@ ** early, thus saving work. This is part of the IN-early-out optimization.

** ** P1 must be a valid b-tree cursor. */ -case OP_SeekHit: { +case OP_SeekHit: { /* ncycle */ VdbeCursor *pC; assert( pOp->p1>=0 && pOp->p1<p->nCursor ); pC = p->apCsr[pOp->p1];

@@ -94697,13 +97398,13 @@ ** In other words, the operands to this opcode are the same as the

** operands to OP_NotFound and OP_IdxGT. ** ** This opcode is an optimization attempt only. If this opcode always -** falls through, the correct answer is still obtained, but extra works +** falls through, the correct answer is still obtained, but extra work ** is performed. ** ** A value of N in the seekHit flag of cursor P1 means that there exists ** a key P3:N that will match some record in the index. We want to know ** if it is possible for a record P3:P4 to match some record in the -** index. If it is not possible, we can skips some work. So if seekHit +** index. If it is not possible, we can skip some work. So if seekHit ** is less than P4, attempt to find out if a match is possible by running ** OP_NotFound. **

@@ -94742,7 +97443,7 @@ ** opcodes do not work after this operation.

** ** See also: NotFound, Found, NotExists */ -case OP_IfNoHope: { /* jump, in3 */ +case OP_IfNoHope: { /* jump, in3, ncycle */ VdbeCursor *pC; assert( pOp->p1>=0 && pOp->p1<p->nCursor ); pC = p->apCsr[pOp->p1];

@@ -94756,9 +97457,9 @@ if( pC->seekHit>=pOp->p4.i ) break;

/* Fall through into OP_NotFound */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } -case OP_NoConflict: /* jump, in3 */ -case OP_NotFound: /* jump, in3 */ -case OP_Found: { /* jump, in3 */ +case OP_NoConflict: /* jump, in3, ncycle */ +case OP_NotFound: /* jump, in3, ncycle */ +case OP_Found: { /* jump, in3, ncycle */ int alreadyExists; int ii; VdbeCursor *pC;

@@ -94888,7 +97589,7 @@ ** not work following this opcode.

** ** See also: Found, NotFound, NoConflict, SeekRowid */ -case OP_SeekRowid: { /* jump, in3 */ +case OP_SeekRowid: { /* jump, in3, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res;

@@ -94913,7 +97614,7 @@ goto notExistsWithKey;

} /* Fall through into OP_NotExists */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ -case OP_NotExists: /* jump, in3 */ +case OP_NotExists: /* jump, in3, ncycle */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ assert( (pIn3->flags & MEM_Int)!=0 || pOp->opcode==OP_SeekRowid ); assert( pOp->p1>=0 && pOp->p1<p->nCursor );

@@ -95193,8 +97894,11 @@ }

if( pOp->p5 & OPFLAG_ISNOOP ) break; #endif - if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; - if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = x.nKey; + assert( (pOp->p5 & OPFLAG_LASTROWID)==0 || (pOp->p5 & OPFLAG_NCHANGE)!=0 ); + if( pOp->p5 & OPFLAG_NCHANGE ){ + p->nChange++; + if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = x.nKey; + } assert( (pData->flags & (MEM_Blob|MEM_Str))!=0 || pData->n==0 ); x.pData = pData->z; x.nData = pData->n;

@@ -95205,12 +97909,14 @@ }else{

x.nZero = 0; } x.pKey = 0; + assert( BTREE_PREFORMAT==OPFLAG_PREFORMAT ); rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION|OPFLAG_PREFORMAT)), seekResult ); pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; + colCacheCtr++; /* Invoke the update-hook if required. */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -95264,13 +97970,18 @@ ** OPFLAG_SAVEPOSITION bit of P5 is clear, then the cursor will be

** left in an undefined state. ** ** If the OPFLAG_AUXDELETE bit is set on P5, that indicates that this -** delete one of several associated with deleting a table row and all its -** associated index entries. Exactly one of those deletes is the "primary" -** delete. The others are all on OPFLAG_FORDELETE cursors or else are -** marked with the AUXDELETE flag. +** delete is one of several associated with deleting a table row and +** all its associated index entries. Exactly one of those deletes is +** the "primary" delete. The others are all on OPFLAG_FORDELETE +** cursors or else are marked with the AUXDELETE flag. +** +** If the OPFLAG_NCHANGE (0x01) flag of P2 (NB: P2 not P5) is set, then +** the row change count is incremented (otherwise not). ** -** If the OPFLAG_NCHANGE flag of P2 (NB: P2 not P5) is set, then the row -** change count is incremented (otherwise not). +** If the OPFLAG_ISNOOP (0x40) flag of P2 (not P5!) is set, then the +** pre-update-hook for deletes is run, but the btree is otherwise unchanged. +** This happens when the OP_Delete is to be shortly followed by an OP_Insert +** with the same key, causing the btree entry to be overwritten. ** ** P1 must not be pseudo-table. It has to be a real table with ** multiple rows.

@@ -95371,6 +98082,7 @@ #endif

rc = sqlite3BtreeDelete(pC->uc.pCursor, pOp->p5); pC->cacheStatus = CACHE_STALE; + colCacheCtr++; pC->seekResult = 0; || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -95438,13 +98150,13 @@ **

** Write into register P2 the current sorter data for sorter cursor P1. ** Then clear the column header cache on cursor P3. ** -** This opcode is normally use to move a record out of the sorter and into +** This opcode is normally used to move a record out of the sorter and into ** a register that is the source for a pseudo-table cursor created using ** OpenPseudo. That pseudo-table cursor is the one that is identified by ** parameter P3. Clearing the P3 column cache as part of this opcode saves ** us from having to issue a separate NullRow instruction to clear that cache. */ -case OP_SorterData: { +case OP_SorterData: { /* ncycle */ VdbeCursor *pC; || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -95536,7 +98248,7 @@ ** P1 can be either an ordinary table or a virtual table. There used to

** be a separate OP_VRowid opcode for use with virtual tables, but this ** one opcode now works for both table types. */ -case OP_Rowid: { /* out2 */ +case OP_Rowid: { /* out2, ncycle */ VdbeCursor *pC; i64 v; sqlite3_vtab *pVtab;

@@ -95635,8 +98347,8 @@ ** This opcode leaves the cursor configured to move in reverse order,

** from the end toward the beginning. In other words, the cursor is ** configured to use Prev, not Next. */ -case OP_SeekEnd: -case OP_Last: { /* jump */ +case OP_SeekEnd: /* ncycle */ +case OP_Last: { /* jump, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res;

@@ -95719,8 +98431,8 @@ ** rewinding so that the global variable will be incremented and

** regression tests can determine whether or not the optimizer is ** correctly optimizing out sorts. */ -case OP_SorterSort: /* jump */ -case OP_Sort: { /* jump */ +case OP_SorterSort: /* jump ncycle */ +case OP_Sort: { /* jump ncycle */ #ifdef SQLITE_TEST sqlite3_sort_count++; sqlite3_search_count--;

@@ -95737,17 +98449,22 @@ ** If the table or index is empty, jump immediately to P2.

** If the table or index is not empty, fall through to the following ** instruction. ** +** If P2 is zero, that is an assertion that the P1 table is never +** empty and hence the jump will never be taken. +** ** This opcode leaves the cursor configured to move in forward order, ** from the beginning toward the end. In other words, the cursor is ** configured to use Next, not Prev. */ -case OP_Rewind: { /* jump */ +case OP_Rewind: { /* jump, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res; assert( pOp->p1>=0 && pOp->p1<p->nCursor ); assert( pOp->p5==0 ); + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( isSorter(pC)==(pOp->opcode==OP_SorterSort) );

@@ -95767,9 +98484,10 @@ pC->cacheStatus = CACHE_STALE;

} || sqlite3GetInt32(pToken->z, &iValue)==0 ){ pC->nullRow = (u8)res; - assert( pOp->p2>0 && pOp->p2<p->nOp ); - VdbeBranchTaken(res!=0,2); - if( res ) goto jump_to_p2; + if( pOp->p2>0 ){ + VdbeBranchTaken(res!=0,2); + if( res ) goto jump_to_p2; + } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ }

@@ -95835,7 +98553,7 @@ assert( isSorter(pC) );

rc = sqlite3VdbeSorterNext(db, pC); goto next_tail; -case OP_Prev: /* jump */ +case OP_Prev: /* jump, ncycle */ assert( pOp->p1>=0 && pOp->p1<p->nCursor ); assert( pOp->p5==0 || pOp->p5==SQLITE_STMTSTATUS_FULLSCAN_STEP

@@ -95850,7 +98568,7 @@ || pC->seekOp==OP_NullRow);

rc = sqlite3BtreePrevious(pC->uc.pCursor, pOp->p3); goto next_tail; -case OP_Next: /* jump */ +case OP_Next: /* jump, ncycle */ assert( pOp->p1>=0 && pOp->p1<p->nCursor ); assert( pOp->p5==0 || pOp->p5==SQLITE_STMTSTATUS_FULLSCAN_STEP

@@ -96042,8 +98760,8 @@ ** the rowid of the table entry to which this index entry points.

** ** See also: Rowid, MakeRecord. */ -case OP_DeferredSeek: -case OP_IdxRowid: { /* out2 */ +case OP_DeferredSeek: /* ncycle */ +case OP_IdxRowid: { /* out2, ncycle */ VdbeCursor *pC; /* The P1 index cursor */ VdbeCursor *pTabCur; /* The P2 table cursor (OP_DeferredSeek only) */ i64 rowid; /* Rowid that P1 current points to */

@@ -96105,8 +98823,8 @@ ** If cursor P1 was previously moved via OP_DeferredSeek, complete that

** seek operation now, without further delay. If the cursor seek has ** already occurred, this instruction is a no-op. */ -case OP_FinishSeek: { - VdbeCursor *pC; /* The P1 index cursor */ +case OP_FinishSeek: { /* ncycle */ + VdbeCursor *pC; /* The P1 index cursor */ assert( pOp->p1>=0 && pOp->p1<p->nCursor ); pC = p->apCsr[pOp->p1];

@@ -96161,10 +98879,10 @@ **

** If the P1 index entry is less than or equal to the key value then jump ** to P2. Otherwise fall through to the next instruction. */ -case OP_IdxLE: /* jump */ -case OP_IdxGT: /* jump */ -case OP_IdxLT: /* jump */ -case OP_IdxGE: { /* jump */ +case OP_IdxLE: /* jump, ncycle */ +case OP_IdxGT: /* jump, ncycle */ +case OP_IdxLT: /* jump, ncycle */ +case OP_IdxGE: { /* jump, ncycle */ VdbeCursor *pC; int res; UnpackedRecord r;

@@ -96241,7 +98959,7 @@ ** Delete an entire database table or index whose root page in the database

** file is given by P1. ** ** The table being destroyed is in the main database file if P3==0. If -** P3==1 then the table to be clear is in the auxiliary database file +** P3==1 then the table to be destroyed is in the auxiliary database file ** that is used to store tables create using CREATE TEMPORARY TABLE. ** ** If AUTOVACUUM is enabled then it is possible that another root page

@@ -96301,8 +99019,8 @@ ** Delete all contents of the database table or index whose root page

** in the database file is given by P1. But, unlike Destroy, do not ** remove the table or index from the database file. ** -** The table being clear is in the main database file if P2==0. If -** P2==1 then the table to be clear is in the auxiliary database file +** The table being cleared is in the main database file if P2==0. If +** P2==1 then the table to be cleared is in the auxiliary database file ** that is used to store tables create using CREATE TEMPORARY TABLE. ** ** If the P3 value is non-zero, then the row change count is incremented

@@ -96388,13 +99106,41 @@

/* Opcode: SqlExec * * * P4 * ** ** Run the SQL statement or statements specified in the P4 string. +** Disable Auth and Trace callbacks while those statements are running if +** P1 is true. */ case OP_SqlExec: { + char *zErr; +#ifndef SQLITE_OMIT_AUTHORIZATION + sqlite3_xauth xAuth; +#endif + u8 mTrace; + sqlite3VdbeIncrWriteCounter(p, 0); db->nSqlExec++; - rc = sqlite3_exec(db, pOp->p4.z, 0, 0, 0); + zErr = 0; +#ifndef SQLITE_OMIT_AUTHORIZATION + xAuth = db->xAuth; +#endif + mTrace = db->mTrace; + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ +#ifndef SQLITE_OMIT_AUTHORIZATION + db->xAuth = 0; +#endif + db->mTrace = 0; + } + rc = sqlite3_exec(db, pOp->p4.z, 0, 0, &zErr); db->nSqlExec--; - || sqlite3GetInt32(pToken->z, &iValue)==0 ){ +#ifndef SQLITE_OMIT_AUTHORIZATION + db->xAuth = xAuth; +#endif + db->mTrace = mTrace; + if( zErr || rc ){ + sqlite3VdbeError(p, "%s", zErr); + sqlite3_free(zErr); + if( rc==SQLITE_NOMEM ) goto no_mem; + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + } || sqlite3GetInt32(pToken->z, &iValue)==0 ){ }
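OP_SqlExec now captures the error text from sqlite3_exec() (and, when P1 is set, suspends the authorizer and trace callbacks while the nested statements run). The same error-message pattern applies to application calls of sqlite3_exec(); a minimal sketch:

#include <sqlite3.h>
#include <stdio.h>

static int runBatch(sqlite3 *db, const char *zSql){
  char *zErr = 0;
  int rc = sqlite3_exec(db, zSql, 0, 0, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "SQL failed (%d): %s\n", rc, zErr ? zErr : sqlite3_errmsg(db));
  }
  sqlite3_free(zErr);        /* harmless when zErr is NULL */
  return rc;
}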

@@ -96575,12 +99321,13 @@ assert( (pnErr->flags & (MEM_Str|MEM_Blob))==0 );

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ assert( pOp->p5<db->nDb ); assert( DbMaskTest(p->btreeMask, pOp->p5) ); - z = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], nRoot, - (int)pnErr->u.i+1, &nErr); + rc = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], nRoot, + (int)pnErr->u.i+1, &nErr, &z); sqlite3VdbeMemSetNull(pIn1); if( nErr==0 ){ assert( z==0 ); - }else if( z==0 ){ + }else if( rc ){ + sqlite3_free(z); || sqlite3GetInt32(pToken->z, &iValue)==0 ){ }else{ pnErr->u.i -= nErr-1;

@@ -96785,9 +99532,6 @@ pFrame->nCursor = p->nCursor;

pFrame->aOp = p->aOp; pFrame->nOp = p->nOp; pFrame->token = pProgram->token; -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - pFrame->anExec = p->anExec; -#endif #ifdef SQLITE_DEBUG pFrame->iFrameMagic = SQLITE_FRAME_MAGIC; #endif

@@ -96824,9 +99568,6 @@ pFrame->aOnce = (u8*)&p->apCsr[pProgram->nCsr];

memset(pFrame->aOnce, 0, (pProgram->nOp + 7)/8); p->aOp = aOp = pProgram->aOp; p->nOp = pProgram->nOp; -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - p->anExec = 0; -#endif #ifdef SQLITE_DEBUG /* Verify that second and subsequent executions of the same trigger do not ** try to reuse register values from the first use. */

@@ -97133,7 +99874,7 @@

/* If this function is inside of a trigger, the register array in aMem[] ** might change from one evaluation to the next. The next block of code ** checks to see if the register array has changed, and if so it - ** reinitializes the relavant parts of the sqlite3_context object */ + ** reinitializes the relevant parts of the sqlite3_context object */ if( pCtx->pMem != pMem ){ pCtx->pMem = pMem; for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i];

@@ -97228,6 +99969,7 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

} sqlite3VdbeChangeEncoding(pMem, encoding); UPDATE_MAX_BLOBSIZE(pMem); + REGISTER_TRACE((int)(pMem-aMem), pMem); || sqlite3GetInt32(pToken->z, &iValue)==0 ){ }

@@ -97583,7 +100325,7 @@ ** P4 is a pointer to a virtual table object, an sqlite3_vtab structure.

** P1 is a cursor number. This opcode opens a cursor to the virtual ** table and stores that cursor in P1. */ -case OP_VOpen: { +case OP_VOpen: { /* ncycle */ VdbeCursor *pCur; sqlite3_vtab_cursor *pVCur; sqlite3_vtab *pVtab;

@@ -97620,6 +100362,53 @@ }

#endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE +/* Opcode: VCheck P1 P2 P3 P4 * +** +** P4 is a pointer to a Table object that is a virtual table in schema P1 +** that supports the xIntegrity() method. This opcode runs the xIntegrity() +** method for that virtual table, using P3 as the integer argument. If +** an error is reported back, the table name is prepended to the error +** message and that message is stored in P2. If no errors are seen, +** register P2 is set to NULL. +*/ +case OP_VCheck: { /* out2 */ + Table *pTab; + sqlite3_vtab *pVtab; + const sqlite3_module *pModule; + char *zErr = 0; + + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + sqlite3VdbeMemSetNull(pOut); /* Innocent until proven guilty */ + assert( pOp->p4type==P4_TABLE ); + pTab = pOp->p4.pTab; + assert( pTab!=0 ); + assert( IsVirtual(pTab) ); + if( pTab->u.vtab.p==0 ) break; + pVtab = pTab->u.vtab.p->pVtab; + assert( pVtab!=0 ); + pModule = pVtab->pModule; + assert( pModule!=0 ); + assert( pModule->iVersion>=4 ); + assert( pModule->xIntegrity!=0 ); + pTab->nTabRef++; + sqlite3VtabLock(pTab->u.vtab.p); + assert( pOp->p1>=0 && pOp->p1<db->nDb ); + rc = pModule->xIntegrity(pVtab, db->aDb[pOp->p1].zDbSName, pTab->zName, + pOp->p3, &zErr); + sqlite3VtabUnlock(pTab->u.vtab.p); + sqlite3DeleteTable(db, pTab); + if( rc ){ + sqlite3_free(zErr); + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + } + if( zErr ){ + sqlite3VdbeMemSetStr(pOut, zErr, -1, SQLITE_UTF8, sqlite3_free); + } + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ +} +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + +#ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VInitIn P1 P2 P3 * * ** Synopsis: r[P2]=ValueList(P1,P3) **
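OP_VCheck is the opcode through which integrity checking reaches a virtual table's new xIntegrity method (sqlite3_module iVersion 4). A sketch of the module side, matching the call shape visible above; MyVtab and its nRow field are made-up module state, and the consistency test is a stand-in:

#include <sqlite3.h>

typedef struct MyVtab MyVtab;
struct MyVtab {
  sqlite3_vtab base;          /* base class, must come first */
  sqlite3_int64 nRow;         /* example module state */
};

static int myVtabIntegrity(sqlite3_vtab *pVTab, const char *zSchema,
                           const char *zTabName, int mFlags, char **pzErr){
  MyVtab *p = (MyVtab*)pVTab;
  (void)zSchema; (void)mFlags;
  if( p->nRow<0 ){            /* stand-in consistency test */
    *pzErr = sqlite3_mprintf("malformed shadow data in table %s", zTabName);
  }
  return SQLITE_OK;           /* a non-OK return means the check itself could not run */
}
/* In the sqlite3_module definition: set iVersion to 4 and the xIntegrity
** member to myVtabIntegrity. */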

@@ -97630,7 +100419,7 @@ ** sqlite3_vtab_in_next() to extract all of the values stored in the P1

** cursor. Register P3 is used to hold the values returned by ** sqlite3_vtab_in_first() and sqlite3_vtab_in_next(). */ -case OP_VInitIn: { /* out2 */ +case OP_VInitIn: { /* out2, ncycle */ VdbeCursor *pC; /* The cursor containing the RHS values */ ValueList *pRhs; /* New ValueList object to put in reg[P2] */

@@ -97641,7 +100430,7 @@ pRhs->pCsr = pC->uc.pCursor;

pRhs->pOut = &aMem[pOp->p3]; || sqlite3GetInt32(pToken->z, &iValue)==0 ){ pOut->flags = MEM_Null; - sqlite3VdbeMemSetPointer(pOut, pRhs, "ValueList", sqlite3_free); + sqlite3VdbeMemSetPointer(pOut, pRhs, "ValueList", sqlite3VdbeValueListFree); || sqlite3GetInt32(pToken->z, &iValue)==0 ){ } #endif /* SQLITE_OMIT_VIRTUALTABLE */

@@ -97667,7 +100456,7 @@ ** xFilter as argv. Register P3+2 becomes argv[0] when passed to xFilter.

** ** A jump is made to P2 if the result set after filtering would be empty. */ -case OP_VFilter: { /* jump */ +case OP_VFilter: { /* jump, ncycle */ int nArg; int iQuery; const sqlite3_module *pModule;

@@ -97727,7 +100516,7 @@ ** table implementation. The P5 column might also contain other

** bits (OPFLAG_LENGTHARG or OPFLAG_TYPEOFARG) but those bits are ** unused by OP_VColumn. */ -case OP_VColumn: { +case OP_VColumn: { /* ncycle */ sqlite3_vtab *pVtab; const sqlite3_module *pModule; Mem *pDest;

@@ -97779,7 +100568,7 @@ ** Advance virtual table P1 to the next row in its result set and

** jump to instruction P2. Or, if the virtual table has reached ** the end of its result set, then fall through to the next instruction. */ -case OP_VNext: { /* jump */ +case OP_VNext: { /* jump, ncycle */ sqlite3_vtab *pVtab; const sqlite3_module *pModule; int res;

@@ -98010,7 +100799,7 @@ **

** This opcode works exactly like OP_Function. The only difference is in ** its name. This opcode is used in places where the function must be ** purely non-deterministic. Some built-in date/time functions can be -** either determinitic of non-deterministic, depending on their arguments. +** either deterministic of non-deterministic, depending on their arguments. ** When those function are used in a non-deterministic way, they will check ** to see if they were called using OP_PureFunc instead of OP_Function, and ** if they were, they throw an error.

@@ -98028,7 +100817,7 @@

/* If this function is inside of a trigger, the register array in aMem[] ** might change from one evaluation to the next. The next block of code ** checks to see if the register array has changed, and if so it - ** reinitializes the relavant parts of the sqlite3_context object */ + ** reinitializes the relevant parts of the sqlite3_context object */ || sqlite3GetInt32(pToken->z, &iValue)==0 ){ if( pCtx->pOut != pOut ){ pCtx->pVdbe = p;

@@ -98104,7 +100893,7 @@ }

printf("hash: %llu modulo %d -> %u\n", h, pIn1->n, (int)(h%pIn1->n)); } #endif - h %= pIn1->n; + h %= (pIn1->n*8); pIn1->z[h/8] |= 1<<(h&7); || sqlite3GetInt32(pToken->z, &iValue)==0 ){ }

@@ -98140,7 +100929,7 @@ }

printf("hash: %llu modulo %d -> %u\n", h, pIn1->n, (int)(h%pIn1->n)); } #endif - h %= pIn1->n; + h %= (pIn1->n*8); if( (pIn1->z[h/8] & (1<<(h&7)))==0 ){ VdbeBranchTaken(1, 2); p->aCounter[SQLITE_STMTSTATUS_FILTER_HIT]++;

@@ -98362,11 +101151,13 @@ ** restored.

*****************************************************************************/ } -#ifdef VDBE_PROFILE - { - u64 endTime = sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime(); - if( endTime>start ) pOrigOp->cycles += endTime - start; - pOrigOp->cnt++; +#if defined(VDBE_PROFILE) + *pnCycle += sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime(); + pnCycle = 0; +#elif defined(SQLITE_ENABLE_STMT_SCANSTATUS) + if( pnCycle ){ + *pnCycle += sqlite3Hwtime(); + pnCycle = 0; } #endif

@@ -98390,7 +101181,7 @@ registerTrace(pOrigOp->p3, &aMem[pOrigOp->p3]);

} if( opProperty==0xff ){ /* Never happens. This code exists to avoid a harmless linkage - ** warning aboud sqlite3VdbeRegisterDump() being defined but not + ** warning about sqlite3VdbeRegisterDump() being defined but not ** used. */ sqlite3VdbeRegisterDump(p); }

@@ -98443,6 +101234,18 @@ /* This is the only way out of this procedure. We have to

** release the mutexes on btrees that were acquired at the ** top. */ vdbe_return: +#if defined(VDBE_PROFILE) + if( pnCycle ){ + *pnCycle += sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime(); + pnCycle = 0; + } +#elif defined(SQLITE_ENABLE_STMT_SCANSTATUS) + if( pnCycle ){ + *pnCycle += sqlite3Hwtime(); + pnCycle = 0; + } +#endif + #ifndef SQLITE_OMIT_PROGRESS_CALLBACK || sqlite3GetInt32(pToken->z, &iValue)==0 ){ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

@@ -98454,7 +101257,9 @@ }

} #endif p->aCounter[SQLITE_STMTSTATUS_VM_STEP] += (int)nVmStep; - sqlite3VdbeLeave(p); + if( DbMaskNonZero(p->lockMask) ){ + sqlite3VdbeLeave(p); + } assert( rc!=SQLITE_OK || nExtraDelete==0 || sqlite3_strlike("DELETE%",p->zSql,0)!=0 );

@@ -98549,8 +101354,7 @@

/* Set the value of register r[1] in the SQL statement to integer iRow. ** This is done directly as a performance optimization */ - v->aMem[1].flags = MEM_Int; - v->aMem[1].u.i = iRow; + sqlite3VdbeMemSetInt64(&v->aMem[1], iRow); /* If the statement has been run before (and is paused at the OP_ResultRow) ** then back it up to the point where it does the OP_NotExists. This could

@@ -98633,7 +101437,7 @@ }

#endif *ppBlob = 0; #ifdef SQLITE_ENABLE_API_ARMOR - if( !sqlite3SafetyCheckOk(db) || zTable==0 ){ + if( !sqlite3SafetyCheckOk(db) || zTable==0 || zColumn==0 ){ return SQLITE_MISUSE_BKPT; } #endif
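This hunk extends the SQLITE_ENABLE_API_ARMOR check in sqlite3_blob_open() so that a NULL column name is rejected with SQLITE_MISUSE instead of being dereferenced later. A hedged usage sketch (the database, table and column names are made up):

    #include <sqlite3.h>
    #include <stdio.h>

    /* Open an incremental-blob handle; with API armor, passing a NULL
    ** zColumn now fails cleanly with SQLITE_MISUSE. */
    static int open_doc_blob(sqlite3 *db, sqlite3_int64 rowid,
                             sqlite3_blob **ppBlob){
      int rc = sqlite3_blob_open(db, "main", "documents", "data",
                                 rowid, 0, ppBlob);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "blob open: %s\n", sqlite3_errmsg(db));
      }
      return rc;
    }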

@@ -98832,7 +101636,7 @@ }else{

if( pBlob && pBlob->pStmt ) sqlite3VdbeFinalize((Vdbe *)pBlob->pStmt); sqlite3DbFree(db, pBlob); } - sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr); + sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : (char*)0), zErr); sqlite3DbFree(db, zErr); sqlite3ParseObjectReset(&sParse); rc = sqlite3ApiExit(db, rc);

@@ -98991,7 +101795,7 @@ char *zErr;

((Vdbe*)p->pStmt)->rc = SQLITE_OK; rc = blobSeekToRow(p, iRow, &zErr); if( rc!=SQLITE_OK ){ - sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr); + sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : (char*)0), zErr); sqlite3DbFree(db, zErr); } assert( rc!=SQLITE_SCHEMA );

@@ -99094,7 +101898,7 @@ **

** The threshold for the amount of main memory to use before flushing ** records to a PMA is roughly the same as the limit configured for the ** page-cache of the main database. Specifically, the threshold is set to -** the value returned by "PRAGMA main.page_size" multipled by +** the value returned by "PRAGMA main.page_size" multiplied by ** that returned by "PRAGMA main.cache_size", in bytes. ** ** If the sorter is running in single-threaded mode, then all PMAs generated

@@ -99117,7 +101921,7 @@ ** PMAs within temporary files on disk.

** ** If there are fewer than SORTER_MAX_MERGE_COUNT PMAs in total and the ** sorter is running in single-threaded mode, then these PMAs are merged -** incrementally as keys are retreived from the sorter by the VDBE. The +** incrementally as keys are retrieved from the sorter by the VDBE. The ** MergeEngine object, described in further detail below, performs this ** merge. **

@@ -99195,7 +101999,7 @@ */

struct SorterList { SorterRecord *pList; /* Linked list of records */ u8 *aMemory; /* If non-NULL, bulk memory to hold pList */ - int szPMA; /* Size of pList as PMA in bytes */ + i64 szPMA; /* Size of pList as PMA in bytes */ }; /*

@@ -99280,7 +102084,7 @@ ** and for multi-threaded operation there are two or more instances.

** ** Essentially, this structure contains all those fields of the VdbeSorter ** structure for which each thread requires a separate instance. For example, -** each thread requries its own UnpackedRecord object to unpack records in +** each thread requires its own UnpackedRecord object to unpack records in ** as part of comparison operations. ** ** Before a background thread is launched, variable bDone is set to 0. Then,

@@ -99304,10 +102108,10 @@ typedef int (*SorterCompare)(SortSubtask*,int*,const void*,int,const void*,int);

struct SortSubtask { SQLiteThread *pThread; /* Background thread, if any */ int bDone; /* Set if thread is finished but not joined */ + int nPMA; /* Number of PMAs currently in file */ VdbeSorter *pSorter; /* Sorter that owns this sub-task */ UnpackedRecord *pUnpacked; /* Space to unpack a record */ SorterList list; /* List for thread to write to a PMA */ - int nPMA; /* Number of PMAs currently in file */ SorterCompare xCompare; /* Compare function to use */ SorterFile file; /* Temp file for level-0 PMAs */ SorterFile file2; /* Space for other PMAs */

@@ -99352,7 +102156,7 @@ ** An instance of the following object is used to read records out of a

** PMA, in sorted order. The next key to be read is cached in nKey/aKey. ** aKey might point into aMap or into aBuffer. If neither of those locations ** contain a contiguous representation of the key, then aAlloc is allocated -** and the key is copied into aAlloc and aKey is made to poitn to aAlloc. +** and the key is copied into aAlloc and aKey is made to point to aAlloc. ** ** pFd==0 at EOF. */

@@ -100723,7 +103527,7 @@ ** round-robin between the first (pSorter->nTask-1) tasks. Except, if

** the background thread from a sub-tasks previous turn is still running, ** skip it. If the first (pSorter->nTask-1) sub-tasks are all still busy, ** fall back to using the final sub-task. The first (pSorter->nTask-1) - ** sub-tasks are prefered as they use background threads - the final + ** sub-tasks are preferred as they use background threads - the final ** sub-task uses the main thread. */ for(i=0; i<nWorker; i++){ int iTest = (pSorter->iPrev + i + 1) % nWorker;

@@ -100781,8 +103585,8 @@ VdbeSorter *pSorter;

int rc = SQLITE_OK; /* Return Code */ SorterRecord *pNew; /* New list element */ int bFlush; /* True to flush contents of memory to PMA */ - int nReq; /* Bytes of memory required */ - int nPMA; /* Bytes of PMA space required */ + i64 nReq; /* Bytes of memory required */ + i64 nPMA; /* Bytes of PMA space required */ int t; /* serial type of first record field */ assert( pCsr->eCurType==CURTYPE_SORTER );
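The sorter hunks above widen the PMA size accounting (szPMA, nReq, nPMA) from int to i64 so that accumulated totals beyond 2 GiB cannot wrap. A small sketch of the idea, using illustrative names only:

    #include <sqlite3.h>   /* for sqlite3_int64 */

    /* Sum per-record sizes into a 64-bit total, as the sorter now does
    ** for szPMA/nReq; a 32-bit accumulator would wrap once the running
    ** sum passes 2 GiB. */
    static sqlite3_int64 total_pma_size(const int *aSz, int nRec){
      sqlite3_int64 tot = 0;
      int i;
      for(i=0; i<nRec; i++) tot += aSz[i];
      return tot;
    }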

@@ -101207,7 +104011,7 @@ assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL );

rc = vdbeMergeEngineInit(pTask, pIncr->pMerger, eMode); - /* Set up the required files for pIncr. A multi-theaded IncrMerge object + /* Set up the required files for pIncr. A multi-threaded IncrMerge object ** requires two temp files to itself, whereas a single-threaded object ** only requires a region of pTask->file2. */ if( rc==SQLITE_OK ){

@@ -101847,6 +104651,8 @@ "p4 TEXT,"

"p5 INT," "comment TEXT," "subprog TEXT," + "nexec INT," + "ncycle INT," "stmt HIDDEN" ");",

@@ -101861,6 +104667,9 @@ "stmt HIDDEN"

");" }; + (void)argc; + (void)argv; + (void)pzErr; rc = sqlite3_declare_vtab(db, azSchema[isTabUsed]); if( rc==SQLITE_OK ){ pNew = sqlite3_malloc( sizeof(*pNew) );

@@ -102006,7 +104815,7 @@ }

} } } - i += 10; + i += 20; } } switch( i ){

@@ -102056,16 +104865,31 @@ sqlite3_result_text(ctx, "(FK)", 4, SQLITE_STATIC);

} break; } - case 10: /* tables_used.type */ + +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + case 9: /* nexec */ + sqlite3_result_int(ctx, pOp->nExec); + break; + case 10: /* ncycle */ + sqlite3_result_int(ctx, pOp->nCycle); + break; +#else + case 9: /* nexec */ + case 10: /* ncycle */ + sqlite3_result_int(ctx, 0); + break; +#endif + + case 20: /* tables_used.type */ sqlite3_result_text(ctx, pCur->zType, -1, SQLITE_STATIC); break; - case 11: /* tables_used.schema */ + case 21: /* tables_used.schema */ sqlite3_result_text(ctx, pCur->zSchema, -1, SQLITE_STATIC); break; - case 12: /* tables_used.name */ + case 22: /* tables_used.name */ sqlite3_result_text(ctx, pCur->zName, -1, SQLITE_STATIC); break; - case 13: /* tables_used.wr */ + case 23: /* tables_used.wr */ sqlite3_result_int(ctx, pOp->opcode==OP_OpenWrite); break; }

@@ -102096,6 +104920,7 @@ ){

bytecodevtab_cursor *pCur = (bytecodevtab_cursor *)pVtabCursor; bytecodevtab *pVTab = (bytecodevtab *)pVtabCursor->pVtab; int rc = SQLITE_OK; + (void)idxStr; bytecodevtabCursorClear(pCur); pCur->iRowid = 0;

@@ -102138,7 +104963,7 @@ int i;

int rc = SQLITE_CONSTRAINT; struct sqlite3_index_constraint *p; bytecodevtab *pVTab = (bytecodevtab*)tab; - int iBaseCol = pVTab->bTablesUsed ? 4 : 8; + int iBaseCol = pVTab->bTablesUsed ? 4 : 10; pIdxInfo->estimatedCost = (double)100; pIdxInfo->estimatedRows = 100; pIdxInfo->idxNum = 0;

@@ -102185,7 +105010,8 @@ /* xRename */ 0,

/* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ 0, - /* xShadowName */ 0 + /* xShadowName */ 0, + /* xIntegrity */ 0 };

@@ -102709,7 +105535,7 @@ **

** The return value from this routine is WRC_Abort to abandon the tree walk ** and WRC_Continue to continue. */ -static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){ +SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3WalkExprNN(Walker *pWalker, Expr *pExpr){ int rc; testcase( ExprHasProperty(pExpr, EP_TokenOnly) ); testcase( ExprHasProperty(pExpr, EP_Reduced) );

@@ -102718,7 +105544,9 @@ rc = pWalker->xExprCallback(pWalker, pExpr);

if( rc ) return rc & WRC_Abort; if( !ExprHasProperty(pExpr,(EP_TokenOnly|EP_Leaf)) ){ assert( pExpr->x.pList==0 || pExpr->pRight==0 ); - if( pExpr->pLeft && walkExpr(pWalker, pExpr->pLeft) ) return WRC_Abort; + if( pExpr->pLeft && sqlite3WalkExprNN(pWalker, pExpr->pLeft) ){ + return WRC_Abort; + } if( pExpr->pRight ){ assert( !ExprHasProperty(pExpr, EP_WinFunc) ); pExpr = pExpr->pRight;

@@ -102742,7 +105570,7 @@ }

return WRC_Continue; } SQLITE_PRIVATE int sqlite3WalkExpr(Walker *pWalker, Expr *pExpr){ - return pExpr ? walkExpr(pWalker,pExpr) : WRC_Continue; + return pExpr ? sqlite3WalkExprNN(pWalker,pExpr) : WRC_Continue; } /*

@@ -102868,7 +105696,7 @@ return WRC_Continue;

} /* Increase the walkerDepth when entering a subquery, and -** descrease when leaving the subquery. +** decrease when leaving the subquery. */ SQLITE_PRIVATE int sqlite3WalkerDepthIncrease(Walker *pWalker, Select *pSelect){ UNUSED_PARAMETER(pSelect);

@@ -103012,21 +105840,36 @@ }

} /* -** Subqueries stores the original database, table and column names for their -** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN". -** Check to see if the zSpan given to this routine matches the zDb, zTab, -** and zCol. If any of zDb, zTab, and zCol are NULL then those fields will -** match anything. +** Subqueries store the original database, table and column names for their +** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN", +** and mark the expression-list item by setting ExprList.a[].fg.eEName +** to ENAME_TAB. +** +** Check to see if the zSpan/eEName of the expression-list item passed to this +** routine matches the zDb, zTab, and zCol. If any of zDb, zTab, and zCol are +** NULL then those fields will match anything. Return true if there is a match, +** or false otherwise. +** +** SF_NestedFrom subqueries also store an entry for the implicit rowid (or +** _rowid_, or oid) column by setting ExprList.a[].fg.eEName to ENAME_ROWID, +** and setting zSpan to "DATABASE.TABLE.<rowid-alias>". This type of pItem +** argument matches if zCol is a rowid alias. If it is not NULL, (*pbRowid) +** is set to 1 if there is this kind of match. */ SQLITE_PRIVATE int sqlite3MatchEName( const struct ExprList_item *pItem, const char *zCol, const char *zTab, - const char *zDb + const char *zDb, + int *pbRowid ){ int n; const char *zSpan; - if( pItem->fg.eEName!=ENAME_TAB ) return 0; + int eEName = pItem->fg.eEName; + if( eEName!=ENAME_TAB && (eEName!=ENAME_ROWID || NEVER(pbRowid==0)) ){ + return 0; + } + assert( pbRowid==0 || *pbRowid==0 ); zSpan = pItem->zEName; for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){} if( zDb && (sqlite3StrNICmp(zSpan, zDb, n)!=0 || zDb[n]!=0) ){

@@ -103038,9 +105881,11 @@ if( zTab && (sqlite3StrNICmp(zSpan, zTab, n)!=0 || zTab[n]!=0) ){

return 0; } zSpan += n+1; - if( zCol && sqlite3StrICmp(zSpan, zCol)!=0 ){ - return 0; + if( zCol ){ + if( eEName==ENAME_TAB && sqlite3StrICmp(zSpan, zCol)!=0 ) return 0; + if( eEName==ENAME_ROWID && sqlite3IsRowid(zCol)==0 ) return 0; } + if( eEName==ENAME_ROWID ) *pbRowid = 1; return 1; }

@@ -103111,6 +105956,32 @@ }

} /* +** Return TRUE (non-zero) if zTab is a valid name for the schema table pTab. +*/ +static SQLITE_NOINLINE int isValidSchemaTableName( + const char *zTab, /* Name as it appears in the SQL */ + Table *pTab, /* The schema table we are trying to match */ + Schema *pSchema /* non-NULL if a database qualifier is present */ +){ + const char *zLegacy; + assert( pTab!=0 ); + assert( pTab->tnum==1 ); + if( sqlite3StrNICmp(zTab, "sqlite_", 7)!=0 ) return 0; + zLegacy = pTab->zName; + if( strcmp(zLegacy+7, &LEGACY_TEMP_SCHEMA_TABLE[7])==0 ){ + if( sqlite3StrICmp(zTab+7, &PREFERRED_TEMP_SCHEMA_TABLE[7])==0 ){ + return 1; + } + if( pSchema==0 ) return 0; + if( sqlite3StrICmp(zTab+7, &LEGACY_SCHEMA_TABLE[7])==0 ) return 1; + if( sqlite3StrICmp(zTab+7, &PREFERRED_SCHEMA_TABLE[7])==0 ) return 1; + }else{ + if( sqlite3StrICmp(zTab+7, &PREFERRED_SCHEMA_TABLE[7])==0 ) return 1; + } + return 0; +} + +/* ** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up ** that name in the set of source tables in pSrcList and make the pExpr ** expression node refer back to that source column. The following changes

@@ -103147,7 +106018,7 @@ Expr *pExpr /* Make this EXPR node point to the selected column */

){ int i, j; /* Loop counters */ int cnt = 0; /* Number of matching column names */ - int cntTab = 0; /* Number of matching table names */ + int cntTab = 0; /* Number of potential "rowid" matches */ int nSubquery = 0; /* How many levels of subquery */ sqlite3 *db = pParse->db; /* The database connection */ SrcItem *pItem; /* Use for looping over pSrcList items */

@@ -103224,54 +106095,66 @@ pEList = pItem->pSelect->pEList;

assert( pEList!=0 ); assert( pEList->nExpr==pTab->nCol ); for(j=0; j<pEList->nExpr; j++){ - if( !sqlite3MatchEName(&pEList->a[j], zCol, zTab, zDb) ){ + int bRowid = 0; /* True if possible rowid match */ + if( !sqlite3MatchEName(&pEList->a[j], zCol, zTab, zDb, &bRowid) ){ continue; } - if( cnt>0 ){ - if( pItem->fg.isUsing==0 - || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0 - ){ - /* Two or more tables have the same column name which is - ** not joined by USING. This is an error. Signal as much - ** by clearing pFJMatch and letting cnt go above 1. */ - sqlite3ExprListDelete(db, pFJMatch); - pFJMatch = 0; - }else - if( (pItem->fg.jointype & JT_RIGHT)==0 ){ - /* An INNER or LEFT JOIN. Use the left-most table */ - continue; - }else - if( (pItem->fg.jointype & JT_LEFT)==0 ){ - /* A RIGHT JOIN. Use the right-most table */ - cnt = 0; - sqlite3ExprListDelete(db, pFJMatch); - pFJMatch = 0; - }else{ - /* For a FULL JOIN, we must construct a coalesce() func */ - extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn); + if( bRowid==0 ){ + if( cnt>0 ){ + if( pItem->fg.isUsing==0 + || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0 + ){ + /* Two or more tables have the same column name which is + ** not joined by USING. This is an error. Signal as much + ** by clearing pFJMatch and letting cnt go above 1. */ + sqlite3ExprListDelete(db, pFJMatch); + pFJMatch = 0; + }else + if( (pItem->fg.jointype & JT_RIGHT)==0 ){ + /* An INNER or LEFT JOIN. Use the left-most table */ + continue; + }else + if( (pItem->fg.jointype & JT_LEFT)==0 ){ + /* A RIGHT JOIN. Use the right-most table */ + cnt = 0; + sqlite3ExprListDelete(db, pFJMatch); + pFJMatch = 0; + }else{ + /* For a FULL JOIN, we must construct a coalesce() func */ + extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn); + } } + cnt++; + hit = 1; + }else if( cnt>0 ){ + /* This is a potential rowid match, but there has already been + ** a real match found. So this can be ignored. */ + continue; } - cnt++; - cntTab = 2; + cntTab++; pMatch = pItem; pExpr->iColumn = j; pEList->a[j].fg.bUsed = 1; - hit = 1; + + /* rowid cannot be part of a USING clause - assert() this. */ + assert( bRowid==0 || pEList->a[j].fg.bUsingTerm==0 ); if( pEList->a[j].fg.bUsingTerm ) break; } if( hit || zTab==0 ) continue; } assert( zDb==0 || zTab!=0 ); if( zTab ){ - const char *zTabName; if( zDb ){ if( pTab->pSchema!=pSchema ) continue; if( pSchema==0 && strcmp(zDb,"*")!=0 ) continue; } - zTabName = pItem->zAlias ? pItem->zAlias : pTab->zName; - assert( zTabName!=0 ); - if( sqlite3StrICmp(zTabName, zTab)!=0 ){ - continue; + if( pItem->zAlias!=0 ){ + if( sqlite3StrICmp(zTab, pItem->zAlias)!=0 ){ + continue; + } + }else if( sqlite3StrICmp(zTab, pTab->zName)!=0 ){ + if( pTab->tnum!=1 ) continue; + if( !isValidSchemaTableName(zTab, pTab, pSchema) ) continue; } assert( ExprUseYTab(pExpr) ); if( IN_RENAME_OBJECT && pItem->zAlias ){

@@ -103347,7 +106230,8 @@ int op = pParse->eTriggerOp;

assert( op==TK_DELETE || op==TK_UPDATE || op==TK_INSERT ); if( pParse->bReturning ){ if( (pNC->ncFlags & NC_UBaseReg)!=0 - && (zTab==0 || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0) + && ALWAYS(zTab==0 + || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0) ){ pExpr->iTable = op!=TK_DELETE; pTab = pParse->pTriggerTab;

@@ -103414,6 +106298,7 @@ pExpr->y.pTab = pTab;

if( pParse->bReturning ){ eNewExprOp = TK_REGISTER; pExpr->op2 = TK_COLUMN; + pExpr->iColumn = iCol; pExpr->iTable = pNC->uNC.iBaseReg + (pTab->nCol+1)*pExpr->iTable + sqlite3TableColumnToStorage(pTab, iCol) + 1; }else{

@@ -103447,10 +106332,10 @@ && cntTab==1

&& pMatch && (pNC->ncFlags & (NC_IdxExpr|NC_GenCol))==0 && sqlite3IsRowid(zCol) - && ALWAYS(VisibleRowid(pMatch->pTab)) + && ALWAYS(VisibleRowid(pMatch->pTab) || pMatch->fg.isNestedFrom) ){ cnt = 1; - pExpr->iColumn = -1; + if( pMatch->fg.isNestedFrom==0 ) pExpr->iColumn = -1; pExpr->affExpr = SQLITE_AFF_INTEGER; }

@@ -103826,14 +106711,10 @@ sqlite3WalkExpr(pWalker, pExpr->pLeft);

if( 0==sqlite3ExprCanBeNull(pExpr->pLeft) && !IN_RENAME_OBJECT ){ testcase( ExprHasProperty(pExpr, EP_OuterON) ); assert( !ExprHasProperty(pExpr, EP_IntValue) ); - if( pExpr->op==TK_NOTNULL ){ - pExpr->u.zToken = "true"; - ExprSetProperty(pExpr, EP_IsTrue); - }else{ - pExpr->u.zToken = "false"; - ExprSetProperty(pExpr, EP_IsFalse); - } - pExpr->op = TK_TRUEFALSE; + pExpr->u.iValue = (pExpr->op==TK_NOTNULL); + pExpr->flags |= EP_IntValue; + pExpr->op = TK_INTEGER; + for(i=0, p=pNC; p && i<ArraySize(anRef); p=p->pNext, i++){ p->nRef = anRef[i]; }

@@ -103907,6 +106788,7 @@ #ifndef SQLITE_OMIT_WINDOWFUNC

Window *pWin = (IsWindowFunc(pExpr) ? pExpr->y.pWin : 0); #endif assert( !ExprHasProperty(pExpr, EP_xIsSelect|EP_IntValue) ); + assert( pExpr->pLeft==0 || pExpr->pLeft->op==TK_ORDER ); zId = pExpr->u.zToken; pDef = sqlite3FindFunction(pParse->db, zId, n, enc, 0); if( pDef==0 ){

@@ -104048,6 +106930,10 @@ );

pNC->nNcErr++; } #endif + else if( is_agg==0 && pExpr->pLeft ){ + sqlite3ExprOrderByAggregateError(pParse, pExpr); + pNC->nNcErr++; + } if( is_agg ){ /* Window functions may not be arguments of aggregate functions. ** Or arguments of other window functions. But aggregate functions

@@ -104066,6 +106952,11 @@ }

#endif sqlite3WalkExprList(pWalker, pList); if( is_agg ){ + if( pExpr->pLeft ){ + assert( pExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pExpr->pLeft) ); + sqlite3WalkExprList(pWalker, pExpr->pLeft->x.pList); + } #ifndef SQLITE_OMIT_WINDOWFUNC if( pWin ){ Select *pSel = pNC->pWinSelect;

@@ -104135,8 +107026,8 @@ }

assert( pNC->nRef>=nRef ); if( nRef!=pNC->nRef ){ ExprSetProperty(pExpr, EP_VarSelect); - pNC->ncFlags |= NC_VarSelect; } + pNC->ncFlags |= NC_Subquery; } break; }

@@ -104576,7 +107467,7 @@ return 1;

} for(j=0; j<pSelect->pEList->nExpr; j++){ if( sqlite3ExprCompare(0, pE, pSelect->pEList->a[j].pExpr, -1)==0 ){ - /* Since this expresion is being changed into a reference + /* Since this expression is being changed into a reference ** to an identical expression in the result set, remove all Window ** objects belonging to the expression from the Select.pWin list. */ windowRemoveExprFromSelect(pSelect, pE);

@@ -104629,9 +107520,7 @@ pLeftmost = p;

while( p ){ assert( (p->selFlags & SF_Expanded)!=0 ); assert( (p->selFlags & SF_Resolved)==0 ); - assert( db->suppressErr==0 ); /* SF_Resolved not set if errors suppressed */ p->selFlags |= SF_Resolved; - /* Resolve the expressions in the LIMIT and OFFSET clauses. These ** are not allowed to refer to any names, so pass an empty NameContext.

@@ -104899,7 +107788,8 @@ if( sqlite3ExprCheckHeight(w.pParse, w.pParse->nHeight) ){

return SQLITE_ERROR; } #endif - sqlite3WalkExpr(&w, pExpr); + assert( pExpr!=0 ); + sqlite3WalkExprNN(&w, pExpr); #if SQLITE_MAX_EXPR_DEPTH>0 w.pParse->nHeight -= pExpr->nHeight; #endif

@@ -104941,7 +107831,7 @@ if( sqlite3ExprCheckHeight(w.pParse, w.pParse->nHeight) ){

return WRC_Abort; } #endif - sqlite3WalkExpr(&w, pExpr); + sqlite3WalkExprNN(&w, pExpr); #if SQLITE_MAX_EXPR_DEPTH>0 w.pParse->nHeight -= pExpr->nHeight; #endif

@@ -104963,7 +107853,7 @@ }

/* ** Resolve all names in all expressions of a SELECT and in all -** decendents of the SELECT, including compounds off of p->pPrior, +** descendants of the SELECT, including compounds off of p->pPrior, ** subqueries in expressions, and subqueries used as FROM clause ** terms. **

@@ -105090,49 +107980,123 @@ ** SELECT * FROM t1 WHERE (select a from t1);

*/ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ int op; - while( ExprHasProperty(pExpr, EP_Skip|EP_IfNullRow) ){ - assert( pExpr->op==TK_COLLATE - || pExpr->op==TK_IF_NULL_ROW - || (pExpr->op==TK_REGISTER && pExpr->op2==TK_IF_NULL_ROW) ); - pExpr = pExpr->pLeft; - assert( pExpr!=0 ); - } op = pExpr->op; - if( op==TK_REGISTER ) op = pExpr->op2; - if( op==TK_COLUMN || op==TK_AGG_COLUMN ){ - assert( ExprUseYTab(pExpr) ); - assert( pExpr->y.pTab!=0 ); - return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); - } - if( op==TK_SELECT ){ - assert( ExprUseXSelect(pExpr) ); - assert( pExpr->x.pSelect!=0 ); - assert( pExpr->x.pSelect->pEList!=0 ); - assert( pExpr->x.pSelect->pEList->a[0].pExpr!=0 ); - return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr); - } + while( 1 /* exit-by-break */ ){ + if( op==TK_COLUMN || (op==TK_AGG_COLUMN && pExpr->y.pTab!=0) ){ + assert( ExprUseYTab(pExpr) ); + assert( pExpr->y.pTab!=0 ); + return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); + } + if( op==TK_SELECT ){ + assert( ExprUseXSelect(pExpr) ); + assert( pExpr->x.pSelect!=0 ); + assert( pExpr->x.pSelect->pEList!=0 ); + assert( pExpr->x.pSelect->pEList->a[0].pExpr!=0 ); + return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr); + } #ifndef SQLITE_OMIT_CAST - if( op==TK_CAST ){ - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - return sqlite3AffinityType(pExpr->u.zToken, 0); - } + if( op==TK_CAST ){ + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + return sqlite3AffinityType(pExpr->u.zToken, 0); + } #endif - if( op==TK_SELECT_COLUMN ){ - assert( pExpr->pLeft!=0 && ExprUseXSelect(pExpr->pLeft) ); - assert( pExpr->iColumn < pExpr->iTable ); - assert( pExpr->iTable==pExpr->pLeft->x.pSelect->pEList->nExpr ); - return sqlite3ExprAffinity( - pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr - ); - } - if( op==TK_VECTOR ){ - assert( ExprUseXList(pExpr) ); - return sqlite3ExprAffinity(pExpr->x.pList->a[0].pExpr); + if( op==TK_SELECT_COLUMN ){ + assert( pExpr->pLeft!=0 && ExprUseXSelect(pExpr->pLeft) ); + assert( pExpr->iColumn < pExpr->iTable ); + assert( pExpr->iColumn >= 0 ); + assert( pExpr->iTable==pExpr->pLeft->x.pSelect->pEList->nExpr ); + return sqlite3ExprAffinity( + pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr + ); + } + if( op==TK_VECTOR ){ + assert( ExprUseXList(pExpr) ); + return sqlite3ExprAffinity(pExpr->x.pList->a[0].pExpr); + } + if( ExprHasProperty(pExpr, EP_Skip|EP_IfNullRow) ){ + assert( pExpr->op==TK_COLLATE + || pExpr->op==TK_IF_NULL_ROW + || (pExpr->op==TK_REGISTER && pExpr->op2==TK_IF_NULL_ROW) ); + pExpr = pExpr->pLeft; + op = pExpr->op; + continue; + } + if( op!=TK_REGISTER || (op = pExpr->op2)==TK_REGISTER ) break; } return pExpr->affExpr; } /* +** Make a guess at all the possible datatypes of the result that could +** be returned by an expression. Return a bitmask indicating the answer: +** +** 0x01 Numeric +** 0x02 Text +** 0x04 Blob +** +** If the expression must return NULL, then 0x00 is returned. 
+*/ +SQLITE_PRIVATE int sqlite3ExprDataType(const Expr *pExpr){ + while( pExpr ){ + switch( pExpr->op ){ + case TK_COLLATE: + case TK_IF_NULL_ROW: + case TK_UPLUS: { + pExpr = pExpr->pLeft; + break; + } + case TK_NULL: { + pExpr = 0; + break; + } + case TK_STRING: { + return 0x02; + } + case TK_BLOB: { + return 0x04; + } + case TK_CONCAT: { + return 0x06; + } + case TK_VARIABLE: + case TK_AGG_FUNCTION: + case TK_FUNCTION: { + return 0x07; + } + case TK_COLUMN: + case TK_AGG_COLUMN: + case TK_SELECT: + case TK_CAST: + case TK_SELECT_COLUMN: + case TK_VECTOR: { + int aff = sqlite3ExprAffinity(pExpr); + if( aff>=SQLITE_AFF_NUMERIC ) return 0x05; + if( aff==SQLITE_AFF_TEXT ) return 0x06; + return 0x07; + } + case TK_CASE: { + int res = 0; + int ii; + ExprList *pList = pExpr->x.pList; + assert( ExprUseXList(pExpr) && pList!=0 ); + assert( pList->nExpr > 0); + for(ii=1; ii<pList->nExpr; ii+=2){ + res |= sqlite3ExprDataType(pList->a[ii].pExpr); + } + if( pList->nExpr % 2 ){ + res |= sqlite3ExprDataType(pList->a[pList->nExpr-1].pExpr); + } + return res; + } + default: { + return 0x01; + } + } /* End of switch(op) */ + } /* End of while(pExpr) */ + return 0x00; +} + +/* ** Set the collating sequence for expression pExpr to be the collating ** sequence named by pToken. Return a pointer to a new Expr node that ** implements the COLLATE operator.
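The new sqlite3ExprDataType() helper returns the 0x01/0x02/0x04 bitmask documented above. A tiny illustrative decoder (not part of SQLite's API) showing how such a mask might be interpreted by a caller:

    /* Translate the datatype bitmask (0x01 numeric, 0x02 text, 0x04 blob)
    ** into a human-readable label. Illustrative only. */
    static const char *datatype_mask_name(int m){
      switch( m ){
        case 0x00: return "always NULL";
        case 0x01: return "numeric";
        case 0x02: return "text";
        case 0x04: return "blob";
        case 0x05: return "numeric or blob";
        case 0x06: return "text or blob";
        default:   return "any";
      }
    }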

@@ -105219,7 +108183,9 @@ const Expr *p = pExpr;

while( p ){ int op = p->op; if( op==TK_REGISTER ) op = p->op2; - if( op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_TRIGGER ){ + if( (op==TK_AGG_COLUMN && p->y.pTab!=0) + || op==TK_COLUMN || op==TK_TRIGGER + ){ int j; assert( ExprUseYTab(p) ); assert( p->y.pTab!=0 );

@@ -105249,11 +108215,10 @@ p = p->pLeft;

}else{ Expr *pNext = p->pRight; /* The Expr.x union is never used at the same time as Expr.pRight */ - assert( ExprUseXList(p) ); - assert( p->x.pList==0 || p->pRight==0 ); - if( p->x.pList!=0 && !db->mallocFailed ){ + assert( !ExprUseXList(p) || p->x.pList==0 || p->pRight==0 ); + if( ExprUseXList(p) && p->x.pList!=0 && !db->mallocFailed ){ int i; - for(i=0; ALWAYS(i<p->x.pList->nExpr); i++){ + for(i=0; i<p->x.pList->nExpr; i++){ if( ExprHasProperty(p->x.pList->a[i].pExpr, EP_Collate) ){ pNext = p->x.pList->a[i].pExpr; break;

@@ -105275,7 +108240,7 @@

/* ** Return the collation sequence for the expression pExpr. If ** there is no defined collating sequence, return a pointer to the -** defautl collation sequence. +** default collation sequence. ** ** See also: sqlite3ExprCollSeq() **

@@ -105405,7 +108370,7 @@ }

return pColl; } -/* Expresssion p is a comparison operator. Return a collation sequence +/* Expression p is a comparison operator. Return a collation sequence ** appropriate for the comparison operator. ** ** This is normally just a wrapper around sqlite3BinaryCompareCollSeq().

@@ -105562,6 +108527,7 @@ ** will own the pVector.

*/ pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0); if( pRet ){ + ExprSetProperty(pRet, EP_FullSize); pRet->iTable = nField; pRet->iColumn = iField; pRet->pLeft = pVector;

@@ -105862,6 +108828,15 @@ #define exprSetHeight(y)

#endif /* SQLITE_MAX_EXPR_DEPTH>0 */ /* +** Set the error offset for an Expr node, if possible. +*/ +SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr *pExpr, int iOfst){ + if( pExpr==0 ) return; + if( NEVER(ExprUseWJoin(pExpr)) ) return; + pExpr->w.iOfst = iOfst; +} + +/* ** This routine is the core allocator for Expr nodes. ** ** Construct a new expression node and return a pointer to it. Memory

@@ -106085,9 +109060,9 @@ /*

** Join two expressions using an AND operator. If either expression is ** NULL, then just return the other expression. ** -** If one side or the other of the AND is known to be false, then instead -** of returning an AND expression, just return a constant expression with -** a value of false. +** If one side or the other of the AND is known to be false, and neither side +** is part of an ON clause, then instead of returning an AND expression, +** just return a constant expression with a value of false. */ SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){ sqlite3 *db = pParse->db;

@@ -106095,14 +109070,17 @@ if( pLeft==0 ){

return pRight; }else if( pRight==0 ){ return pLeft; - }else if( (ExprAlwaysFalse(pLeft) || ExprAlwaysFalse(pRight)) - && !IN_RENAME_OBJECT - ){ - sqlite3ExprDeferredDelete(pParse, pLeft); - sqlite3ExprDeferredDelete(pParse, pRight); - return sqlite3Expr(db, TK_INTEGER, "0"); }else{ - return sqlite3PExpr(pParse, TK_AND, pLeft, pRight); + u32 f = pLeft->flags | pRight->flags; + if( (f&(EP_OuterON|EP_InnerON|EP_IsFalse))==EP_IsFalse + && !IN_RENAME_OBJECT + ){ + sqlite3ExprDeferredDelete(pParse, pLeft); + sqlite3ExprDeferredDelete(pParse, pRight); + return sqlite3Expr(db, TK_INTEGER, "0"); + }else{ + return sqlite3PExpr(pParse, TK_AND, pLeft, pRight); + } } }

@@ -106141,6 +109119,69 @@ return pNew;

} /* +** Report an error when attempting to use an ORDER BY clause within +** the arguments of a non-aggregate function. +*/ +SQLITE_PRIVATE void sqlite3ExprOrderByAggregateError(Parse *pParse, Expr *p){ + sqlite3ErrorMsg(pParse, + "ORDER BY may not be used with non-aggregate %#T()", p + ); +} + +/* +** Attach an ORDER BY clause to a function call. +** +** functionname( arguments ORDER BY sortlist ) +** \_____________________/ \______/ +** pExpr pOrderBy +** +** The ORDER BY clause is inserted into a new Expr node of type TK_ORDER +** and added to the Expr.pLeft field of the parent TK_FUNCTION node. +*/ +SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy( + Parse *pParse, /* Parsing context */ + Expr *pExpr, /* The function call to which ORDER BY is to be added */ + ExprList *pOrderBy /* The ORDER BY clause to add */ +){ + Expr *pOB; + sqlite3 *db = pParse->db; + if( NEVER(pOrderBy==0) ){ + assert( db->mallocFailed ); + return; + } + if( pExpr==0 ){ + assert( db->mallocFailed ); + sqlite3ExprListDelete(db, pOrderBy); + return; + } + assert( pExpr->op==TK_FUNCTION ); + assert( pExpr->pLeft==0 ); + assert( ExprUseXList(pExpr) ); + if( pExpr->x.pList==0 || NEVER(pExpr->x.pList->nExpr==0) ){ + /* Ignore ORDER BY on zero-argument aggregates */ + sqlite3ParserAddCleanup(pParse, + (void(*)(sqlite3*,void*))sqlite3ExprListDelete, + pOrderBy); + return; + } + if( IsWindowFunc(pExpr) ){ + sqlite3ExprOrderByAggregateError(pParse, pExpr); + sqlite3ExprListDelete(db, pOrderBy); + return; + } + + pOB = sqlite3ExprAlloc(db, TK_ORDER, 0, 0); + if( pOB==0 ){ + sqlite3ExprListDelete(db, pOrderBy); + return; + } + pOB->x.pList = pOrderBy; + assert( ExprUseXList(pOB) ); + pExpr->pLeft = pOB; + ExprSetProperty(pOB, EP_FullSize); +} + +/* ** Check to see if a function is usable according to current access ** rules: **
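The new sqlite3ExprAddFunctionOrderBy()/sqlite3ExprOrderByAggregateError() routines wire an ORDER BY clause into an aggregate function's argument list (and reject it for ordinary or window functions). A hedged usage sketch, with made-up table and column names:

    #include <sqlite3.h>
    #include <stdio.h>

    /* Build a comma-separated, alphabetically ordered list of names. */
    static void print_sorted_names(sqlite3 *db){
      const char *zSql = "SELECT group_concat(name ORDER BY name) FROM people";
      sqlite3_stmt *pStmt = 0;
      if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK
       && sqlite3_step(pStmt)==SQLITE_ROW
      ){
        const unsigned char *z = sqlite3_column_text(pStmt, 0);
        printf("%s\n", z ? (const char*)z : "(empty)");
      }
      sqlite3_finalize(pStmt);
    }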

@@ -106318,7 +109359,7 @@

/* ** Arrange to cause pExpr to be deleted when the pParse is deleted. ** This is similar to sqlite3ExprDelete() except that the delete is -** deferred untilthe pParse is deleted. +** deferred until the pParse is deleted. ** ** The pExpr might be deleted immediately on an OOM error. **

@@ -106393,11 +109434,7 @@ int nSize;

assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */ assert( EXPR_FULLSIZE<=0xfff ); assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 ); - if( 0==flags || p->op==TK_SELECT_COLUMN -#ifndef SQLITE_OMIT_WINDOWFUNC - || ExprHasProperty(p, EP_WinFunc) -#endif - ){ + if( 0==flags || ExprHasProperty(p, EP_FullSize) ){ nSize = EXPR_FULLSIZE; }else{ assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) );

@@ -106428,56 +109465,93 @@ }

/* ** Return the number of bytes required to create a duplicate of the -** expression passed as the first argument. The second argument is a -** mask containing EXPRDUP_XXX flags. +** expression passed as the first argument. ** ** The value returned includes space to create a copy of the Expr struct ** itself and the buffer referred to by Expr.u.zToken, if any. ** -** If the EXPRDUP_REDUCE flag is set, then the return value includes -** space to duplicate all Expr nodes in the tree formed by Expr.pLeft -** and Expr.pRight variables (but not for any structures pointed to or -** descended from the Expr.x.pList or Expr.x.pSelect variables). +** The return value includes space to duplicate all Expr nodes in the +** tree formed by Expr.pLeft and Expr.pRight, but not any other +** substructure such as Expr.x.pList, Expr.x.pSelect, and Expr.y.pWin. */ -static int dupedExprSize(const Expr *p, int flags){ - int nByte = 0; - if( p ){ - nByte = dupedExprNodeSize(p, flags); - if( flags&EXPRDUP_REDUCE ){ - nByte += dupedExprSize(p->pLeft, flags) + dupedExprSize(p->pRight, flags); - } - } +static int dupedExprSize(const Expr *p){ + int nByte; + assert( p!=0 ); + nByte = dupedExprNodeSize(p, EXPRDUP_REDUCE); + if( p->pLeft ) nByte += dupedExprSize(p->pLeft); + if( p->pRight ) nByte += dupedExprSize(p->pRight); + assert( nByte==ROUND8(nByte) ); return nByte; } /* -** This function is similar to sqlite3ExprDup(), except that if pzBuffer -** is not NULL then *pzBuffer is assumed to point to a buffer large enough -** to store the copy of expression p, the copies of p->u.zToken -** (if applicable), and the copies of the p->pLeft and p->pRight expressions, -** if any. Before returning, *pzBuffer is set to the first byte past the -** portion of the buffer copied into by this function. +** An EdupBuf is a memory allocation used to stored multiple Expr objects +** together with their Expr.zToken content. This is used to help implement +** compression while doing sqlite3ExprDup(). The top-level Expr does the +** allocation for itself and many of its decendents, then passes an instance +** of the structure down into exprDup() so that they decendents can have +** access to that memory. +*/ +typedef struct EdupBuf EdupBuf; +struct EdupBuf { + u8 *zAlloc; /* Memory space available for storage */ +#ifdef SQLITE_DEBUG + u8 *zEnd; /* First byte past the end of memory */ +#endif +}; + +/* +** This function is similar to sqlite3ExprDup(), except that if pEdupBuf +** is not NULL then it points to memory that can be used to store a copy +** of the input Expr p together with its p->u.zToken (if any). pEdupBuf +** is updated with the new buffer tail prior to returning. */ -static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){ +static Expr *exprDup( + sqlite3 *db, /* Database connection (for memory allocation) */ + const Expr *p, /* Expr tree to be duplicated */ + int dupFlags, /* EXPRDUP_REDUCE for compression. 0 if not */ + EdupBuf *pEdupBuf /* Preallocated storage space, or NULL */ +){ Expr *pNew; /* Value to return */ - u8 *zAlloc; /* Memory space from which to build Expr object */ + EdupBuf sEdupBuf; /* Memory space from which to build Expr object */ u32 staticFlag; /* EP_Static if space not obtained from malloc */ + int nToken = -1; /* Space needed for p->u.zToken. 
-1 means unknown */ assert( db!=0 ); assert( p ); assert( dupFlags==0 || dupFlags==EXPRDUP_REDUCE ); - assert( pzBuffer==0 || dupFlags==EXPRDUP_REDUCE ); + assert( pEdupBuf==0 || dupFlags==EXPRDUP_REDUCE ); /* Figure out where to write the new Expr structure. */ - if( pzBuffer ){ - zAlloc = *pzBuffer; + if( pEdupBuf ){ + sEdupBuf.zAlloc = pEdupBuf->zAlloc; +#ifdef SQLITE_DEBUG + sEdupBuf.zEnd = pEdupBuf->zEnd; +#endif staticFlag = EP_Static; - assert( zAlloc!=0 ); + assert( sEdupBuf.zAlloc!=0 ); + assert( dupFlags==EXPRDUP_REDUCE ); }else{ - zAlloc = sqlite3DbMallocRawNN(db, dupedExprSize(p, dupFlags)); + int nAlloc; + if( dupFlags ){ + nAlloc = dupedExprSize(p); + }else if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ + nToken = sqlite3Strlen30NN(p->u.zToken)+1; + nAlloc = ROUND8(EXPR_FULLSIZE + nToken); + }else{ + nToken = 0; + nAlloc = ROUND8(EXPR_FULLSIZE); + } + assert( nAlloc==ROUND8(nAlloc) ); + sEdupBuf.zAlloc = sqlite3DbMallocRawNN(db, nAlloc); +#ifdef SQLITE_DEBUG + sEdupBuf.zEnd = sEdupBuf.zAlloc ? sEdupBuf.zAlloc+nAlloc : 0; +#endif + staticFlag = 0; } - pNew = (Expr *)zAlloc; + pNew = (Expr *)sEdupBuf.zAlloc; + assert( EIGHT_BYTE_ALIGNMENT(pNew) ); if( pNew ){ /* Set nNewSize to the size allocated for the structure pointed to

@@ -106486,22 +109560,27 @@ ** EXPR_TOKENONLYSIZE. nToken is set to the number of bytes consumed

** by the copy of the p->u.zToken string (if any). */ const unsigned nStructSize = dupedExprStructSize(p, dupFlags); - const int nNewSize = nStructSize & 0xfff; - int nToken; - if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ - nToken = sqlite3Strlen30(p->u.zToken) + 1; - }else{ - nToken = 0; + int nNewSize = nStructSize & 0xfff; + if( nToken<0 ){ + if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ + nToken = sqlite3Strlen30(p->u.zToken) + 1; + }else{ + nToken = 0; + } } if( dupFlags ){ + assert( (int)(sEdupBuf.zEnd - sEdupBuf.zAlloc) >= nNewSize+nToken ); assert( ExprHasProperty(p, EP_Reduced)==0 ); - memcpy(zAlloc, p, nNewSize); + memcpy(sEdupBuf.zAlloc, p, nNewSize); }else{ u32 nSize = (u32)exprStructSize(p); - memcpy(zAlloc, p, nSize); + assert( (int)(sEdupBuf.zEnd - sEdupBuf.zAlloc) >= + (int)EXPR_FULLSIZE+nToken ); + memcpy(sEdupBuf.zAlloc, p, nSize); if( nSize<EXPR_FULLSIZE ){ - memset(&zAlloc[nSize], 0, EXPR_FULLSIZE-nSize); + memset(&sEdupBuf.zAlloc[nSize], 0, EXPR_FULLSIZE-nSize); } + nNewSize = EXPR_FULLSIZE; } /* Set the EP_Reduced, EP_TokenOnly, and EP_Static flags appropriately. */

@@ -106514,44 +109593,50 @@ ExprSetVVAProperty(pNew, EP_Immutable);

} /* Copy the p->u.zToken string, if any. */ - if( nToken ){ - char *zToken = pNew->u.zToken = (char*)&zAlloc[nNewSize]; + assert( nToken>=0 ); + if( nToken>0 ){ + char *zToken = pNew->u.zToken = (char*)&sEdupBuf.zAlloc[nNewSize]; memcpy(zToken, p->u.zToken, nToken); + nNewSize += nToken; } + sEdupBuf.zAlloc += ROUND8(nNewSize); + + if( ((p->flags|pNew->flags)&(EP_TokenOnly|EP_Leaf))==0 ){ - if( 0==((p->flags|pNew->flags) & (EP_TokenOnly|EP_Leaf)) ){ /* Fill in the pNew->x.pSelect or pNew->x.pList member. */ if( ExprUseXSelect(p) ){ pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, dupFlags); }else{ - pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, dupFlags); + pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, + p->op!=TK_ORDER ? dupFlags : 0); } - } - /* Fill in pNew->pLeft and pNew->pRight. */ - if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly|EP_WinFunc) ){ - zAlloc += dupedExprNodeSize(p, dupFlags); - if( !ExprHasProperty(pNew, EP_TokenOnly|EP_Leaf) ){ - pNew->pLeft = p->pLeft ? - exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc) : 0; - pNew->pRight = p->pRight ? - exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc) : 0; - } #ifndef SQLITE_OMIT_WINDOWFUNC if( ExprHasProperty(p, EP_WinFunc) ){ pNew->y.pWin = sqlite3WindowDup(db, pNew, p->y.pWin); assert( ExprHasProperty(pNew, EP_WinFunc) ); } #endif /* SQLITE_OMIT_WINDOWFUNC */ - if( pzBuffer ){ - *pzBuffer = zAlloc; - } - }else{ - if( !ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){ - if( pNew->op==TK_SELECT_COLUMN ){ + + /* Fill in pNew->pLeft and pNew->pRight. */ + if( dupFlags ){ + if( p->op==TK_SELECT_COLUMN ){ + pNew->pLeft = p->pLeft; + assert( p->pRight==0 + || p->pRight==p->pLeft + || ExprHasProperty(p->pLeft, EP_Subquery) ); + }else{ + pNew->pLeft = p->pLeft ? + exprDup(db, p->pLeft, EXPRDUP_REDUCE, &sEdupBuf) : 0; + } + pNew->pRight = p->pRight ? + exprDup(db, p->pRight, EXPRDUP_REDUCE, &sEdupBuf) : 0; + }else{ + if( p->op==TK_SELECT_COLUMN ){ pNew->pLeft = p->pLeft; - assert( p->pRight==0 || p->pRight==p->pLeft - || ExprHasProperty(p->pLeft, EP_Subquery) ); + assert( p->pRight==0 + || p->pRight==p->pLeft + || ExprHasProperty(p->pLeft, EP_Subquery) ); }else{ pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0); }

@@ -106559,6 +109644,8 @@ pNew->pRight = sqlite3ExprDup(db, p->pRight, 0);

} } } + if( pEdupBuf ) memcpy(pEdupBuf, &sEdupBuf, sizeof(sEdupBuf)); + assert( sEdupBuf.zAlloc <= sEdupBuf.zEnd ); return pNew; }
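The exprDup() rework above replaces the old zAlloc pointer-threading with an EdupBuf that tracks the remaining space (and, in debug builds, its end) while an entire Expr tree plus its token strings is copied into a single allocation. A generic sketch of that bump-allocation pattern, independent of SQLite's structures:

    #include <stddef.h>

    /* Hand out 8-byte-aligned chunks from one preallocated buffer, in the
    ** spirit of EdupBuf: the whole tree and its string payloads share a
    ** single allocation. Illustrative only. */
    typedef struct Bump { unsigned char *p; unsigned char *end; } Bump;

    static void *bump_alloc(Bump *b, size_t n){
      void *r;
      n = (n + 7) & ~(size_t)7;     /* same idea as ROUND8() */
      if( (size_t)(b->end - b->p) < n ) return 0;
      r = b->p;
      b->p += n;
      return r;
    }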

@@ -106823,11 +109910,7 @@ ** Add a new element to the end of an expression list. If pList is

** initially NULL, then create a new expression list. ** ** The pList argument must be either NULL or a pointer to an ExprList -** obtained from a prior call to sqlite3ExprListAppend(). This routine -** may not be used with an ExprList obtained from sqlite3ExprListDup(). -** Reason: This routine assumes that the number of slots in pList->a[] -** is a power of two. That is true for sqlite3ExprListAppend() returns -** but is not necessarily true from the return value of sqlite3ExprListDup(). +** obtained from a prior call to sqlite3ExprListAppend(). ** ** If a memory allocation error occurs, the entire list is freed and ** NULL is returned. If non-NULL is returned, then it is guaranteed

@@ -107160,7 +110243,7 @@ ** The argument must be a TK_TRUEFALSE Expr node. Return 1 if it is TRUE

** and 0 if it is FALSE. */ SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr *pExpr){ - pExpr = sqlite3ExprSkipCollate((Expr*)pExpr); + pExpr = sqlite3ExprSkipCollateAndLikely((Expr*)pExpr); assert( pExpr->op==TK_TRUEFALSE ); assert( !ExprHasProperty(pExpr, EP_IntValue) ); assert( sqlite3StrICmp(pExpr->u.zToken,"true")==0

@@ -107347,12 +110430,17 @@ return exprIsConst(p, 3, iCur);

} /* -** Check pExpr to see if it is an invariant constraint on data source pSrc. +** Check pExpr to see if it is an constraint on the single data source +** pSrc = &pSrcList->a[iSrc]. In other words, check to see if pExpr +** constrains pSrc but does not depend on any other tables or data +** sources anywhere else in the query. Return true (non-zero) if pExpr +** is a constraint on pSrc only. +** ** This is an optimization. False negatives will perhaps cause slower ** queries, but false positives will yield incorrect answers. So when in ** doubt, return 0. ** -** To be an invariant constraint, the following must be true: +** To be an single-source constraint, the following must be true: ** ** (1) pExpr cannot refer to any table other than pSrc->iCursor. **

@@ -107363,13 +110451,31 @@ ** (Is there some way to relax this constraint?)

** ** (4) If pSrc is the right operand of a LEFT JOIN, then... ** (4a) pExpr must come from an ON clause.. - (4b) and specifically the ON clause associated with the LEFT JOIN. +** (4b) and specifically the ON clause associated with the LEFT JOIN. ** ** (5) If pSrc is not the right operand of a LEFT JOIN or the left ** operand of a RIGHT JOIN, then pExpr must be from the WHERE ** clause, not an ON clause. +** +** (6) Either: +** +** (6a) pExpr does not originate in an ON or USING clause, or +** +** (6b) The ON or USING clause from which pExpr is derived is +** not to the left of a RIGHT JOIN (or FULL JOIN). +** +** Without this restriction, accepting pExpr as a single-table +** constraint might move the the ON/USING filter expression +** from the left side of a RIGHT JOIN over to the right side, +** which leads to incorrect answers. See also restriction (9) +** on push-down. */ -SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr *pExpr, const SrcItem *pSrc){ +SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint( + Expr *pExpr, /* The constraint */ + const SrcList *pSrcList, /* Complete FROM clause */ + int iSrc /* Which element of pSrcList to use */ +){ + const SrcItem *pSrc = &pSrcList->a[iSrc]; if( pSrc->fg.jointype & JT_LTORJ ){ return 0; /* rule (3) */ }

@@ -107378,6 +110484,19 @@ if( !ExprHasProperty(pExpr, EP_OuterON) ) return 0; /* rule (4a) */

if( pExpr->w.iJoin!=pSrc->iCursor ) return 0; /* rule (4b) */ }else{ if( ExprHasProperty(pExpr, EP_OuterON) ) return 0; /* rule (5) */ + } + if( ExprHasProperty(pExpr, EP_OuterON|EP_InnerON) /* (6a) */ + && (pSrcList->a[0].fg.jointype & JT_LTORJ)!=0 /* Fast pre-test of (6b) */ + ){ + int jj; + for(jj=0; jj<iSrc; jj++){ + if( pExpr->w.iJoin==pSrcList->a[jj].iCursor ){ + if( (pSrcList->a[jj].fg.jointype & JT_LTORJ)!=0 ){ + return 0; /* restriction (6) */ + } + break; + } + } } return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor); /* rules (1), (2) */ }

@@ -107614,6 +110733,27 @@ SQLITE_PRIVATE int sqlite3IsRowid(const char *z){

if( sqlite3StrICmp(z, "_ROWID_")==0 ) return 1; if( sqlite3StrICmp(z, "ROWID")==0 ) return 1; if( sqlite3StrICmp(z, "OID")==0 ) return 1; + return 0; +} + +/* +** Return a pointer to a buffer containing a usable rowid alias for table +** pTab. An alias is usable if there is not an explicit user-defined column +** of the same name. +*/ +SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab){ + const char *azOpt[] = {"_ROWID_", "ROWID", "OID"}; + int ii; + assert( VisibleRowid(pTab) ); + for(ii=0; ii<ArraySize(azOpt); ii++){ + int iCol; + for(iCol=0; iCol<pTab->nCol; iCol++){ + if( sqlite3_stricmp(azOpt[ii], pTab->aCol[iCol].zCnName)==0 ) break; + } + if( iCol==pTab->nCol ){ + return azOpt[ii]; + } + } return 0; }
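The new sqlite3RowidAlias() picks whichever of _ROWID_, ROWID or OID is not shadowed by a user-defined column. The behaviour it supports can be demonstrated from plain SQL (illustrative schema only):

    #include <sqlite3.h>

    /* If a table declares its own "rowid" column, the bare name refers to
    ** the user column; an unshadowed alias (_rowid_ here) still reaches
    ** the implicit rowid. */
    static int demo_rowid_alias(sqlite3 *db){
      return sqlite3_exec(db,
          "CREATE TABLE t(rowid TEXT);"
          "INSERT INTO t(rowid) VALUES('not the real rowid');"
          "SELECT rowid, _rowid_ FROM t;",
          0, 0, 0);
    }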

@@ -107621,7 +110761,7 @@ /*

** pX is the RHS of an IN operator. If pX is a SELECT statement ** that can be simplified to a direct table access, then return ** a pointer to the SELECT statement. If pX is not a SELECT statement, -** or if the SELECT statement needs to be manifested into a transient +** or if the SELECT statement needs to be materialized into a transient ** table, then return NULL. */ #ifndef SQLITE_OMIT_SUBQUERY

@@ -107717,7 +110857,7 @@ ** IN_INDEX_ROWID - The cursor was opened on a database table.

** IN_INDEX_INDEX_ASC - The cursor was opened on an ascending index. ** IN_INDEX_INDEX_DESC - The cursor was opened on a descending index. ** IN_INDEX_EPH - The cursor was opened on a specially created and -** populated epheremal table. +** populated ephemeral table. ** IN_INDEX_NOOP - No cursor was allocated. The IN operator must be ** implemented as a sequence of comparisons. **

@@ -107730,7 +110870,7 @@ ** If the RHS of the IN operator is a list or a more complex subquery, then

** an ephemeral table might need to be generated from the RHS and then ** pX->iTable made to point to the ephemeral table instead of an ** existing table. In this case, the creation and initialization of the -** ephmeral table might be put inside of a subroutine, the EP_Subrtn flag +** ephemeral table might be put inside of a subroutine, the EP_Subrtn flag ** will be set on pX and the pX->y.sub fields will be set to show where ** the subroutine is coded. **

@@ -107742,12 +110882,12 @@ ** be used to loop over all values of the RHS of the IN operator.

** ** When IN_INDEX_LOOP is used (and the b-tree will be used to iterate ** through the set members) then the b-tree must not contain duplicates. -** An epheremal table will be created unless the selected columns are guaranteed +** An ephemeral table will be created unless the selected columns are guaranteed ** to be unique - either because it is an INTEGER PRIMARY KEY or due to ** a UNIQUE constraint or index. ** ** When IN_INDEX_MEMBERSHIP is used (and the b-tree will be used -** for fast set membership tests) then an epheremal table must +** for fast set membership tests) then an ephemeral table must ** be used unless <columns> is a single INTEGER PRIMARY KEY column or an ** index can be found with the specified <columns> as its left-most. **

@@ -107907,7 +111047,6 @@ Expr *pRhs = pEList->a[i].pExpr;

CollSeq *pReq = sqlite3BinaryCompareCollSeq(pParse, pLhs, pRhs); int j; - assert( pReq!=0 || pRhs->iColumn==XN_ROWID || pParse->nErr ); for(j=0; j<nExpr; j++){ if( pIdx->aiColumn[j]!=pRhs->iColumn ) continue; assert( pIdx->azColl[j] );

@@ -108081,7 +111220,7 @@ ** x IN (4,5,11) -- IN operator with list on right-hand side

** x IN (SELECT a FROM b) -- IN operator with subquery on the right ** ** The pExpr parameter is the IN operator. The cursor number for the -** constructed ephermeral table is returned. The first time the ephemeral +** constructed ephemeral table is returned. The first time the ephemeral ** table is computed, the cursor number is also stored in pExpr->iTable, ** however the cursor number returned might not be the same, as it might ** have been duplicated using OP_OpenDup.

@@ -108301,6 +111440,9 @@ Select *pSel; /* SELECT statement to encode */

SelectDest dest; /* How to deal with SELECT result */ int nReg; /* Registers to allocate */ Expr *pLimit; /* New limit expression */ +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrExplain; /* Address of OP_Explain instruction */ +#endif Vdbe *v = pParse->pVdbe; assert( v!=0 );

@@ -108353,8 +111495,9 @@ **

** In both cases, the query is augmented with "LIMIT 1". Any ** preexisting limit is discarded in place of the new LIMIT 1. */ - ExplainQueryPlan((pParse, 1, "%sSCALAR SUBQUERY %d", + ExplainQueryPlan2(addrExplain, (pParse, 1, "%sSCALAR SUBQUERY %d", addrOnce?"":"CORRELATED ", pSel->selId)); + sqlite3VdbeScanStatusCounters(v, addrExplain, addrExplain, -1); nReg = pExpr->op==TK_SELECT ? pSel->pEList->nExpr : 1; sqlite3SelectDestInit(&dest, 0, pParse->nMem+1); pParse->nMem += nReg;

@@ -108397,6 +111540,7 @@ ExprSetVVAProperty(pExpr, EP_NoReduce);

if( addrOnce ){ sqlite3VdbeJumpHere(v, addrOnce); } + sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1); /* Subroutine return */ assert( ExprUseYSub(pExpr) );

@@ -108805,6 +111949,7 @@ int regOut /* Put the result in this register */

){ Vdbe *v = pParse->pVdbe; + int nErr = pParse->nErr; assert( v!=0 ); assert( pParse->iSelfTab!=0 ); if( pParse->iSelfTab>0 ){

@@ -108817,6 +111962,7 @@ if( pCol->affinity>=SQLITE_AFF_TEXT ){

sqlite3VdbeAddOp4(v, OP_Affinity, regOut, 1, 0, &pCol->affinity, 1); } if( iAddr ) sqlite3VdbeJumpHere(v, iAddr); + if( pParse->nErr>nErr ) pParse->db->errByteOffset = -1; } #endif /* SQLITE_OMIT_GENERATED_COLUMNS */

@@ -108833,6 +111979,7 @@ ){

Column *pCol; assert( v!=0 ); assert( pTab!=0 ); + assert( iCol!=XN_EXPR ); if( iCol<0 || iCol==pTab->iPKey ){ sqlite3VdbeAddOp2(v, OP_Rowid, iTabCur, regOut); VdbeComment((v, "%s.rowid", pTab->zName));

@@ -108888,10 +112035,13 @@ int iReg, /* Store results here */

u8 p5 /* P5 value for OP_Column + FLAGS */ ){ assert( pParse->pVdbe!=0 ); + assert( (p5 & (OPFLAG_NOCHNG|OPFLAG_TYPEOFARG|OPFLAG_LENGTHARG))==p5 ); + assert( IsVirtual(pTab) || (p5 & OPFLAG_NOCHNG)==0 ); sqlite3ExprCodeGetColumnOfTable(pParse->pVdbe, pTab, iTable, iColumn, iReg); if( p5 ){ VdbeOp *pOp = sqlite3VdbeGetLastOp(pParse->pVdbe); if( pOp->opcode==OP_Column ) pOp->p5 = p5; + if( pOp->opcode==OP_VColumn ) pOp->p5 = (p5 & OPFLAG_NOCHNG); } return iReg; }

@@ -108920,7 +112070,7 @@ }

/* ** Evaluate an expression (either a vector or a scalar expression) and store -** the result in continguous temporary registers. Return the index of +** the result in contiguous temporary registers. Return the index of ** the first register used to store the result. ** ** If the returned result register is a temporary scalar, then also write

@@ -108960,7 +112110,7 @@ ** so that a subsequent copy will not be merged into this one.

*/ static void setDoNotMergeFlagOnCopy(Vdbe *v){ if( sqlite3VdbeGetLastOp(v)->opcode==OP_Copy ){ - sqlite3VdbeChangeP5(v, 1); /* Tag trailing OP_Copy as not mergable */ + sqlite3VdbeChangeP5(v, 1); /* Tag trailing OP_Copy as not mergeable */ } }

@@ -109050,13 +112200,13 @@ break;

} case INLINEFUNC_implies_nonnull_row: { - /* REsult of sqlite3ExprImpliesNonNullRow() */ + /* Result of sqlite3ExprImpliesNonNullRow() */ Expr *pA1; assert( nFarg==2 ); pA1 = pFarg->a[1].pExpr; if( pA1->op==TK_COLUMN ){ sqlite3VdbeAddOp2(v, OP_Integer, - sqlite3ExprImpliesNonNullRow(pFarg->a[0].pExpr,pA1->iTable), + sqlite3ExprImpliesNonNullRow(pFarg->a[0].pExpr,pA1->iTable,1), target); }else{ sqlite3VdbeAddOp2(v, OP_Null, 0, target);

@@ -109069,10 +112219,13 @@ /* The AFFINITY() function evaluates to a string that describes

** the type affinity of the argument. This is used for testing of ** the SQLite type logic. */ - const char *azAff[] = { "blob", "text", "numeric", "integer", "real" }; + const char *azAff[] = { "blob", "text", "numeric", "integer", + "real", "flexnum" }; char aff; assert( nFarg==1 ); aff = sqlite3ExprAffinity(pFarg->a[0].pExpr); + assert( aff<=SQLITE_AFF_NONE + || (aff>=SQLITE_AFF_BLOB && aff<=SQLITE_AFF_FLEXNUM) ); sqlite3VdbeLoadString(v, target, (aff<=SQLITE_AFF_NONE) ? "none" : azAff[aff-SQLITE_AFF_BLOB]); break;

@@ -109083,7 +112236,7 @@ return target;

} /* -** Check to see if pExpr is one of the indexed expressions on pParse->pIdxExpr. +** Check to see if pExpr is one of the indexed expressions on pParse->pIdxEpr. ** If it is, then resolve the expression by reading from the index and ** return the register into which the value has been read. If pExpr is ** not an indexed expression, then return negative.

@@ -109095,7 +112248,8 @@ int target /* Where to store the result of the expression */

){ IndexedExpr *p; Vdbe *v; - for(p=pParse->pIdxExpr; p; p=p->pIENext){ + for(p=pParse->pIdxEpr; p; p=p->pIENext){ + u8 exprAff; int iDataCur = p->iDataCur; if( iDataCur<0 ) continue; if( pParse->iSelfTab ){

@@ -109103,6 +112257,16 @@ if( p->iDataCur!=pParse->iSelfTab-1 ) continue;

iDataCur = -1; } if( sqlite3ExprCompare(0, pExpr, p->pExpr, iDataCur)!=0 ) continue; + assert( p->aff>=SQLITE_AFF_BLOB && p->aff<=SQLITE_AFF_NUMERIC ); + exprAff = sqlite3ExprAffinity(pExpr); + if( (exprAff<=SQLITE_AFF_BLOB && p->aff!=SQLITE_AFF_BLOB) + || (exprAff==SQLITE_AFF_TEXT && p->aff!=SQLITE_AFF_TEXT) + || (exprAff>=SQLITE_AFF_NUMERIC && p->aff!=SQLITE_AFF_NUMERIC) + ){ + /* Affinity mismatch on a generated column */ + continue; + } + v = pParse->pVdbe; assert( v!=0 ); if( p->bMaybeNullRow ){

@@ -109115,10 +112279,10 @@ VdbeCoverage(v);

sqlite3VdbeAddOp3(v, OP_Column, p->iIdxCur, p->iIdxCol, target); VdbeComment((v, "%s expr-column %d", p->zIdxName, p->iIdxCol)); sqlite3VdbeGoto(v, 0); - p = pParse->pIdxExpr; - pParse->pIdxExpr = 0; + p = pParse->pIdxEpr; + pParse->pIdxEpr = 0; sqlite3ExprCode(pParse, pExpr, target); - pParse->pIdxExpr = p; + pParse->pIdxEpr = p; sqlite3VdbeJumpHere(v, addr+2); }else{ sqlite3VdbeAddOp3(v, OP_Column, p->iIdxCur, p->iIdxCol, target);

@@ -109131,6 +112295,41 @@ }

/* +** Expresion pExpr is guaranteed to be a TK_COLUMN or equivalent. This +** function checks the Parse.pIdxPartExpr list to see if this column +** can be replaced with a constant value. If so, it generates code to +** put the constant value in a register (ideally, but not necessarily, +** register iTarget) and returns the register number. +** +** Or, if the TK_COLUMN cannot be replaced by a constant, zero is +** returned. +*/ +static int exprPartidxExprLookup(Parse *pParse, Expr *pExpr, int iTarget){ + IndexedExpr *p; + for(p=pParse->pIdxPartExpr; p; p=p->pIENext){ + if( pExpr->iColumn==p->iIdxCol && pExpr->iTable==p->iDataCur ){ + Vdbe *v = pParse->pVdbe; + int addr = 0; + int ret; + + if( p->bMaybeNullRow ){ + addr = sqlite3VdbeAddOp1(v, OP_IfNullRow, p->iIdxCur); + } + ret = sqlite3ExprCodeTarget(pParse, p->pExpr, iTarget); + sqlite3VdbeAddOp4(pParse->pVdbe, OP_Affinity, ret, 1, 0, + (const char*)&p->aff, 1); + if( addr ){ + sqlite3VdbeJumpHere(v, addr); + sqlite3VdbeChangeP3(v, addr, ret); + } + return ret; + } + } + return 0; +} + + +/* ** Generate code into the current Vdbe to evaluate the given ** expression. Attempt to store the results in register "target". ** Return the register where results are stored.

@@ -109157,7 +112356,7 @@

expr_code_doover: if( pExpr==0 ){ op = TK_NULL; - }else if( pParse->pIdxExpr!=0 + }else if( pParse->pIdxEpr!=0 && !ExprHasProperty(pExpr, EP_Leaf) && (r1 = sqlite3IndexedExprLookup(pParse, pExpr, target))>=0 ){

@@ -109166,23 +112365,37 @@ }else{

assert( !ExprHasVVAProperty(pExpr,EP_Immutable) ); op = pExpr->op; } + assert( op!=TK_ORDER ); switch( op ){ case TK_AGG_COLUMN: { AggInfo *pAggInfo = pExpr->pAggInfo; struct AggInfo_col *pCol; assert( pAggInfo!=0 ); - assert( pExpr->iAgg>=0 && pExpr->iAgg<pAggInfo->nColumn ); + assert( pExpr->iAgg>=0 ); + if( pExpr->iAgg>=pAggInfo->nColumn ){ + /* Happens when the left table of a RIGHT JOIN is null and + ** is using an expression index */ + sqlite3VdbeAddOp2(v, OP_Null, 0, target); +#ifdef SQLITE_VDBE_COVERAGE + /* Verify that the OP_Null above is exercised by tests + ** tag-20230325-2 */ + sqlite3VdbeAddOp3(v, OP_NotNull, target, 1, 20230325); + VdbeCoverageNeverTaken(v); +#endif + break; + } pCol = &pAggInfo->aCol[pExpr->iAgg]; if( !pAggInfo->directMode ){ - assert( pCol->iMem>0 ); - return pCol->iMem; + return AggInfoColumnReg(pAggInfo, pExpr->iAgg); }else if( pAggInfo->useSortingIdx ){ Table *pTab = pCol->pTab; sqlite3VdbeAddOp3(v, OP_Column, pAggInfo->sortingIdxPTab, pCol->iSorterColumn, target); - if( pCol->iColumn<0 ){ + if( pTab==0 ){ + /* No comment added */ + }else if( pCol->iColumn<0 ){ VdbeComment((v,"%s.rowid",pTab->zName)); - }else if( ALWAYS(pTab!=0) ){ + }else{ VdbeComment((v,"%s.%s", pTab->zName, pTab->aCol[pCol->iColumn].zCnName)); if( pTab->aCol[pCol->iColumn].affinity==SQLITE_AFF_REAL ){

@@ -109190,6 +112403,11 @@ sqlite3VdbeAddOp1(v, OP_RealAffinity, target);

} } return target; + }else if( pExpr->y.pTab==0 ){ + /* This case happens when the argument to an aggregate function + ** is rewritten by aggregateConvertIndexedExprRefToColumn() */ + sqlite3VdbeAddOp3(v, OP_Column, pExpr->iTable, pExpr->iColumn, target); + return target; } /* Otherwise, fall thru into the TK_COLUMN case */ /* no break */ deliberate_fall_through

@@ -109200,7 +112418,7 @@ int iReg;

if( ExprHasProperty(pExpr, EP_FixedCol) ){ /* This COLUMN expression is really a constant due to WHERE clause ** constraints, and that constant is coded by the pExpr->pLeft - ** expresssion. However, make sure the constant has the correct + ** expression. However, make sure the constant has the correct ** datatype by applying the Affinity of the table column to the ** constant. */

@@ -109210,7 +112428,7 @@ assert( ExprUseYTab(pExpr) );

assert( pExpr->y.pTab!=0 ); aff = sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); if( aff>SQLITE_AFF_BLOB ){ - static const char zAff[] = "B\000C\000D\000E"; + static const char zAff[] = "B\000C\000D\000E\000F"; assert( SQLITE_AFF_BLOB=='A' ); assert( SQLITE_AFF_TEXT=='B' ); sqlite3VdbeAddOp4(v, OP_Affinity, iReg, 1, 0,

@@ -109269,6 +112487,11 @@ ** in the index refer to the table to which the index belongs */

iTab = pParse->iSelfTab - 1; } } + else if( pParse->pIdxPartExpr + && 0!=(r1 = exprPartidxExprLookup(pParse, pExpr, target)) + ){ + return r1; + } assert( ExprUseYTab(pExpr) ); assert( pExpr->y.pTab!=0 ); iReg = sqlite3ExprCodeGetColumn(pParse, pExpr->y.pTab,

@@ -109340,11 +112563,8 @@ }

#ifndef SQLITE_OMIT_CAST case TK_CAST: { /* Expressions of the form: CAST(pLeft AS token) */ - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); - if( inReg!=target ){ - sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target); - inReg = target; - } + sqlite3ExprCode(pParse, pExpr->pLeft, target); + assert( inReg==target ); assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3VdbeAddOp2(v, OP_Cast, target, sqlite3AffinityType(pExpr->u.zToken, 0));

@@ -109487,7 +112707,7 @@ ){

assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3ErrorMsg(pParse, "misuse of aggregate: %#T()", pExpr); }else{ - return pInfo->aFunc[pExpr->iAgg].iMem; + return AggInfoFuncReg(pInfo, pExpr->iAgg); } break; }

@@ -109529,7 +112749,7 @@ if( pDef==0 || pDef->xFinalize!=0 ){

sqlite3ErrorMsg(pParse, "unknown function: %#T()", pExpr); break; } - if( pDef->funcFlags & SQLITE_FUNC_INLINE ){ + if( (pDef->funcFlags & SQLITE_FUNC_INLINE)!=0 && ALWAYS(pFarg!=0) ){ assert( (pDef->funcFlags & SQLITE_FUNC_UNSAFE)==0 ); assert( (pDef->funcFlags & SQLITE_FUNC_DIRECT)==0 ); return exprCodeInlineFunction(pParse, pFarg,

@@ -109555,10 +112775,10 @@ }else{

r1 = sqlite3GetTempRange(pParse, nFarg); } - /* For length() and typeof() functions with a column argument, + /* For length() and typeof() and octet_length() functions, ** set the P5 parameter to the OP_Column opcode to OPFLAG_LENGTHARG - ** or OPFLAG_TYPEOFARG respectively, to avoid unnecessary data - ** loading. + ** or OPFLAG_TYPEOFARG or OPFLAG_BYTELENARG respectively, to avoid + ** unnecessary data loading. */ if( (pDef->funcFlags & (SQLITE_FUNC_LENGTH|SQLITE_FUNC_TYPEOF))!=0 ){ u8 exprOp;

@@ -109568,14 +112788,16 @@ exprOp = pFarg->a[0].pExpr->op;

if( exprOp==TK_COLUMN || exprOp==TK_AGG_COLUMN ){ assert( SQLITE_FUNC_LENGTH==OPFLAG_LENGTHARG ); assert( SQLITE_FUNC_TYPEOF==OPFLAG_TYPEOFARG ); - testcase( pDef->funcFlags & OPFLAG_LENGTHARG ); - pFarg->a[0].pExpr->op2 = - pDef->funcFlags & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG); + assert( SQLITE_FUNC_BYTELEN==OPFLAG_BYTELENARG ); + assert( (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG)==OPFLAG_BYTELENARG ); + testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_LENGTHARG ); + testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_TYPEOFARG ); + testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_BYTELENARG); + pFarg->a[0].pExpr->op2 = pDef->funcFlags & OPFLAG_BYTELENARG; } } - sqlite3ExprCodeExprList(pParse, pFarg, r1, 0, - SQLITE_ECEL_DUP|SQLITE_ECEL_FACTOR); + sqlite3ExprCodeExprList(pParse, pFarg, r1, 0, SQLITE_ECEL_FACTOR); }else{ r1 = 0; }

@@ -109676,17 +112898,16 @@ exprCodeBetween(pParse, pExpr, target, 0, 0);

return target; } case TK_COLLATE: { - if( !ExprHasProperty(pExpr, EP_Collate) - && ALWAYS(pExpr->pLeft) - && pExpr->pLeft->op==TK_FUNCTION - ){ - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); - if( inReg!=target ){ - sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target); - inReg = target; - } - sqlite3VdbeAddOp1(v, OP_ClrSubtype, inReg); - return inReg; + if( !ExprHasProperty(pExpr, EP_Collate) ){ + /* A TK_COLLATE Expr node without the EP_Collate tag is a so-called + ** "SOFT-COLLATE" that is added to constraints that are pushed down + ** from outer queries into sub-queries by the push-down optimization. + ** Clear subtypes as subtypes may not cross a subquery boundary. + */ + assert( pExpr->pLeft ); + sqlite3ExprCode(pParse, pExpr->pLeft, target); + sqlite3VdbeAddOp1(v, OP_ClrSubtype, target); + return target; }else{ pExpr = pExpr->pLeft; goto expr_code_doover; /* 2018-04-28: Prevent deep recursion. */

@@ -109776,7 +112997,7 @@ AggInfo *pAggInfo = pExpr->pAggInfo;

if( pAggInfo ){ assert( pExpr->iAgg>=0 && pExpr->iAgg<pAggInfo->nColumn ); if( !pAggInfo->directMode ){ - inReg = pAggInfo->aCol[pExpr->iAgg].iMem; + inReg = AggInfoColumnReg(pAggInfo, pExpr->iAgg); break; } if( pExpr->pAggInfo->useSortingIdx ){

@@ -109787,16 +113008,19 @@ inReg = target;

break; } } - addrINR = sqlite3VdbeAddOp1(v, OP_IfNullRow, pExpr->iTable); - /* Temporarily disable factoring of constant expressions, since - ** even though expressions may appear to be constant, they are not - ** really constant because they originate from the right-hand side - ** of a LEFT JOIN. */ - pParse->okConstFactor = 0; - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); + addrINR = sqlite3VdbeAddOp3(v, OP_IfNullRow, pExpr->iTable, 0, target); + /* The OP_IfNullRow opcode above can overwrite the result register with + ** NULL. So we have to ensure that the result register is not a value + ** that is suppose to be a constant. Two defenses are needed: + ** (1) Temporarily disable factoring of constant expressions + ** (2) Make sure the computed value really is stored in register + ** "target" and not someplace else. + */ + pParse->okConstFactor = 0; /* note (1) above */ + sqlite3ExprCode(pParse, pExpr->pLeft, target); + assert( target==inReg ); pParse->okConstFactor = okConstFactor; sqlite3VdbeJumpHere(v, addrINR); - sqlite3VdbeChangeP3(v, addrINR, inReg); break; }

@@ -109928,9 +113152,9 @@ ** guard them with an OP_Once opcode to ensure that the code is only executed

** once. If no functions are involved, then factor the code out and put it at ** the end of the prepared statement in the initialization section. ** -** If regDest>=0 then the result is always stored in that register and the +** If regDest>0 then the result is always stored in that register and the ** result is not reusable. If regDest<0 then this routine is free to -** store the value whereever it wants. The register where the expression +** store the value wherever it wants. The register where the expression ** is stored is returned. When regDest<0, two identical expressions might ** code to the same register, if they do not contain function calls and hence ** are factored out into the initialization section at the end of the

@@ -109943,6 +113167,7 @@ int regDest /* Store the value in this register */

){ ExprList *p; assert( ConstFactorOk(pParse) ); + assert( regDest!=0 ); p = pParse->pConstExpr; if( regDest<0 && p ){ struct ExprList_item *pItem;

@@ -110033,7 +113258,9 @@ if( pParse->pVdbe==0 ) return;

inReg = sqlite3ExprCodeTarget(pParse, pExpr, target); if( inReg!=target ){ u8 op; - if( ALWAYS(pExpr) && ExprHasProperty(pExpr,EP_Subquery) ){ + if( ALWAYS(pExpr) + && (ExprHasProperty(pExpr,EP_Subquery) || pExpr->op==TK_REGISTER) + ){ op = OP_Copy; }else{ op = OP_SCopy;

@@ -110752,8 +113979,8 @@ ** are ignored.

*/ SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr *pA,Expr *pB, int iTab){ return sqlite3ExprCompare(0, - sqlite3ExprSkipCollateAndLikely(pA), - sqlite3ExprSkipCollateAndLikely(pB), + sqlite3ExprSkipCollate(pA), + sqlite3ExprSkipCollate(pB), iTab); }

@@ -110846,7 +114073,7 @@ ** pE1: x=21 pE2: x=21 OR y=43 Result: true

** pE1: x!=123 pE2: x IS NOT NULL Result: true ** pE1: x!=?1 pE2: x IS NOT NULL Result: true ** pE1: x IS NULL pE2: x IS NOT NULL Result: false -** pE1: x IS ?2 pE2: x IS NOT NULL Reuslt: false +** pE1: x IS ?2 pE2: x IS NOT NULL Result: false ** ** When comparing TK_COLUMN nodes between pE1 and pE2, if pE2 has ** Expr.iTable<0 then assume a table number given by iTab.

@@ -110883,11 +114110,29 @@ }

return 0; } +/* This is a helper function to impliesNotNullRow(). In this routine, +** set pWalker->eCode to one only if *both* of the input expressions +** separately have the implies-not-null-row property. +*/ +static void bothImplyNotNullRow(Walker *pWalker, Expr *pE1, Expr *pE2){ + if( pWalker->eCode==0 ){ + sqlite3WalkExpr(pWalker, pE1); + if( pWalker->eCode ){ + pWalker->eCode = 0; + sqlite3WalkExpr(pWalker, pE2); + } + } +} + /* ** This is the Expr node callback for sqlite3ExprImpliesNonNullRow(). ** If the expression node requires that the table at pWalker->iCur ** have one or more non-NULL column, then set pWalker->eCode to 1 and abort. ** +** pWalker->mWFlags is non-zero if this inquiry is being undertaking on +** behalf of a RIGHT JOIN (or FULL JOIN). That makes a difference when +** evaluating terms in the ON clause of an inner join. +** ** This routine controls an optimization. False positives (setting ** pWalker->eCode to 1 when it should not be) are deadly, but false-negatives ** (never setting pWalker->eCode) is a harmless missed optimization.

@@ -110896,28 +114141,33 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){

testcase( pExpr->op==TK_AGG_COLUMN ); testcase( pExpr->op==TK_AGG_FUNCTION ); if( ExprHasProperty(pExpr, EP_OuterON) ) return WRC_Prune; + if( ExprHasProperty(pExpr, EP_InnerON) && pWalker->mWFlags ){ + /* If iCur is used in an inner-join ON clause to the left of a + ** RIGHT JOIN, that does *not* mean that the table must be non-null. + ** But it is difficult to check for that condition precisely. + ** To keep things simple, any use of iCur from any inner-join is + ** ignored while attempting to simplify a RIGHT JOIN. */ + return WRC_Prune; + } switch( pExpr->op ){ case TK_ISNOT: case TK_ISNULL: case TK_NOTNULL: case TK_IS: - case TK_OR: case TK_VECTOR: - case TK_CASE: - case TK_IN: case TK_FUNCTION: case TK_TRUTH: + case TK_CASE: testcase( pExpr->op==TK_ISNOT ); testcase( pExpr->op==TK_ISNULL ); testcase( pExpr->op==TK_NOTNULL ); testcase( pExpr->op==TK_IS ); - testcase( pExpr->op==TK_OR ); testcase( pExpr->op==TK_VECTOR ); - testcase( pExpr->op==TK_CASE ); - testcase( pExpr->op==TK_IN ); testcase( pExpr->op==TK_FUNCTION ); testcase( pExpr->op==TK_TRUTH ); + testcase( pExpr->op==TK_CASE ); return WRC_Prune; + case TK_COLUMN: if( pWalker->u.iCur==pExpr->iTable ){ pWalker->eCode = 1;

@@ -110925,21 +114175,38 @@ return WRC_Abort;

} return WRC_Prune; + case TK_OR: case TK_AND: - if( pWalker->eCode==0 ){ + /* Both sides of an AND or OR must separately imply non-null-row. + ** Consider these cases: + ** 1. NOT (x AND y) + ** 2. x OR y + ** If only one of x or y is non-null-row, then the overall expression + ** can be true if the other arm is false (case 1) or true (case 2). + */ + testcase( pExpr->op==TK_OR ); + testcase( pExpr->op==TK_AND ); + bothImplyNotNullRow(pWalker, pExpr->pLeft, pExpr->pRight); + return WRC_Prune; + + case TK_IN: + /* Beware of "x NOT IN ()" and "x NOT IN (SELECT 1 WHERE false)", + ** both of which can be true. But apart from these cases, if + ** the left-hand side of the IN is NULL then the IN itself will be + ** NULL. */ + if( ExprUseXList(pExpr) && ALWAYS(pExpr->x.pList->nExpr>0) ){ sqlite3WalkExpr(pWalker, pExpr->pLeft); - if( pWalker->eCode ){ - pWalker->eCode = 0; - sqlite3WalkExpr(pWalker, pExpr->pRight); - } } return WRC_Prune; case TK_BETWEEN: - if( sqlite3WalkExpr(pWalker, pExpr->pLeft)==WRC_Abort ){ - assert( pWalker->eCode ); - return WRC_Abort; - } + /* In "x NOT BETWEEN y AND z" either x must be non-null-row or else + ** both y and z must be non-null row */ + assert( ExprUseXList(pExpr) ); + assert( pExpr->x.pList->nExpr==2 ); + sqlite3WalkExpr(pWalker, pExpr->pLeft); + bothImplyNotNullRow(pWalker, pExpr->x.pList->a[0].pExpr, + pExpr->x.pList->a[1].pExpr); return WRC_Prune; /* Virtual tables are allowed to use constraints like x=NULL. So
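The rules spelled out in the comments above decide whether a WHERE clause forces some column of a table to be non-NULL, which is what permits a LEFT JOIN to be rewritten as an ordinary join. The snippet below merely restates two of those cases as runnable SQL through the public C API; the tables a and b are hypothetical and no particular query plan is asserted.

#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE a(x INTEGER);"
    "CREATE TABLE b(x INTEGER, y INTEGER);", 0, 0, 0);

  /* Both arms of the OR reference table b and each implies a non-null
  ** row, so by the rule above the LEFT JOIN may be treated as an
  ** ordinary join. */
  sqlite3_exec(db,
    "SELECT * FROM a LEFT JOIN b ON a.x=b.x WHERE b.y=1 OR b.y=2;",
    0, 0, 0);

  /* "b.y NOT IN ()" (an empty IN list is an SQLite extension) is true
  ** even when b.y is NULL, so it does not imply a non-null row and the
  ** LEFT JOIN must be preserved. */
  sqlite3_exec(db,
    "SELECT * FROM a LEFT JOIN b ON a.x=b.x WHERE b.y NOT IN ();",
    0, 0, 0);

  sqlite3_close(db);
  return 0;
}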

@@ -111001,7 +114268,7 @@ ** clause requires that some column of the right table of the LEFT JOIN

** be non-NULL, then the LEFT JOIN can be safely converted into an ** ordinary join. */ -SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){ +SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab, int isRJ){ Walker w; p = sqlite3ExprSkipCollateAndLikely(p); if( p==0 ) return 0;

@@ -111009,7 +114276,7 @@ if( p->op==TK_NOTNULL ){

p = p->pLeft; }else{ while( p->op==TK_AND ){ - if( sqlite3ExprImpliesNonNullRow(p->pLeft, iTab) ) return 1; + if( sqlite3ExprImpliesNonNullRow(p->pLeft, iTab, isRJ) ) return 1; p = p->pRight; } }

@@ -111017,6 +114284,7 @@ w.xExprCallback = impliesNotNullRow;

w.xSelectCallback = 0; w.xSelectCallback2 = 0; w.eCode = 0; + w.mWFlags = isRJ!=0; w.u.iCur = iTab; sqlite3WalkExpr(&w, p); return w.eCode;

@@ -111077,7 +114345,7 @@ return !w.eCode;

} -/* Structure used to pass information throught the Walker in order to +/* Structure used to pass information throughout the Walker in order to ** implement sqlite3ReferencesSrcList(). */ struct RefSrcList {

@@ -111184,6 +114452,12 @@ x.pRef = pSrcList;

assert( pExpr->op==TK_AGG_FUNCTION ); assert( ExprUseXList(pExpr) ); sqlite3WalkExprList(&w, pExpr->x.pList); + if( pExpr->pLeft ){ + assert( pExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pExpr->pLeft) ); + assert( pExpr->pLeft->x.pList!=0 ); + sqlite3WalkExprList(&w, pExpr->pLeft->x.pList); + } #ifndef SQLITE_OMIT_WINDOWFUNC if( ExprHasProperty(pExpr, EP_WinFunc) ){ sqlite3WalkExpr(&w, pExpr->y.pWin->pFilter);

@@ -111207,10 +114481,8 @@ ** object that is referenced does not refer directly to the Expr. If

** it does, make a copy. This is done because the pExpr argument is ** subject to change. ** -** The copy is stored on pParse->pConstExpr with a register number of 0. -** This will cause the expression to be deleted automatically when the -** Parse object is destroyed, but the zero register number means that it -** will not generate any code in the preamble. +** The copy is scheduled for deletion using the sqlite3ExprDeferredDelete() +** which builds on the sqlite3ParserAddCleanup() mechanism. */ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ if( ALWAYS(!ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced))

@@ -111220,10 +114492,11 @@ AggInfo *pAggInfo = pExpr->pAggInfo;

int iAgg = pExpr->iAgg; Parse *pParse = pWalker->pParse; sqlite3 *db = pParse->db; + assert( iAgg>=0 ); if( pExpr->op!=TK_AGG_FUNCTION ){ - assert( pExpr->op==TK_AGG_COLUMN || pExpr->op==TK_IF_NULL_ROW ); - assert( iAgg>=0 && iAgg<pAggInfo->nColumn ); - if( pAggInfo->aCol[iAgg].pCExpr==pExpr ){ + if( iAgg<pAggInfo->nColumn + && pAggInfo->aCol[iAgg].pCExpr==pExpr + ){ pExpr = sqlite3ExprDup(db, pExpr, 0); if( pExpr ){ pAggInfo->aCol[iAgg].pCExpr = pExpr;

@@ -111232,8 +114505,9 @@ }

} }else{ assert( pExpr->op==TK_AGG_FUNCTION ); - assert( iAgg>=0 && iAgg<pAggInfo->nFunc ); - if( pAggInfo->aFunc[iAgg].pFExpr==pExpr ){ + if( ALWAYS(iAgg<pAggInfo->nFunc) + && pAggInfo->aFunc[iAgg].pFExpr==pExpr + ){ pExpr = sqlite3ExprDup(db, pExpr, 0); if( pExpr ){ pAggInfo->aFunc[iAgg].pFExpr = pExpr;

@@ -111289,6 +114563,74 @@ return i;

} /* +** Search the AggInfo object for an aCol[] entry that has iTable and iColumn. +** Return the index in aCol[] of the entry that describes that column. +** +** If no prior entry is found, create a new one and return -1. The +** new column will have an index of pAggInfo->nColumn-1. +*/ +static void findOrCreateAggInfoColumn( + Parse *pParse, /* Parsing context */ + AggInfo *pAggInfo, /* The AggInfo object to search and/or modify */ + Expr *pExpr /* Expr describing the column to find or insert */ +){ + struct AggInfo_col *pCol; + int k; + + assert( pAggInfo->iFirstReg==0 ); + pCol = pAggInfo->aCol; + for(k=0; k<pAggInfo->nColumn; k++, pCol++){ + if( pCol->pCExpr==pExpr ) return; + if( pCol->iTable==pExpr->iTable + && pCol->iColumn==pExpr->iColumn + && pExpr->op!=TK_IF_NULL_ROW + ){ + goto fix_up_expr; + } + } + k = addAggInfoColumn(pParse->db, pAggInfo); + if( k<0 ){ + /* OOM on resize */ + assert( pParse->db->mallocFailed ); + return; + } + pCol = &pAggInfo->aCol[k]; + assert( ExprUseYTab(pExpr) ); + pCol->pTab = pExpr->y.pTab; + pCol->iTable = pExpr->iTable; + pCol->iColumn = pExpr->iColumn; + pCol->iSorterColumn = -1; + pCol->pCExpr = pExpr; + if( pAggInfo->pGroupBy && pExpr->op!=TK_IF_NULL_ROW ){ + int j, n; + ExprList *pGB = pAggInfo->pGroupBy; + struct ExprList_item *pTerm = pGB->a; + n = pGB->nExpr; + for(j=0; j<n; j++, pTerm++){ + Expr *pE = pTerm->pExpr; + if( pE->op==TK_COLUMN + && pE->iTable==pExpr->iTable + && pE->iColumn==pExpr->iColumn + ){ + pCol->iSorterColumn = j; + break; + } + } + } + if( pCol->iSorterColumn<0 ){ + pCol->iSorterColumn = pAggInfo->nSortingColumn++; + } +fix_up_expr: + ExprSetVVAProperty(pExpr, EP_NoReduce); + assert( pExpr->pAggInfo==0 || pExpr->pAggInfo==pAggInfo ); + pExpr->pAggInfo = pAggInfo; + if( pExpr->op==TK_COLUMN ){ + pExpr->op = TK_AGG_COLUMN; + } + pExpr->iAgg = (i16)k; +} + +/* ** This is the xExprCallback for a tree walker. It is used to ** implement sqlite3ExprAnalyzeAggregates(). See sqlite3ExprAnalyzeAggregates ** for additional information.

@@ -111301,7 +114643,45 @@ SrcList *pSrcList = pNC->pSrcList;

AggInfo *pAggInfo = pNC->uNC.pAggInfo; assert( pNC->ncFlags & NC_UAggInfo ); + assert( pAggInfo->iFirstReg==0 ); switch( pExpr->op ){ + default: { + IndexedExpr *pIEpr; + Expr tmp; + assert( pParse->iSelfTab==0 ); + if( (pNC->ncFlags & NC_InAggFunc)==0 ) break; + if( pParse->pIdxEpr==0 ) break; + for(pIEpr=pParse->pIdxEpr; pIEpr; pIEpr=pIEpr->pIENext){ + int iDataCur = pIEpr->iDataCur; + if( iDataCur<0 ) continue; + if( sqlite3ExprCompare(0, pExpr, pIEpr->pExpr, iDataCur)==0 ) break; + } + if( pIEpr==0 ) break; + if( NEVER(!ExprUseYTab(pExpr)) ) break; + for(i=0; i<pSrcList->nSrc; i++){ + if( pSrcList->a[0].iCursor==pIEpr->iDataCur ) break; + } + if( i>=pSrcList->nSrc ) break; + if( NEVER(pExpr->pAggInfo!=0) ) break; /* Resolved by outer context */ + if( pParse->nErr ){ return WRC_Abort; } + + /* If we reach this point, it means that expression pExpr can be + ** translated into a reference to an index column as described by + ** pIEpr. + */ + memset(&tmp, 0, sizeof(tmp)); + tmp.op = TK_AGG_COLUMN; + tmp.iTable = pIEpr->iIdxCur; + tmp.iColumn = pIEpr->iIdxCol; + findOrCreateAggInfoColumn(pParse, pAggInfo, &tmp); + if( pParse->nErr ){ return WRC_Abort; } + assert( pAggInfo->aCol!=0 ); + assert( tmp.iAgg<pAggInfo->nColumn ); + pAggInfo->aCol[tmp.iAgg].pCExpr = pExpr; + pExpr->pAggInfo = pAggInfo; + pExpr->iAgg = tmp.iAgg; + return WRC_Prune; + } case TK_IF_NULL_ROW: case TK_AGG_COLUMN: case TK_COLUMN: {

@@ -111313,72 +114693,14 @@ ** clause of the aggregate query */

if( ALWAYS(pSrcList!=0) ){ SrcItem *pItem = pSrcList->a; for(i=0; i<pSrcList->nSrc; i++, pItem++){ - struct AggInfo_col *pCol; assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); if( pExpr->iTable==pItem->iCursor ){ - /* If we reach this point, it means that pExpr refers to a table - ** that is in the FROM clause of the aggregate query. - ** - ** Make an entry for the column in pAggInfo->aCol[] if there - ** is not an entry there already. - */ - int k; - pCol = pAggInfo->aCol; - for(k=0; k<pAggInfo->nColumn; k++, pCol++){ - if( pCol->iTable==pExpr->iTable - && pCol->iColumn==pExpr->iColumn - && pExpr->op!=TK_IF_NULL_ROW - ){ - break; - } - } - if( (k>=pAggInfo->nColumn) - && (k = addAggInfoColumn(pParse->db, pAggInfo))>=0 - ){ - pCol = &pAggInfo->aCol[k]; - assert( ExprUseYTab(pExpr) ); - pCol->pTab = pExpr->y.pTab; - pCol->iTable = pExpr->iTable; - pCol->iColumn = pExpr->iColumn; - pCol->iMem = ++pParse->nMem; - pCol->iSorterColumn = -1; - pCol->pCExpr = pExpr; - if( pAggInfo->pGroupBy && pExpr->op!=TK_IF_NULL_ROW ){ - int j, n; - ExprList *pGB = pAggInfo->pGroupBy; - struct ExprList_item *pTerm = pGB->a; - n = pGB->nExpr; - for(j=0; j<n; j++, pTerm++){ - Expr *pE = pTerm->pExpr; - if( pE->op==TK_COLUMN - && pE->iTable==pExpr->iTable - && pE->iColumn==pExpr->iColumn - ){ - pCol->iSorterColumn = j; - break; - } - } - } - if( pCol->iSorterColumn<0 ){ - pCol->iSorterColumn = pAggInfo->nSortingColumn++; - } - } - /* There is now an entry for pExpr in pAggInfo->aCol[] (either - ** because it was there before or because we just created it). - ** Convert the pExpr to be a TK_AGG_COLUMN referring to that - ** pAggInfo->aCol[] entry. - */ - ExprSetVVAProperty(pExpr, EP_NoReduce); - pExpr->pAggInfo = pAggInfo; - if( pExpr->op==TK_COLUMN ){ - pExpr->op = TK_AGG_COLUMN; - } - pExpr->iAgg = (i16)k; + findOrCreateAggInfoColumn(pParse, pAggInfo, pExpr); break; } /* endif pExpr->iTable==pItem->iCursor */ } /* end loop over pSrcList */ } - return WRC_Prune; + return WRC_Continue; } case TK_AGG_FUNCTION: { if( (pNC->ncFlags & NC_InAggFunc)==0

@@ -111400,15 +114722,42 @@ */

u8 enc = ENC(pParse->db); i = addAggInfoFunc(pParse->db, pAggInfo); if( i>=0 ){ + int nArg; assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); pItem = &pAggInfo->aFunc[i]; pItem->pFExpr = pExpr; - pItem->iMem = ++pParse->nMem; assert( ExprUseUToken(pExpr) ); + nArg = pExpr->x.pList ? pExpr->x.pList->nExpr : 0; pItem->pFunc = sqlite3FindFunction(pParse->db, - pExpr->u.zToken, - pExpr->x.pList ? pExpr->x.pList->nExpr : 0, enc, 0); - if( pExpr->flags & EP_Distinct ){ + pExpr->u.zToken, nArg, enc, 0); + assert( pItem->bOBUnique==0 ); + if( pExpr->pLeft + && (pItem->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)==0 + ){ + /* The NEEDCOLL test above causes any ORDER BY clause on + ** aggregate min() or max() to be ignored. */ + ExprList *pOBList; + assert( nArg>0 ); + assert( pExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pExpr->pLeft) ); + pItem->iOBTab = pParse->nTab++; + pOBList = pExpr->pLeft->x.pList; + assert( pOBList->nExpr>0 ); + assert( pItem->bOBUnique==0 ); + if( pOBList->nExpr==1 + && nArg==1 + && sqlite3ExprCompare(0,pOBList->a[0].pExpr, + pExpr->x.pList->a[0].pExpr,0)==0 + ){ + pItem->bOBPayload = 0; + pItem->bOBUnique = ExprHasProperty(pExpr, EP_Distinct); + }else{ + pItem->bOBPayload = 1; + } + }else{ + pItem->iOBTab = -1; + } + if( ExprHasProperty(pExpr, EP_Distinct) && !pItem->bOBUnique ){ pItem->iDistinct = pParse->nTab++; }else{ pItem->iDistinct = -1;

@@ -111533,6 +114882,37 @@ pParse->nRangeReg = 0;

} /* +** Make sure sufficient registers have been allocated so that +** iReg is a valid register number. +*/ +SQLITE_PRIVATE void sqlite3TouchRegister(Parse *pParse, int iReg){ + if( pParse->nMem<iReg ) pParse->nMem = iReg; +} + +#if defined(SQLITE_ENABLE_STAT4) || defined(SQLITE_DEBUG) +/* +** Return the latest reusable register in the set of all registers. +** The value returned is no less than iMin. If any register iMin or +** greater is in permanent use, then return one more than that last +** permanent register. +*/ +SQLITE_PRIVATE int sqlite3FirstAvailableRegister(Parse *pParse, int iMin){ + const ExprList *pList = pParse->pConstExpr; + if( pList ){ + int i; + for(i=0; i<pList->nExpr; i++){ + if( pList->a[i].u.iConstExprReg>=iMin ){ + iMin = pList->a[i].u.iConstExprReg + 1; + } + } + } + pParse->nTempReg = 0; + pParse->nRangeReg = 0; + return iMin; +} +#endif /* SQLITE_ENABLE_STAT4 || SQLITE_DEBUG */ + +/* ** Validate that no temporary register falls within the range of ** iFirst..iLast, inclusive. This routine is only call from within assert() ** statements.

@@ -111549,6 +114929,14 @@ }

for(i=0; i<pParse->nTempReg; i++){ if( pParse->aTempReg[i]>=iFirst && pParse->aTempReg[i]<=iLast ){ return 0; + } + } + if( pParse->pConstExpr ){ + ExprList *pList = pParse->pConstExpr; + for(i=0; i<pList->nExpr; i++){ + int iReg = pList->a[i].u.iConstExprReg; + if( iReg==0 ) continue; + if( iReg>=iFirst && iReg<=iLast ) return 0; } } return 1;

@@ -112005,14 +115393,19 @@

/* Verify that constraints are still satisfied */ if( pNew->pCheck!=0 || (pCol->notNull && (pCol->colFlags & COLFLAG_GENERATED)!=0) + || (pTab->tabFlags & TF_Strict)!=0 ){ sqlite3NestedParse(pParse, "SELECT CASE WHEN quick_check GLOB 'CHECK*'" " THEN raise(ABORT,'CHECK constraint failed')" + " WHEN quick_check GLOB 'non-* value in*'" + " THEN raise(ABORT,'type mismatch on DEFAULT')" " ELSE raise(ABORT,'NOT NULL constraint failed')" " END" " FROM pragma_quick_check(%Q,%Q)" - " WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'", + " WHERE quick_check GLOB 'CHECK*'" + " OR quick_check GLOB 'NULL*'" + " OR quick_check GLOB 'non-* value in*'", zTab, zDb ); }

@@ -112101,7 +115494,7 @@ assert( IsOrdinaryTable(pNew) );

pNew->u.tab.pDfltList = sqlite3ExprListDup(db, pTab->u.tab.pDfltList, 0); pNew->pSchema = db->aDb[iDb].pSchema; pNew->u.tab.addColOffset = pTab->u.tab.addColOffset; - pNew->nTabRef = 1; + assert( pNew->nTabRef==1 ); exit_begin_add_column: sqlite3SrcListDelete(db, pSrc);

@@ -112300,13 +115693,14 @@ assert( pParse==pParse->db->pParse );

assert( pParse->db->mallocFailed==0 || pParse->nErr!=0 ); if( pParse->nErr==0 ){ const RenameToken *p; - u8 i = 0; + u32 i = 1; for(p=pParse->pRename; p; p=p->pNext){ if( p->p ){ assert( p->p!=pPtr ); - i += *(u8*)(p->p); + i += *(u8*)(p->p) | 1; } } + assert( i>0 ); } } #else

@@ -112605,7 +115999,7 @@ return pBest;

} /* -** An error occured while parsing or otherwise processing a database +** An error occurred while parsing or otherwise processing a database ** object (either pParse->pNewTable, pNewIndex or pNewTrigger) as part of an ** ALTER TABLE RENAME COLUMN program. The error message emitted by the ** sub-routine is currently stored in pParse->zErrMsg. This function

@@ -112838,6 +116232,19 @@ return rc;

} /* +** Set all pEList->a[].fg.eEName fields in the expression-list to val. +*/ +static void renameSetENames(ExprList *pEList, int val){ + if( pEList ){ + int i; + for(i=0; i<pEList->nExpr; i++){ + assert( val==ENAME_NAME || pEList->a[i].fg.eEName==ENAME_NAME ); + pEList->a[i].fg.eEName = val; + } + } +} + +/* ** Resolve all symbols in the trigger at pParse->pNewTrigger, assuming ** it was read from the schema of database zDb. Return SQLITE_OK if ** successful. Otherwise, return an SQLite error code and leave an error

@@ -112884,7 +116291,17 @@ pStep->pExprList = 0;

pSrc = 0; rc = SQLITE_NOMEM; }else{ + /* pStep->pExprList contains an expression-list used for an UPDATE + ** statement. So the a[].zEName values are the RHS of the + ** "<col> = <expr>" clauses of the UPDATE statement. So, before + ** running SelectPrep(), change all the eEName values in + ** pStep->pExprList to ENAME_SPAN (from their current value of + ** ENAME_NAME). This is to prevent any ids in ON() clauses that are + ** part of pSrc from being incorrectly resolved against the + ** a[].zEName values as if they were column aliases. */ + renameSetENames(pStep->pExprList, ENAME_SPAN); sqlite3SelectPrep(pParse, pSel, 0); + renameSetENames(pStep->pExprList, ENAME_NAME); rc = pParse->nErr ? SQLITE_ERROR : SQLITE_OK; assert( pStep->pExprList==0 || pStep->pExprList==pSel->pEList ); assert( pSrc==pSel->pSrc );

@@ -114833,11 +118250,15 @@ int regTabname = iMem++; /* Register containing table name */

int regIdxname = iMem++; /* Register containing index name */ int regStat1 = iMem++; /* Value for the stat column of sqlite_stat1 */ int regPrev = iMem; /* MUST BE LAST (see below) */ +#ifdef SQLITE_ENABLE_STAT4 + int doOnce = 1; /* Flag for a one-time computation */ +#endif #ifdef SQLITE_ENABLE_PREUPDATE_HOOK Table *pStat1 = 0; #endif - pParse->nMem = MAX(pParse->nMem, iMem); + sqlite3TouchRegister(pParse, iMem); + assert( sqlite3NoTempsInRange(pParse, regNewRowid, iMem) ); v = sqlite3GetVdbe(pParse); if( v==0 || NEVER(pTab==0) ){ return;

@@ -114943,7 +118364,7 @@ /* Make sure there are enough memory cells allocated to accommodate

** the regPrev array and a trailing rowid (the rowid slot is required ** when building a record to insert into the sample column of ** the sqlite_stat4 table. */ - pParse->nMem = MAX(pParse->nMem, regPrev+nColTest); + sqlite3TouchRegister(pParse, regPrev+nColTest); /* Open a read-only cursor on the index being analyzed. */ assert( iDb==sqlite3SchemaToIndex(db, pIdx->pSchema) );

@@ -115115,7 +118536,35 @@ int addrNext;

int addrIsNull; u8 seekOp = HasRowid(pTab) ? OP_NotExists : OP_NotFound; - pParse->nMem = MAX(pParse->nMem, regCol+nCol); + if( doOnce ){ + int mxCol = nCol; + Index *pX; + + /* Compute the maximum number of columns in any index */ + for(pX=pTab->pIndex; pX; pX=pX->pNext){ + int nColX; /* Number of columns in pX */ + if( !HasRowid(pTab) && IsPrimaryKeyIndex(pX) ){ + nColX = pX->nKeyCol; + }else{ + nColX = pX->nColumn; + } + if( nColX>mxCol ) mxCol = nColX; + } + + /* Allocate space to compute results for the largest index */ + sqlite3TouchRegister(pParse, regCol+mxCol); + doOnce = 0; +#ifdef SQLITE_DEBUG + /* Verify that the call to sqlite3ClearTempRegCache() below + ** really is needed. + ** https://sqlite.org/forum/forumpost/83cb4a95a0 (2023-03-25) + */ + testcase( !sqlite3NoTempsInRange(pParse, regEq, regCol+mxCol) ); +#endif + sqlite3ClearTempRegCache(pParse); /* tag-20230325-1 */ + assert( sqlite3NoTempsInRange(pParse, regEq, regCol+mxCol) ); + } + assert( sqlite3NoTempsInRange(pParse, regEq, regCol+nCol) ); addrNext = sqlite3VdbeCurrentAddr(v); callStatGet(pParse, regStat, STAT_GET_ROWID, regSampleRowid);

@@ -115196,6 +118645,11 @@ assert( sqlite3SchemaMutexHeld(db, iDb, 0) );

for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){ Table *pTab = (Table*)sqliteHashData(k); analyzeOneTable(pParse, pTab, 0, iStatCur, iMem, iTab); +#ifdef SQLITE_ENABLE_STAT4 + iMem = sqlite3FirstAvailableRegister(pParse, iMem); +#else + assert( iMem==sqlite3FirstAvailableRegister(pParse,iMem) ); +#endif } loadAnalysis(pParse, iDb); }

@@ -115436,6 +118890,8 @@ ** If the Index.aSample variable is not NULL, delete the aSample[] array

** and its contents. */ SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3 *db, Index *pIdx){ + assert( db!=0 ); + assert( pIdx!=0 ); #ifdef SQLITE_ENABLE_STAT4 if( pIdx->aSample ){ int j;

@@ -115445,7 +118901,7 @@ sqlite3DbFree(db, p->p);

} sqlite3DbFree(db, pIdx->aSample); } - if( db && db->pnBytesFreed==0 ){ + if( db->pnBytesFreed==0 ){ pIdx->nSample = 0; pIdx->aSample = 0; }

@@ -115581,6 +119037,10 @@ nSample = sqlite3_column_int(pStmt, 1);

pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); assert( pIdx==0 || pIdx->nSample==0 ); if( pIdx==0 ) continue; + if( pIdx->aSample!=0 ){ + /* The same index appears in sqlite_stat4 under multiple names */ + continue; + } assert( !HasRowid(pIdx->pTable) || pIdx->nColumn==pIdx->nKeyCol+1 ); if( !HasRowid(pIdx->pTable) && IsPrimaryKeyIndex(pIdx) ){ nIdxCol = pIdx->nKeyCol;

@@ -115588,6 +119048,7 @@ }else{

nIdxCol = pIdx->nColumn; } pIdx->nSampleCol = nIdxCol; + pIdx->mxSample = nSample; nByte = sizeof(IndexSample) * nSample; nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample; nByte += nIdxCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */

@@ -115627,6 +119088,11 @@ zIndex = (char *)sqlite3_column_text(pStmt, 0);

if( zIndex==0 ) continue; pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); if( pIdx==0 ) continue; + if( pIdx->nSample>=pIdx->mxSample ){ + /* Too many slots used because the same index appears in + ** sqlite_stat4 using multiple names */ + continue; + } /* This next condition is true if data has already been loaded from ** the sqlite_stat4 table. */ nCol = pIdx->nSampleCol;

@@ -115639,14 +119105,15 @@ decodeIntArray((char*)sqlite3_column_text(pStmt,1),nCol,pSample->anEq,0,0);

decodeIntArray((char*)sqlite3_column_text(pStmt,2),nCol,pSample->anLt,0,0); decodeIntArray((char*)sqlite3_column_text(pStmt,3),nCol,pSample->anDLt,0,0); - /* Take a copy of the sample. Add two 0x00 bytes the end of the buffer. + /* Take a copy of the sample. Add 8 extra 0x00 bytes the end of the buffer. ** This is in case the sample record is corrupted. In that case, the ** sqlite3VdbeRecordCompare() may read up to two varints past the ** end of the allocated buffer before it realizes it is dealing with - ** a corrupt record. Adding the two 0x00 bytes prevents this from causing + ** a corrupt record. Or it might try to read a large integer from the + ** buffer. In any case, eight 0x00 bytes prevents this from causing ** a buffer overread. */ pSample->n = sqlite3_column_bytes(pStmt, 4); - pSample->p = sqlite3DbMallocZero(db, pSample->n + 2); + pSample->p = sqlite3DbMallocZero(db, pSample->n + 8); if( pSample->p==0 ){ sqlite3_finalize(pStmt); return SQLITE_NOMEM_BKPT;

@@ -115670,11 +119137,12 @@ int rc = SQLITE_OK; /* Result codes from subroutines */

const Table *pStat4; assert( db->lookaside.bDisable ); - if( (pStat4 = sqlite3FindTable(db, "sqlite_stat4", zDb))!=0 + if( OptimizationEnabled(db, SQLITE_Stat4) + && (pStat4 = sqlite3FindTable(db, "sqlite_stat4", zDb))!=0 && IsOrdinaryTable(pStat4) ){ rc = loadStatTbl(db, - "SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx", + "SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx COLLATE nocase", "SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4", zDb );

@@ -115864,7 +119332,7 @@ char *zPath = 0;

char *zErr = 0; unsigned int flags; Db *aNew; /* New array of Db pointers */ - Db *pNew; /* Db object for the newly attached database */ + Db *pNew = 0; /* Db object for the newly attached database */ char *zErrDyn = 0; sqlite3_vfs *pVfs;

@@ -115884,13 +119352,26 @@ if( REOPEN_AS_MEMDB(db) ){

/* This is not a real ATTACH. Instead, this routine is being called ** from sqlite3_deserialize() to close database db->init.iDb and ** reopen it as a MemDB */ + Btree *pNewBt = 0; pVfs = sqlite3_vfs_find("memdb"); if( pVfs==0 ) return; - pNew = &db->aDb[db->init.iDb]; - if( pNew->pBt ) sqlite3BtreeClose(pNew->pBt); - pNew->pBt = 0; - pNew->pSchema = 0; - rc = sqlite3BtreeOpen(pVfs, "x\0", db, &pNew->pBt, 0, SQLITE_OPEN_MAIN_DB); + rc = sqlite3BtreeOpen(pVfs, "x\0", db, &pNewBt, 0, SQLITE_OPEN_MAIN_DB); + if( rc==SQLITE_OK ){ + Schema *pNewSchema = sqlite3SchemaGet(db, pNewBt); + if( pNewSchema ){ + /* Both the Btree and the new Schema were allocated successfully. + ** Close the old db and update the aDb[] slot with the new memdb + ** values. */ + pNew = &db->aDb[db->init.iDb]; + if( ALWAYS(pNew->pBt) ) sqlite3BtreeClose(pNew->pBt); + pNew->pBt = pNewBt; + pNew->pSchema = pNewSchema; + }else{ + sqlite3BtreeClose(pNewBt); + rc = SQLITE_NOMEM; + } + } + if( rc ) goto attach_error; }else{ /* This is a real ATTACH **

@@ -116003,7 +119484,7 @@ }

} #endif if( rc ){ - if( !REOPEN_AS_MEMDB(db) ){ + if( ALWAYS(!REOPEN_AS_MEMDB(db)) ){ int iDb = db->nDb - 1; assert( iDb>=2 ); if( db->aDb[iDb].pBt ){

@@ -116119,6 +119600,8 @@ NameContext sName;

Vdbe *v; sqlite3* db = pParse->db; int regArgs; + + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ) goto attach_end; if( pParse->nErr ) goto attach_end; memset(&sName, 0, sizeof(NameContext));

@@ -116588,7 +120071,7 @@ ){

sqlite3 *db = pParse->db; int rc; - /* Don't do any authorization checks if the database is initialising + /* Don't do any authorization checks if the database is initializing ** or if the parser is being invoked from within sqlite3_declare_vtab. */ assert( !IN_RENAME_OBJECT || db->xAuth==0 );

@@ -116889,29 +120372,26 @@ }

pParse->nVtabLock = 0; #endif +#ifndef SQLITE_OMIT_SHARED_CACHE /* Once all the cookies have been verified and transactions opened, ** obtain the required table-locks. This is a no-op unless the ** shared-cache feature is enabled. */ - codeTableLocks(pParse); + if( pParse->nTableLock ) codeTableLocks(pParse); +#endif /* Initialize any AUTOINCREMENT data structures required. */ - sqlite3AutoincrementBegin(pParse); + if( pParse->pAinc ) sqlite3AutoincrementBegin(pParse); - /* Code constant expressions that where factored out of inner loops. - ** - ** The pConstExpr list might also contain expressions that we simply - ** want to keep around until the Parse object is deleted. Such - ** expressions have iConstExprReg==0. Do not generate code for - ** those expressions, of course. + /* Code constant expressions that were factored out of inner loops. */ if( pParse->pConstExpr ){ ExprList *pEL = pParse->pConstExpr; pParse->okConstFactor = 0; for(i=0; i<pEL->nExpr; i++){ - int iReg = pEL->a[i].u.iConstExprReg; - sqlite3ExprCode(pParse, pEL->a[i].pExpr, iReg); + assert( pEL->a[i].u.iConstExprReg>0 ); + sqlite3ExprCode(pParse, pEL->a[i].pExpr, pEL->a[i].u.iConstExprReg); } }

@@ -116962,6 +120442,7 @@ u32 savedDbFlags = db->mDbFlags;

char saveBuf[PARSE_TAIL_SZ]; if( pParse->nErr ) return; + if( pParse->eParseMode ) return; assert( pParse->nested<10 ); /* Nesting should only be of limited depth */ va_start(ap, zFormat); zSql = sqlite3VMPrintf(db, zFormat, ap);

@@ -117409,7 +120890,7 @@ }

} /* -** Return the collating squence name for a column +** Return the collating sequence name for a column */ SQLITE_PRIVATE const char *sqlite3ColumnColl(Column *pCol){ const char *z;

@@ -117502,7 +120983,7 @@

if( IsOrdinaryTable(pTable) ){ sqlite3FkDelete(db, pTable); } -#ifndef SQLITE_OMIT_VIRTUAL_TABLE +#ifndef SQLITE_OMIT_VIRTUALTABLE else if( IsVirtual(pTable) ){ sqlite3VtabClear(db, pTable); }

@@ -118065,19 +121546,12 @@ }

#endif /* -** Name of the special TEMP trigger used to implement RETURNING. The -** name begins with "sqlite_" so that it is guaranteed not to collide -** with any application-generated triggers. -*/ -#define RETURNING_TRIGGER_NAME "sqlite_returning" - -/* ** Clean up the data structures associated with the RETURNING clause. */ static void sqlite3DeleteReturning(sqlite3 *db, Returning *pRet){ Hash *pHash; pHash = &(db->aDb[1].pSchema->trigHash); - sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, 0); + sqlite3HashInsert(pHash, pRet->zName, 0); sqlite3ExprListDelete(db, pRet->pReturnEL); sqlite3DbFree(db, pRet); }

@@ -118105,7 +121579,7 @@ sqlite3 *db = pParse->db;

if( pParse->pNewTrigger ){ sqlite3ErrorMsg(pParse, "cannot use RETURNING in a trigger"); }else{ - assert( pParse->bReturning==0 ); + assert( pParse->bReturning==0 || pParse->ifNotExists ); } pParse->bReturning = 1; pRet = sqlite3DbMallocZero(db, sizeof(*pRet));

@@ -118120,7 +121594,9 @@ sqlite3ParserAddCleanup(pParse,

(void(*)(sqlite3*,void*))sqlite3DeleteReturning, pRet); testcase( pParse->earlyCleanup ); if( db->mallocFailed ) return; - pRet->retTrig.zName = RETURNING_TRIGGER_NAME; + sqlite3_snprintf(sizeof(pRet->zName), pRet->zName, + "sqlite_returning_%p", pParse); + pRet->retTrig.zName = pRet->zName; pRet->retTrig.op = TK_RETURNING; pRet->retTrig.tr_tm = TRIGGER_AFTER; pRet->retTrig.bReturning = 1;

@@ -118131,8 +121607,9 @@ pRet->retTStep.op = TK_RETURNING;

pRet->retTStep.pTrig = &pRet->retTrig; pRet->retTStep.pExprList = pList; pHash = &(db->aDb[1].pSchema->trigHash); - assert( sqlite3HashFind(pHash, RETURNING_TRIGGER_NAME)==0 || pParse->nErr ); - if( sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, &pRet->retTrig) + assert( sqlite3HashFind(pHash, pRet->zName)==0 + || pParse->nErr || pParse->ifNotExists ); + if( sqlite3HashInsert(pHash, pRet->zName, &pRet->retTrig) ==&pRet->retTrig ){ sqlite3OomFault(db); }

@@ -118166,7 +121643,7 @@ return;

} if( !IN_RENAME_OBJECT ) sqlite3DequoteToken(&sName); - /* Because keywords GENERATE ALWAYS can be converted into indentifiers + /* Because keywords GENERATE ALWAYS can be converted into identifiers ** by the parser, we can sometimes end up with a typename that ends ** with "generated always". Check for this case and omit the surplus ** text. */

@@ -118387,7 +121864,7 @@ SQLITE_PRIVATE void sqlite3AddDefaultValue(

Parse *pParse, /* Parsing context */ Expr *pExpr, /* The parsed expression of the default value */ const char *zStart, /* Start of the default value text */ - const char *zEnd /* First character past end of defaut value text */ + const char *zEnd /* First character past end of default value text */ ){ Table *p; Column *pCol;

@@ -118659,6 +122136,14 @@ pTab->tabFlags |= eType;

if( pCol->colFlags & COLFLAG_PRIMKEY ){ makeColumnPartOfPrimaryKey(pParse, pCol); /* For the error message */ } + if( ALWAYS(pExpr) && pExpr->op==TK_ID ){ + /* The value of a generated column needs to be a real expression, not + ** just a reference to another column, in order for covering index + ** optimizations to work correctly. So if the value is not an expression, + ** turn it into one by adding a unary "+" operator. */ + pExpr = sqlite3PExpr(pParse, TK_UPLUS, pExpr, 0); + } + if( pExpr && pExpr->op!=TK_RAISE ) pExpr->affExpr = pCol->affinity; sqlite3ColumnSetExpr(pParse, pTab, pCol, pExpr); pExpr = 0; goto generated_done;
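The unary "+" wrapping added above is relevant when a generated column is defined as nothing more than a reference to another column. A hypothetical schema exercising that shape, driven through the public API, is sketched below; the table and column names are invented.

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  sqlite3_open(":memory:", &db);
  /* Column b is generated from a bare reference to a. Per the change
  ** above, the parser now stores the definition as "+a" so that it is a
  ** real expression rather than a plain column reference. */
  sqlite3_exec(db,
    "CREATE TABLE t(a INTEGER, b AS (a));"
    "INSERT INTO t(a) VALUES(41);"
    "SELECT a, b FROM t;",
    0, 0, &zErr);
  if( zErr ){ fprintf(stderr, "%s\n", zErr); sqlite3_free(zErr); }
  sqlite3_close(db);
  return 0;
}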

@@ -118727,7 +122212,7 @@ ** nul-terminated string pointed to by the third parameter, zSignedIdent,

** to the specified offset in the buffer and updates *pIdx to refer ** to the first byte after the last byte written before returning. ** -** If the string zSignedIdent consists entirely of alpha-numeric +** If the string zSignedIdent consists entirely of alphanumeric ** characters, does not begin with a digit and is not an SQL keyword, ** then it is copied to the output buffer exactly as it is. Otherwise, ** it is quoted using double-quotes.

@@ -118795,7 +122280,8 @@ /* SQLITE_AFF_BLOB */ "",

/* SQLITE_AFF_TEXT */ " TEXT", /* SQLITE_AFF_NUMERIC */ " NUM", /* SQLITE_AFF_INTEGER */ " INT", - /* SQLITE_AFF_REAL */ " REAL" + /* SQLITE_AFF_REAL */ " REAL", + /* SQLITE_AFF_FLEXNUM */ " NUM", }; int len; const char *zType;

@@ -118811,10 +122297,12 @@ testcase( pCol->affinity==SQLITE_AFF_TEXT );

testcase( pCol->affinity==SQLITE_AFF_NUMERIC ); testcase( pCol->affinity==SQLITE_AFF_INTEGER ); testcase( pCol->affinity==SQLITE_AFF_REAL ); + testcase( pCol->affinity==SQLITE_AFF_FLEXNUM ); zType = azType[pCol->affinity - SQLITE_AFF_BLOB]; len = sqlite3Strlen30(zType); assert( pCol->affinity==SQLITE_AFF_BLOB + || pCol->affinity==SQLITE_AFF_FLEXNUM || pCol->affinity==sqlite3AffinityType(zType, 0) ); memcpy(&zStmt[k], zType, len); k += len;

@@ -118876,7 +122364,7 @@ const Column *aCol = pIdx->pTable->aCol;

for(i=0; i<pIdx->nColumn; i++){ i16 x = pIdx->aiColumn[i]; assert( x<pIdx->pTable->nCol ); - wIndex += x<0 ? 1 : aCol[pIdx->aiColumn[i]].szEst; + wIndex += x<0 ? 1 : aCol[x].szEst; } pIdx->szIdxRow = sqlite3LogEst(wIndex*4); }

@@ -119229,6 +122717,7 @@ ** index definition are tagged this way to help ensure that we do

** not pass them into code generator routines by mistake. */ static int markImmutableExprStep(Walker *pWalker, Expr *pExpr){ + (void)pWalker; ExprSetVVAProperty(pExpr, EP_Immutable); return WRC_Continue; }

@@ -119564,6 +123053,17 @@

/* Reparse everything to update our internal data structures */ sqlite3VdbeAddParseSchemaOp(v, iDb, sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName),0); + + /* Test for cycles in generated columns and illegal expressions + ** in CHECK constraints and in DEFAULT clauses. */ + if( p->tabFlags & TF_HasGenerated ){ + sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0, + sqlite3MPrintf(db, "SELECT*FROM\"%w\".\"%w\"", + db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC); + } + sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0, + sqlite3MPrintf(db, "PRAGMA \"%w\".integrity_check(%Q)", + db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC); } /* Add the table to the in-memory representation of the database.

@@ -119795,8 +123295,7 @@ if( pParse->nErr==0

&& pTable->nCol==pSel->pEList->nExpr ){ assert( db->mallocFailed==0 ); - sqlite3SelectAddColumnTypeAndCollation(pParse, pTable, pSel, - SQLITE_AFF_NONE); + sqlite3SubqueryColumnTypes(pParse, pTable, pSel, SQLITE_AFF_NONE); } }else{ /* CREATE VIEW name AS... without an argument list. Construct

@@ -120614,7 +124113,7 @@

#ifndef SQLITE_OMIT_TEMPDB /* If the index name was unqualified, check if the table ** is a temp table. If so, set the database to 1. Do not do this - ** if initialising a database schema. + ** if initializing a database schema. */ if( !db->init.busy ){ pTab = sqlite3SrcListLookup(pParse, pTblName);

@@ -122271,7 +125770,7 @@ }

/* ** This routine is invoked once per CTE by the parser while parsing a -** WITH clause. The CTE described by teh third argument is added to +** WITH clause. The CTE described by the third argument is added to ** the WITH clause of the second argument. If the second argument is ** NULL, then a new WITH argument is created. */

@@ -122522,6 +126021,7 @@ /* EVIDENCE-OF: R-08308-17224 The default collating function for all

** strings is BINARY. */ db->pDfltColl = sqlite3FindCollSeq(db, enc, sqlite3StrBINARY, 0); + sqlite3ExpirePreparedStatements(db, 1); } /*

@@ -122912,8 +126412,9 @@ SrcItem *pItem = pSrc->a;

Table *pTab; assert( pItem && pSrc->nSrc>=1 ); pTab = sqlite3LocateTableItem(pParse, 0, pItem); - sqlite3DeleteTable(pParse->db, pItem->pTab); + if( pItem->pTab ) sqlite3DeleteTable(pParse->db, pItem->pTab); pItem->pTab = pTab; + pItem->fg.notCte = 1; if( pTab ){ pTab->nTabRef++; if( pItem->fg.isIndexedBy && sqlite3IndexedByLookup(pParse, pItem) ){

@@ -122993,13 +126494,15 @@ ** If pTab is not writable -> generate an error message and return 1.

** If pTab is writable but other errors have occurred -> return 1. ** If pTab is writable and no prior errors -> return 0; */ -SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, int viewOk){ +SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, Trigger *pTrigger){ if( tabIsReadOnly(pParse, pTab) ){ sqlite3ErrorMsg(pParse, "table %s may not be modified", pTab->zName); return 1; } #ifndef SQLITE_OMIT_VIEW - if( !viewOk && IsView(pTab) ){ + if( IsView(pTab) + && (pTrigger==0 || (pTrigger->bReturning && pTrigger->pNext==0)) + ){ sqlite3ErrorMsg(pParse,"cannot modify %s because it is a view",pTab->zName); return 1; }

@@ -123064,7 +126567,7 @@ ){

sqlite3 *db = pParse->db; Expr *pLhs = NULL; /* LHS of IN(SELECT...) operator */ Expr *pInClause = NULL; /* WHERE rowid IN ( select ) */ - ExprList *pEList = NULL; /* Expression list contaning only pSelectRowid */ + ExprList *pEList = NULL; /* Expression list containing only pSelectRowid*/ SrcList *pSelectSrc = NULL; /* SELECT rowid FROM x ... (dup of pSrc) */ Select *pSelect = NULL; /* Complete SELECT tree */ Table *pTab;

@@ -123102,14 +126605,20 @@ pParse, 0, sqlite3PExpr(pParse, TK_ROW, 0, 0)

); }else{ Index *pPk = sqlite3PrimaryKeyIndex(pTab); + assert( pPk!=0 ); + assert( pPk->nKeyCol>=1 ); if( pPk->nKeyCol==1 ){ - const char *zName = pTab->aCol[pPk->aiColumn[0]].zCnName; + const char *zName; + assert( pPk->aiColumn[0]>=0 && pPk->aiColumn[0]<pTab->nCol ); + zName = pTab->aCol[pPk->aiColumn[0]].zCnName; pLhs = sqlite3Expr(db, TK_ID, zName); pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ID, zName)); }else{ int i; for(i=0; i<pPk->nKeyCol; i++){ - Expr *p = sqlite3Expr(db, TK_ID, pTab->aCol[pPk->aiColumn[i]].zCnName); + Expr *p; + assert( pPk->aiColumn[i]>=0 && pPk->aiColumn[i]<pTab->nCol ); + p = sqlite3Expr(db, TK_ID, pTab->aCol[pPk->aiColumn[i]].zCnName); pEList = sqlite3ExprListAppend(pParse, pEList, p); } pLhs = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);

@@ -123138,7 +126647,7 @@ pSelect = sqlite3SelectNew(pParse, pEList, pSelectSrc, pWhere, 0 ,0,

pOrderBy,0,pLimit ); - /* now generate the new WHERE rowid IN clause for the DELETE/UDPATE */ + /* now generate the new WHERE rowid IN clause for the DELETE/UPDATE */ pInClause = sqlite3PExpr(pParse, TK_IN, pLhs, 0); sqlite3PExprAddSelect(pParse, pInClause, pSelect); return pInClause;

@@ -123253,7 +126762,7 @@ if( sqlite3ViewGetColumnNames(pParse, pTab) ){

goto delete_from_cleanup; } - if( sqlite3IsReadOnly(pParse, pTab, (pTrigger?1:0)) ){ + if( sqlite3IsReadOnly(pParse, pTab, pTrigger) ){ goto delete_from_cleanup; } iDb = sqlite3SchemaToIndex(db, pTab->pSchema);

@@ -123362,12 +126871,12 @@ }else

#endif /* SQLITE_OMIT_TRUNCATE_OPTIMIZATION */ { u16 wcf = WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK; - if( sNC.ncFlags & NC_VarSelect ) bComplex = 1; + if( sNC.ncFlags & NC_Subquery ) bComplex = 1; wcf |= (bComplex ? 0 : WHERE_ONEPASS_MULTIROW); if( HasRowid(pTab) ){ /* For a rowid table, initialize the RowSet to an empty set */ pPk = 0; - nPk = 1; + assert( nPk==1 ); iRowSet = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet); }else{

@@ -123395,7 +126904,8 @@ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0,0,wcf,iTabCur+1);

if( pWInfo==0 ) goto delete_from_cleanup; eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); assert( IsVirtual(pTab)==0 || eOnePass!=ONEPASS_MULTI ); - assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF ); + assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF + || OptimizationDisabled(db, SQLITE_OnePass) ); if( eOnePass!=ONEPASS_SINGLE ) sqlite3MultiWrite(pParse); if( sqlite3WhereUsesDeferredSeek(pWInfo) ){ sqlite3VdbeAddOp1(v, OP_FinishSeek, iTabCur);

@@ -123732,9 +127242,11 @@ ** to the row just deleted. */

sqlite3FkActions(pParse, pTab, 0, iOld, 0, 0); /* Invoke AFTER DELETE trigger programs. */ - sqlite3CodeRowTrigger(pParse, pTrigger, - TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel - ); + if( pTrigger ){ + sqlite3CodeRowTrigger(pParse, pTrigger, + TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel + ); + } /* Jump here if the row had already been deleted before any BEFORE ** trigger programs were invoked. Or if a trigger program throws a

@@ -124048,6 +127560,42 @@ }

} /* +** Implementation of the octet_length() function +*/ +static void bytelengthFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + assert( argc==1 ); + UNUSED_PARAMETER(argc); + switch( sqlite3_value_type(argv[0]) ){ + case SQLITE_BLOB: { + sqlite3_result_int(context, sqlite3_value_bytes(argv[0])); + break; + } + case SQLITE_INTEGER: + case SQLITE_FLOAT: { + i64 m = sqlite3_context_db_handle(context)->enc<=SQLITE_UTF8 ? 1 : 2; + sqlite3_result_int64(context, sqlite3_value_bytes(argv[0])*m); + break; + } + case SQLITE_TEXT: { + if( sqlite3_value_encoding(argv[0])<=SQLITE_UTF8 ){ + sqlite3_result_int(context, sqlite3_value_bytes(argv[0])); + }else{ + sqlite3_result_int(context, sqlite3_value_bytes16(argv[0])); + } + break; + } + default: { + sqlite3_result_null(context); + break; + } + } +} + +/* ** Implementation of the abs() function. ** ** IMP: R-23979-26855 The abs(X) function returns the absolute value of
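As a usage note for the function added above: octet_length() reports the encoded size of its argument in bytes, whereas length() counts characters for text values. The example below is a small sketch against the public C API and assumes a UTF-8 database (the default); char(228) builds a one-character string whose UTF-8 encoding is two bytes.

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  /* One character, two UTF-8 bytes: length()=1, octet_length()=2. */
  sqlite3_prepare_v2(db,
    "SELECT length(char(228)), octet_length(char(228));", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("length=%d octet_length=%d\n",
           sqlite3_column_int(pStmt, 0), sqlite3_column_int(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}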

@@ -124323,7 +127871,7 @@ /* The value has no fractional part so there is nothing to round */

}else if( n==0 ){ r = (double)((sqlite_int64)(r+(r<0?-0.5:+0.5))); }else{ - zBuf = sqlite3_mprintf("%.*f",n,r); + zBuf = sqlite3_mprintf("%!.*f",n,r); if( zBuf==0 ){ sqlite3_result_error_nomem(context); return;

@@ -124523,7 +128071,7 @@ };

/* ** For LIKE and GLOB matching on EBCDIC machines, assume that every -** character is exactly one byte in size. Also, provde the Utf8Read() +** character is exactly one byte in size. Also, provide the Utf8Read() ** macro for fast reading of the next character in the common case where ** the next character is ASCII. */

@@ -124756,7 +128304,7 @@

/* ** Implementation of the like() SQL function. This function implements -** the build-in LIKE operator. The first argument to the function is the +** the built-in LIKE operator. The first argument to the function is the ** pattern and the second argument is the string. So, the SQL statements: ** ** A LIKE B

@@ -124980,7 +128528,7 @@ break;

} case SQLITE_BLOB: { char const *zBlob = sqlite3_value_blob(pValue); - int nBlob = sqlite3_value_bytes(pValue); + i64 nBlob = sqlite3_value_bytes(pValue); assert( zBlob==sqlite3_value_blob(pValue) ); /* No encoding change */ sqlite3StrAccumEnlarge(pStr, nBlob*2 + 4); if( pStr->accError==0 ){

@@ -125089,6 +128637,7 @@ *zOut++ = 0x80 + (u8)((c>>6) & 0x3F);

*zOut++ = 0x80 + (u8)(c & 0x3F); } \ } + *zOut = 0; sqlite3_result_text64(context, (char*)z, zOut-z, sqlite3_free, SQLITE_UTF8); }

@@ -125117,11 +128666,102 @@ *(z++) = hexdigits[(c>>4)&0xf];

*(z++) = hexdigits[c&0xf]; } *z = 0; - sqlite3_result_text(context, zHex, n*2, sqlite3_free); + sqlite3_result_text64(context, zHex, (u64)(z-zHex), + sqlite3_free, SQLITE_UTF8); } } /* +** Buffer zStr contains nStr bytes of utf-8 encoded text. Return 1 if zStr +** contains character ch, or 0 if it does not. +*/ +static int strContainsChar(const u8 *zStr, int nStr, u32 ch){ + const u8 *zEnd = &zStr[nStr]; + const u8 *z = zStr; + while( z<zEnd ){ + u32 tst = Utf8Read(z); + if( tst==ch ) return 1; + } + return 0; +} + +/* +** The unhex() function. This function may be invoked with either one or +** two arguments. In both cases the first argument is interpreted as text +** a text value containing a set of pairs of hexadecimal digits which are +** decoded and returned as a blob. +** +** If there is only a single argument, then it must consist only of an +** even number of hexadecimal digits. Otherwise, return NULL. +** +** Or, if there is a second argument, then any character that appears in +** the second argument is also allowed to appear between pairs of hexadecimal +** digits in the first argument. If any other character appears in the +** first argument, or if one of the allowed characters appears between +** two hexadecimal digits that make up a single byte, NULL is returned. +** +** The following expressions are all true: +** +** unhex('ABCD') IS x'ABCD' +** unhex('AB CD') IS NULL +** unhex('AB CD', ' ') IS x'ABCD' +** unhex('A BCD', ' ') IS NULL +*/ +static void unhexFunc( + sqlite3_context *pCtx, + int argc, + sqlite3_value **argv +){ + const u8 *zPass = (const u8*)""; + int nPass = 0; + const u8 *zHex = sqlite3_value_text(argv[0]); + int nHex = sqlite3_value_bytes(argv[0]); +#ifdef SQLITE_DEBUG + const u8 *zEnd = zHex ? &zHex[nHex] : 0; +#endif + u8 *pBlob = 0; + u8 *p = 0; + + assert( argc==1 || argc==2 ); + if( argc==2 ){ + zPass = sqlite3_value_text(argv[1]); + nPass = sqlite3_value_bytes(argv[1]); + } + if( !zHex || !zPass ) return; + + p = pBlob = contextMalloc(pCtx, (nHex/2)+1); + if( pBlob ){ + u8 c; /* Most significant digit of next byte */ + u8 d; /* Least significant digit of next byte */ + + while( (c = *zHex)!=0x00 ){ + while( !sqlite3Isxdigit(c) ){ + u32 ch = Utf8Read(zHex); + assert( zHex<=zEnd ); + if( !strContainsChar(zPass, nPass, ch) ) goto unhex_null; + c = *zHex; + if( c==0x00 ) goto unhex_done; + } + zHex++; + assert( *zEnd==0x00 ); + assert( zHex<=zEnd ); + d = *(zHex++); + if( !sqlite3Isxdigit(d) ) goto unhex_null; + *(p++) = (sqlite3HexToInt(c)<<4) | sqlite3HexToInt(d); + } + } + + unhex_done: + sqlite3_result_blob(pCtx, pBlob, (p - pBlob), sqlite3_free); + return; + + unhex_null: + sqlite3_free(pBlob); + return; +} + + +/* ** The zeroblob(N) function returns a zero-filled blob of size N bytes. */ static void zeroblobFunc(
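The truth table in the unhex() comment above can be reproduced through the public C API. The sketch below assumes a build that includes the new function and simply reports whether each call yields a blob or NULL.

#include <stdio.h>
#include "sqlite3.h"

static void show(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt;
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK
   && sqlite3_step(pStmt)==SQLITE_ROW ){
    if( sqlite3_column_type(pStmt, 0)==SQLITE_NULL ){
      printf("%s -> NULL\n", zSql);
    }else{
      printf("%s -> blob of %d bytes\n", zSql, sqlite3_column_bytes(pStmt, 0));
    }
  }
  sqlite3_finalize(pStmt);
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  show(db, "SELECT unhex('ABCD')");        /* 2-byte blob x'ABCD' */
  show(db, "SELECT unhex('AB CD')");       /* NULL: space not allowed */
  show(db, "SELECT unhex('AB CD', ' ')");  /* 2-byte blob: space permitted */
  show(db, "SELECT unhex('A BCD', ' ')");  /* NULL: splits a byte's digits */
  sqlite3_close(db);
  return 0;
}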

@@ -125321,12 +128961,87 @@ }

sqlite3_result_text(context, (char*)zIn, nIn, SQLITE_TRANSIENT); } +/* The core implementation of the CONCAT(...) and CONCAT_WS(SEP,...) +** functions. +** +** Return a string value that is the concatenation of all non-null +** entries in argv[]. Use zSep as the separator. +*/ +static void concatFuncCore( + sqlite3_context *context, + int argc, + sqlite3_value **argv, + int nSep, + const char *zSep +){ + i64 j, k, n = 0; + int i; + char *z; + for(i=0; i<argc; i++){ + n += sqlite3_value_bytes(argv[i]); + } + n += (argc-1)*nSep; + z = sqlite3_malloc64(n+1); + if( z==0 ){ + sqlite3_result_error_nomem(context); + return; + } + j = 0; + for(i=0; i<argc; i++){ + k = sqlite3_value_bytes(argv[i]); + if( k>0 ){ + const char *v = (const char*)sqlite3_value_text(argv[i]); + if( v!=0 ){ + if( j>0 && nSep>0 ){ + memcpy(&z[j], zSep, nSep); + j += nSep; + } + memcpy(&z[j], v, k); + j += k; + } + } + } + z[j] = 0; + assert( j<=n ); + sqlite3_result_text64(context, z, j, sqlite3_free, SQLITE_UTF8); +} + +/* +** The CONCAT(...) function. Generate a string result that is the +** concatentation of all non-null arguments. +*/ +static void concatFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + concatFuncCore(context, argc, argv, 0, ""); +} + +/* +** The CONCAT_WS(separator, ...) function. +** +** Generate a string that is the concatenation of 2nd through the Nth +** argument. Use the first argument (which must be non-NULL) as the +** separator. +*/ +static void concatwsFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int nSep = sqlite3_value_bytes(argv[0]); + const char *zSep = (const char*)sqlite3_value_text(argv[0]); + if( zSep==0 ) return; + concatFuncCore(context, argc-1, argv+1, nSep, zSep); +} + #ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION /* ** The "unknown" function is automatically substituted in place of ** any unrecognized function name when doing an EXPLAIN or EXPLAIN QUERY PLAN -** when the SQLITE_ENABLE_UNKNOWN_FUNCTION compile-time option is used. +** when the SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION compile-time option is used. ** When the "sqlite3" command-line shell is built using this functionality, ** that allows an EXPLAIN or EXPLAIN QUERY PLAN for complex queries ** involving application-defined functions to be examined in a generic
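For completeness, a short usage sketch of the two new scalars added above, using the public C API; it relies only on the NULL-skipping behavior documented in concatFuncCore().

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  /* NULL arguments are skipped; concat_ws() inserts the separator only
  ** between the values it actually keeps. Expected row: "ab", "a-b". */
  sqlite3_prepare_v2(db,
    "SELECT concat('a', NULL, 'b'), concat_ws('-', 'a', NULL, 'b');",
    -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s | %s\n",
           (const char*)sqlite3_column_text(pStmt, 0),
           (const char*)sqlite3_column_text(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}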

@@ -125338,6 +129053,9 @@ int argc,

sqlite3_value **argv ){ /* no-op */ + (void)context; + (void)argc; + (void)argv; } #endif /*SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION*/

@@ -125439,14 +129157,69 @@ ** sum() or avg() aggregate computation.

*/ typedef struct SumCtx SumCtx; struct SumCtx { - double rSum; /* Floating point sum */ - i64 iSum; /* Integer sum */ + double rSum; /* Running sum as as a double */ + double rErr; /* Error term for Kahan-Babushka-Neumaier summation */ + i64 iSum; /* Running sum as a signed integer */ i64 cnt; /* Number of elements summed */ - u8 overflow; /* True if integer overflow seen */ - u8 approx; /* True if non-integer value was input to the sum */ + u8 approx; /* True if any non-integer value was input to the sum */ + u8 ovrfl; /* Integer overflow seen */ }; /* +** Do one step of the Kahan-Babushka-Neumaier summation. +** +** https://en.wikipedia.org/wiki/Kahan_summation_algorithm +** +** Variables are marked "volatile" to defeat c89 x86 floating point +** optimizations can mess up this algorithm. +*/ +static void kahanBabuskaNeumaierStep( + volatile SumCtx *pSum, + volatile double r +){ + volatile double s = pSum->rSum; + volatile double t = s + r; + if( fabs(s) > fabs(r) ){ + pSum->rErr += (s - t) + r; + }else{ + pSum->rErr += (r - t) + s; + } + pSum->rSum = t; +} + +/* +** Add a (possibly large) integer to the running sum. +*/ +static void kahanBabuskaNeumaierStepInt64(volatile SumCtx *pSum, i64 iVal){ + if( iVal<=-4503599627370496LL || iVal>=+4503599627370496LL ){ + i64 iBig, iSm; + iSm = iVal % 16384; + iBig = iVal - iSm; + kahanBabuskaNeumaierStep(pSum, iBig); + kahanBabuskaNeumaierStep(pSum, iSm); + }else{ + kahanBabuskaNeumaierStep(pSum, (double)iVal); + } +} + +/* +** Initialize the Kahan-Babaska-Neumaier sum from a 64-bit integer +*/ +static void kahanBabuskaNeumaierInit( + volatile SumCtx *p, + i64 iVal +){ + if( iVal<=-4503599627370496LL || iVal>=+4503599627370496LL ){ + i64 iSm = iVal % 16384; + p->rSum = (double)(iVal - iSm); + p->rErr = (double)iSm; + }else{ + p->rSum = (double)iVal; + p->rErr = 0.0; + } +} + +/* ** Routines used to compute the sum, average, and total. ** ** The SUM() function follows the (broken) SQL standard which means
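The hunk above switches the floating-point accumulator used by SUM()/AVG()/TOTAL() to Kahan-Babushka-Neumaier compensated summation: a running error term captures the low-order bits that a plain double addition would drop, and it is folded back in when the result is finalized. The same technique in isolation, as a standalone sketch rather than an excerpt from the diff:

#include <stdio.h>
#include <math.h>

/* Plain C illustration of Neumaier's compensated summation, the technique
** the diff applies inside the SUM() aggregate context. */
static double neumaier_sum(const double *a, int n){
  double sum = 0.0, err = 0.0;
  int i;
  for(i=0; i<n; i++){
    double t = sum + a[i];
    if( fabs(sum) >= fabs(a[i]) ){
      err += (sum - t) + a[i];   /* low-order bits of a[i] were lost */
    }else{
      err += (a[i] - t) + sum;   /* low-order bits of sum were lost */
    }
    sum = t;
  }
  return sum + err;
}

int main(void){
  /* Naive left-to-right addition returns 0.0 here; the compensated
  ** sum recovers the exact value 2.0. */
  double v[] = { 1.0, 1e100, 1.0, -1e100 };
  printf("%g\n", neumaier_sum(v, 4));
  return 0;
}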

@@ -125465,15 +129238,29 @@ p = sqlite3_aggregate_context(context, sizeof(*p));

type = sqlite3_value_numeric_type(argv[0]); if( p && type!=SQLITE_NULL ){ p->cnt++; - if( type==SQLITE_INTEGER ){ - i64 v = sqlite3_value_int64(argv[0]); - p->rSum += v; - if( (p->approx|p->overflow)==0 && sqlite3AddInt64(&p->iSum, v) ){ - p->approx = p->overflow = 1; + if( p->approx==0 ){ + if( type!=SQLITE_INTEGER ){ + kahanBabuskaNeumaierInit(p, p->iSum); + p->approx = 1; + kahanBabuskaNeumaierStep(p, sqlite3_value_double(argv[0])); + }else{ + i64 x = p->iSum; + if( sqlite3AddInt64(&x, sqlite3_value_int64(argv[0]))==0 ){ + p->iSum = x; + }else{ + p->ovrfl = 1; + kahanBabuskaNeumaierInit(p, p->iSum); + p->approx = 1; + kahanBabuskaNeumaierStepInt64(p, sqlite3_value_int64(argv[0])); + } } }else{ - p->rSum += sqlite3_value_double(argv[0]); - p->approx = 1; + if( type==SQLITE_INTEGER ){ + kahanBabuskaNeumaierStepInt64(p, sqlite3_value_int64(argv[0])); + }else{ + p->ovrfl = 0; + kahanBabuskaNeumaierStep(p, sqlite3_value_double(argv[0])); + } } } }

@@ -125490,13 +129277,18 @@ ** to initialize it */

if( ALWAYS(p) && type!=SQLITE_NULL ){ assert( p->cnt>0 ); p->cnt--; - assert( type==SQLITE_INTEGER || p->approx ); - if( type==SQLITE_INTEGER && p->approx==0 ){ - i64 v = sqlite3_value_int64(argv[0]); - p->rSum -= v; - p->iSum -= v; + if( !p->approx ){ + p->iSum -= sqlite3_value_int64(argv[0]); + }else if( type==SQLITE_INTEGER ){ + i64 iVal = sqlite3_value_int64(argv[0]); + if( iVal!=SMALLEST_INT64 ){ + kahanBabuskaNeumaierStepInt64(p, -iVal); + }else{ + kahanBabuskaNeumaierStepInt64(p, LARGEST_INT64); + kahanBabuskaNeumaierStepInt64(p, 1); + } }else{ - p->rSum -= sqlite3_value_double(argv[0]); + kahanBabuskaNeumaierStep(p, -sqlite3_value_double(argv[0])); } } }

@@ -125507,10 +129299,14 @@ static void sumFinalize(sqlite3_context *context){

SumCtx *p; p = sqlite3_aggregate_context(context, 0); if( p && p->cnt>0 ){ - if( p->overflow ){ - sqlite3_result_error(context,"integer overflow",-1); - }else if( p->approx ){ - sqlite3_result_double(context, p->rSum); + if( p->approx ){ + if( p->ovrfl ){ + sqlite3_result_error(context,"integer overflow",-1); + }else if( !sqlite3IsNaN(p->rErr) ){ + sqlite3_result_double(context, p->rSum+p->rErr); + }else{ + sqlite3_result_double(context, p->rSum); + } }else{ sqlite3_result_int64(context, p->iSum); }

@@ -125520,14 +129316,29 @@ static void avgFinalize(sqlite3_context *context){

SumCtx *p; p = sqlite3_aggregate_context(context, 0); if( p && p->cnt>0 ){ - sqlite3_result_double(context, p->rSum/(double)p->cnt); + double r; + if( p->approx ){ + r = p->rSum; + if( !sqlite3IsNaN(p->rErr) ) r += p->rErr; + }else{ + r = (double)(p->iSum); + } + sqlite3_result_double(context, r/(double)p->cnt); } } static void totalFinalize(sqlite3_context *context){ SumCtx *p; + double r = 0.0; p = sqlite3_aggregate_context(context, 0); - /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ - sqlite3_result_double(context, p ? p->rSum : (double)0); + if( p ){ + if( p->approx ){ + r = p->rSum; + if( !sqlite3IsNaN(p->rErr) ) r += p->rErr; + }else{ + r = (double)(p->iSum); + } + } + sqlite3_result_double(context, r); } /*

@@ -125646,6 +129457,7 @@ }

/* ** group_concat(EXPR, ?SEPARATOR?) +** string_agg(EXPR, SEPARATOR) ** ** The SEPARATOR goes before the EXPR string. This is tragic. The ** groupConcatInverse() implementation would have been easier if the

@@ -125749,7 +129561,7 @@ (void)argc; /* Suppress unused parameter warning */

if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; pGCC = (GroupConcatCtx*)sqlite3_aggregate_context(context, sizeof(*pGCC)); /* pGCC is always non-NULL since groupConcatStep() will have always - ** run frist to initialize it */ + ** run first to initialize it */ if( ALWAYS(pGCC) ){ int nVS; /* Must call sqlite3_value_text() to convert the argument into text prior

@@ -125833,8 +129645,10 @@ ** parameter determines whether or not the LIKE operator is case

** sensitive. */ SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive){ + FuncDef *pDef; struct compareInfo *pInfo; int flags; + int nArg; if( caseSensitive ){ pInfo = (struct compareInfo*)&likeInfoAlt; flags = SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE;

@@ -125842,10 +129656,13 @@ }else{

pInfo = (struct compareInfo*)&likeInfoNorm; flags = SQLITE_FUNC_LIKE; } - sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0); - sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0); - sqlite3FindFunction(db, "like", 2, SQLITE_UTF8, 0)->funcFlags |= flags; - sqlite3FindFunction(db, "like", 3, SQLITE_UTF8, 0)->funcFlags |= flags; + for(nArg=2; nArg<=3; nArg++){ + sqlite3CreateFunc(db, "like", nArg, SQLITE_UTF8, pInfo, likeFunc, + 0, 0, 0, 0, 0); + pDef = sqlite3FindFunction(db, "like", nArg, SQLITE_UTF8, 0); + pDef->funcFlags |= flags; + pDef->funcFlags &= ~SQLITE_FUNC_UNSAFE; + } } /*

@@ -125967,6 +129784,18 @@ static double xCeil(double x){ return ceil(x); }

static double xFloor(double x){ return floor(x); } /* +** Some systems do not have log2() and log10() in their standard math +** libraries. +*/ +#if defined(HAVE_LOG10) && HAVE_LOG10==0 +# define log10(X) (0.4342944819032517867*log(X)) +#endif +#if defined(HAVE_LOG2) && HAVE_LOG2==0 +# define log2(X) (1.442695040888963456*log(X)) +#endif + + +/* ** Implementation of SQL functions: ** ** ln(X) - natural logarithm
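The fallback macros added above rest on the change-of-base identity log_b(x) = ln(x)/ln(b); the two constants are 1/ln(10) and 1/ln(2). A quick standalone check (not from the diff) that the substitutions agree with the native library functions:

#include <stdio.h>
#include <math.h>

/* Verifies the constants used by the log10()/log2() fallback macros:
** log10(x) == ln(x)/ln(10) and log2(x) == ln(x)/ln(2). */
int main(void){
  double x = 1234.5678;
  printf("log10: %.15g vs %.15g\n", log10(x), 0.4342944819032517867*log(x));
  printf("log2 : %.15g vs %.15g\n", log2(x),  1.442695040888963456*log(x));
  return 0;
}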

@@ -126004,17 +129833,15 @@ return;

} ans = log(x)/b; }else{ - ans = log(x); switch( SQLITE_PTR_TO_INT(sqlite3_user_data(context)) ){ case 1: - /* Convert from natural logarithm to log base 10 */ - ans /= M_LN10; + ans = log10(x); break; case 2: - /* Convert from natural logarithm to log base 2 */ - ans /= M_LN2; + ans = log2(x); break; default: + ans = log(x); break; } }

@@ -126083,6 +129910,7 @@ int argc,

sqlite3_value **argv ){ assert( argc==0 ); + (void)argv; sqlite3_result_double(context, M_PI); }

@@ -126106,6 +129934,37 @@ x = sqlite3_value_double(argv[0]);

sqlite3_result_int(context, x<0.0 ? -1 : x>0.0 ? +1 : 0); } +#ifdef SQLITE_DEBUG +/* +** Implementation of fpdecode(x,y,z) function. +** +** x is a real number that is to be decoded. y is the precision. +** z is the maximum real precision. +*/ +static void fpdecodeFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + FpDecode s; + double x; + int y, z; + char zBuf[100]; + UNUSED_PARAMETER(argc); + assert( argc==3 ); + x = sqlite3_value_double(argv[0]); + y = sqlite3_value_int(argv[1]); + z = sqlite3_value_int(argv[2]); + sqlite3FpDecode(&s, x, y, z); + if( s.isSpecial==2 ){ + sqlite3_snprintf(sizeof(zBuf), zBuf, "NaN"); + }else{ + sqlite3_snprintf(sizeof(zBuf), zBuf, "%c%.*s/%d", s.sign, s.n, s.z, s.iDP); + } + sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); +} +#endif /* SQLITE_DEBUG */ + /* ** All of the FuncDef structures in the aBuiltinFunc[] array above ** to the global function hash table. This occurs at start-time (as

@@ -126170,12 +130029,16 @@ SQLITE_FUNC_MINMAX|SQLITE_FUNC_ANYORDER ),

FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF), FUNCTION2(subtype, 1, 0, 0, subtypeFunc, SQLITE_FUNC_TYPEOF), FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH), + FUNCTION2(octet_length, 1, 0, 0, bytelengthFunc,SQLITE_FUNC_BYTELEN), FUNCTION(instr, 2, 0, 0, instrFunc ), FUNCTION(printf, -1, 0, 0, printfFunc ), FUNCTION(format, -1, 0, 0, printfFunc ), FUNCTION(unicode, 1, 0, 0, unicodeFunc ), FUNCTION(char, -1, 0, 0, charFunc ), FUNCTION(abs, 1, 0, 0, absFunc ), +#ifdef SQLITE_DEBUG + FUNCTION(fpdecode, 3, 0, 0, fpdecodeFunc ), +#endif #ifndef SQLITE_OMIT_FLOATING_POINT FUNCTION(round, 1, 0, 0, roundFunc ), FUNCTION(round, 2, 0, 0, roundFunc ),

@@ -126183,6 +130046,13 @@ #endif

FUNCTION(upper, 1, 0, 0, upperFunc ), FUNCTION(lower, 1, 0, 0, lowerFunc ), FUNCTION(hex, 1, 0, 0, hexFunc ), + FUNCTION(unhex, 1, 0, 0, unhexFunc ), + FUNCTION(unhex, 2, 0, 0, unhexFunc ), + FUNCTION(concat, -1, 0, 0, concatFunc ), + FUNCTION(concat, 0, 0, 0, 0 ), + FUNCTION(concat_ws, -1, 0, 0, concatwsFunc ), + FUNCTION(concat_ws, 0, 0, 0, 0 ), + FUNCTION(concat_ws, 1, 0, 0, 0 ), INLINE_FUNC(ifnull, 2, INLINEFUNC_coalesce, 0 ), VFUNCTION(random, 0, 0, 0, randomFunc ), VFUNCTION(randomblob, 1, 0, 0, randomBlob ),

@@ -126211,6 +130081,8 @@ countFinalize, countFinalize, countInverse, SQLITE_FUNC_ANYORDER ),

WAGGREGATE(group_concat, 1, 0, 0, groupConcatStep, groupConcatFinalize, groupConcatValue, groupConcatInverse, 0), WAGGREGATE(group_concat, 2, 0, 0, groupConcatStep, + groupConcatFinalize, groupConcatValue, groupConcatInverse, 0), + WAGGREGATE(string_agg, 2, 0, 0, groupConcatStep, groupConcatFinalize, groupConcatValue, groupConcatInverse, 0), LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE),
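As the registration above shows, string_agg(X, SEP) is wired up as a two-argument alias of group_concat(), sharing the same step/finalize/value/inverse routines (and therefore the same window-function support). A hedged usage sketch, not part of the diff and assuming the same build setup as the earlier examples:

#include <stdio.h>
#include "sqlite3.h"

/* Not part of the diff: string_agg() and two-argument group_concat()
** are the same aggregate, so both queries print "a|b|c". */
static int show(void *u, int n, char **v, char **c){
  (void)u; (void)c;
  printf("%s\n", n>0 && v[0] ? v[0] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES('a'),('b'),('c');",
               0, 0, 0);
  sqlite3_exec(db, "SELECT string_agg(x,'|') FROM t", show, 0, 0);
  sqlite3_exec(db, "SELECT group_concat(x,'|') FROM t", show, 0, 0);
  sqlite3_close(db);
  return 0;
}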

@@ -127154,6 +131026,7 @@ Trigger *p = pTop->pTriggerPrg->pTrigger;

if( (p==pFKey->apTrigger[0] && pFKey->aAction[0]==OE_SetNull) || (p==pFKey->apTrigger[1] && pFKey->aAction[1]==OE_SetNull) ){ + assert( (pTop->db->flags & SQLITE_FkNoAction)==0 ); return 1; } }

@@ -127348,6 +131221,8 @@ fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regNew, -1);

} if( regOld!=0 ){ int eAction = pFKey->aAction[aChange!=0]; + if( (db->flags & SQLITE_FkNoAction) ) eAction = OE_None; + fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regOld, 1); /* If this is a deferred FK constraint, or a CASCADE or SET NULL ** action applies, then any foreign key violations caused by

@@ -127463,7 +131338,11 @@

/* Check if any parent key columns are being modified. */ for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ if( fkParentIsModified(pTab, p, aChange, chngRowid) ){ - if( p->aAction[1]!=OE_None ) return 2; + if( (pParse->db->flags & SQLITE_FkNoAction)==0 + && p->aAction[1]!=OE_None + ){ + return 2; + } bHaveFK = 1; } }

@@ -127513,6 +131392,7 @@ Trigger *pTrigger; /* Trigger definition to return */

int iAction = (pChanges!=0); /* 1 for UPDATE, 0 for DELETE */ action = pFKey->aAction[iAction]; + if( (db->flags & SQLITE_FkNoAction) ) action = OE_None; if( action==OE_Restrict && (db->flags & SQLITE_DeferFKs) ){ return 0; }

@@ -127613,22 +131493,22 @@ nFrom = sqlite3Strlen30(zFrom);

if( action==OE_Restrict ){ int iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - Token tFrom; - Token tDb; + SrcList *pSrc; Expr *pRaise; - tFrom.z = zFrom; - tFrom.n = nFrom; - tDb.z = db->aDb[iDb].zDbSName; - tDb.n = sqlite3Strlen30(tDb.z); - pRaise = sqlite3Expr(db, TK_RAISE, "FOREIGN KEY constraint failed"); if( pRaise ){ pRaise->affExpr = OE_Abort; } + pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0); + if( pSrc ){ + assert( pSrc->nSrc==1 ); + pSrc->a[0].zName = sqlite3DbStrDup(db, zFrom); + pSrc->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); + } pSelect = sqlite3SelectNew(pParse, sqlite3ExprListAppend(pParse, 0, pRaise), - sqlite3SrcListAppend(pParse, 0, &tDb, &tFrom), + pSrc, pWhere, 0, 0, 0, 0, 0 );

@@ -127744,9 +131624,8 @@ if( db->pnBytesFreed==0 ){

if( pFKey->pPrevTo ){ pFKey->pPrevTo->pNextTo = pFKey->pNextTo; }else{ - void *p = (void *)pFKey->pNextTo; - const char *z = (p ? pFKey->pNextTo->zTo : pFKey->zTo); - sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, p); + const char *z = (pFKey->pNextTo ? pFKey->pNextTo->zTo : pFKey->zTo); + sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, pFKey->pNextTo); } if( pFKey->pNextTo ){ pFKey->pNextTo->pPrevTo = pFKey->pPrevTo;

@@ -127809,8 +131688,10 @@ assert( !IsVirtual(pTab) );

assert( pParse->pVdbe!=0 ); v = pParse->pVdbe; assert( opcode==OP_OpenWrite || opcode==OP_OpenRead ); - sqlite3TableLock(pParse, iDb, pTab->tnum, - (opcode==OP_OpenWrite)?1:0, pTab->zName); + if( !pParse->db->noSharedCache ){ + sqlite3TableLock(pParse, iDb, pTab->tnum, + (opcode==OP_OpenWrite)?1:0, pTab->zName); + } if( HasRowid(pTab) ){ sqlite3VdbeAddOp4Int(v, opcode, iCur, pTab->tnum, iDb, pTab->nNVCol); VdbeComment((v, "%s", pTab->zName));

@@ -127844,45 +131725,47 @@ ** Memory for the buffer containing the column index affinity string

** is managed along with the rest of the Index structure. It will be ** released when sqlite3DeleteIndex() is called. */ -SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){ +static SQLITE_NOINLINE const char *computeIndexAffStr(sqlite3 *db, Index *pIdx){ + /* The first time a column affinity string for a particular index is + ** required, it is allocated and populated here. It is then stored as + ** a member of the Index structure for subsequent use. + ** + ** The column affinity string will eventually be deleted by + ** sqliteDeleteIndex() when the Index structure itself is cleaned + ** up. + */ + int n; + Table *pTab = pIdx->pTable; + pIdx->zColAff = (char *)sqlite3DbMallocRaw(0, pIdx->nColumn+1); if( !pIdx->zColAff ){ - /* The first time a column affinity string for a particular index is - ** required, it is allocated and populated here. It is then stored as - ** a member of the Index structure for subsequent use. - ** - ** The column affinity string will eventually be deleted by - ** sqliteDeleteIndex() when the Index structure itself is cleaned - ** up. - */ - int n; - Table *pTab = pIdx->pTable; - pIdx->zColAff = (char *)sqlite3DbMallocRaw(0, pIdx->nColumn+1); - if( !pIdx->zColAff ){ - sqlite3OomFault(db); - return 0; - } - for(n=0; n<pIdx->nColumn; n++){ - i16 x = pIdx->aiColumn[n]; - char aff; - if( x>=0 ){ - aff = pTab->aCol[x].affinity; - }else if( x==XN_ROWID ){ - aff = SQLITE_AFF_INTEGER; - }else{ - assert( x==XN_EXPR ); - assert( pIdx->bHasExpr ); - assert( pIdx->aColExpr!=0 ); - aff = sqlite3ExprAffinity(pIdx->aColExpr->a[n].pExpr); - } - if( aff<SQLITE_AFF_BLOB ) aff = SQLITE_AFF_BLOB; - if( aff>SQLITE_AFF_NUMERIC) aff = SQLITE_AFF_NUMERIC; - pIdx->zColAff[n] = aff; + sqlite3OomFault(db); + return 0; + } + for(n=0; n<pIdx->nColumn; n++){ + i16 x = pIdx->aiColumn[n]; + char aff; + if( x>=0 ){ + aff = pTab->aCol[x].affinity; + }else if( x==XN_ROWID ){ + aff = SQLITE_AFF_INTEGER; + }else{ + assert( x==XN_EXPR ); + assert( pIdx->bHasExpr ); + assert( pIdx->aColExpr!=0 ); + aff = sqlite3ExprAffinity(pIdx->aColExpr->a[n].pExpr); } - pIdx->zColAff[n] = 0; + if( aff<SQLITE_AFF_BLOB ) aff = SQLITE_AFF_BLOB; + if( aff>SQLITE_AFF_NUMERIC) aff = SQLITE_AFF_NUMERIC; + pIdx->zColAff[n] = aff; } - + pIdx->zColAff[n] = 0; return pIdx->zColAff; } +SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){ + if( !pIdx->zColAff ) return computeIndexAffStr(db, pIdx); + return pIdx->zColAff; +} + /* ** Compute an affinity string for a table. Space is obtained

@@ -127937,7 +131820,7 @@ **

** For STRICT tables: ** ------------------ ** -** Generate an appropropriate OP_TypeCheck opcode that will verify the +** Generate an appropriate OP_TypeCheck opcode that will verify the ** datatypes against the column definitions in pTab. If iReg==0, that ** means an OP_MakeRecord opcode has already been generated and should be ** the last opcode generated. The new OP_TypeCheck needs to be inserted

@@ -128568,7 +132451,7 @@ }

/* Cannot insert into a read-only table. */ - if( sqlite3IsReadOnly(pParse, pTab, tmask) ){ + if( sqlite3IsReadOnly(pParse, pTab, pTrigger) ){ goto insert_cleanup; }

@@ -129015,7 +132898,7 @@ sqlite3VdbeAddOp1(v, OP_MustBeInt, regCols); VdbeCoverage(v);

} /* Copy the new data already generated. */ - assert( pTab->nNVCol>0 ); + assert( pTab->nNVCol>0 || pParse->nErr>0 ); sqlite3VdbeAddOp3(v, OP_Copy, regRowid+1, regCols+1, pTab->nNVCol-1); #ifndef SQLITE_OMIT_GENERATED_COLUMNS

@@ -129229,7 +133112,7 @@

/* This is the Walker callback from sqlite3ExprReferencesUpdatedColumn(). * Set bit 0x01 of pWalker->eCode if pWalker->eCode to 0 and if this ** expression node references any of the -** columns that are being modifed by an UPDATE statement. +** columns that are being modified by an UPDATE statement. */ static int checkConstraintExprNode(Walker *pWalker, Expr *pExpr){ if( pExpr->op==TK_COLUMN ){

@@ -129452,7 +133335,7 @@ int *pbMayReplace, /* OUT: Set to true if constraint may cause a replace */

int *aiChng, /* column i is unchanged if aiChng[i]<0 */ Upsert *pUpsert /* ON CONFLICT clauses, if any. NULL otherwise */ ){ - Vdbe *v; /* VDBE under constrution */ + Vdbe *v; /* VDBE under construction */ Index *pIdx; /* Pointer to one of the indices */ Index *pPk = 0; /* The PRIMARY KEY index for WITHOUT ROWID tables */ sqlite3 *db; /* Database connection */

@@ -129567,6 +133450,7 @@ case OE_Rollback:

case OE_Fail: { char *zMsg = sqlite3MPrintf(db, "%s.%s", pTab->zName, pCol->zCnName); + testcase( zMsg==0 && db->mallocFailed==0 ); sqlite3VdbeAddOp3(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL, onError, iReg); sqlite3VdbeAppendP4(v, zMsg, P4_DYNAMIC);

@@ -129934,7 +133818,7 @@ for(pIdx = indexIteratorFirst(&sIdxIter, &ix);

pIdx; pIdx = indexIteratorNext(&sIdxIter, &ix) ){ - int regIdx; /* Range of registers hold conent for pIdx */ + int regIdx; /* Range of registers holding content for pIdx */ int regR; /* Range of registers holding conflicting PK */ int iThisCur; /* Cursor for this UNIQUE index */ int addrUniqueOk; /* Jump here if the UNIQUE constraint is satisfied */

@@ -130429,6 +134313,8 @@ Vdbe *v;

assert( op==OP_OpenRead || op==OP_OpenWrite ); assert( op==OP_OpenWrite || p5==0 ); + assert( piDataCur!=0 ); + assert( piIdxCur!=0 ); if( IsVirtual(pTab) ){ /* This routine is a no-op for virtual tables. Leave the output ** variables *piDataCur and *piIdxCur set to illegal cursor numbers

@@ -130441,18 +134327,18 @@ v = pParse->pVdbe;

assert( v!=0 ); if( iBase<0 ) iBase = pParse->nTab; iDataCur = iBase++; - if( piDataCur ) *piDataCur = iDataCur; + *piDataCur = iDataCur; if( HasRowid(pTab) && (aToOpen==0 || aToOpen[0]) ){ sqlite3OpenTable(pParse, iDataCur, iDb, pTab, op); - }else{ + }else if( pParse->db->noSharedCache==0 ){ sqlite3TableLock(pParse, iDb, pTab->tnum, op==OP_OpenWrite, pTab->zName); } - if( piIdxCur ) *piIdxCur = iBase; + *piIdxCur = iBase; for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ int iIdxCur = iBase++; assert( pIdx->pSchema==pTab->pSchema ); if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){ - if( piDataCur ) *piDataCur = iIdxCur; + *piDataCur = iIdxCur; p5 = 0; } if( aToOpen==0 || aToOpen[i+1] ){

@@ -130750,7 +134636,7 @@ return 0; /* Tables have different CHECK constraints. Ticket #2252 */

} #endif #ifndef SQLITE_OMIT_FOREIGN_KEY - /* Disallow the transfer optimization if the destination table constains + /* Disallow the transfer optimization if the destination table contains ** any foreign key constraints. This is more restrictive than necessary. ** But the main beneficiary of the transfer optimization is the VACUUM ** command, and the VACUUM command disables foreign key constraints. So

@@ -131458,6 +135344,13 @@ unsigned int);

const char *(*db_name)(sqlite3*,int); /* Version 3.40.0 and later */ int (*value_encoding)(sqlite3_value*); + /* Version 3.41.0 and later */ + int (*is_interrupted)(sqlite3*); + /* Version 3.43.0 and later */ + int (*stmt_explain)(sqlite3_stmt*,int); + /* Version 3.44.0 and later */ + void *(*get_clientdata)(sqlite3*,const char*); + int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*)); }; /*

@@ -131784,6 +135677,13 @@ #endif

#define sqlite3_db_name sqlite3_api->db_name /* Version 3.40.0 and later */ #define sqlite3_value_encoding sqlite3_api->value_encoding +/* Version 3.41.0 and later */ +#define sqlite3_is_interrupted sqlite3_api->is_interrupted +/* Version 3.43.0 and later */ +#define sqlite3_stmt_explain sqlite3_api->stmt_explain +/* Version 3.44.0 and later */ +#define sqlite3_get_clientdata sqlite3_api->get_clientdata +#define sqlite3_set_clientdata sqlite3_api->set_clientdata #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)

@@ -132298,7 +136198,14 @@ 0,

#endif sqlite3_db_name, /* Version 3.40.0 and later */ - sqlite3_value_type + sqlite3_value_encoding, + /* Version 3.41.0 and later */ + sqlite3_is_interrupted, + /* Version 3.43.0 and later */ + sqlite3_stmt_explain, + /* Version 3.44.0 and later */ + sqlite3_get_clientdata, + sqlite3_set_clientdata }; /* True if x is the directory separator character

@@ -132371,15 +136278,25 @@

/* tag-20210611-1. Some dlopen() implementations will segfault if given ** an oversize filename. Most filesystems have a pathname limit of 4K, ** so limit the extension filename length to about twice that. - ** https://sqlite.org/forum/forumpost/08a0d6d9bf */ + ** https://sqlite.org/forum/forumpost/08a0d6d9bf + ** + ** Later (2023-03-25): Save an extra 6 bytes for the filename suffix. + ** See https://sqlite.org/forum/forumpost/24083b579d. + */ if( nMsg>SQLITE_MAX_PATHLEN ) goto extension_not_found; + /* Do not allow sqlite3_load_extension() to link to a copy of the + ** running application, by passing in an empty filename. */ + if( nMsg==0 ) goto extension_not_found; + handle = sqlite3OsDlOpen(pVfs, zFile); #if SQLITE_OS_UNIX || SQLITE_OS_WIN for(ii=0; ii<ArraySize(azEndings) && handle==0; ii++){ char *zAltFile = sqlite3_mprintf("%s.%s", zFile, azEndings[ii]); if( zAltFile==0 ) return SQLITE_NOMEM_BKPT; - handle = sqlite3OsDlOpen(pVfs, zAltFile); + if( nMsg+strlen(azEndings[ii])+1<=SQLITE_MAX_PATHLEN ){ + handle = sqlite3OsDlOpen(pVfs, zAltFile); + } sqlite3_free(zAltFile); } #endif

@@ -132504,6 +136421,9 @@ ** Enable or disable extension loading. Extension loading is disabled by

** default so as not to open security holes in older applications. */ SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif sqlite3_mutex_enter(db->mutex); if( onoff ){ db->flags |= SQLITE_LoadExtension|SQLITE_LoadExtFunc;

@@ -132553,6 +136473,9 @@ SQLITE_API int sqlite3_auto_extension(

void (*xInit)(void) ){ int rc = SQLITE_OK; +#ifdef SQLITE_ENABLE_API_ARMOR + if( xInit==0 ) return SQLITE_MISUSE_BKPT; +#endif #ifndef SQLITE_OMIT_AUTOINIT rc = sqlite3_initialize(); if( rc ){

@@ -132605,6 +136528,9 @@ #endif

int i; int n = 0; wsdAutoextInit; +#ifdef SQLITE_ENABLE_API_ARMOR + if( xInit==0 ) return 0; +#endif sqlite3_mutex_enter(mutex); for(i=(int)wsdAutoext.nExt-1; i>=0; i--){ if( wsdAutoext.aExt[i]==xInit ){

@@ -134204,7 +138130,7 @@ ** PRAGMA [schema.]cache_spill=N

** ** The first form reports the current local setting for the ** page cache spill size. The second form turns cache spill on - ** or off. When turnning cache spill on, the size is set to the + ** or off. When turning cache spill on, the size is set to the ** current cache_size. The third form sets a spill size that ** may be different form the cache size. ** If N is positive then that is the

@@ -134474,7 +138400,11 @@ }

#endif if( sqlite3GetBoolean(zRight, 0) ){ - db->flags |= mask; + if( (mask & SQLITE_WriteSchema)==0 + || (db->flags & SQLITE_Defensive)==0 + ){ + db->flags |= mask; + } }else{ db->flags &= ~mask; if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0;

@@ -134874,7 +138804,7 @@ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);

zDb = db->aDb[iDb].zDbSName; sqlite3CodeVerifySchema(pParse, iDb); sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); - if( pTab->nCol+regRow>pParse->nMem ) pParse->nMem = pTab->nCol + regRow; + sqlite3TouchRegister(pParse, pTab->nCol+regRow); sqlite3OpenTable(pParse, 0, iDb, pTab, OP_OpenRead); sqlite3VdbeLoadString(v, regResult, pTab->zName); assert( IsOrdinaryTable(pTab) );

@@ -134915,7 +138845,7 @@ /* Generate code to read the child key values into registers

** regRow..regRow+n. If any of the child key values are NULL, this ** row cannot cause an FK violation. Jump directly to addrOk in ** this case. */ - if( regRow+pFK->nCol>pParse->nMem ) pParse->nMem = regRow+pFK->nCol; + sqlite3TouchRegister(pParse, regRow + pFK->nCol); for(j=0; j<pFK->nCol; j++){ int iCol = aiCols ? aiCols[j] : pFK->aCol[j].iFrom; sqlite3ExprCodeGetColumnOfTable(v, pTab, 0, iCol, regRow+j);

@@ -134982,9 +138912,9 @@ **

** The "quick_check" is reduced version of ** integrity_check designed to detect most database corruption ** without the overhead of cross-checking indexes. Quick_check - ** is linear time wherease integrity_check is O(NlogN). + ** is linear time whereas integrity_check is O(NlogN). ** - ** The maximum nubmer of errors is 100 by default. A different default + ** The maximum number of errors is 100 by default. A different default ** can be specified using a numeric parameter N. ** ** Or, the parameter N can be the name of a table. In that case, only

@@ -135044,6 +138974,7 @@ if( OMIT_TEMPDB && i==1 ) continue;

if( iDb>=0 && i!=iDb ) continue; sqlite3CodeVerifySchema(pParse, i); + pParse->okConstFactor = 0; /* tag-20230327-1 */ /* Do an integrity check of the B-Tree **

@@ -135079,7 +139010,7 @@ }

aRoot[0] = cnt; /* Make sure sufficient number of registers have been allocated */ - pParse->nMem = MAX( pParse->nMem, 8+mxIdx ); + sqlite3TouchRegister(pParse, 8+mxIdx); sqlite3ClearTempRegCache(pParse); /* Do the b-tree integrity checks */

@@ -135106,8 +139037,31 @@ int bStrict; /* True for a STRICT table */

int r2; /* Previous key for WITHOUT ROWID tables */ int mxCol; /* Maximum non-virtual column number */ - if( !IsOrdinaryTable(pTab) ) continue; if( pObjTab && pObjTab!=pTab ) continue; + if( !IsOrdinaryTable(pTab) ){ +#ifndef SQLITE_OMIT_VIRTUALTABLE + sqlite3_vtab *pVTab; + int a1; + if( !IsVirtual(pTab) ) continue; + if( pTab->nCol<=0 ){ + const char *zMod = pTab->u.vtab.azArg[0]; + if( sqlite3HashFind(&db->aModule, zMod)==0 ) continue; + } + sqlite3ViewGetColumnNames(pParse, pTab); + if( pTab->u.vtab.p==0 ) continue; + pVTab = pTab->u.vtab.p->pVtab; + if( NEVER(pVTab==0) ) continue; + if( NEVER(pVTab->pModule==0) ) continue; + if( pVTab->pModule->iVersion<4 ) continue; + if( pVTab->pModule->xIntegrity==0 ) continue; + sqlite3VdbeAddOp3(v, OP_VCheck, i, 3, isQuick); + sqlite3VdbeAppendP4(v, pTab, P4_TABLE); + a1 = sqlite3VdbeAddOp1(v, OP_IsNull, 3); VdbeCoverage(v); + integrityCheckResultRow(v); + sqlite3VdbeJumpHere(v, a1); +#endif + continue; + } if( isQuick || HasRowid(pTab) ){ pPk = 0; r2 = 0;

@@ -135135,12 +139089,21 @@ ** the entire record header to be parsed and sanity checked. It

** will also prepopulate the cursor column cache that is used ** by the OP_IsType code, so it is a required step. */ - mxCol = pTab->nCol-1; - while( mxCol>=0 - && ((pTab->aCol[mxCol].colFlags & COLFLAG_VIRTUAL)!=0 - || pTab->iPKey==mxCol) ) mxCol--; + assert( !IsVirtual(pTab) ); + if( HasRowid(pTab) ){ + mxCol = -1; + for(j=0; j<pTab->nCol; j++){ + if( (pTab->aCol[j].colFlags & COLFLAG_VIRTUAL)==0 ) mxCol++; + } + if( mxCol==pTab->iPKey ) mxCol--; + }else{ + /* COLFLAG_VIRTUAL columns are not included in the WITHOUT ROWID + ** PK index column-count, so there is no need to account for them + ** in this case. */ + mxCol = sqlite3PrimaryKeyIndex(pTab)->nColumn-1; + } if( mxCol>=0 ){ - sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, mxCol, 3); + sqlite3VdbeAddOp3(v, OP_Column, iDataCur, mxCol, 3); sqlite3VdbeTypeofColumn(v, 3); }

@@ -135220,15 +139183,29 @@ labelError = sqlite3VdbeMakeLabel(pParse);

labelOk = sqlite3VdbeMakeLabel(pParse); if( pCol->notNull ){ /* (1) NOT NULL columns may not contain a NULL */ + int jmp3; int jmp2 = sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4); - sqlite3VdbeChangeP5(v, 0x0f); VdbeCoverage(v); + if( p1<0 ){ + sqlite3VdbeChangeP5(v, 0x0f); /* INT, REAL, TEXT, or BLOB */ + jmp3 = jmp2; + }else{ + sqlite3VdbeChangeP5(v, 0x0d); /* INT, TEXT, or BLOB */ + /* OP_IsType does not detect NaN values in the database file + ** which should be treated as a NULL. So if the header type + ** is REAL, we have to load the actual data using OP_Column + ** to reliably determine if the value is a NULL. */ + sqlite3VdbeAddOp3(v, OP_Column, p1, p3, 3); + jmp3 = sqlite3VdbeAddOp2(v, OP_NotNull, 3, labelOk); + VdbeCoverage(v); + } zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName, pCol->zCnName); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); if( doTypeCheck ){ sqlite3VdbeGoto(v, labelError); sqlite3VdbeJumpHere(v, jmp2); + sqlite3VdbeJumpHere(v, jmp3); }else{ /* VDBE byte code will fall thru */ }

@@ -135308,7 +139285,8 @@ }

if( !isQuick ){ /* Omit the remaining tests for quick_check */ /* Validate index entries for the current row */ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ - int jmp2, jmp3, jmp4, jmp5; + int jmp2, jmp3, jmp4, jmp5, label6; + int kk; int ckUniq = sqlite3VdbeMakeLabel(pParse); if( pPk==pIdx ) continue; r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 0, &jmp3,

@@ -135326,13 +139304,49 @@ jmp5 = sqlite3VdbeLoadString(v, 4, pIdx->zName);

sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3); jmp4 = integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, jmp2); + + /* The OP_IdxRowid opcode is an optimized version of OP_Column + ** that extracts the rowid off the end of the index record. + ** But it only works correctly if index record does not have + ** any extra bytes at the end. Verify that this is the case. */ + if( HasRowid(pTab) ){ + int jmp7; + sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur+j, 3); + jmp7 = sqlite3VdbeAddOp3(v, OP_Eq, 3, 0, r1+pIdx->nColumn-1); + VdbeCoverageNeverNull(v); + sqlite3VdbeLoadString(v, 3, + "rowid not at end-of-record for row "); + sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3); + sqlite3VdbeLoadString(v, 4, " of index "); + sqlite3VdbeGoto(v, jmp5-1); + sqlite3VdbeJumpHere(v, jmp7); + } + + /* Any indexed columns with non-BINARY collations must still hold + ** the exact same text value as the table. */ + label6 = 0; + for(kk=0; kk<pIdx->nKeyCol; kk++){ + if( pIdx->azColl[kk]==sqlite3StrBINARY ) continue; + if( label6==0 ) label6 = sqlite3VdbeMakeLabel(pParse); + sqlite3VdbeAddOp3(v, OP_Column, iIdxCur+j, kk, 3); + sqlite3VdbeAddOp3(v, OP_Ne, 3, label6, r1+kk); VdbeCoverage(v); + } + if( label6 ){ + int jmp6 = sqlite3VdbeAddOp0(v, OP_Goto); + sqlite3VdbeResolveLabel(v, label6); + sqlite3VdbeLoadString(v, 3, "row "); + sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3); + sqlite3VdbeLoadString(v, 4, " values differ from index "); + sqlite3VdbeGoto(v, jmp5-1); + sqlite3VdbeJumpHere(v, jmp6); + } + /* For UNIQUE indexes, verify that only one entry exists with the ** current key. The entry is unique if (1) any column is NULL ** or (2) the next entry has a different key */ if( IsUniqueIndex(pIdx) ){ int uniqOk = sqlite3VdbeMakeLabel(pParse); int jmp6; - int kk; for(kk=0; kk<pIdx->nKeyCol; kk++){ int iCol = pIdx->aiColumn[kk]; assert( iCol!=XN_ROWID && iCol<pTab->nCol );

@@ -135681,7 +139695,7 @@ HashElem *k; /* Loop over tables of a schema */

Schema *pSchema; /* The current schema */ Table *pTab; /* A table in the schema */ Index *pIdx; /* An index of the table */ - LogEst szThreshold; /* Size threshold above which reanalysis is needd */ + LogEst szThreshold; /* Size threshold above which reanalysis needed */ char *zSubSql; /* SQL statement for the OP_SqlExec opcode */ u32 opMask; /* Mask of operations to perform */

@@ -136173,7 +140187,8 @@ 0, /* xRename - rename the table */

0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; /*

@@ -136505,7 +140520,14 @@ if( encoding==0 ) encoding = SQLITE_UTF8;

#else encoding = SQLITE_UTF8; #endif - sqlite3SetTextEncoding(db, encoding); + if( db->nVdbeActive>0 && encoding!=ENC(db) + && (db->mDbFlags & DBFLAG_Vacuum)==0 + ){ + rc = SQLITE_LOCKED; + goto initone_error_out; + }else{ + sqlite3SetTextEncoding(db, encoding); + } }else{ /* If opening an attached database, the encoding much match ENC(db) */ if( (meta[BTREE_TEXT_ENCODING-1] & 3)!=ENC(db) ){

@@ -136719,8 +140741,8 @@ ** set Parse.rc to SQLITE_SCHEMA. */

sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&cookie); assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( cookie!=db->aDb[iDb].pSchema->schema_cookie ){ + if( DbHasProperty(db, iDb, DB_SchemaLoaded) ) pParse->rc = SQLITE_SCHEMA; sqlite3ResetOneSchema(db, iDb); - pParse->rc = SQLITE_SCHEMA; } /* Close the transaction, if one was opened. */

@@ -136790,8 +140812,6 @@ db->lookaside.bDisable -= pParse->disableLookaside;

db->lookaside.sz = db->lookaside.bDisable ? 0 : db->lookaside.szTrue; assert( pParse->db->pParse==pParse ); db->pParse = pParse->pOuterParse; - pParse->db = 0; - pParse->disableLookaside = 0; } /*

@@ -136800,7 +140820,7 @@ ** the parser object is destroyed. But, beware: the cleanup might happen

** immediately. ** ** Use this mechanism for uncommon cleanups. There is a higher setup -** cost for this mechansim (an extra malloc), so it should not be used +** cost for this mechanism (an extra malloc), so it should not be used ** for common cleanups that happen on most calls. But for less ** common cleanups, we save a single NULL-pointer comparison in ** sqlite3ParseObjectReset(), which reduces the total CPU cycle count.

@@ -136892,9 +140912,18 @@ memset(PARSE_TAIL(&sParse), 0, PARSE_TAIL_SZ);

sParse.pOuterParse = db->pParse; db->pParse = &sParse; sParse.db = db; - sParse.pReprepare = pReprepare; + if( pReprepare ){ + sParse.pReprepare = pReprepare; + sParse.explain = sqlite3_stmt_isexplain((sqlite3_stmt*)pReprepare); + }else{ + assert( sParse.pReprepare==0 ); + } assert( ppStmt && *ppStmt==0 ); - if( db->mallocFailed ) sqlite3ErrorMsg(&sParse, "out of memory"); + if( db->mallocFailed ){ + sqlite3ErrorMsg(&sParse, "out of memory"); + db->errCode = rc = SQLITE_NOMEM; + goto end_prepare; + } assert( sqlite3_mutex_held(db->mutex) ); /* For a long-term use prepared statement avoid the use of

@@ -137331,6 +141360,10 @@ int nKey; /* Number of PK columns for table pTab (>=1) */

} aDefer[4]; #endif struct RowLoadInfo *pDeferredRowLoad; /* Deferred row loading info or NULL */ +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrPush; /* First instruction to push data into sorter */ + int addrPushEnd; /* Last instruction that pushes data into sorter */ +#endif }; #define SORTFLAG_UseSorter 0x01 /* Use SorterOpen instead of OpenEphemeral */

@@ -137494,7 +141527,7 @@ ** NATURAL FULL - JT_NATURAL|JT_LEFT|JT_RIGHT

** NATURAL FULL OUTER JT_NATRUAL|JT_LEFT|JT_RIGHT ** ** To preserve historical compatibly, SQLite also accepts a variety -** of other non-standard and in many cases non-sensical join types. +** of other non-standard and in many cases nonsensical join types. ** This routine makes as much sense at it can from the nonsense join ** type and returns a result. Examples of accepted nonsense join types ** include but are not limited to:

@@ -137716,6 +141749,7 @@ ExprClearProperty(p, EP_CanBeNull);

} if( p->op==TK_FUNCTION ){ assert( ExprUseXList(p) ); + assert( p->pLeft==0 ); if( p->x.pList ){ int i; for(i=0; i<p->x.pList->nExpr; i++){

@@ -137765,7 +141799,7 @@

if( NEVER(pLeft->pTab==0 || pRightTab==0) ) continue; joinType = (pRight->fg.jointype & JT_OUTER)!=0 ? EP_OuterON : EP_InnerON; - /* If this is a NATURAL join, synthesize an approprate USING clause + /* If this is a NATURAL join, synthesize an appropriate USING clause ** to specify which columns should be joined. */ if( pRight->fg.jointype & JT_NATURAL ){

@@ -137979,14 +142013,18 @@ ** will be completely unrelated to regOrigData.

** (2) All output columns are included in the sort record. In that ** case regData==regOrigData. ** (3) Some output columns are omitted from the sort record due to - ** the SQLITE_ENABLE_SORTER_REFERENCE optimization, or due to the + ** the SQLITE_ENABLE_SORTER_REFERENCES optimization, or due to the ** SQLITE_ECEL_OMITREF optimization, or due to the - ** SortCtx.pDeferredRowLoad optimiation. In any of these cases + ** SortCtx.pDeferredRowLoad optimization. In any of these cases ** regOrigData is 0 to prevent this routine from trying to copy ** values that might not yet exist. */ assert( nData==1 || regData==regOrigData || regOrigData==0 ); +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + pSort->addrPush = sqlite3VdbeCurrentAddr(v); +#endif + if( nPrefixReg ){ assert( nPrefixReg==nExpr+bSeq ); regBase = regData - nPrefixReg;

@@ -138033,7 +142071,7 @@ sqlite3VdbeChangeP4(v, -1, (char*)pKI, P4_KEYINFO);

testcase( pKI->nAllField > pKI->nKeyField+2 ); pOp->p4.pKeyInfo = sqlite3KeyInfoFromExprList(pParse,pSort->pOrderBy,nOBSat, pKI->nAllField-pKI->nKeyField-1); - pOp = 0; /* Ensure pOp not used after sqltie3VdbeAddOp3() */ + pOp = 0; /* Ensure pOp not used after sqlite3VdbeAddOp3() */ addrJmp = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp3(v, OP_Jump, addrJmp+1, 0, addrJmp+1); VdbeCoverage(v); pSort->labelBkOut = sqlite3VdbeMakeLabel(pParse);

@@ -138087,6 +142125,9 @@ if( iSkip ){

sqlite3VdbeChangeP2(v, iSkip, pSort->labelOBLopt ? pSort->labelOBLopt : sqlite3VdbeCurrentAddr(v)); } +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + pSort->addrPushEnd = sqlite3VdbeCurrentAddr(v)-1; +#endif } /*

@@ -138124,7 +142165,7 @@ **

** The returned value in this case is a copy of parameter iTab. ** ** WHERE_DISTINCT_ORDERED: -** In this case rows are being delivered sorted order. The ephermal +** In this case rows are being delivered sorted order. The ephemeral ** table is not required. Instead, the current set of values ** is compared against previous row. If they match, the new row ** is not distinct and control jumps to VM address addrRepeat. Otherwise,

@@ -138553,9 +142594,16 @@ testcase( eDest==SRT_EphemTab );

testcase( eDest==SRT_Fifo ); testcase( eDest==SRT_DistFifo ); sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg); - if( pDest->zAffSdst ){ - sqlite3VdbeChangeP4(v, -1, pDest->zAffSdst, nResultCol); +#if !defined(SQLITE_ENABLE_NULL_TRIM) && defined(SQLITE_DEBUG) + /* A destination of SRT_Table and a non-zero iSDParm2 parameter means + ** that this is an "UPDATE ... FROM" on a virtual table or view. In this + ** case set the p5 parameter of the OP_MakeRecord to OPFLAG_NOCHNG_MAGIC. + ** This does not affect operation in any way - it just allows MakeRecord + ** to process OPFLAG_NOCHANGE values without an assert() failing. */ + if( eDest==SRT_Table && pDest->iSDParm2 ){ + sqlite3VdbeChangeP5(v, OPFLAG_NOCHNG_MAGIC); } +#endif #ifndef SQLITE_OMIT_CTE if( eDest==SRT_DistFifo ){ /* If the destination is DistFifo, then cursor (iParm+1) is open

@@ -138913,6 +142961,16 @@ int i;

int bSeq; /* True if sorter record includes seq. no. */ int nRefKey = 0; struct ExprList_item *aOutEx = p->pEList->a; +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrExplain; /* Address of OP_Explain instruction */ +#endif + + ExplainQueryPlan2(addrExplain, (pParse, 0, + "USE TEMP B-TREE FOR %sORDER BY", pSort->nOBSat>0?"RIGHT PART OF ":"") + ); + sqlite3VdbeScanStatusRange(v, addrExplain,pSort->addrPush,pSort->addrPushEnd); + sqlite3VdbeScanStatusCounters(v, addrExplain, addrExplain, pSort->addrPush); + assert( addrBreak<0 ); if( pSort->labelBkOut ){

@@ -139025,6 +143083,7 @@ sqlite3VdbeAddOp3(v, OP_Column, iSortTab, iRead, regRow+i);

VdbeComment((v, "%s", aOutEx[i].zEName)); } } + sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1); switch( eDest ){ case SRT_Table: case SRT_EphemTab: {

@@ -139086,6 +143145,7 @@ sqlite3VdbeAddOp2(v, OP_SorterNext, iTab, addr); VdbeCoverage(v);

}else{ sqlite3VdbeAddOp2(v, OP_Next, iTab, addr); VdbeCoverage(v); } + sqlite3VdbeScanStatusRange(v, addrExplain, sqlite3VdbeCurrentAddr(v)-1, -1); if( pSort->regReturn ) sqlite3VdbeAddOp1(v, OP_Return, pSort->regReturn); sqlite3VdbeResolveLabel(v, addrBreak); }

@@ -139347,17 +143407,10 @@ sqlite3 *db = pParse->db;

int fullName; /* TABLE.COLUMN if no AS clause and is a direct table ref */ int srcName; /* COLUMN or TABLE.COLUMN if no AS clause and is direct */ -#ifndef SQLITE_OMIT_EXPLAIN - /* If this is an EXPLAIN, skip this step */ - if( pParse->explain ){ - return; - } -#endif - if( pParse->colNamesSet ) return; /* Column names are determined by the left-most term of a compound select */ while( pSelect->pPrior ) pSelect = pSelect->pPrior; - SELECTTRACE(1,pParse,pSelect,("generating column names\n")); + TREETRACE(0x80,pParse,pSelect,("generating column names\n")); pTabList = pSelect->pSrc; pEList = pSelect->pEList; assert( v!=0 );

@@ -139457,7 +143510,7 @@ assert( nCol==(i16)nCol );

*pnCol = nCol; *paCol = aCol; - for(i=0, pCol=aCol; i<nCol && !db->mallocFailed; i++, pCol++){ + for(i=0, pCol=aCol; i<nCol && !pParse->nErr; i++, pCol++){ struct ExprList_item *pX = &pEList->a[i]; struct ExprList_item *pCollide; /* Get an appropriate name for the column

@@ -139507,7 +143560,10 @@ for(j=nName-1; j>0 && sqlite3Isdigit(zName[j]); j--){}

if( zName[j]==':' ) nName = j; } zName = sqlite3MPrintf(db, "%.*z:%u", nName, zName, ++cnt); - if( cnt>3 ) sqlite3_randomness(sizeof(cnt), &cnt); + sqlite3ProgressCheck(pParse); + if( cnt>3 ){ + sqlite3_randomness(sizeof(cnt), &cnt); + } } pCol->zCnName = zName; pCol->hName = sqlite3StrIHash(zName);

@@ -139520,71 +143576,105 @@ sqlite3OomFault(db);

} } sqlite3HashClear(&ht); - if( db->mallocFailed ){ + if( pParse->nErr ){ for(j=0; j<i; j++){ sqlite3DbFree(db, aCol[j].zCnName); } sqlite3DbFree(db, aCol); *paCol = 0; *pnCol = 0; - return SQLITE_NOMEM_BKPT; + return pParse->rc; } return SQLITE_OK; } /* -** Add type and collation information to a column list based on -** a SELECT statement. -** -** The column list presumably came from selectColumnNamesFromExprList(). -** The column list has only names, not types or collations. This -** routine goes through and adds the types and collations. +** pTab is a transient Table object that represents a subquery of some +** kind (maybe a parenthesized subquery in the FROM clause of a larger +** query, or a VIEW, or a CTE). This routine computes type information +** for that Table object based on the Select object that implements the +** subquery. For the purposes of this routine, "type information" means: ** -** This routine requires that all identifiers in the SELECT -** statement be resolved. +** * The datatype name, as it might appear in a CREATE TABLE statement +** * Which collating sequence to use for the column +** * The affinity of the column */ -SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation( - Parse *pParse, /* Parsing contexts */ - Table *pTab, /* Add column type information to this table */ - Select *pSelect, /* SELECT used to determine types and collations */ - char aff /* Default affinity for columns */ +SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( + Parse *pParse, /* Parsing contexts */ + Table *pTab, /* Add column type information to this table */ + Select *pSelect, /* SELECT used to determine types and collations */ + char aff /* Default affinity. */ ){ sqlite3 *db = pParse->db; - NameContext sNC; Column *pCol; CollSeq *pColl; - int i; + int i,j; Expr *p; struct ExprList_item *a; + NameContext sNC; assert( pSelect!=0 ); - assert( (pSelect->selFlags & SF_Resolved)!=0 ); - assert( pTab->nCol==pSelect->pEList->nExpr || db->mallocFailed ); - if( db->mallocFailed ) return; + testcase( (pSelect->selFlags & SF_Resolved)==0 ); + assert( (pSelect->selFlags & SF_Resolved)!=0 || IN_RENAME_OBJECT ); + assert( pTab->nCol==pSelect->pEList->nExpr || pParse->nErr>0 ); + assert( aff==SQLITE_AFF_NONE || aff==SQLITE_AFF_BLOB ); + if( db->mallocFailed || IN_RENAME_OBJECT ) return; + while( pSelect->pPrior ) pSelect = pSelect->pPrior; + a = pSelect->pEList->a; memset(&sNC, 0, sizeof(sNC)); sNC.pSrcList = pSelect->pSrc; - a = pSelect->pEList->a; for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){ const char *zType; - i64 n, m; + i64 n; pTab->tabFlags |= (pCol->colFlags & COLFLAG_NOINSERT); p = a[i].pExpr; - zType = columnType(&sNC, p, 0, 0, 0); /* pCol->szEst = ... 
// Column size est for SELECT tables never used */ pCol->affinity = sqlite3ExprAffinity(p); + if( pCol->affinity<=SQLITE_AFF_NONE ){ + pCol->affinity = aff; + } + if( pCol->affinity>=SQLITE_AFF_TEXT && pSelect->pNext ){ + int m = 0; + Select *pS2; + for(m=0, pS2=pSelect->pNext; pS2; pS2=pS2->pNext){ + m |= sqlite3ExprDataType(pS2->pEList->a[i].pExpr); + } + if( pCol->affinity==SQLITE_AFF_TEXT && (m&0x01)!=0 ){ + pCol->affinity = SQLITE_AFF_BLOB; + }else + if( pCol->affinity>=SQLITE_AFF_NUMERIC && (m&0x02)!=0 ){ + pCol->affinity = SQLITE_AFF_BLOB; + } + if( pCol->affinity>=SQLITE_AFF_NUMERIC && p->op==TK_CAST ){ + pCol->affinity = SQLITE_AFF_FLEXNUM; + } + } + zType = columnType(&sNC, p, 0, 0, 0); + if( zType==0 || pCol->affinity!=sqlite3AffinityType(zType, 0) ){ + if( pCol->affinity==SQLITE_AFF_NUMERIC + || pCol->affinity==SQLITE_AFF_FLEXNUM + ){ + zType = "NUM"; + }else{ + zType = 0; + for(j=1; j<SQLITE_N_STDTYPE; j++){ + if( sqlite3StdTypeAffinity[j]==pCol->affinity ){ + zType = sqlite3StdType[j]; + break; + } + } + } + } if( zType ){ - m = sqlite3Strlen30(zType); + i64 m = sqlite3Strlen30(zType); n = sqlite3Strlen30(pCol->zCnName); pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+m+2); + pCol->colFlags &= ~(COLFLAG_HASTYPE|COLFLAG_HASCOLL); if( pCol->zCnName ){ memcpy(&pCol->zCnName[n+1], zType, m+1); pCol->colFlags |= COLFLAG_HASTYPE; - }else{ - testcase( pCol->colFlags & COLFLAG_HASTYPE ); - pCol->colFlags &= ~(COLFLAG_HASTYPE|COLFLAG_HASCOLL); } } - if( pCol->affinity<=SQLITE_AFF_NONE ) pCol->affinity = aff; pColl = sqlite3ExprCollSeq(pParse, p); if( pColl ){ assert( pTab->pIndex==0 );

@@ -139618,7 +143708,7 @@ pTab->nTabRef = 1;

pTab->zName = 0; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); sqlite3ColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol); - sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSelect, aff); + sqlite3SubqueryColumnTypes(pParse, pTab, pSelect, aff); pTab->iPKey = -1; if( db->mallocFailed ){ sqlite3DeleteTable(db, pTab);

@@ -139833,7 +143923,7 @@ int regCurrent; /* Register holding Current table */

int iQueue; /* The Queue table */ int iDistinct = 0; /* To ensure unique results if UNION */ int eDest = SRT_Fifo; /* How to write to Queue */ - SelectDest destQueue; /* SelectDest targetting the Queue table */ + SelectDest destQueue; /* SelectDest targeting the Queue table */ int i; /* Loop counter */ int rc; /* Result code */ ExprList *pOrderBy; /* The ORDER BY clause */

@@ -140143,7 +144233,7 @@ assert( !pPrior->pLimit );

pPrior->iLimit = p->iLimit; pPrior->iOffset = p->iOffset; pPrior->pLimit = p->pLimit; - SELECTTRACE(1, pParse, p, ("multiSelect UNION ALL left...\n")); + TREETRACE(0x200, pParse, p, ("multiSelect UNION ALL left...\n")); rc = sqlite3Select(pParse, pPrior, &dest); pPrior->pLimit = 0; if( rc ){

@@ -140161,7 +144251,7 @@ p->iLimit, p->iOffset+1, p->iOffset);

} } ExplainQueryPlan((pParse, 1, "UNION ALL")); - SELECTTRACE(1, pParse, p, ("multiSelect UNION ALL right...\n")); + TREETRACE(0x200, pParse, p, ("multiSelect UNION ALL right...\n")); rc = sqlite3Select(pParse, p, &dest); testcase( rc!=SQLITE_OK ); pDelete = p->pPrior;

@@ -140214,7 +144304,7 @@ /* Code the SELECT statements to our left

*/ assert( !pPrior->pOrderBy ); sqlite3SelectDestInit(&uniondest, priorOp, unionTab); - SELECTTRACE(1, pParse, p, ("multiSelect EXCEPT/UNION left...\n")); + TREETRACE(0x200, pParse, p, ("multiSelect EXCEPT/UNION left...\n")); rc = sqlite3Select(pParse, pPrior, &uniondest); if( rc ){ goto multi_select_end;

@@ -140234,7 +144324,7 @@ p->pLimit = 0;

uniondest.eDest = op; ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE", sqlite3SelectOpName(p->op))); - SELECTTRACE(1, pParse, p, ("multiSelect EXCEPT/UNION right...\n")); + TREETRACE(0x200, pParse, p, ("multiSelect EXCEPT/UNION right...\n")); rc = sqlite3Select(pParse, p, &uniondest); testcase( rc!=SQLITE_OK ); assert( p->pOrderBy==0 );

@@ -140295,7 +144385,7 @@

/* Code the SELECTs to our left into temporary table "tab1". */ sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1); - SELECTTRACE(1, pParse, p, ("multiSelect INTERSECT left...\n")); + TREETRACE(0x400, pParse, p, ("multiSelect INTERSECT left...\n")); rc = sqlite3Select(pParse, pPrior, &intersectdest); if( rc ){ goto multi_select_end;

@@ -140312,7 +144402,7 @@ p->pLimit = 0;

intersectdest.iSDParm = tab2; ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE", sqlite3SelectOpName(p->op))); - SELECTTRACE(1, pParse, p, ("multiSelect INTERSECT right...\n")); + TREETRACE(0x400, pParse, p, ("multiSelect INTERSECT right...\n")); rc = sqlite3Select(pParse, p, &intersectdest); testcase( rc!=SQLITE_OK ); pDelete = p->pPrior;

@@ -140433,7 +144523,7 @@ }

/* ** Code an output subroutine for a coroutine implementation of a -** SELECT statment. +** SELECT statement. ** ** The data to be output is contained in pIn->iSdst. There are ** pIn->nSdst columns to be output. pDest is where the output should

@@ -140655,7 +144745,7 @@ ** End: ...

** ** We call AltB, AeqB, AgtB, EofA, and EofB "subroutines" but they are not ** actually called using Gosub and they do not Return. EofA and EofB loop -** until all data is exhausted then jump to the "end" labe. AltB, AeqB, +** until all data is exhausted then jump to the "end" label. AltB, AeqB, ** and AgtB jump to either L2 or to one of EofA or EofB. */ #ifndef SQLITE_OMIT_COMPOUND_SELECT

@@ -140692,7 +144782,7 @@ int savedLimit; /* Saved value of p->iLimit */

int savedOffset; /* Saved value of p->iOffset */ int labelCmpr; /* Label for the start of the merge algorithm */ int labelEnd; /* Label for the end of the overall SELECT stmt */ - int addr1; /* Jump instructions that get retargetted */ + int addr1; /* Jump instructions that get retargeted */ int op; /* One of TK_ALL, TK_UNION, TK_EXCEPT, TK_INTERSECT */ KeyInfo *pKeyDup = 0; /* Comparison information for duplicate removal */ KeyInfo *pKeyMerge; /* Comparison information for merging rows */

@@ -141061,16 +145151,21 @@ }else

#endif { Expr *pNew; - int iColumn = pExpr->iColumn; - Expr *pCopy = pSubst->pEList->a[iColumn].pExpr; + int iColumn; + Expr *pCopy; Expr ifNullRow; + iColumn = pExpr->iColumn; + assert( iColumn>=0 ); assert( pSubst->pEList!=0 && iColumn<pSubst->pEList->nExpr ); assert( pExpr->pRight==0 ); + pCopy = pSubst->pEList->a[iColumn].pExpr; if( sqlite3ExprIsVector(pCopy) ){ sqlite3VectorErrorMsg(pSubst->pParse, pCopy); }else{ sqlite3 *db = pSubst->pParse->db; - if( pSubst->isOuterJoin && pCopy->op!=TK_COLUMN ){ + if( pSubst->isOuterJoin + && (pCopy->op!=TK_COLUMN || pCopy->iTable!=pSubst->iNewTable) + ){ memset(&ifNullRow, 0, sizeof(ifNullRow)); ifNullRow.op = TK_IF_NULL_ROW; ifNullRow.pLeft = pCopy;

@@ -141316,6 +145411,34 @@ }

return pSel->pEList; } +/* +** Return true if any of the result-set columns in the compound query +** have incompatible affinities on one or more arms of the compound. +*/ +static int compoundHasDifferentAffinities(Select *p){ + int ii; + ExprList *pList; + assert( p!=0 ); + assert( p->pEList!=0 ); + assert( p->pPrior!=0 ); + pList = p->pEList; + for(ii=0; ii<pList->nExpr; ii++){ + char aff; + Select *pSub1; + assert( pList->a[ii].pExpr!=0 ); + aff = sqlite3ExprAffinity(pList->a[ii].pExpr); + for(pSub1=p->pPrior; pSub1; pSub1=pSub1->pPrior){ + assert( pSub1->pEList!=0 ); + assert( pSub1->pEList->nExpr>ii ); + assert( pSub1->pEList->a[ii].pExpr!=0 ); + if( sqlite3ExprAffinity(pSub1->pEList->a[ii].pExpr)!=aff ){ + return 1; + } + } + } + return 0; +} + #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* ** This routine attempts to flatten subqueries as a performance optimization.

@@ -141384,7 +145507,7 @@ **

** (9) If the subquery uses LIMIT then the outer query may not be aggregate. ** ** (**) Restriction (10) was removed from the code on 2005-02-05 but we -** accidently carried the comment forward until 2014-09-15. Original +** accidentally carried the comment forward until 2014-09-15. Original ** constraint: "If the subquery is aggregate then the outer query ** may not use LIMIT." **

@@ -141476,7 +145599,8 @@ ** (27a) the subquery is not a compound query.

** (27b) the subquery is a compound query and the RIGHT JOIN occurs ** in any arm of the compound query. (See also (17g).) ** -** (28) The subquery is not a MATERIALIZED CTE. +** (28) The subquery is not a MATERIALIZED CTE. (This is handled +** in the caller before ever reaching this routine.) ** ** ** In this routine, the "p" parameter is a pointer to the outer query.

@@ -141586,9 +145710,9 @@ assert( pSubSrc->nSrc>0 ); /* True by restriction (7) */

if( iFrom>0 && (pSubSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){ return 0; /* Restriction (27a) */ } - if( pSubitem->fg.isCte && pSubitem->u2.pCteUse->eM10d==M10d_Yes ){ - return 0; /* (28) */ - } + + /* Condition (28) is blocked by the caller */ + assert( !pSubitem->fg.isCte || pSubitem->u2.pCteUse->eM10d!=M10d_Yes ); /* Restriction (17): If the sub-query is a compound SELECT, then it must ** use only the UNION ALL operator. And none of the simple select queries

@@ -141638,19 +145762,7 @@ /* Restriction (23) */

if( (p->selFlags & SF_Recursive) ) return 0; /* Restriction (17h) */ - for(ii=0; ii<pSub->pEList->nExpr; ii++){ - char aff; - assert( pSub->pEList->a[ii].pExpr!=0 ); - aff = sqlite3ExprAffinity(pSub->pEList->a[ii].pExpr); - for(pSub1=pSub->pPrior; pSub1; pSub1=pSub1->pPrior){ - assert( pSub1->pEList!=0 ); - assert( pSub1->pEList->nExpr>ii ); - assert( pSub1->pEList->a[ii].pExpr!=0 ); - if( sqlite3ExprAffinity(pSub1->pEList->a[ii].pExpr)!=aff ){ - return 0; - } - } - } + if( compoundHasDifferentAffinities(pSub) ) return 0; if( pSrc->nSrc>1 ){ if( pParse->nSelect>500 ) return 0;

@@ -141661,7 +145773,7 @@ }

} /***** If we reach this point, flattening is permitted. *****/ - SELECTTRACE(1,pParse,p,("flatten %u.%p from term %d\n", + TREETRACE(0x4,pParse,p,("flatten %u.%p from term %d\n", pSub->selId, pSub, iFrom)); /* Authorize the subquery */

@@ -141670,7 +145782,7 @@ TESTONLY(i =) sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0);

testcase( i==SQLITE_DENY ); pParse->zAuthContext = zSavedAuthContext; - /* Delete the transient structures associated with thesubquery */ + /* Delete the transient structures associated with the subquery */ pSub1 = pSubitem->pSelect; sqlite3DbFree(db, pSubitem->zDatabase); sqlite3DbFree(db, pSubitem->zName);

@@ -141740,7 +145852,7 @@ pNew->pPrior = pPrior;

if( pPrior ) pPrior->pNext = pNew; pNew->pNext = p; p->pPrior = pNew; - SELECTTRACE(2,pParse,p,("compound-subquery flattener" + TREETRACE(0x4,pParse,p,("compound-subquery flattener" " creates %u as peer\n",pNew->selId)); } assert( pSubitem->pSelect==0 );

@@ -141852,7 +145964,7 @@ /* At this point, any non-zero iOrderByCol values indicate that the

** ORDER BY column expression is identical to the iOrderByCol'th ** expression returned by SELECT statement pSub. Since these values ** do not necessarily correspond to columns in SELECT statement pParent, - ** zero them before transfering the ORDER BY clause. + ** zero them before transferring the ORDER BY clause. ** ** Not doing this may cause an error if a subsequent call to this ** function attempts to flatten a compound sub-query into pParent

@@ -141912,16 +146024,15 @@ recomputeColumnsUsed(pParent, &pSrc->a[i+iFrom]);

} } - /* Finially, delete what is left of the subquery and return - ** success. + /* Finally, delete what is left of the subquery and return success. */ sqlite3AggInfoPersistWalkerInit(&w, pParse); sqlite3WalkSelect(&w,pSub1); sqlite3SelectDelete(db, pSub1); #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x100 ){ - SELECTTRACE(0x100,pParse,p,("After flattening:\n")); + if( sqlite3TreeTrace & 0x4 ){ + TREETRACE(0x4,pParse,p,("After flattening:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif

@@ -141948,7 +146059,7 @@ };

/* ** Add a new entry to the pConst object. Except, do not add duplicate -** pColumn entires. Also, do not add if doing so would not be appropriate. +** pColumn entries. Also, do not add if doing so would not be appropriate. ** ** The caller guarantees the pColumn is a column and pValue is a constant. ** This routine has to do some additional checks before completing the

@@ -142134,7 +146245,7 @@ ** SELECT * FROM t1 WHERE a=123 AND b=a;

** SELECT * FROM t1 WHERE a=123 AND b=123; ** ** The two SELECT statements above should return different answers. b=a -** is alway true because the comparison uses numeric affinity, but b=123 +** is always true because the comparison uses numeric affinity, but b=123 ** is false because it uses text affinity and '0123' is not the same as '123'. ** To work around this, the expression tree is not actually changed from ** "b=a" to "b=123" but rather the "a" in "b=a" is tagged with EP_FixedCol

@@ -142218,7 +146329,7 @@ **

** At the time this function is called it is guaranteed that ** ** * the sub-query uses only one distinct window frame, and -** * that the window frame has a PARTITION BY clase. +** * that the window frame has a PARTITION BY clause. */ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ assert( pSubq->pWin->pPartition );

@@ -142295,12 +146406,28 @@ ** (7) The inner query is a Common Table Expression (CTE) that should

** be materialized. (This restriction is implemented in the calling ** routine.) ** -** (8) The subquery may not be a compound that uses UNION, INTERSECT, -** or EXCEPT. (We could, perhaps, relax this restriction to allow -** this case if none of the comparisons operators between left and -** right arms of the compound use a collation other than BINARY. -** But it is a lot of work to check that case for an obscure and -** minor optimization, so we omit it for now.) +** (8) If the subquery is a compound that uses UNION, INTERSECT, +** or EXCEPT, then all of the result set columns for all arms of +** the compound must use the BINARY collating sequence. +** +** (9) All three of the following are true: +** +** (9a) The WHERE clause expression originates in the ON or USING clause +** of a join (either an INNER or an OUTER join), and +** +** (9b) The subquery is to the right of the ON/USING clause +** +** (9c) There is a RIGHT JOIN (or FULL JOIN) in between the ON/USING +** clause and the subquery. +** +** Without this restriction, the push-down optimization might move +** the ON/USING filter expression from the left side of a RIGHT JOIN +** over to the right side, which leads to incorrect answers. See +** also restriction (6) in sqlite3ExprIsSingleTableConstraint(). +** +** (10) The inner query is not the right-hand table of a RIGHT JOIN. +** +** (11) The subquery is not a VALUES clause ** ** Return 0 if no changes are made and non-zero if one or more WHERE clause ** terms are duplicated into the subquery.
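An illustrative query shape for the push-down rules above, assuming throwaway tables t1(a,b) and t2(a,b); a sketch, not text from the diff:

    -- An outer WHERE term may be duplicated into each arm of the compound subquery:
    SELECT * FROM (SELECT a, b FROM t1 UNION ALL SELECT a, b FROM t2) WHERE a = 10;
    -- Per new restriction (8), if the compound used UNION/INTERSECT/EXCEPT and a
    -- result column carried a non-BINARY collation (e.g. b TEXT COLLATE NOCASE),
    -- the term would not be pushed down.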

@@ -142309,28 +146436,56 @@ static int pushDownWhereTerms(

Parse *pParse, /* Parse context (for malloc() and error reporting) */ Select *pSubq, /* The subquery whose WHERE clause is to be augmented */ Expr *pWhere, /* The WHERE clause of the outer query */ - SrcItem *pSrc /* The subquery term of the outer FROM clause */ + SrcList *pSrcList, /* The complete from clause of the outer query */ + int iSrc /* Which FROM clause term to try to push into */ ){ Expr *pNew; + SrcItem *pSrc; /* The subquery FROM term into which WHERE is pushed */ int nChng = 0; + pSrc = &pSrcList->a[iSrc]; if( pWhere==0 ) return 0; - if( pSubq->selFlags & (SF_Recursive|SF_MultiPart) ) return 0; - if( pSrc->fg.jointype & (JT_LTORJ|JT_RIGHT) ) return 0; + if( pSubq->selFlags & (SF_Recursive|SF_MultiPart) ){ + return 0; /* restrictions (2) and (11) */ + } + if( pSrc->fg.jointype & (JT_LTORJ|JT_RIGHT) ){ + return 0; /* restrictions (10) */ + } -#ifndef SQLITE_OMIT_WINDOWFUNC if( pSubq->pPrior ){ Select *pSel; + int notUnionAll = 0; for(pSel=pSubq; pSel; pSel=pSel->pPrior){ u8 op = pSel->op; assert( op==TK_ALL || op==TK_SELECT || op==TK_UNION || op==TK_INTERSECT || op==TK_EXCEPT ); - if( op!=TK_ALL && op!=TK_SELECT ) return 0; /* restriction (8) */ + if( op!=TK_ALL && op!=TK_SELECT ){ + notUnionAll = 1; + } +#ifndef SQLITE_OMIT_WINDOWFUNC if( pSel->pWin ) return 0; /* restriction (6b) */ +#endif + } + if( notUnionAll ){ + /* If any of the compound arms are connected using UNION, INTERSECT, + ** or EXCEPT, then we must ensure that none of the columns use a + ** non-BINARY collating sequence. */ + for(pSel=pSubq; pSel; pSel=pSel->pPrior){ + int ii; + const ExprList *pList = pSel->pEList; + assert( pList!=0 ); + for(ii=0; ii<pList->nExpr; ii++){ + CollSeq *pColl = sqlite3ExprCollSeq(pParse, pList->a[ii].pExpr); + if( !sqlite3IsBinary(pColl) ){ + return 0; /* Restriction (8) */ + } + } + } } }else{ +#ifndef SQLITE_OMIT_WINDOWFUNC if( pSubq->pWin && pSubq->pWin->pPartition==0 ) return 0; - } #endif + } #ifdef SQLITE_DEBUG /* Only the first term of a compound can have a WITH clause. But make

@@ -142349,11 +146504,28 @@ if( pSubq->pLimit!=0 ){

return 0; /* restriction (3) */ } while( pWhere->op==TK_AND ){ - nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, pSrc); + nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, pSrcList, iSrc); pWhere = pWhere->pLeft; } -#if 0 /* Legacy code. Checks now done by sqlite3ExprIsTableConstraint() */ +#if 0 /* These checks now done by sqlite3ExprIsSingleTableConstraint() */ + if( ExprHasProperty(pWhere, EP_OuterON|EP_InnerON) /* (9a) */ + && (pSrcList->a[0].fg.jointype & JT_LTORJ)!=0 /* Fast pre-test of (9c) */ + ){ + int jj; + for(jj=0; jj<iSrc; jj++){ + if( pWhere->w.iJoin==pSrcList->a[jj].iCursor ){ + /* If we reach this point, both (9a) and (9b) are satisfied. + ** The following loop checks (9c): + */ + for(jj++; jj<iSrc; jj++){ + if( (pSrcList->a[jj].fg.jointype & JT_RIGHT)!=0 ){ + return 0; /* restriction (9) */ + } + } + } + } + } if( isLeftJoin && (ExprHasProperty(pWhere,EP_OuterON)==0 || pWhere->w.iJoin!=iCursor)

@@ -142367,7 +146539,7 @@ return 0; /* restriction (5) */

} #endif - if( sqlite3ExprIsTableConstraint(pWhere, pSrc) ){ + if( sqlite3ExprIsSingleTableConstraint(pWhere, pSrcList, iSrc) ){ nChng++; pSubq->selFlags |= SF_PushDown; while( pSubq ){

@@ -142402,6 +146574,78 @@ }

#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ /* +** Check to see if a subquery contains result-set columns that are +** never used. If it does, change the value of those result-set columns +** to NULL so that they do not cause unnecessary work to compute. +** +** Return the number of column that were changed to NULL. +*/ +static int disableUnusedSubqueryResultColumns(SrcItem *pItem){ + int nCol; + Select *pSub; /* The subquery to be simplified */ + Select *pX; /* For looping over compound elements of pSub */ + Table *pTab; /* The table that describes the subquery */ + int j; /* Column number */ + int nChng = 0; /* Number of columns converted to NULL */ + Bitmask colUsed; /* Columns that may not be NULLed out */ + + assert( pItem!=0 ); + if( pItem->fg.isCorrelated || pItem->fg.isCte ){ + return 0; + } + assert( pItem->pTab!=0 ); + pTab = pItem->pTab; + assert( pItem->pSelect!=0 ); + pSub = pItem->pSelect; + assert( pSub->pEList->nExpr==pTab->nCol ); + for(pX=pSub; pX; pX=pX->pPrior){ + if( (pX->selFlags & (SF_Distinct|SF_Aggregate))!=0 ){ + testcase( pX->selFlags & SF_Distinct ); + testcase( pX->selFlags & SF_Aggregate ); + return 0; + } + if( pX->pPrior && pX->op!=TK_ALL ){ + /* This optimization does not work for compound subqueries that + ** use UNION, INTERSECT, or EXCEPT. Only UNION ALL is allowed. */ + return 0; + } +#ifndef SQLITE_OMIT_WINDOWFUNC + if( pX->pWin ){ + /* This optimization does not work for subqueries that use window + ** functions. */ + return 0; + } +#endif + } + colUsed = pItem->colUsed; + if( pSub->pOrderBy ){ + ExprList *pList = pSub->pOrderBy; + for(j=0; j<pList->nExpr; j++){ + u16 iCol = pList->a[j].u.x.iOrderByCol; + if( iCol>0 ){ + iCol--; + colUsed |= ((Bitmask)1)<<(iCol>=BMS ? BMS-1 : iCol); + } + } + } + nCol = pTab->nCol; + for(j=0; j<nCol; j++){ + Bitmask m = j<BMS-1 ? MASKBIT(j) : TOPBIT; + if( (m & colUsed)!=0 ) continue; + for(pX=pSub; pX; pX=pX->pPrior) { + Expr *pY = pX->pEList->a[j].pExpr; + if( pY->op==TK_NULL ) continue; + pY->op = TK_NULL; + ExprClearProperty(pY, EP_Skip|EP_Unlikely); + pX->selFlags |= SF_PushDown; + nChng++; + } + } + return nChng; +} + + +/* ** The pFunc is the only aggregate function in the query. Check to see ** if the query is a candidate for the min/max optimization. **
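A sketch of the query shape that the new disableUnusedSubqueryResultColumns() helper targets, assuming throwaway tables t1 and t2 with columns a, b, c:

    -- If the compound subquery is kept (not flattened), only column a is ever read
    -- by the outer query, so the b and c result expressions in each UNION ALL arm
    -- can be replaced with NULL and never computed.
    SELECT a FROM (SELECT a, b, c FROM t1 UNION ALL SELECT a, b, c FROM t2) WHERE a > 0;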

@@ -142792,9 +147036,6 @@ }

pFrom->fg.isCte = 1; pFrom->u2.pCteUse = pCteUse; pCteUse->nUse++; - if( pCteUse->nUse>=2 && pCteUse->eM10d==M10d_Any ){ - pCteUse->eM10d = M10d_Yes; - } /* Check if this is a recursive CTE. */ pRecTerm = pSel = pFrom->pSelect;

@@ -143168,12 +147409,20 @@ /* This expression is a "*" or a "TABLE.*" and needs to be

** expanded. */ int tableSeen = 0; /* Set to 1 when TABLE matches */ char *zTName = 0; /* text of name of TABLE */ + int iErrOfst; if( pE->op==TK_DOT ){ + assert( (selFlags & SF_NestedFrom)==0 ); assert( pE->pLeft!=0 ); assert( !ExprHasProperty(pE->pLeft, EP_IntValue) ); zTName = pE->pLeft->u.zToken; + assert( ExprUseWOfst(pE->pLeft) ); + iErrOfst = pE->pRight->w.iOfst; + }else{ + assert( ExprUseWOfst(pE) ); + iErrOfst = pE->w.iOfst; } for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){ + int nAdd; /* Number of cols including rowid */ Table *pTab = pFrom->pTab; /* Table for this data source */ ExprList *pNestedFrom; /* Result-set of a nested FROM clause */ char *zTabName; /* AS name for this data source */

@@ -143191,6 +147440,7 @@ assert( pFrom->pSelect!=0 );

pNestedFrom = pFrom->pSelect->pEList; assert( pNestedFrom!=0 ); assert( pNestedFrom->nExpr==pTab->nCol ); + assert( VisibleRowid(pTab)==0 ); }else{ if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){ continue;

@@ -143208,6 +147458,7 @@ pUsing = pFrom[1].u3.pUsing;

for(ii=0; ii<pUsing->nId; ii++){ const char *zUName = pUsing->a[ii].zName; pRight = sqlite3Expr(db, TK_ID, zUName); + sqlite3ExprSetErrorOffset(pRight, iErrOfst); pNew = sqlite3ExprListAppend(pParse, pNew, pRight); if( pNew ){ struct ExprList_item *pX = &pNew->a[pNew->nExpr-1];

@@ -143220,33 +147471,48 @@ }

}else{ pUsing = 0; } - for(j=0; j<pTab->nCol; j++){ - char *zName = pTab->aCol[j].zCnName; + + nAdd = pTab->nCol + (VisibleRowid(pTab) && (selFlags&SF_NestedFrom)); + for(j=0; j<nAdd; j++){ + const char *zName; struct ExprList_item *pX; /* Newly added ExprList term */ - assert( zName ); - if( zTName - && pNestedFrom - && sqlite3MatchEName(&pNestedFrom->a[j], 0, zTName, 0)==0 - ){ - continue; - } + if( j==pTab->nCol ){ + zName = sqlite3RowidAlias(pTab); + if( zName==0 ) continue; + }else{ + zName = pTab->aCol[j].zCnName; - /* If a column is marked as 'hidden', omit it from the expanded - ** result-set list unless the SELECT has the SF_IncludeHidden - ** bit set. - */ - if( (p->selFlags & SF_IncludeHidden)==0 - && IsHiddenColumn(&pTab->aCol[j]) - ){ - continue; - } - if( (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0 - && zTName==0 - && (selFlags & (SF_NestedFrom))==0 - ){ - continue; + /* If pTab is actually an SF_NestedFrom sub-select, do not + ** expand any ENAME_ROWID columns. */ + if( pNestedFrom && pNestedFrom->a[j].fg.eEName==ENAME_ROWID ){ + continue; + } + + if( zTName + && pNestedFrom + && sqlite3MatchEName(&pNestedFrom->a[j], 0, zTName, 0, 0)==0 + ){ + continue; + } + + /* If a column is marked as 'hidden', omit it from the expanded + ** result-set list unless the SELECT has the SF_IncludeHidden + ** bit set. + */ + if( (p->selFlags & SF_IncludeHidden)==0 + && IsHiddenColumn(&pTab->aCol[j]) + ){ + continue; + } + if( (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0 + && zTName==0 + && (selFlags & (SF_NestedFrom))==0 + ){ + continue; + } } + assert( zName ); tableSeen = 1; if( i>0 && zTName==0 && (selFlags & SF_NestedFrom)==0 ){

@@ -143280,6 +147546,7 @@ }

}else{ pExpr = pRight; } + sqlite3ExprSetErrorOffset(pExpr, iErrOfst); pNew = sqlite3ExprListAppend(pParse, pNew, pExpr); if( pNew==0 ){ break; /* OOM */

@@ -143295,11 +147562,11 @@ pX->zEName = sqlite3MPrintf(db, "%s.%s.%s",

zSchemaName, zTabName, zName); testcase( pX->zEName==0 ); } - pX->fg.eEName = ENAME_TAB; + pX->fg.eEName = (j==pTab->nCol ? ENAME_ROWID : ENAME_TAB); if( (pFrom->fg.isUsing && sqlite3IdListIndex(pFrom->u3.pUsing, zName)>=0) || (pUsing && sqlite3IdListIndex(pUsing, zName)>=0) - || (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0 + || (j<pTab->nCol && (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)) ){ pX->fg.bNoExpand = 1; }

@@ -143334,8 +147601,8 @@ p->selFlags |= SF_ComplexResult;

} } #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x100 ){ - SELECTTRACE(0x100,pParse,p,("After result-set wildcard expansion:\n")); + if( sqlite3TreeTrace & 0x8 ){ + TREETRACE(0x8,pParse,p,("After result-set wildcard expansion:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif

@@ -143386,14 +147653,14 @@ /*

** This is a Walker.xSelectCallback callback for the sqlite3SelectTypeInfo() ** interface. ** -** For each FROM-clause subquery, add Column.zType and Column.zColl -** information to the Table structure that represents the result set -** of that subquery. +** For each FROM-clause subquery, add Column.zType, Column.zColl, and +** Column.affinity information to the Table structure that represents +** the result set of that subquery. ** ** The Table structure that represents the result set was constructed -** by selectExpander() but the type and collation information was omitted -** at that point because identifiers had not yet been resolved. This -** routine is called after identifier resolution. +** by selectExpander() but the type and collation and affinity information +** was omitted at that point because identifiers had not yet been resolved. +** This routine is called after identifier resolution. */ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ Parse *pParse;

@@ -143401,10 +147668,11 @@ int i;

SrcList *pTabList; SrcItem *pFrom; - assert( p->selFlags & SF_Resolved ); if( p->selFlags & SF_HasTypeInfo ) return; p->selFlags |= SF_HasTypeInfo; pParse = pWalker->pParse; + testcase( (p->selFlags & SF_Resolved)==0 ); + assert( (p->selFlags & SF_Resolved) || IN_RENAME_OBJECT ); pTabList = p->pSrc; for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){ Table *pTab = pFrom->pTab;

@@ -143413,9 +147681,7 @@ if( (pTab->tabFlags & TF_Ephemeral)!=0 ){

/* A sub-query in the FROM clause of a SELECT */ Select *pSel = pFrom->pSelect; if( pSel ){ - while( pSel->pPrior ) pSel = pSel->pPrior; - sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSel, - SQLITE_AFF_NONE); + sqlite3SubqueryColumnTypes(pParse, pTab, pSel, SQLITE_AFF_NONE); } } }

@@ -143470,6 +147736,184 @@ if( pParse->nErr ) return;

sqlite3SelectAddTypeInfo(pParse, p); } +#if TREETRACE_ENABLED +/* +** Display all information about an AggInfo object +*/ +static void printAggInfo(AggInfo *pAggInfo){ + int ii; + for(ii=0; ii<pAggInfo->nColumn; ii++){ + struct AggInfo_col *pCol = &pAggInfo->aCol[ii]; + sqlite3DebugPrintf( + "agg-column[%d] pTab=%s iTable=%d iColumn=%d iMem=%d" + " iSorterColumn=%d %s\n", + ii, pCol->pTab ? pCol->pTab->zName : "NULL", + pCol->iTable, pCol->iColumn, pAggInfo->iFirstReg+ii, + pCol->iSorterColumn, + ii>=pAggInfo->nAccumulator ? "" : " Accumulator"); + sqlite3TreeViewExpr(0, pAggInfo->aCol[ii].pCExpr, 0); + } + for(ii=0; ii<pAggInfo->nFunc; ii++){ + sqlite3DebugPrintf("agg-func[%d]: iMem=%d\n", + ii, pAggInfo->iFirstReg+pAggInfo->nColumn+ii); + sqlite3TreeViewExpr(0, pAggInfo->aFunc[ii].pFExpr, 0); + } +} +#endif /* TREETRACE_ENABLED */ + +/* +** Analyze the arguments to aggregate functions. Create new pAggInfo->aCol[] +** entries for columns that are arguments to aggregate functions but which +** are not otherwise used. +** +** The aCol[] entries in AggInfo prior to nAccumulator are columns that +** are referenced outside of aggregate functions. These might be columns +** that are part of the GROUP by clause, for example. Other database engines +** would throw an error if there is a column reference that is not in the +** GROUP BY clause and that is not part of an aggregate function argument. +** But SQLite allows this. +** +** The aCol[] entries beginning with the aCol[nAccumulator] and following +** are column references that are used exclusively as arguments to +** aggregate functions. This routine is responsible for computing +** (or recomputing) those aCol[] entries. +*/ +static void analyzeAggFuncArgs( + AggInfo *pAggInfo, + NameContext *pNC +){ + int i; + assert( pAggInfo!=0 ); + assert( pAggInfo->iFirstReg==0 ); + pNC->ncFlags |= NC_InAggFunc; + for(i=0; i<pAggInfo->nFunc; i++){ + Expr *pExpr = pAggInfo->aFunc[i].pFExpr; + assert( pExpr->op==TK_FUNCTION || pExpr->op==TK_AGG_FUNCTION ); + assert( ExprUseXList(pExpr) ); + sqlite3ExprAnalyzeAggList(pNC, pExpr->x.pList); + if( pExpr->pLeft ){ + assert( pExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pExpr->pLeft) ); + sqlite3ExprAnalyzeAggList(pNC, pExpr->pLeft->x.pList); + } +#ifndef SQLITE_OMIT_WINDOWFUNC + assert( !IsWindowFunc(pExpr) ); + if( ExprHasProperty(pExpr, EP_WinFunc) ){ + sqlite3ExprAnalyzeAggregates(pNC, pExpr->y.pWin->pFilter); + } +#endif + } + pNC->ncFlags &= ~NC_InAggFunc; +} + +/* +** An index on expressions is being used in the inner loop of an +** aggregate query with a GROUP BY clause. This routine attempts +** to adjust the AggInfo object to take advantage of index and to +** perhaps use the index as a covering index. 
+** +*/ +static void optimizeAggregateUseOfIndexedExpr( + Parse *pParse, /* Parsing context */ + Select *pSelect, /* The SELECT statement being processed */ + AggInfo *pAggInfo, /* The aggregate info */ + NameContext *pNC /* Name context used to resolve agg-func args */ +){ + assert( pAggInfo->iFirstReg==0 ); + assert( pSelect!=0 ); + assert( pSelect->pGroupBy!=0 ); + pAggInfo->nColumn = pAggInfo->nAccumulator; + if( ALWAYS(pAggInfo->nSortingColumn>0) ){ + int mx = pSelect->pGroupBy->nExpr - 1; + int j, k; + for(j=0; j<pAggInfo->nColumn; j++){ + k = pAggInfo->aCol[j].iSorterColumn; + if( k>mx ) mx = k; + } + pAggInfo->nSortingColumn = mx+1; + } + analyzeAggFuncArgs(pAggInfo, pNC); +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x20 ){ + IndexedExpr *pIEpr; + TREETRACE(0x20, pParse, pSelect, + ("AggInfo (possibly) adjusted for Indexed Exprs\n")); + sqlite3TreeViewSelect(0, pSelect, 0); + for(pIEpr=pParse->pIdxEpr; pIEpr; pIEpr=pIEpr->pIENext){ + printf("data-cursor=%d index={%d,%d}\n", + pIEpr->iDataCur, pIEpr->iIdxCur, pIEpr->iIdxCol); + sqlite3TreeViewExpr(0, pIEpr->pExpr, 0); + } + printAggInfo(pAggInfo); + } +#else + UNUSED_PARAMETER(pSelect); + UNUSED_PARAMETER(pParse); +#endif +} + +/* +** Walker callback for aggregateConvertIndexedExprRefToColumn(). +*/ +static int aggregateIdxEprRefToColCallback(Walker *pWalker, Expr *pExpr){ + AggInfo *pAggInfo; + struct AggInfo_col *pCol; + UNUSED_PARAMETER(pWalker); + if( pExpr->pAggInfo==0 ) return WRC_Continue; + if( pExpr->op==TK_AGG_COLUMN ) return WRC_Continue; + if( pExpr->op==TK_AGG_FUNCTION ) return WRC_Continue; + if( pExpr->op==TK_IF_NULL_ROW ) return WRC_Continue; + pAggInfo = pExpr->pAggInfo; + if( NEVER(pExpr->iAgg>=pAggInfo->nColumn) ) return WRC_Continue; + assert( pExpr->iAgg>=0 ); + pCol = &pAggInfo->aCol[pExpr->iAgg]; + pExpr->op = TK_AGG_COLUMN; + pExpr->iTable = pCol->iTable; + pExpr->iColumn = pCol->iColumn; + ExprClearProperty(pExpr, EP_Skip|EP_Collate|EP_Unlikely); + return WRC_Prune; +} + +/* +** Convert every pAggInfo->aFunc[].pExpr such that any node within +** those expressions that has pAppInfo set is changed into a TK_AGG_COLUMN +** opcode. +*/ +static void aggregateConvertIndexedExprRefToColumn(AggInfo *pAggInfo){ + int i; + Walker w; + memset(&w, 0, sizeof(w)); + w.xExprCallback = aggregateIdxEprRefToColCallback; + for(i=0; i<pAggInfo->nFunc; i++){ + sqlite3WalkExpr(&w, pAggInfo->aFunc[i].pFExpr); + } +} + + +/* +** Allocate a block of registers so that there is one register for each +** pAggInfo->aCol[] and pAggInfo->aFunc[] entry in pAggInfo. The first +** register in this block is stored in pAggInfo->iFirstReg. +** +** This routine may only be called once for each AggInfo object. Prior +** to calling this routine: +** +** * The aCol[] and aFunc[] arrays may be modified +** * The AggInfoColumnReg() and AggInfoFuncReg() macros may not be used +** +** After calling this routine: +** +** * The aCol[] and aFunc[] arrays are fixed +** * The AggInfoColumnReg() and AggInfoFuncReg() macros may be used +** +*/ +static void assignAggregateRegisters(Parse *pParse, AggInfo *pAggInfo){ + assert( pAggInfo!=0 ); + assert( pAggInfo->iFirstReg==0 ); + pAggInfo->iFirstReg = pParse->nMem + 1; + pParse->nMem += pAggInfo->nColumn + pAggInfo->nFunc; +} + /* ** Reset the aggregate accumulator. **
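One plausible statement served by optimizeAggregateUseOfIndexedExpr() above; the schema is an assumption and the planner's index choice is not guaranteed:

    CREATE TABLE t1(a, b, c);
    CREATE INDEX t1abc ON t1(a, b+c);
    -- If the index on the expression b+c is used, the aggregate argument can be
    -- read from the index instead of being recomputed, possibly making the index
    -- covering for the whole query.
    SELECT a, sum(b+c) FROM t1 GROUP BY a;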

@@ -143483,24 +147927,13 @@ Vdbe *v = pParse->pVdbe;

int i; struct AggInfo_func *pFunc; int nReg = pAggInfo->nFunc + pAggInfo->nColumn; + assert( pAggInfo->iFirstReg>0 ); assert( pParse->db->pParse==pParse ); assert( pParse->db->mallocFailed==0 || pParse->nErr!=0 ); if( nReg==0 ) return; if( pParse->nErr ) return; -#ifdef SQLITE_DEBUG - /* Verify that all AggInfo registers are within the range specified by - ** AggInfo.mnReg..AggInfo.mxReg */ - assert( nReg==pAggInfo->mxReg-pAggInfo->mnReg+1 ); - for(i=0; i<pAggInfo->nColumn; i++){ - assert( pAggInfo->aCol[i].iMem>=pAggInfo->mnReg - && pAggInfo->aCol[i].iMem<=pAggInfo->mxReg ); - } - for(i=0; i<pAggInfo->nFunc; i++){ - assert( pAggInfo->aFunc[i].iMem>=pAggInfo->mnReg - && pAggInfo->aFunc[i].iMem<=pAggInfo->mxReg ); - } -#endif - sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->mnReg, pAggInfo->mxReg); + sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->iFirstReg, + pAggInfo->iFirstReg+nReg-1); for(pFunc=pAggInfo->aFunc, i=0; i<pAggInfo->nFunc; i++, pFunc++){ if( pFunc->iDistinct>=0 ){ Expr *pE = pFunc->pFExpr;

@@ -143517,6 +147950,32 @@ ExplainQueryPlan((pParse, 0, "USE TEMP B-TREE FOR %s(DISTINCT)",

pFunc->pFunc->zName)); } } + if( pFunc->iOBTab>=0 ){ + ExprList *pOBList; + KeyInfo *pKeyInfo; + int nExtra = 0; + assert( pFunc->pFExpr->pLeft!=0 ); + assert( pFunc->pFExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pFunc->pFExpr->pLeft) ); + pOBList = pFunc->pFExpr->pLeft->x.pList; + if( !pFunc->bOBUnique ){ + nExtra++; /* One extra column for the OP_Sequence */ + } + if( pFunc->bOBPayload ){ + /* extra columns for the function arguments */ + assert( ExprUseXList(pFunc->pFExpr) ); + nExtra += pFunc->pFExpr->x.pList->nExpr; + } + pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pOBList, 0, nExtra); + if( !pFunc->bOBUnique && pParse->nErr==0 ){ + pKeyInfo->nKeyField++; + } + sqlite3VdbeAddOp4(v, OP_OpenEphemeral, + pFunc->iOBTab, pOBList->nExpr+nExtra, 0, + (char*)pKeyInfo, P4_KEYINFO); + ExplainQueryPlan((pParse, 0, "USE TEMP B-TREE FOR %s(ORDER BY)", + pFunc->pFunc->zName)); + } } }

@@ -143532,20 +147991,61 @@ for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){

ExprList *pList; assert( ExprUseXList(pF->pFExpr) ); pList = pF->pFExpr->x.pList; - sqlite3VdbeAddOp2(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0); + if( pF->iOBTab>=0 ){ + /* For an ORDER BY aggregate, calls to OP_AggStep where deferred and + ** all content was stored in emphermal table pF->iOBTab. Extract that + ** content now (in ORDER BY order) and make all calls to OP_AggStep + ** before doing the OP_AggFinal call. */ + int iTop; /* Start of loop for extracting columns */ + int nArg; /* Number of columns to extract */ + int nKey; /* Key columns to be skipped */ + int regAgg; /* Extract into this array */ + int j; /* Loop counter */ + + nArg = pList->nExpr; + regAgg = sqlite3GetTempRange(pParse, nArg); + + if( pF->bOBPayload==0 ){ + nKey = 0; + }else{ + assert( pF->pFExpr->pLeft!=0 ); + assert( ExprUseXList(pF->pFExpr->pLeft) ); + assert( pF->pFExpr->pLeft->x.pList!=0 ); + nKey = pF->pFExpr->pLeft->x.pList->nExpr; + if( ALWAYS(!pF->bOBUnique) ) nKey++; + } + iTop = sqlite3VdbeAddOp1(v, OP_Rewind, pF->iOBTab); VdbeCoverage(v); + for(j=nArg-1; j>=0; j--){ + sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, nKey+j, regAgg+j); + } + sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); + sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); + sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3VdbeAddOp2(v, OP_Next, pF->iOBTab, iTop+1); VdbeCoverage(v); + sqlite3VdbeJumpHere(v, iTop); + sqlite3ReleaseTempRange(pParse, regAgg, nArg); + } + sqlite3VdbeAddOp2(v, OP_AggFinal, AggInfoFuncReg(pAggInfo,i), + pList ? pList->nExpr : 0); sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); } } - /* -** Update the accumulator memory cells for an aggregate based on -** the current cursor position. +** Generate code that will update the accumulator memory cells for an +** aggregate based on the current cursor position. ** ** If regAcc is non-zero and there are no min() or max() aggregates ** in pAggInfo, then only populate the pAggInfo->nAccumulator accumulator ** registers if register regAcc contains 0. The caller will take care ** of setting and clearing regAcc. +** +** For an ORDER BY aggregate, the actual accumulator memory cell update +** is deferred until after all input rows have been received, so that they +** can be run in the requested order. In that case, instead of invoking +** OP_AggStep to update the accumulator, just add the arguments that would +** have been passed into OP_AggStep into the sorting ephemeral table +** (along with the appropriate sort key). */ static void updateAccumulator( Parse *pParse,
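The "ORDER BY aggregate" handling above is for aggregate functions that carry their own ORDER BY clause; an illustrative statement, assuming a table t1(x,y):

    -- Arguments are buffered in the ephemeral table (pF->iOBTab) in sorted order,
    -- and OP_AggStep is invoked over them only after all input rows have been seen.
    SELECT group_concat(x, ',' ORDER BY y) FROM t1;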

@@ -143560,11 +148060,15 @@ int addrHitTest = 0;

struct AggInfo_func *pF; struct AggInfo_col *pC; + assert( pAggInfo->iFirstReg>0 ); + if( pParse->nErr ) return; pAggInfo->directMode = 1; for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){ int nArg; int addrNext = 0; int regAgg; + int regAggSz = 0; + int regDistinct = 0; ExprList *pList; assert( ExprUseXList(pF->pFExpr) ); assert( !IsWindowFunc(pF->pFExpr) );

@@ -143591,9 +148095,44 @@ }

addrNext = sqlite3VdbeMakeLabel(pParse); sqlite3ExprIfFalse(pParse, pFilter, addrNext, SQLITE_JUMPIFNULL); } - if( pList ){ + if( pF->iOBTab>=0 ){ + /* Instead of invoking AggStep, we must push the arguments that would + ** have been passed to AggStep onto the sorting table. */ + int jj; /* Registered used so far in building the record */ + ExprList *pOBList; /* The ORDER BY clause */ + assert( pList!=0 ); + nArg = pList->nExpr; + assert( nArg>0 ); + assert( pF->pFExpr->pLeft!=0 ); + assert( pF->pFExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pF->pFExpr->pLeft) ); + pOBList = pF->pFExpr->pLeft->x.pList; + assert( pOBList!=0 ); + assert( pOBList->nExpr>0 ); + regAggSz = pOBList->nExpr; + if( !pF->bOBUnique ){ + regAggSz++; /* One register for OP_Sequence */ + } + if( pF->bOBPayload ){ + regAggSz += nArg; + } + regAggSz++; /* One extra register to hold result of MakeRecord */ + regAgg = sqlite3GetTempRange(pParse, regAggSz); + regDistinct = regAgg; + sqlite3ExprCodeExprList(pParse, pOBList, regAgg, 0, SQLITE_ECEL_DUP); + jj = pOBList->nExpr; + if( !pF->bOBUnique ){ + sqlite3VdbeAddOp2(v, OP_Sequence, pF->iOBTab, regAgg+jj); + jj++; + } + if( pF->bOBPayload ){ + regDistinct = regAgg+jj; + sqlite3ExprCodeExprList(pParse, pList, regDistinct, 0, SQLITE_ECEL_DUP); + } + }else if( pList ){ nArg = pList->nExpr; regAgg = sqlite3GetTempRange(pParse, nArg); + regDistinct = regAgg; sqlite3ExprCodeExprList(pParse, pList, regAgg, 0, SQLITE_ECEL_DUP); }else{ nArg = 0;

@@ -143604,26 +148143,37 @@ if( addrNext==0 ){

addrNext = sqlite3VdbeMakeLabel(pParse); } pF->iDistinct = codeDistinct(pParse, eDistinctType, - pF->iDistinct, addrNext, pList, regAgg); + pF->iDistinct, addrNext, pList, regDistinct); } - if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){ - CollSeq *pColl = 0; - struct ExprList_item *pItem; - int j; - assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */ - for(j=0, pItem=pList->a; !pColl && j<nArg; j++, pItem++){ - pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr); + if( pF->iOBTab>=0 ){ + /* Insert a new record into the ORDER BY table */ + sqlite3VdbeAddOp3(v, OP_MakeRecord, regAgg, regAggSz-1, + regAgg+regAggSz-1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pF->iOBTab, regAgg+regAggSz-1, + regAgg, regAggSz-1); + sqlite3ReleaseTempRange(pParse, regAgg, regAggSz); + }else{ + /* Invoke the AggStep function */ + if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){ + CollSeq *pColl = 0; + struct ExprList_item *pItem; + int j; + assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */ + for(j=0, pItem=pList->a; !pColl && j<nArg; j++, pItem++){ + pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr); + } + if( !pColl ){ + pColl = pParse->db->pDfltColl; + } + if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem; + sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, + (char *)pColl, P4_COLLSEQ); } - if( !pColl ){ - pColl = pParse->db->pDfltColl; - } - if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem; - sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ); + sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); + sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); + sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3ReleaseTempRange(pParse, regAgg, nArg); } - sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, pF->iMem); - sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nArg); - sqlite3ReleaseTempRange(pParse, regAgg, nArg); if( addrNext ){ sqlite3VdbeResolveLabel(v, addrNext); }

@@ -143635,7 +148185,7 @@ if( regHit ){

addrHitTest = sqlite3VdbeAddOp1(v, OP_If, regHit); VdbeCoverage(v); } for(i=0, pC=pAggInfo->aCol; i<pAggInfo->nAccumulator; i++, pC++){ - sqlite3ExprCode(pParse, pC->pCExpr, pC->iMem); + sqlite3ExprCode(pParse, pC->pCExpr, AggInfoColumnReg(pAggInfo,i)); } pAggInfo->directMode = 0;

@@ -143731,26 +148281,31 @@ sWalker.u.pSelect = p;

sqlite3WalkExpr(&sWalker, p->pHaving); #if TREETRACE_ENABLED if( sWalker.eCode && (sqlite3TreeTrace & 0x100)!=0 ){ - SELECTTRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n")); + TREETRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif } /* -** Check to see if the pThis entry of pTabList is a self-join of a prior view. -** If it is, then return the SrcItem for the prior view. If it is not, -** then return 0. +** Check to see if the pThis entry of pTabList is a self-join of another view. +** Search FROM-clause entries in the range of iFirst..iEnd, including iFirst +** but stopping before iEnd. +** +** If pThis is a self-join, then return the SrcItem for the first other +** instance of that view found. If pThis is not a self-join then return 0. */ static SrcItem *isSelfJoinView( SrcList *pTabList, /* Search for self-joins in this FROM clause */ - SrcItem *pThis /* Search for prior reference to this subquery */ + SrcItem *pThis, /* Search for prior reference to this subquery */ + int iFirst, int iEnd /* Range of FROM-clause entries to search. */ ){ SrcItem *pItem; assert( pThis->pSelect!=0 ); if( pThis->pSelect->selFlags & SF_PushDown ) return 0; - for(pItem = pTabList->a; pItem<pThis; pItem++){ + while( iFirst<iEnd ){ Select *pS1; + pItem = &pTabList->a[iFirst++]; if( pItem->pSelect==0 ) continue; if( pItem->fg.viaCoroutine ) continue; if( pItem->zName==0 ) continue;
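A scenario for the generalized isSelfJoinView() search range above, assuming a table t1(a,b); the view is deliberately an aggregate so that it cannot be flattened:

    CREATE VIEW v AS SELECT a, count(*) AS n FROM t1 GROUP BY a;
    -- Both FROM-clause references resolve to the same view; the first is
    -- materialized and the second can reuse that materialization.
    SELECT * FROM v AS x JOIN v AS y ON x.a = y.a;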

@@ -143783,7 +148338,6 @@ sqlite3DbFree(db, p->aFunc);

sqlite3DbFreeNN(db, p); } -#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION /* ** Attempt to transform a query of the form **

@@ -143811,7 +148365,9 @@ sqlite3 *db;

if( (p->selFlags & SF_Aggregate)==0 ) return 0; /* This is an aggregate */ if( p->pEList->nExpr!=1 ) return 0; /* Single result column */ if( p->pWhere ) return 0; + if( p->pHaving ) return 0; if( p->pGroupBy ) return 0; + if( p->pOrderBy ) return 0; pExpr = p->pEList->a[0].pExpr; if( pExpr->op!=TK_AGG_FUNCTION ) return 0; /* Result is an aggregate */ assert( ExprUseUToken(pExpr) );

@@ -143819,15 +148375,18 @@ if( sqlite3_stricmp(pExpr->u.zToken,"count") ) return 0; /* Is count() */

assert( ExprUseXList(pExpr) ); if( pExpr->x.pList!=0 ) return 0; /* Must be count(*) */ if( p->pSrc->nSrc!=1 ) return 0; /* One table in FROM */ + if( ExprHasProperty(pExpr, EP_WinFunc) ) return 0;/* Not a window function */ pSub = p->pSrc->a[0].pSelect; if( pSub==0 ) return 0; /* The FROM is a subquery */ - if( pSub->pPrior==0 ) return 0; /* Must be a compound ry */ + if( pSub->pPrior==0 ) return 0; /* Must be a compound */ + if( pSub->selFlags & SF_CopyCte ) return 0; /* Not a CTE */ do{ if( pSub->op!=TK_ALL && pSub->pPrior ) return 0; /* Must be UNION ALL */ if( pSub->pWhere ) return 0; /* No WHERE clause */ if( pSub->pLimit ) return 0; /* No LIMIT clause */ if( pSub->selFlags & SF_Aggregate ) return 0; /* Not an aggregate */ - pSub = pSub->pPrior; /* Repeat over compound */ + assert( pSub->pHaving==0 ); /* Due to the previous */ + pSub = pSub->pPrior; /* Repeat over compound */ }while( pSub ); /* If we reach this point then it is OK to perform the transformation */
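The count-of-view optimization gated by the checks above rewrites a count over a UNION ALL compound; an illustrative pair, assuming tables t1 and t2:

    SELECT count(*) FROM (SELECT a FROM t1 UNION ALL SELECT a FROM t2);
    -- is transformed into, roughly:
    SELECT (SELECT count(*) FROM t1) + (SELECT count(*) FROM t2);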

@@ -143863,14 +148422,13 @@ p->pEList->a[0].pExpr = pExpr;

p->selFlags &= ~SF_Aggregate; #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x400 ){ - SELECTTRACE(0x400,pParse,p,("After count-of-view optimization:\n")); + if( sqlite3TreeTrace & 0x200 ){ + TREETRACE(0x200,pParse,p,("After count-of-view optimization:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif return 1; } -#endif /* SQLITE_COUNTOFVIEW_OPTIMIZATION */ /* ** If any term of pSrc, or any SF_NestedFrom sub-query, is not the same

@@ -143896,6 +148454,68 @@ return 0;

} /* +** Return TRUE (non-zero) if the i-th entry in the pTabList SrcList can +** be implemented as a co-routine. The i-th entry is guaranteed to be +** a subquery. +** +** The subquery is implemented as a co-routine if all of the following are +** true: +** +** (1) The subquery will likely be implemented in the outer loop of +** the query. This will be the case if any one of the following +** conditions hold: +** (a) The subquery is the only term in the FROM clause +** (b) The subquery is the left-most term and a CROSS JOIN or similar +** requires it to be the outer loop +** (c) All of the following are true: +** (i) The subquery is the left-most subquery in the FROM clause +** (ii) There is nothing that would prevent the subquery from +** being used as the outer loop if the sqlite3WhereBegin() +** routine nominates it to that position. +** (iii) The query is not a UPDATE ... FROM +** (2) The subquery is not a CTE that should be materialized because +** (a) the AS MATERIALIZED keyword is used, or +** (b) the CTE is used multiple times and does not have the +** NOT MATERIALIZED keyword +** (3) The subquery is not part of a left operand for a RIGHT JOIN +** (4) The SQLITE_Coroutine optimization disable flag is not set +** (5) The subquery is not self-joined +*/ +static int fromClauseTermCanBeCoroutine( + Parse *pParse, /* Parsing context */ + SrcList *pTabList, /* FROM clause */ + int i, /* Which term of the FROM clause holds the subquery */ + int selFlags /* Flags on the SELECT statement */ +){ + SrcItem *pItem = &pTabList->a[i]; + if( pItem->fg.isCte ){ + const CteUse *pCteUse = pItem->u2.pCteUse; + if( pCteUse->eM10d==M10d_Yes ) return 0; /* (2a) */ + if( pCteUse->nUse>=2 && pCteUse->eM10d!=M10d_No ) return 0; /* (2b) */ + } + if( pTabList->a[0].fg.jointype & JT_LTORJ ) return 0; /* (3) */ + if( OptimizationDisabled(pParse->db, SQLITE_Coroutines) ) return 0; /* (4) */ + if( isSelfJoinView(pTabList, pItem, i+1, pTabList->nSrc)!=0 ){ + return 0; /* (5) */ + } + if( i==0 ){ + if( pTabList->nSrc==1 ) return 1; /* (1a) */ + if( pTabList->a[1].fg.jointype & JT_CROSS ) return 1; /* (1b) */ + if( selFlags & SF_UpdateFrom ) return 0; /* (1c-iii) */ + return 1; + } + if( selFlags & SF_UpdateFrom ) return 0; /* (1c-iii) */ + while( 1 /*exit-by-break*/ ){ + if( pItem->fg.jointype & (JT_OUTER|JT_CROSS) ) return 0; /* (1c-ii) */ + if( i==0 ) break; + i--; + pItem--; + if( pItem->pSelect!=0 ) return 0; /* (1c-i) */ + } + return 1; +} + +/* ** Generate code for the SELECT statement given in the p argument. ** ** The results are returned according to the SelectDest structure.
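An illustrative case for fromClauseTermCanBeCoroutine() above, assuming a table t1(a): the subquery has a LIMIT, so it is not flattened, but it is the only FROM-clause term, so condition (1a) lets it run as a co-routine instead of being materialized:

    SELECT x FROM (SELECT a AS x FROM t1 ORDER BY a LIMIT 10) WHERE x <> 0;
    -- EXPLAIN QUERY PLAN typically reports the subquery as a CO-ROUTINE.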

@@ -143940,8 +148560,8 @@ }

assert( db->mallocFailed==0 ); if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; #if TREETRACE_ENABLED - SELECTTRACE(1,pParse,p, ("begin processing:\n", pParse->addrExplain)); - if( sqlite3TreeTrace & 0x10100 ){ + TREETRACE(0x1,pParse,p, ("begin processing:\n", pParse->addrExplain)); + if( sqlite3TreeTrace & 0x10000 ){ if( (sqlite3TreeTrace & 0x10001)==0x10000 ){ sqlite3TreeViewLine(0, "In sqlite3Select() at %s:%d", __FILE__, __LINE__);

@@ -143961,8 +148581,8 @@ pDest->eDest==SRT_DistQueue || pDest->eDest==SRT_DistFifo );

/* All of these destinations are also able to ignore the ORDER BY clause */ if( p->pOrderBy ){ #if TREETRACE_ENABLED - SELECTTRACE(1,pParse,p, ("dropping superfluous ORDER BY:\n")); - if( sqlite3TreeTrace & 0x100 ){ + TREETRACE(0x800,pParse,p, ("dropping superfluous ORDER BY:\n")); + if( sqlite3TreeTrace & 0x800 ){ sqlite3TreeViewExprList(0, p->pOrderBy, 0, "ORDERBY"); } #endif

@@ -143982,8 +148602,8 @@ }

assert( db->mallocFailed==0 ); assert( p->pEList!=0 ); #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x104 ){ - SELECTTRACE(0x104,pParse,p, ("after name resolution:\n")); + if( sqlite3TreeTrace & 0x10 ){ + TREETRACE(0x10,pParse,p, ("after name resolution:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif

@@ -144024,8 +148644,8 @@ assert( pParse->nErr );

goto select_end; } #if TREETRACE_ENABLED - if( p->pWin && (sqlite3TreeTrace & 0x108)!=0 ){ - SELECTTRACE(0x104,pParse,p, ("after window rewrite:\n")); + if( p->pWin && (sqlite3TreeTrace & 0x40)!=0 ){ + TREETRACE(0x40,pParse,p, ("after window rewrite:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif

@@ -144049,22 +148669,58 @@ ** even for FROM clause elements such as subqueries that do not correspond

** to a real table */ assert( pTab!=0 ); - /* Convert LEFT JOIN into JOIN if there are terms of the right table - ** of the LEFT JOIN used in the WHERE clause. + /* Try to simplify joins: + ** + ** LEFT JOIN -> JOIN + ** RIGHT JOIN -> JOIN + ** FULL JOIN -> RIGHT JOIN + ** + ** If terms of the i-th table are used in the WHERE clause in such a + ** way that the i-th table cannot be the NULL row of a join, then + ** perform the appropriate simplification. This is called + ** "OUTER JOIN strength reduction" in the SQLite documentation. */ - if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))==JT_LEFT - && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor) + if( (pItem->fg.jointype & (JT_LEFT|JT_LTORJ))!=0 + && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor, + pItem->fg.jointype & JT_LTORJ) && OptimizationEnabled(db, SQLITE_SimplifyJoin) ){ - SELECTTRACE(0x100,pParse,p, - ("LEFT-JOIN simplifies to JOIN on term %d\n",i)); - pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER); - assert( pItem->iCursor>=0 ); - unsetJoinExpr(p->pWhere, pItem->iCursor, - pTabList->a[0].fg.jointype & JT_LTORJ); + if( pItem->fg.jointype & JT_LEFT ){ + if( pItem->fg.jointype & JT_RIGHT ){ + TREETRACE(0x1000,pParse,p, + ("FULL-JOIN simplifies to RIGHT-JOIN on term %d\n",i)); + pItem->fg.jointype &= ~JT_LEFT; + }else{ + TREETRACE(0x1000,pParse,p, + ("LEFT-JOIN simplifies to JOIN on term %d\n",i)); + pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER); + unsetJoinExpr(p->pWhere, pItem->iCursor, 0); + } + } + if( pItem->fg.jointype & JT_LTORJ ){ + for(j=i+1; j<pTabList->nSrc; j++){ + SrcItem *pI2 = &pTabList->a[j]; + if( pI2->fg.jointype & JT_RIGHT ){ + if( pI2->fg.jointype & JT_LEFT ){ + TREETRACE(0x1000,pParse,p, + ("FULL-JOIN simplifies to LEFT-JOIN on term %d\n",j)); + pI2->fg.jointype &= ~JT_RIGHT; + }else{ + TREETRACE(0x1000,pParse,p, + ("RIGHT-JOIN simplifies to JOIN on term %d\n",j)); + pI2->fg.jointype &= ~(JT_RIGHT|JT_OUTER); + unsetJoinExpr(p->pWhere, pI2->iCursor, 1); + } + } + } + for(j=pTabList->nSrc-1; j>=0; j--){ + pTabList->a[j].fg.jointype &= ~JT_LTORJ; + if( pTabList->a[j].fg.jointype & JT_RIGHT ) break; + } + } } - /* No futher action if this term of the FROM clause is no a subquery */ + /* No further action if this term of the FROM clause is not a subquery */ if( pSub==0 ) continue; /* Catch mismatch in the declared columns of a view and the number of
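The "OUTER JOIN strength reduction" above extends the long-standing LEFT JOIN case to RIGHT and FULL joins. The classic case, assuming tables t1(a) and t2(a,b):

    -- t2.b = 5 can never hold for the all-NULL row a LEFT JOIN would supply, so the
    -- LEFT JOIN safely degrades to an ordinary JOIN; the diff applies analogous
    -- reductions to RIGHT JOIN and FULL JOIN terms.
    SELECT * FROM t1 LEFT JOIN t2 ON t1.a = t2.a WHERE t2.b = 5;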

@@ -144075,6 +148731,14 @@ pTab->nCol, pTab->zName, pSub->pEList->nExpr);

goto select_end; } + /* Do not attempt the usual optimizations (flattening and ORDER BY + ** elimination) on a MATERIALIZED common table expression because + ** a MATERIALIZED common table expression is an optimization fence. + */ + if( pItem->fg.isCte && pItem->u2.pCteUse->eM10d==M10d_Yes ){ + continue; + } + /* Do not try to flatten an aggregate subquery. ** ** Flattening an aggregate subquery is only possible if the outer query
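An illustration of the optimization fence noted above, assuming a table t1(a,b): the MATERIALIZED hint (eM10d==M10d_Yes) keeps the CTE body from being flattened into the outer query or having its ORDER BY eliminated:

    WITH c AS MATERIALIZED (SELECT a, b FROM t1 ORDER BY a)
    SELECT b FROM c WHERE a = 10;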

@@ -144104,6 +148768,8 @@ ** one of:

** (a) The outer query has a different ORDER BY clause ** (b) The subquery is part of a join ** See forum post 062d576715d277c8 + ** + ** Also retain the ORDER BY if the OmitOrderBy optimization is disabled. */ if( pSub->pOrderBy!=0 && (p->pOrderBy!=0 || pTabList->nSrc>1) /* Condition (5) */

@@ -144112,7 +148778,7 @@ && (pSub->selFlags & SF_OrderByReqd)==0 /* Condition (2) */

&& (p->selFlags & SF_OrderByReqd)==0 /* Condition (3) and (4) */ && OptimizationEnabled(db, SQLITE_OmitOrderBy) ){ - SELECTTRACE(0x100,pParse,p, + TREETRACE(0x800,pParse,p, ("omit superfluous ORDER BY on %r FROM-clause subquery\n",i+1)); sqlite3ParserAddCleanup(pParse, (void(*)(sqlite3*,void*))sqlite3ExprListDelete,

@@ -144167,8 +148833,8 @@ */

if( p->pPrior ){ rc = multiSelect(pParse, p, pDest); #if TREETRACE_ENABLED - SELECTTRACE(0x1,pParse,p,("end compound-select processing\n")); - if( (sqlite3TreeTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ + TREETRACE(0x400,pParse,p,("end compound-select processing\n")); + if( (sqlite3TreeTrace & 0x400)!=0 && ExplainQueryPlanParent(pParse)==0 ){ sqlite3TreeViewSelect(0, p, 0); } #endif

@@ -144188,24 +148854,21 @@ && OptimizationEnabled(db, SQLITE_PropagateConst)

&& propagateConstants(pParse, p) ){ #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x100 ){ - SELECTTRACE(0x100,pParse,p,("After constant propagation:\n")); + if( sqlite3TreeTrace & 0x2000 ){ + TREETRACE(0x2000,pParse,p,("After constant propagation:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif }else{ - SELECTTRACE(0x100,pParse,p,("Constant propagation not helpful\n")); + TREETRACE(0x2000,pParse,p,("Constant propagation not helpful\n")); } -#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION if( OptimizationEnabled(db, SQLITE_QueryFlattener|SQLITE_CountOfView) && countOfViewOptimization(pParse, p) ){ if( db->mallocFailed ) goto select_end; - pEList = p->pEList; pTabList = p->pSrc; } -#endif /* For each term in the FROM clause, do two things: ** (1) Authorized unreferenced tables

@@ -144264,39 +148927,42 @@ */

if( OptimizationEnabled(db, SQLITE_PushDown) && (pItem->fg.isCte==0 || (pItem->u2.pCteUse->eM10d!=M10d_Yes && pItem->u2.pCteUse->nUse<2)) - && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem) + && pushDownWhereTerms(pParse, pSub, p->pWhere, pTabList, i) ){ #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x100 ){ - SELECTTRACE(0x100,pParse,p, + if( sqlite3TreeTrace & 0x4000 ){ + TREETRACE(0x4000,pParse,p, ("After WHERE-clause push-down into subquery %d:\n", pSub->selId)); sqlite3TreeViewSelect(0, p, 0); } #endif assert( pItem->pSelect && (pItem->pSelect->selFlags & SF_PushDown)!=0 ); }else{ - SELECTTRACE(0x100,pParse,p,("Push-down not possible\n")); + TREETRACE(0x4000,pParse,p,("Push-down not possible\n")); + } + + /* Convert unused result columns of the subquery into simple NULL + ** expressions, to avoid unneeded searching and computation. + */ + if( OptimizationEnabled(db, SQLITE_NullUnusedCols) + && disableUnusedSubqueryResultColumns(pItem) + ){ +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x4000 ){ + TREETRACE(0x4000,pParse,p, + ("Change unused result columns to NULL for subquery %d:\n", + pSub->selId)); + sqlite3TreeViewSelect(0, p, 0); + } +#endif } zSavedAuthContext = pParse->zAuthContext; pParse->zAuthContext = pItem->zName; /* Generate code to implement the subquery - ** - ** The subquery is implemented as a co-routine if all of the following are - ** true: - ** - ** (1) the subquery is guaranteed to be the outer loop (so that - ** it does not need to be computed more than once), and - ** (2) the subquery is not a CTE that should be materialized - ** (3) the subquery is not part of a left operand for a RIGHT JOIN */ - if( i==0 - && (pTabList->nSrc==1 - || (pTabList->a[1].fg.jointype&(JT_OUTER|JT_CROSS))!=0) /* (1) */ - && (pItem->fg.isCte==0 || pItem->u2.pCteUse->eM10d!=M10d_Yes) /* (2) */ - && (pTabList->a[0].fg.jointype & JT_LTORJ)==0 /* (3) */ - ){ + if( fromClauseTermCanBeCoroutine(pParse, pTabList, i, p->selFlags) ){ /* Implement a co-routine that will return a single row of the result ** set on each invocation. */

@@ -144318,7 +148984,7 @@ sqlite3ClearTempRegCache(pParse);

}else if( pItem->fg.isCte && pItem->u2.pCteUse->addrM9e>0 ){ /* This is a CTE for which materialization code has already been ** generated. Invoke the subroutine to compute the materialization, - ** the make the pItem->iCursor be a copy of the ephemerial table that + ** the make the pItem->iCursor be a copy of the ephemeral table that ** holds the result of the materialization. */ CteUse *pCteUse = pItem->u2.pCteUse; sqlite3VdbeAddOp2(v, OP_Gosub, pCteUse->regRtn, pCteUse->addrM9e);

@@ -144327,7 +148993,7 @@ sqlite3VdbeAddOp2(v, OP_OpenDup, pItem->iCursor, pCteUse->iCur);

VdbeComment((v, "%!S", pItem)); } pSub->nSelectRow = pCteUse->nRowEst; - }else if( (pPrior = isSelfJoinView(pTabList, pItem))!=0 ){ + }else if( (pPrior = isSelfJoinView(pTabList, pItem, 0, i))!=0 ){ /* This view has already been materialized by a prior entry in ** this same FROM clause. Reuse it. */ if( pPrior->addrFillSub ){

@@ -144341,6 +149007,9 @@ ** subroutine to do the materialization so that subsequent uses of

** the same view can reuse the materialization. */ int topAddr; int onceAddr = 0; +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrExplain; +#endif pItem->regReturn = ++pParse->nMem; topAddr = sqlite3VdbeAddOp0(v, OP_Goto);

@@ -144356,15 +149025,14 @@ }else{

VdbeNoopComment((v, "materialize %!S", pItem)); } sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); - ExplainQueryPlan((pParse, 1, "MATERIALIZE %!S", pItem)); - dest.zAffSdst = sqlite3TableAffinityStr(db, pItem->pTab); + + ExplainQueryPlan2(addrExplain, (pParse, 1, "MATERIALIZE %!S", pItem)); sqlite3Select(pParse, pSub, &dest); - sqlite3DbFree(db, dest.zAffSdst); - dest.zAffSdst = 0; pItem->pTab->nRowLogEst = pSub->nSelectRow; if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr); sqlite3VdbeAddOp2(v, OP_Return, pItem->regReturn, topAddr+1); VdbeComment((v, "end %!S", pItem)); + sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1); sqlite3VdbeJumpHere(v, topAddr); sqlite3ClearTempRegCache(pParse); if( pItem->fg.isCte && pItem->fg.isCorrelated==0 ){

@@ -144390,8 +149058,8 @@ pHaving = p->pHaving;

sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0; #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x400 ){ - SELECTTRACE(0x400,pParse,p,("After all FROM-clause analysis:\n")); + if( sqlite3TreeTrace & 0x8000 ){ + TREETRACE(0x8000,pParse,p,("After all FROM-clause analysis:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif

@@ -144427,8 +149095,8 @@ assert( sDistinct.isTnct );

sDistinct.isTnct = 2; #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x400 ){ - SELECTTRACE(0x400,pParse,p,("Transform DISTINCT into GROUP BY:\n")); + if( sqlite3TreeTrace & 0x20000 ){ + TREETRACE(0x20000,pParse,p,("Transform DISTINCT into GROUP BY:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif

@@ -144514,7 +149182,7 @@ assert( WHERE_USE_LIMIT==SF_FixedLimit );

/* Begin the database scan. */ - SELECTTRACE(1,pParse,p,("WhereBegin\n")); + TREETRACE(0x2,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, sSort.pOrderBy, p->pEList, p, wctrlFlags, p->nSelectRow); if( pWInfo==0 ) goto select_end;

@@ -144531,7 +149199,7 @@ if( sSort.nOBSat==sSort.pOrderBy->nExpr ){

sSort.pOrderBy = 0; } } - SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); + TREETRACE(0x2,pParse,p,("WhereBegin returns\n")); /* If sorting index that was created by a prior OP_OpenEphemeral ** instruction ended up not being needed, then change the OP_OpenEphemeral

@@ -144570,7 +149238,7 @@ sqlite3WhereBreakLabel(pWInfo));

/* End the database scan loop. */ - SELECTTRACE(1,pParse,p,("WhereEnd\n")); + TREETRACE(0x2,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); } }else{

@@ -144651,12 +149319,14 @@ if( db->mallocFailed ){

goto select_end; } pAggInfo->selId = p->selId; +#ifdef SQLITE_DEBUG + pAggInfo->pSelect = p; +#endif memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; sNC.pSrcList = pTabList; sNC.uNC.pAggInfo = pAggInfo; VVA_ONLY( sNC.ncFlags = NC_UAggInfo; ) - pAggInfo->mnReg = pParse->nMem+1; pAggInfo->nSortingColumn = pGroupBy ? pGroupBy->nExpr : 0; pAggInfo->pGroupBy = pGroupBy; sqlite3ExprAnalyzeAggList(&sNC, pEList);

@@ -144677,45 +149347,17 @@ minMaxFlag = minMaxQuery(db, pAggInfo->aFunc[0].pFExpr, &pMinMaxOrderBy);

}else{ minMaxFlag = WHERE_ORDERBY_NORMAL; } - for(i=0; i<pAggInfo->nFunc; i++){ - Expr *pExpr = pAggInfo->aFunc[i].pFExpr; - assert( ExprUseXList(pExpr) ); - sNC.ncFlags |= NC_InAggFunc; - sqlite3ExprAnalyzeAggList(&sNC, pExpr->x.pList); -#ifndef SQLITE_OMIT_WINDOWFUNC - assert( !IsWindowFunc(pExpr) ); - if( ExprHasProperty(pExpr, EP_WinFunc) ){ - sqlite3ExprAnalyzeAggregates(&sNC, pExpr->y.pWin->pFilter); - } -#endif - sNC.ncFlags &= ~NC_InAggFunc; - } - pAggInfo->mxReg = pParse->nMem; + analyzeAggFuncArgs(pAggInfo, &sNC); if( db->mallocFailed ) goto select_end; #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x400 ){ - int ii; - SELECTTRACE(0x400,pParse,p,("After aggregate analysis %p:\n", pAggInfo)); + if( sqlite3TreeTrace & 0x20 ){ + TREETRACE(0x20,pParse,p,("After aggregate analysis %p:\n", pAggInfo)); sqlite3TreeViewSelect(0, p, 0); if( minMaxFlag ){ sqlite3DebugPrintf("MIN/MAX Optimization (0x%02x) adds:\n", minMaxFlag); sqlite3TreeViewExprList(0, pMinMaxOrderBy, 0, "ORDERBY"); } - for(ii=0; ii<pAggInfo->nColumn; ii++){ - struct AggInfo_col *pCol = &pAggInfo->aCol[ii]; - sqlite3DebugPrintf( - "agg-column[%d] pTab=%s iTable=%d iColumn=%d iMem=%d" - " iSorterColumn=%d\n", - ii, pCol->pTab ? pCol->pTab->zName : "NULL", - pCol->iTable, pCol->iColumn, pCol->iMem, - pCol->iSorterColumn); - sqlite3TreeViewExpr(0, pAggInfo->aCol[ii].pCExpr, 0); - } - for(ii=0; ii<pAggInfo->nFunc; ii++){ - sqlite3DebugPrintf("agg-func[%d]: iMem=%d\n", - ii, pAggInfo->aFunc[ii].iMem); - sqlite3TreeViewExpr(0, pAggInfo->aFunc[ii].pFExpr, 0); - } + printAggInfo(pAggInfo); } #endif

@@ -144725,7 +149367,7 @@ ** much more complex than aggregates without a GROUP BY.

*/ if( pGroupBy ){ KeyInfo *pKeyInfo; /* Keying information for the group by clause */ - int addr1; /* A-vs-B comparision jump */ + int addr1; /* A-vs-B comparison jump */ int addrOutputRow; /* Start of subroutine that outputs a result row */ int regOutputRow; /* Return address register for output subroutine */ int addrSetAbort; /* Set the abort flag and return */

@@ -144784,7 +149426,7 @@ ** it might be a single loop that uses an index to extract information

** in the right order to begin with. */ sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); - SELECTTRACE(1,pParse,p,("WhereBegin\n")); + TREETRACE(0x2,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, pDistinct, p, (sDistinct.isTnct==2 ? WHERE_DISTINCTBY : WHERE_GROUPBY) | (orderByGrp ? WHERE_SORTBYGROUP : 0) | distFlag, 0

@@ -144793,8 +149435,12 @@ if( pWInfo==0 ){

sqlite3ExprListDelete(db, pDistinct); goto select_end; } + if( pParse->pIdxEpr ){ + optimizeAggregateUseOfIndexedExpr(pParse, p, pAggInfo, &sNC); + } + assignAggregateRegisters(pParse, pAggInfo); eDist = sqlite3WhereIsDistinct(pWInfo); - SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); + TREETRACE(0x2,pParse,p,("WhereBegin returns\n")); if( sqlite3WhereIsOrdered(pWInfo)==pGroupBy->nExpr ){ /* The optimizer is able to deliver rows in group by order so ** we do not have to sort. The OP_OpenEphemeral table will be

@@ -144812,9 +149458,13 @@ int regRecord;

int nCol; int nGroupBy; - explainTempTable(pParse, +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrExp; /* Address of OP_Explain instruction */ +#endif + ExplainQueryPlan2(addrExp, (pParse, 0, "USE TEMP B-TREE FOR %s", (sDistinct.isTnct && (p->selFlags&SF_Distinct)==0) ? - "DISTINCT" : "GROUP BY"); + "DISTINCT" : "GROUP BY" + )); groupBySort = 1; nGroupBy = pGroupBy->nExpr;

@@ -144839,18 +149489,40 @@ }

} pAggInfo->directMode = 0; regRecord = sqlite3GetTempReg(pParse); + sqlite3VdbeScanStatusCounters(v, addrExp, 0, sqlite3VdbeCurrentAddr(v)); sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord); sqlite3VdbeAddOp2(v, OP_SorterInsert, pAggInfo->sortingIdx, regRecord); + sqlite3VdbeScanStatusRange(v, addrExp, sqlite3VdbeCurrentAddr(v)-2, -1); sqlite3ReleaseTempReg(pParse, regRecord); sqlite3ReleaseTempRange(pParse, regBase, nCol); - SELECTTRACE(1,pParse,p,("WhereEnd\n")); + TREETRACE(0x2,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); pAggInfo->sortingIdxPTab = sortPTab = pParse->nTab++; sortOut = sqlite3GetTempReg(pParse); + sqlite3VdbeScanStatusCounters(v, addrExp, sqlite3VdbeCurrentAddr(v), 0); sqlite3VdbeAddOp3(v, OP_OpenPseudo, sortPTab, sortOut, nCol); sqlite3VdbeAddOp2(v, OP_SorterSort, pAggInfo->sortingIdx, addrEnd); VdbeComment((v, "GROUP BY sort")); VdbeCoverage(v); pAggInfo->useSortingIdx = 1; + sqlite3VdbeScanStatusRange(v, addrExp, -1, sortPTab); + sqlite3VdbeScanStatusRange(v, addrExp, -1, pAggInfo->sortingIdx); + } + + /* If there are entries in pAgggInfo->aFunc[] that contain subexpressions + ** that are indexed (and that were previously identified and tagged + ** in optimizeAggregateUseOfIndexedExpr()) then those subexpressions + ** must now be converted into a TK_AGG_COLUMN node so that the value + ** is correctly pulled from the index rather than being recomputed. */ + if( pParse->pIdxEpr ){ + aggregateConvertIndexedExprRefToColumn(pAggInfo); +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x20 ){ + TREETRACE(0x20, pParse, p, + ("AggInfo function expressions converted to reference index\n")); + sqlite3TreeViewSelect(0, p, 0); + printAggInfo(pAggInfo); + } +#endif } /* If the index or temporary table used by the GROUP BY sort

@@ -144921,7 +149593,7 @@ if( groupBySort ){

sqlite3VdbeAddOp2(v, OP_SorterNext, pAggInfo->sortingIdx,addrTopOfLoop); VdbeCoverage(v); }else{ - SELECTTRACE(1,pParse,p,("WhereEnd\n")); + TREETRACE(0x2,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); sqlite3VdbeChangeToNoop(v, addrSortingIdx); }

@@ -145031,7 +149703,8 @@ sqlite3VdbeAddOp4Int(v, OP_OpenRead, iCsr, (int)iRoot, iDb, 1);

if( pKeyInfo ){ sqlite3VdbeChangeP4(v, -1, (char *)pKeyInfo, P4_KEYINFO); } - sqlite3VdbeAddOp2(v, OP_Count, iCsr, pAggInfo->aFunc[0].iMem); + assignAggregateRegisters(pParse, pAggInfo); + sqlite3VdbeAddOp2(v, OP_Count, iCsr, AggInfoFuncReg(pAggInfo,0)); sqlite3VdbeAddOp1(v, OP_Close, iCsr); explainSimpleCount(pParse, pTab, pBest); }else{
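For reference, the code above is the simple count(*) fast path; assuming a table t1, a bare count over a single table compiles to one OP_Count scan of the table or a suitable index:

    SELECT count(*) FROM t1;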

@@ -145067,6 +149740,7 @@ assert( ExprUseXList(pAggInfo->aFunc[0].pFExpr) );

pDistinct = pAggInfo->aFunc[0].pFExpr->x.pList; distFlag = pDistinct ? (WHERE_WANT_DISTINCT|WHERE_AGG_DISTINCT) : 0; } + assignAggregateRegisters(pParse, pAggInfo); /* This case runs if the aggregate has no GROUP BY clause. The ** processing is much simpler since there is only a single row

@@ -145083,13 +149757,13 @@ */

assert( minMaxFlag==WHERE_ORDERBY_NORMAL || pMinMaxOrderBy!=0 ); assert( pMinMaxOrderBy==0 || pMinMaxOrderBy->nExpr==1 ); - SELECTTRACE(1,pParse,p,("WhereBegin\n")); + TREETRACE(0x2,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMaxOrderBy, pDistinct, p, minMaxFlag|distFlag, 0); if( pWInfo==0 ){ goto select_end; } - SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); + TREETRACE(0x2,pParse,p,("WhereBegin returns\n")); eDist = sqlite3WhereIsDistinct(pWInfo); updateAccumulator(pParse, regAcc, pAggInfo, eDist); if( eDist!=WHERE_DISTINCT_NOOP ){

@@ -145103,7 +149777,7 @@ if( regAcc ) sqlite3VdbeAddOp2(v, OP_Integer, 1, regAcc);

if( minMaxFlag ){ sqlite3WhereMinMaxOptEarlyOut(v, pWInfo); } - SELECTTRACE(1,pParse,p,("WhereEnd\n")); + TREETRACE(0x2,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); finalizeAggFunctions(pParse, pAggInfo); }

@@ -145125,8 +149799,6 @@ /* If there is an ORDER BY clause, then we need to sort the results

** and send them to the callback one by one. */ if( sSort.pOrderBy ){ - explainTempTable(pParse, - sSort.nOBSat>0 ? "RIGHT PART OF ORDER BY":"ORDER BY"); assert( p->pEList==pEList ); generateSortTail(pParse, p, &sSort, pEList->nExpr, pDest); }

@@ -145150,7 +149822,7 @@ #ifdef SQLITE_DEBUG

if( pAggInfo && !db->mallocFailed ){ for(i=0; i<pAggInfo->nColumn; i++){ Expr *pExpr = pAggInfo->aCol[i].pCExpr; - assert( pExpr!=0 ); + if( pExpr==0 ) continue; assert( pExpr->pAggInfo==pAggInfo ); assert( pExpr->iAgg==i ); }

@@ -145164,8 +149836,8 @@ }

#endif #if TREETRACE_ENABLED - SELECTTRACE(0x1,pParse,p,("end processing\n")); - if( (sqlite3TreeTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ + TREETRACE(0x1,pParse,p,("end processing\n")); + if( (sqlite3TreeTrace & 0x40000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ sqlite3TreeViewSelect(0, p, 0); } #endif

@@ -145439,7 +150111,7 @@ Trigger *pTrig = (Trigger *)sqliteHashData(p);

if( pTrig->pTabSchema==pTab->pSchema && pTrig->table && 0==sqlite3StrICmp(pTrig->table, pTab->zName) - && pTrig->pTabSchema!=pTmpSchema + && (pTrig->pTabSchema!=pTmpSchema || pTrig->bReturning) ){ pTrig->pNext = pList; pList = pTrig;

@@ -145561,6 +150233,10 @@ if( IsVirtual(pTab) ){

sqlite3ErrorMsg(pParse, "cannot create triggers on virtual tables"); goto trigger_orphan_error; } + if( (pTab->tabFlags & TF_Shadow)!=0 && sqlite3ReadOnlyShadowTables(db) ){ + sqlite3ErrorMsg(pParse, "cannot create triggers on shadow tables"); + goto trigger_orphan_error; + } /* Check that the trigger name is not reserved and that no trigger of the ** specified name exists */

@@ -145580,6 +150256,7 @@ sqlite3ErrorMsg(pParse, "trigger %T already exists", pName);

}else{ assert( !db->init.busy ); sqlite3CodeVerifySchema(pParse, iDb); + VVA_ONLY( pParse->ifNotExists = 1; ) } goto trigger_cleanup; }

@@ -146343,10 +151020,17 @@ Select sSelect;

SrcList sFrom; assert( v!=0 ); - assert( pParse->bReturning ); + if( !pParse->bReturning ){ + /* This RETURNING trigger must be for a different statement as + ** this statement lacks a RETURNING clause. */ + return; + } assert( db->pParse==pParse ); pReturning = pParse->u1.pReturning; - assert( pTrigger == &(pReturning->retTrig) ); + if( pTrigger != &(pReturning->retTrig) ){ + /* This RETURNING trigger is for a different statement */ + return; + } memset(&sSelect, 0, sizeof(sSelect)); memset(&sFrom, 0, sizeof(sFrom)); sSelect.pEList = sqlite3ExprListDup(db, pReturning->pReturnEL, 0);

@@ -146361,7 +151045,7 @@ sqlite3GenerateColumnNames(pParse, &sSelect);

} sqlite3ExprListDelete(db, sSelect.pEList); pNew = sqlite3ExpandReturning(pParse, pReturning->pReturnEL, pTab); - if( !db->mallocFailed ){ + if( pParse->nErr==0 ){ NameContext sNC; memset(&sNC, 0, sizeof(sNC)); if( pReturning->nRetCol==0 ){

@@ -146830,6 +151514,9 @@ u32 mask = 0;

Trigger *p; assert( isNew==1 || isNew==0 ); + if( IsView(pTab) ){ + return 0xffffffff; + } for(p=pTrigger; p; p=p->pNext){ if( p->op==op && (tr_tm&p->tr_tm)

@@ -147080,7 +151767,7 @@ pWhere2 = sqlite3ExprDup(db, pWhere, 0);

assert( pTabList->nSrc>1 ); if( pSrc ){ - pSrc->a[0].fg.notCte = 1; + assert( pSrc->a[0].fg.notCte ); pSrc->a[0].iCursor = -1; pSrc->a[0].pTab->nTabRef--; pSrc->a[0].pTab = 0;

@@ -147119,7 +151806,8 @@ );

} } pSelect = sqlite3SelectNew(pParse, pList, - pSrc, pWhere2, pGrp, 0, pOrderBy2, SF_UFSrcCheck|SF_IncludeHidden, pLimit2 + pSrc, pWhere2, pGrp, 0, pOrderBy2, + SF_UFSrcCheck|SF_IncludeHidden|SF_UpdateFrom, pLimit2 ); if( pSelect ) pSelect->selFlags |= SF_OrderByReqd; sqlite3SelectDestInit(&dest, eDest, iEph);

@@ -147263,7 +151951,7 @@

if( sqlite3ViewGetColumnNames(pParse, pTab) ){ goto update_cleanup; } - if( sqlite3IsReadOnly(pParse, pTab, tmask) ){ + if( sqlite3IsReadOnly(pParse, pTab, pTrigger) ){ goto update_cleanup; }

@@ -147582,12 +152270,22 @@ }else{

/* Begin the database scan. ** ** Do not consider a single-pass strategy for a multi-row update if - ** there are any triggers or foreign keys to process, or rows may - ** be deleted as a result of REPLACE conflict handling. Any of these - ** things might disturb a cursor being used to scan through the table - ** or index, causing a single-pass approach to malfunction. */ + ** there is anything that might disrupt the cursor being used to do + ** the UPDATE: + ** (1) This is a nested UPDATE + ** (2) There are triggers + ** (3) There are FOREIGN KEY constraints + ** (4) There are REPLACE conflict handlers + ** (5) There are subqueries in the WHERE clause + */ flags = WHERE_ONEPASS_DESIRED; - if( !pParse->nested && !pTrigger && !hasFK && !chngKey && !bReplace ){ + if( !pParse->nested + && !pTrigger + && !hasFK + && !chngKey + && !bReplace + && (pWhere==0 || !ExprHasProperty(pWhere, EP_Subquery)) + ){ flags |= WHERE_ONEPASS_MULTIROW; } pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere,0,0,0,flags,iIdxCur);

@@ -147658,6 +152356,8 @@ }

if( !isView ){ int addrOnce = 0; + int iNotUsed1 = 0; + int iNotUsed2 = 0; /* Open every index that needs updating. */ if( eOnePass!=ONEPASS_OFF ){

@@ -147669,7 +152369,7 @@ if( eOnePass==ONEPASS_MULTI && (nIdx-(aiCurOnePass[1]>=0))>0 ){

addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); } sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, 0, iBaseCur, - aToOpen, 0, 0); + aToOpen, &iNotUsed1, &iNotUsed2); if( addrOnce ){ sqlite3VdbeJumpHereOrPopInst(v, addrOnce); }

@@ -147960,8 +152660,10 @@ if( regRowCount ){

sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1); } - sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges, - TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue); + if( pTrigger ){ + sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges, + TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue); + } /* Repeat the above with the next record to be updated, until ** all record selected by the WHERE clause have been updated.

@@ -148056,7 +152758,7 @@ WhereInfo *pWInfo = 0;

int nArg = 2 + pTab->nCol; /* Number of arguments to VUpdate */ int regArg; /* First register in VUpdate arg array */ int regRec; /* Register in which to assemble record */ - int regRowid; /* Register for ephem table rowid */ + int regRowid; /* Register for ephemeral table rowid */ int iCsr = pSrc->a[0].iCursor; /* Cursor used for virtual table scan */ int aDummy[2]; /* Unused arg for sqlite3WhereOkOnePass() */ int eOnePass; /* True to use onepass strategy */

@@ -148100,7 +152802,9 @@ pList = sqlite3ExprListAppend(pParse, pList,

sqlite3ExprDup(db, pChanges->a[aXRef[i]].pExpr, 0) ); }else{ - pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i)); + Expr *pRowExpr = exprRowColumn(pParse, i); + if( pRowExpr ) pRowExpr->op2 = OPFLAG_NOCHNG; + pList = sqlite3ExprListAppend(pParse, pList, pRowExpr); } }

@@ -148177,7 +152881,7 @@ if( pSrc->nSrc==1 ){

sqlite3WhereEnd(pWInfo); } - /* Begin scannning through the ephemeral table. */ + /* Begin scanning through the ephemeral table. */ addr = sqlite3VdbeAddOp1(v, OP_Rewind, ephemTab); VdbeCoverage(v); /* Extract arguments from the current row of the ephemeral table and

@@ -148385,7 +153089,7 @@ sCol[1].iColumn = pIdx->aiColumn[ii];

pExpr = &sCol[0]; } for(jj=0; jj<nn; jj++){ - if( sqlite3ExprCompare(pParse,pTarget->a[jj].pExpr,pExpr,iCursor)<2 ){ + if( sqlite3ExprCompare(0,pTarget->a[jj].pExpr,pExpr,iCursor)<2 ){ break; /* Column ii of the index matches column jj of target */ } }

@@ -148734,7 +153438,7 @@ ** occurs anyway. The integrity of the database is maintained by a

** (possibly synchronous) transaction opened on the main database before ** sqlite3BtreeCopyFile() is called. ** - ** An optimisation would be to use a non-journaled pager. + ** An optimization would be to use a non-journaled pager. ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but ** that actually made the VACUUM run slower. Very little journalling ** actually occurs when doing a vacuum since the vacuum_db is initially

@@ -149153,10 +153857,10 @@

pVTab->nRef--; if( pVTab->nRef==0 ){ sqlite3_vtab *p = pVTab->pVtab; - sqlite3VtabModuleUnref(pVTab->db, pVTab->pMod); if( p ){ p->pModule->xDisconnect(p); } + sqlite3VtabModuleUnref(pVTab->db, pVTab->pMod); sqlite3DbFree(db, pVTab); } }

@@ -149423,7 +154127,7 @@ ** schema table. We just need to update that slot with all

** the information we've collected. ** ** The VM register number pParse->regRowid holds the rowid of an - ** entry in the sqlite_schema table tht was created for this vtab + ** entry in the sqlite_schema table that was created for this vtab ** by sqlite3StartTable(). */ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);

@@ -149552,7 +154256,9 @@ sCtx.pVTable = pVTable;

sCtx.pPrior = db->pVtabCtx; sCtx.bDeclared = 0; db->pVtabCtx = &sCtx; + pTab->nTabRef++; rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVTable->pVtab, &zErr); + sqlite3DeleteTable(db, pTab); db->pVtabCtx = sCtx.pPrior; if( rc==SQLITE_NOMEM ) sqlite3OomFault(db); assert( sCtx.pTab==pTab );

@@ -149761,7 +154467,7 @@ #endif

sqlite3_mutex_enter(db->mutex); pCtx = db->pVtabCtx; if( !pCtx || pCtx->bDeclared ){ - sqlite3Error(db, SQLITE_MISUSE); + sqlite3Error(db, SQLITE_MISUSE_BKPT); sqlite3_mutex_leave(db->mutex); return SQLITE_MISUSE_BKPT; }

@@ -150042,7 +154748,10 @@ xMethod = pMod->xRelease;

break; } if( xMethod && pVTab->iSavepoint>iSavepoint ){ + u64 savedFlags = (db->flags & SQLITE_Defensive); + db->flags &= ~(u64)SQLITE_Defensive; rc = xMethod(pVTab->pVtab, iSavepoint); + db->flags |= savedFlags; } sqlite3VtabUnlock(pVTab); }

@@ -150162,7 +154871,7 @@ ** and an error message was left in pParse.

** ** An eponymous virtual table instance is one that is named after its ** module, and more importantly, does not require a CREATE VIRTUAL TABLE -** statement in order to come into existance. Eponymous virtual table +** statement in order to come into existence. Eponymous virtual table ** instances always exist. They cannot be DROP-ed. ** ** Any virtual table module for which xConnect and xCreate are the same

@@ -150271,6 +154980,10 @@ case SQLITE_VTAB_DIRECTONLY: {

p->pVTable->eVtabRisk = SQLITE_VTABRISK_High; break; } + case SQLITE_VTAB_USES_ALL_SCHEMAS: { + p->pVTable->bAllSchemas = 1; + break; + } default: { rc = SQLITE_MISUSE_BKPT; break;
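A hedged sketch of how a virtual table module might opt into the new verb from its xConnect method; the module and its declared schema are invented for the example, and the effect (setting the bAllSchemas flag above) is to keep a transaction open on every attached schema while the table is in use:

#include <string.h>
#include "sqlite3.h"

/* Hypothetical xConnect for a module that reads data from every attached
** database, so it declares SQLITE_VTAB_USES_ALL_SCHEMAS. */
static int allSchemasConnect(
  sqlite3 *db, void *pAux, int argc, const char *const *argv,
  sqlite3_vtab **ppVtab, char **pzErr
){
  int rc = sqlite3_declare_vtab(db, "CREATE TABLE x(schema TEXT, npage INT)");
  if( rc==SQLITE_OK ){
    rc = sqlite3_vtab_config(db, SQLITE_VTAB_USES_ALL_SCHEMAS);
  }
  if( rc==SQLITE_OK ){
    *ppVtab = (sqlite3_vtab*)sqlite3_malloc(sizeof(sqlite3_vtab));
    if( *ppVtab==0 ) return SQLITE_NOMEM;
    memset(*ppVtab, 0, sizeof(sqlite3_vtab));
  }
  (void)pAux; (void)argc; (void)argv; (void)pzErr;
  return rc;
}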

@@ -150349,7 +155062,7 @@ typedef struct WhereRightJoin WhereRightJoin;

/* ** This object is a header on a block of allocated memory that will be -** automatically freed when its WInfo oject is destructed. +** automatically freed when its WInfo object is destructed. */ struct WhereMemBlock { WhereMemBlock *pNext; /* Next block in the chain */

@@ -150410,7 +155123,7 @@ struct InLoop {

int iCur; /* The VDBE cursor used by this IN operator */ int addrInTop; /* Top of the IN loop */ int iBase; /* Base register of multi-key index record */ - int nPrefix; /* Number of prior entires in the key */ + int nPrefix; /* Number of prior entries in the key */ u8 eEndLoopOp; /* IN Loop terminator. OP_Next or OP_Prev */ } *aInLoop; /* Information about each nested IN operator */ } in; /* Used when pWLoop->wsFlags&WHERE_IN_ABLE */

@@ -150660,7 +155373,7 @@ u8 hasOr; /* True if any a[].eOperator is WO_OR */

int nTerm; /* Number of terms */ int nSlot; /* Number of entries in a[] */ int nBase; /* Number of terms through the last non-Virtual */ - WhereTerm *a; /* Each a[] describes a term of the WHERE cluase */ + WhereTerm *a; /* Each a[] describes a term of the WHERE clause */ #if defined(SQLITE_SMALL_STACK) WhereTerm aStatic[1]; /* Initial static space for a[] */ #else

@@ -150945,7 +155658,8 @@ #define WHERE_TRANSCONS 0x00200000 /* Uses a transitive constraint */

#define WHERE_BLOOMFILTER 0x00400000 /* Consider using a Bloom-filter */ #define WHERE_SELFCULL 0x00800000 /* nOut reduced by extra WHERE terms */ #define WHERE_OMIT_OFFSET 0x01000000 /* Set offset counter to zero */ -#define WHERE_VIEWSCAN 0x02000000 /* A full-scan of a VIEW or subquery */ + /* 0x02000000 -- available for reuse */ +#define WHERE_EXPRIDX 0x04000000 /* Uses an index-on-expressions */ #endif /* !defined(SQLITE_WHEREINT_H) */

@@ -151043,9 +155757,9 @@ }

/* ** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN -** command, or if either SQLITE_DEBUG or SQLITE_ENABLE_STMT_SCANSTATUS was -** defined at compile-time. If it is not a no-op, a single OP_Explain opcode -** is added to the output to describe the table scan strategy in pLevel. +** command, or if stmt_scanstatus_v2() stats are enabled, or if SQLITE_DEBUG +** was defined at compile-time. If it is not a no-op, a single OP_Explain +** opcode is added to the output to describe the table scan strategy in pLevel. ** ** If an OP_Explain opcode is added to the VM, its address is returned. ** Otherwise, if no OP_Explain is coded, zero is returned.

@@ -151057,8 +155771,8 @@ WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */

u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ ){ int ret = 0; -#if !defined(SQLITE_DEBUG) && !defined(SQLITE_ENABLE_STMT_SCANSTATUS) - if( sqlite3ParseToplevel(pParse)->explain==2 ) +#if !defined(SQLITE_DEBUG) + if( sqlite3ParseToplevel(pParse)->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) #endif { SrcItem *pItem = &pTabList->a[pLevel->iFrom];

@@ -151202,6 +155916,8 @@ sqlite3_str_append(&str, ")", 1);

zMsg = sqlite3StrAccumFinish(&str); ret = sqlite3VdbeAddOp4(v, OP_Explain, sqlite3VdbeCurrentAddr(v), pParse->addrExplain, 0, zMsg,P4_DYNAMIC); + + sqlite3VdbeScanStatus(v, sqlite3VdbeCurrentAddr(v)-1, 0, 0, 0, 0); return ret; } #endif /* SQLITE_OMIT_EXPLAIN */
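These OP_Explain rows are what the sqlite3_stmt_scanstatus_v2() interface reports on. A minimal consumer might look like the following, assuming a build with SQLITE_ENABLE_STMT_SCANSTATUS and a statement that has already been stepped:

#include <stdio.h>
#include "sqlite3.h"

/* Print one line per recorded loop. SQLITE_SCANSTAT_COMPLEX reports every
** loop, including the sorter and automatic-index loops instrumented
** elsewhere in this diff, not just the top-level table scans. */
static void dump_scanstatus(sqlite3_stmt *pStmt){
  int i;
  for(i=0; ; i++){
    const char *zExplain = 0;
    sqlite3_int64 nLoop = 0, nCycle = 0;
    if( sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_EXPLAIN,
                                   SQLITE_SCANSTAT_COMPLEX, &zExplain) ){
      break;  /* index i is past the last entry */
    }
    sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_NLOOP,
                               SQLITE_SCANSTAT_COMPLEX, &nLoop);
    sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_NCYCLE,
                               SQLITE_SCANSTAT_COMPLEX, &nCycle);
    printf("%s: nloop=%lld ncycle=%lld\n",
           zExplain ? zExplain : "?", (long long)nLoop, (long long)nCycle);
  }
}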

@@ -151222,16 +155938,37 @@ SrcList *pSrclist, /* FROM clause pLvl reads data from */

WhereLevel *pLvl, /* Level to add scanstatus() entry for */ int addrExplain /* Address of OP_Explain (or 0) */ ){ - const char *zObj = 0; - WhereLoop *pLoop = pLvl->pWLoop; - if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){ - zObj = pLoop->u.btree.pIndex->zName; - }else{ - zObj = pSrclist->a[pLvl->iFrom].zName; + if( IS_STMT_SCANSTATUS( sqlite3VdbeDb(v) ) ){ + const char *zObj = 0; + WhereLoop *pLoop = pLvl->pWLoop; + int wsFlags = pLoop->wsFlags; + int viaCoroutine = 0; + + if( (wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){ + zObj = pLoop->u.btree.pIndex->zName; + }else{ + zObj = pSrclist->a[pLvl->iFrom].zName; + viaCoroutine = pSrclist->a[pLvl->iFrom].fg.viaCoroutine; + } + sqlite3VdbeScanStatus( + v, addrExplain, pLvl->addrBody, pLvl->addrVisit, pLoop->nOut, zObj + ); + + if( viaCoroutine==0 ){ + if( (wsFlags & (WHERE_MULTI_OR|WHERE_AUTO_INDEX))==0 ){ + sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iTabCur); + } + if( wsFlags & WHERE_INDEXED ){ + sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iIdxCur); + } + }else{ + int addr = pSrclist->a[pLvl->iFrom].addrFillSub; + VdbeOp *pOp = sqlite3VdbeGetOp(v, addr-1); + assert( sqlite3VdbeDb(v)->mallocFailed || pOp->opcode==OP_InitCoroutine ); + assert( sqlite3VdbeDb(v)->mallocFailed || pOp->p2>addr ); + sqlite3VdbeScanStatusRange(v, addrExplain, addr, pOp->p2-1); + } } - sqlite3VdbeScanStatus( - v, addrExplain, pLvl->addrBody, pLvl->addrVisit, pLoop->nOut, zObj - ); } #endif

@@ -151291,7 +156028,7 @@ }else{

pTerm->wtFlags |= TERM_CODED; } #ifdef WHERETRACE_ENABLED - if( sqlite3WhereTrace & 0x20000 ){ + if( (sqlite3WhereTrace & 0x4001)==0x4001 ){ sqlite3DebugPrintf("DISABLE-"); sqlite3WhereTermPrint(pTerm, (int)(pTerm - (pTerm->pWC->a))); }

@@ -151406,68 +156143,75 @@ WhereLoop *pLoop, /* The current loop */

Expr *pX /* The IN expression to be reduced */ ){ sqlite3 *db = pParse->db; + Select *pSelect; /* Pointer to the SELECT on the RHS */ Expr *pNew; pNew = sqlite3ExprDup(db, pX, 0); if( db->mallocFailed==0 ){ - ExprList *pOrigRhs; /* Original unmodified RHS */ - ExprList *pOrigLhs; /* Original unmodified LHS */ - ExprList *pRhs = 0; /* New RHS after modifications */ - ExprList *pLhs = 0; /* New LHS after mods */ - int i; /* Loop counter */ - Select *pSelect; /* Pointer to the SELECT on the RHS */ + for(pSelect=pNew->x.pSelect; pSelect; pSelect=pSelect->pPrior){ + ExprList *pOrigRhs; /* Original unmodified RHS */ + ExprList *pOrigLhs = 0; /* Original unmodified LHS */ + ExprList *pRhs = 0; /* New RHS after modifications */ + ExprList *pLhs = 0; /* New LHS after mods */ + int i; /* Loop counter */ - assert( ExprUseXSelect(pNew) ); - pOrigRhs = pNew->x.pSelect->pEList; - assert( pNew->pLeft!=0 ); - assert( ExprUseXList(pNew->pLeft) ); - pOrigLhs = pNew->pLeft->x.pList; - for(i=iEq; i<pLoop->nLTerm; i++){ - if( pLoop->aLTerm[i]->pExpr==pX ){ - int iField; - assert( (pLoop->aLTerm[i]->eOperator & (WO_OR|WO_AND))==0 ); - iField = pLoop->aLTerm[i]->u.x.iField - 1; - if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */ - pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr); - pOrigRhs->a[iField].pExpr = 0; - assert( pOrigLhs->a[iField].pExpr!=0 ); - pLhs = sqlite3ExprListAppend(pParse, pLhs, pOrigLhs->a[iField].pExpr); - pOrigLhs->a[iField].pExpr = 0; + assert( ExprUseXSelect(pNew) ); + pOrigRhs = pSelect->pEList; + assert( pNew->pLeft!=0 ); + assert( ExprUseXList(pNew->pLeft) ); + if( pSelect==pNew->x.pSelect ){ + pOrigLhs = pNew->pLeft->x.pList; + } + for(i=iEq; i<pLoop->nLTerm; i++){ + if( pLoop->aLTerm[i]->pExpr==pX ){ + int iField; + assert( (pLoop->aLTerm[i]->eOperator & (WO_OR|WO_AND))==0 ); + iField = pLoop->aLTerm[i]->u.x.iField - 1; + if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */ + pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr); + pOrigRhs->a[iField].pExpr = 0; + if( pOrigLhs ){ + assert( pOrigLhs->a[iField].pExpr!=0 ); + pLhs = sqlite3ExprListAppend(pParse,pLhs,pOrigLhs->a[iField].pExpr); + pOrigLhs->a[iField].pExpr = 0; + } + } + } + sqlite3ExprListDelete(db, pOrigRhs); + if( pOrigLhs ){ + sqlite3ExprListDelete(db, pOrigLhs); + pNew->pLeft->x.pList = pLhs; } - } - sqlite3ExprListDelete(db, pOrigRhs); - sqlite3ExprListDelete(db, pOrigLhs); - pNew->pLeft->x.pList = pLhs; - pNew->x.pSelect->pEList = pRhs; - if( pLhs && pLhs->nExpr==1 ){ - /* Take care here not to generate a TK_VECTOR containing only a - ** single value. Since the parser never creates such a vector, some - ** of the subroutines do not handle this case. */ - Expr *p = pLhs->a[0].pExpr; - pLhs->a[0].pExpr = 0; - sqlite3ExprDelete(db, pNew->pLeft); - pNew->pLeft = p; - } - pSelect = pNew->x.pSelect; - if( pSelect->pOrderBy ){ - /* If the SELECT statement has an ORDER BY clause, zero the - ** iOrderByCol variables. These are set to non-zero when an - ** ORDER BY term exactly matches one of the terms of the - ** result-set. Since the result-set of the SELECT statement may - ** have been modified or reordered, these variables are no longer - ** set correctly. Since setting them is just an optimization, - ** it's easiest just to zero them here. 
*/ - ExprList *pOrderBy = pSelect->pOrderBy; - for(i=0; i<pOrderBy->nExpr; i++){ - pOrderBy->a[i].u.x.iOrderByCol = 0; + pSelect->pEList = pRhs; + if( pLhs && pLhs->nExpr==1 ){ + /* Take care here not to generate a TK_VECTOR containing only a + ** single value. Since the parser never creates such a vector, some + ** of the subroutines do not handle this case. */ + Expr *p = pLhs->a[0].pExpr; + pLhs->a[0].pExpr = 0; + sqlite3ExprDelete(db, pNew->pLeft); + pNew->pLeft = p; + } + if( pSelect->pOrderBy ){ + /* If the SELECT statement has an ORDER BY clause, zero the + ** iOrderByCol variables. These are set to non-zero when an + ** ORDER BY term exactly matches one of the terms of the + ** result-set. Since the result-set of the SELECT statement may + ** have been modified or reordered, these variables are no longer + ** set correctly. Since setting them is just an optimization, + ** it's easiest just to zero them here. */ + ExprList *pOrderBy = pSelect->pOrderBy; + for(i=0; i<pOrderBy->nExpr; i++){ + pOrderBy->a[i].u.x.iOrderByCol = 0; + } } - } #if 0 - printf("For indexing, change the IN expr:\n"); - sqlite3TreeViewExpr(0, pX, 0); - printf("Into:\n"); - sqlite3TreeViewExpr(0, pNew, 0); + printf("For indexing, change the IN expr:\n"); + sqlite3TreeViewExpr(0, pX, 0); + printf("Into:\n"); + sqlite3TreeViewExpr(0, pNew, 0); #endif + } } return pNew; }

@@ -151720,7 +156464,7 @@

/* Figure out how many memory cells we will need then allocate them. */ regBase = pParse->nMem + 1; - nReg = pLoop->u.btree.nEq + nExtraReg; + nReg = nEq + nExtraReg; pParse->nMem += nReg; zAff = sqlite3DbStrDup(pParse->db,sqlite3IndexAffinityStr(pParse->db,pIdx));

@@ -151767,9 +156511,6 @@ }else{

sqlite3VdbeAddOp2(v, OP_Copy, r1, regBase+j); } } - } - for(j=nSkip; j<nEq; j++){ - pTerm = pLoop->aLTerm[j]; if( pTerm->eOperator & WO_IN ){ if( pTerm->pExpr->flags & EP_xIsSelect ){ /* No affinity ever needs to be (or should be) applied to a value

@@ -151912,18 +156653,19 @@ **

** 2) transform the expression node to a TK_REGISTER node that reads ** from the newly populated register. ** -** Also, if the node is a TK_COLUMN that does access the table idenified +** Also, if the node is a TK_COLUMN that does access the table identified ** by pCCurHint.iTabCur, and an index is being used (which we will ** know because CCurHint.pIdx!=0) then transform the TK_COLUMN into ** an access of the index rather than the original table. */ static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){ int rc = WRC_Continue; + int reg; struct CCurHint *pHint = pWalker->u.pCCurHint; if( pExpr->op==TK_COLUMN ){ if( pExpr->iTable!=pHint->iTabCur ){ - int reg = ++pWalker->pParse->nMem; /* Register for column value */ - sqlite3ExprCode(pWalker->pParse, pExpr, reg); + reg = ++pWalker->pParse->nMem; /* Register for column value */ + reg = sqlite3ExprCodeTarget(pWalker->pParse, pExpr, reg); pExpr->op = TK_REGISTER; pExpr->iTable = reg; }else if( pHint->pIdx!=0 ){

@@ -151931,15 +156673,15 @@ pExpr->iTable = pHint->iIdxCur;

pExpr->iColumn = sqlite3TableColumnToIndex(pHint->pIdx, pExpr->iColumn); assert( pExpr->iColumn>=0 ); } - }else if( pExpr->op==TK_AGG_FUNCTION ){ - /* An aggregate function in the WHERE clause of a query means this must - ** be a correlated sub-query, and expression pExpr is an aggregate from - ** the parent context. Do not walk the function arguments in this case. - ** - ** todo: It should be possible to replace this node with a TK_REGISTER - ** expression, as the result of the expression must be stored in a - ** register at this point. The same holds for TK_AGG_COLUMN nodes. */ + }else if( pExpr->pAggInfo ){ rc = WRC_Prune; + reg = ++pWalker->pParse->nMem; /* Register for column value */ + reg = sqlite3ExprCodeTarget(pWalker->pParse, pExpr, reg); + pExpr->op = TK_REGISTER; + pExpr->iTable = reg; + }else if( pExpr->op==TK_TRUEFALSE ){ + /* Do not walk disabled expressions. tag-20230504-1 */ + return WRC_Prune; } return rc; }

@@ -152041,7 +156783,7 @@ pExpr = sqlite3ExprAnd(pParse, pExpr, sqlite3ExprDup(db, pTerm->pExpr, 0));

} if( pExpr!=0 ){ sWalker.xExprCallback = codeCursorHintFixExpr; - sqlite3WalkExpr(&sWalker, pExpr); + if( pParse->nErr==0 ) sqlite3WalkExpr(&sWalker, pExpr); sqlite3VdbeAddOp4(v, OP_CursorHint, (sHint.pIdx ? sHint.iIdxCur : sHint.iTabCur), 0, 0, (const char*)pExpr, P4_EXPR);

@@ -152278,13 +157020,15 @@ iCur = pTabItem->iCursor;

pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur); bRev = (pWInfo->revMask>>iLevel)&1; VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName)); -#if WHERETRACE_ENABLED /* 0x20800 */ - if( sqlite3WhereTrace & 0x800 ){ +#if WHERETRACE_ENABLED /* 0x4001 */ + if( sqlite3WhereTrace & 0x1 ){ sqlite3DebugPrintf("Coding level %d of %d: notReady=%llx iFrom=%d\n", iLevel, pWInfo->nLevel, (u64)notReady, pLevel->iFrom); - sqlite3WhereLoopPrint(pLoop, pWC); + if( sqlite3WhereTrace & 0x1000 ){ + sqlite3WhereLoopPrint(pLoop, pWC); + } } - if( sqlite3WhereTrace & 0x20000 ){ + if( (sqlite3WhereTrace & 0x4001)==0x4001 ){ if( iLevel==0 ){ sqlite3DebugPrintf("WHERE clause being coded:\n"); sqlite3TreeViewExpr(0, pWInfo->pWhere, 0);

@@ -152527,7 +157271,7 @@ /* TK_GE */ OP_SeekGE

}; assert( TK_LE==TK_GT+1 ); /* Make sure the ordering.. */ assert( TK_LT==TK_GT+2 ); /* ... of the TK_xx values... */ - assert( TK_GE==TK_GT+3 ); /* ... is correcct. */ + assert( TK_GE==TK_GT+3 ); /* ... is correct. */ assert( (pStart->wtFlags & TERM_VNULL)==0 ); testcase( pStart->wtFlags & TERM_VIRTUAL );

@@ -152833,7 +157577,7 @@ ** on the estimated number of rows in the btree seems like a good

** guess. */ addrSeekScan = sqlite3VdbeAddOp1(v, OP_SeekScan, (pIdx->aiRowLogEst[0]+9)/10); - if( pRangeStart ){ + if( pRangeStart || pRangeEnd ){ sqlite3VdbeChangeP5(v, 1); sqlite3VdbeChangeP2(v, addrSeekScan, sqlite3VdbeCurrentAddr(v)+1); addrSeekScan = 0;

@@ -152874,16 +157618,7 @@ nConstraint = nEq;

assert( pLevel->p2==0 ); if( pRangeEnd ){ Expr *pRight = pRangeEnd->pExpr->pRight; - if( addrSeekScan ){ - /* For a seek-scan that has a range on the lowest term of the index, - ** we have to make the top of the loop be code that sets the end - ** condition of the range. Otherwise, the OP_SeekScan might jump - ** over that initialization, leaving the range-end value set to the - ** range-start value, resulting in a wrong answer. - ** See ticket 5981a8c041a3c2f3 (2021-11-02). - */ - pLevel->p2 = sqlite3VdbeCurrentAddr(v); - } + assert( addrSeekScan==0 ); codeExprOrVector(pParse, pRight, regBase+nEq, nTop); whereLikeOptimizationStringFixup(v, pLevel, pRangeEnd); if( (pRangeEnd->wtFlags & TERM_VNULL)==0

@@ -152917,7 +157652,7 @@ if( zStartAff ) sqlite3DbNNFreeNN(db, zStartAff);

if( zEndAff ) sqlite3DbNNFreeNN(db, zEndAff); /* Top of the loop body */ - if( pLevel->p2==0 ) pLevel->p2 = sqlite3VdbeCurrentAddr(v); + pLevel->p2 = sqlite3VdbeCurrentAddr(v); /* Check if the index cursor is past the end of the range. */ if( nConstraint ){

@@ -153208,7 +157943,7 @@ pOrExpr = pAndExpr;

} /* Loop through table entries that match term pOrTerm. */ ExplainQueryPlan((pParse, 1, "INDEX %d", ii+1)); - WHERETRACE(0xffff, ("Subplan for OR-clause:\n")); + WHERETRACE(0xffffffff, ("Subplan for OR-clause:\n")); pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0, 0, WHERE_OR_SUBCLAUSE, iCovCur); assert( pSubWInfo || pParse->nErr );

@@ -153445,12 +158180,12 @@ VdbeCoverageIf(v, (x&1)==0);

} #endif } -#ifdef WHERETRACE_ENABLED /* 0xffff */ +#ifdef WHERETRACE_ENABLED /* 0xffffffff */ if( sqlite3WhereTrace ){ VdbeNoopComment((v, "WhereTerm[%d] (%p) priority=%d", pWC->nTerm-j, pTerm, iLoop)); } - if( sqlite3WhereTrace & 0x800 ){ + if( sqlite3WhereTrace & 0x4000 ){ sqlite3DebugPrintf("Coding auxiliary constraint:\n"); sqlite3WhereTermPrint(pTerm, pWC->nTerm-j); }

@@ -153479,8 +158214,8 @@ if( (pTerm->eOperator & WO_EQUIV)==0 ) continue;

if( pTerm->leftCursor!=iCur ) continue; if( pTabItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT) ) continue; pE = pTerm->pExpr; -#ifdef WHERETRACE_ENABLED /* 0x800 */ - if( sqlite3WhereTrace & 0x800 ){ +#ifdef WHERETRACE_ENABLED /* 0x4001 */ + if( (sqlite3WhereTrace & 0x4001)==0x4001 ){ sqlite3DebugPrintf("Coding transitive constraint:\n"); sqlite3WhereTermPrint(pTerm, pWC->nTerm-j); }

@@ -153595,13 +158330,13 @@ pTerm->wtFlags |= TERM_CODED;

} } -#if WHERETRACE_ENABLED /* 0x20800 */ - if( sqlite3WhereTrace & 0x20000 ){ +#if WHERETRACE_ENABLED /* 0x4001 */ + if( sqlite3WhereTrace & 0x4000 ){ sqlite3DebugPrintf("All WHERE-clause terms after coding level %d:\n", iLevel); sqlite3WhereClausePrint(pWC); } - if( sqlite3WhereTrace & 0x800 ){ + if( sqlite3WhereTrace & 0x1 ){ sqlite3DebugPrintf("End Coding level %d: notReady=%llx\n", iLevel, (u64)pLevel->notReady); }

@@ -153716,7 +158451,7 @@ ** This module contains C code that generates VDBE code used to process

** the WHERE clause of SQL statements. ** ** This file was originally part of where.c but was split out to improve -** readability and editabiliity. This file contains utility routines for +** readability and editability. This file contains utility routines for ** analyzing Expr objects in the WHERE clause. */ /* #include "sqliteInt.h" */

@@ -153932,7 +158667,7 @@ ** that we can increment the prefix key to find an upper bound for the

** range search. The third is because the caller assumes that the pattern ** consists of at least one character after all escapes have been ** removed. */ - if( cnt!=0 && 255!=(u8)z[cnt-1] && (cnt>1 || z[0]!=wc[3]) ){ + if( (cnt>1 || (cnt>0 && z[0]!=wc[3])) && 255!=(u8)z[cnt-1] ){ Expr *pPrefix; /* A "complete" match if the pattern ends with "*" or "%" */

@@ -154505,7 +159240,7 @@ if( (chngToIN & sqlite3WhereGetMask(&pWInfo->sMaskSet,

pOrTerm->leftCursor))==0 ){ /* This term must be of the form t1.a==t2.b where t2 is in the ** chngToIN set but t1 is not. This term will be either preceded - ** or follwed by an inverted copy (t2.b==t1.a). Skip this term + ** or followed by an inverted copy (t2.b==t1.a). Skip this term ** and use its inversion. */ testcase( pOrTerm->wtFlags & TERM_COPIED ); testcase( pOrTerm->wtFlags & TERM_VIRTUAL );

@@ -154677,36 +159412,40 @@ ** might be added to an automatic index later.

*/ static SQLITE_NOINLINE int exprMightBeIndexed2( SrcList *pFrom, /* The FROM clause */ - Bitmask mPrereq, /* Bitmask of FROM clause terms referenced by pExpr */ int *aiCurCol, /* Write the referenced table cursor and column here */ - Expr *pExpr /* An operand of a comparison operator */ + Expr *pExpr, /* An operand of a comparison operator */ + int j /* Start looking with the j-th pFrom entry */ ){ Index *pIdx; int i; int iCur; - for(i=0; mPrereq>1; i++, mPrereq>>=1){} - iCur = pFrom->a[i].iCursor; - for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->aColExpr==0 ) continue; - for(i=0; i<pIdx->nKeyCol; i++){ - if( pIdx->aiColumn[i]!=XN_EXPR ) continue; - assert( pIdx->bHasExpr ); - if( sqlite3ExprCompareSkip(pExpr, pIdx->aColExpr->a[i].pExpr, iCur)==0 ){ - aiCurCol[0] = iCur; - aiCurCol[1] = XN_EXPR; - return 1; + do{ + iCur = pFrom->a[j].iCursor; + for(pIdx=pFrom->a[j].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + if( pIdx->aColExpr==0 ) continue; + for(i=0; i<pIdx->nKeyCol; i++){ + if( pIdx->aiColumn[i]!=XN_EXPR ) continue; + assert( pIdx->bHasExpr ); + if( sqlite3ExprCompareSkip(pExpr,pIdx->aColExpr->a[i].pExpr,iCur)==0 + && pExpr->op!=TK_STRING + ){ + aiCurCol[0] = iCur; + aiCurCol[1] = XN_EXPR; + return 1; + } } } - } + }while( ++j < pFrom->nSrc ); return 0; } static int exprMightBeIndexed( SrcList *pFrom, /* The FROM clause */ - Bitmask mPrereq, /* Bitmask of FROM clause terms referenced by pExpr */ int *aiCurCol, /* Write the referenced table cursor & column here */ Expr *pExpr, /* An operand of a comparison operator */ int op /* The specific comparison operator */ ){ + int i; + /* If this expression is a vector to the left or right of a ** inequality constraint (>, <, >= or <=), perform the processing ** on the first element of the vector. */

@@ -154716,7 +159455,6 @@ assert( op<=TK_GE );

if( pExpr->op==TK_VECTOR && (op>=TK_GT && ALWAYS(op<=TK_GE)) ){ assert( ExprUseXList(pExpr) ); pExpr = pExpr->x.pList->a[0].pExpr; - } if( pExpr->op==TK_COLUMN ){

@@ -154724,9 +159462,16 @@ aiCurCol[0] = pExpr->iTable;

aiCurCol[1] = pExpr->iColumn; return 1; } - if( mPrereq==0 ) return 0; /* No table references */ - if( (mPrereq&(mPrereq-1))!=0 ) return 0; /* Refs more than one table */ - return exprMightBeIndexed2(pFrom,mPrereq,aiCurCol,pExpr); + + for(i=0; i<pFrom->nSrc; i++){ + Index *pIdx; + for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + if( pIdx->aColExpr ){ + return exprMightBeIndexed2(pFrom,aiCurCol,pExpr,i); + } + } + } + return 0; }

@@ -154757,8 +159502,8 @@ WhereInfo *pWInfo = pWC->pWInfo; /* WHERE clause processing context */

WhereTerm *pTerm; /* The term to be analyzed */ WhereMaskSet *pMaskSet; /* Set of table index masks */ Expr *pExpr; /* The expression to be analyzed */ - Bitmask prereqLeft; /* Prerequesites of the pExpr->pLeft */ - Bitmask prereqAll; /* Prerequesites of pExpr */ + Bitmask prereqLeft; /* Prerequisites of the pExpr->pLeft */ + Bitmask prereqAll; /* Prerequisites of pExpr */ Bitmask extraRight = 0; /* Extra dependencies on LEFT JOIN */ Expr *pStr1 = 0; /* RHS of LIKE/GLOB operator */ int isComplete = 0; /* RHS of LIKE/GLOB ends with wildcard */

@@ -154852,7 +159597,7 @@ assert( ExprUseXList(pLeft) );

pLeft = pLeft->x.pList->a[pTerm->u.x.iField-1].pExpr; } - if( exprMightBeIndexed(pSrc, prereqLeft, aiCurCol, pLeft, op) ){ + if( exprMightBeIndexed(pSrc, aiCurCol, pLeft, op) ){ pTerm->leftCursor = aiCurCol[0]; assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); pTerm->u.x.leftColumn = aiCurCol[1];

@@ -154860,7 +159605,7 @@ pTerm->eOperator = operatorMask(op) & opMask;

} if( op==TK_IS ) pTerm->wtFlags |= TERM_IS; if( pRight - && exprMightBeIndexed(pSrc, pTerm->prereqRight, aiCurCol, pRight, op) + && exprMightBeIndexed(pSrc, aiCurCol, pRight, op) && !ExprHasProperty(pRight, EP_FixedCol) ){ WhereTerm *pNew;

@@ -154904,7 +159649,7 @@ && !ExprHasProperty(pExpr,EP_OuterON)

&& 0==sqlite3ExprCanBeNull(pLeft) ){ assert( !ExprHasProperty(pExpr, EP_IntValue) ); - pExpr->op = TK_TRUEFALSE; + pExpr->op = TK_TRUEFALSE; /* See tag-20230504-1 */ pExpr->u.zToken = "false"; ExprSetProperty(pExpr, EP_IsFalse); pTerm->prereqAll = 0;

@@ -155071,7 +159816,6 @@ pStr1);

transferJoinMarkings(pNewExpr1, pExpr); idxNew1 = whereClauseInsert(pWC, pNewExpr1, wtFlags); testcase( idxNew1==0 ); - exprAnalyze(pSrc, pWC, idxNew1); pNewExpr2 = sqlite3ExprDup(db, pLeft, 0); pNewExpr2 = sqlite3PExpr(pParse, TK_LT, sqlite3ExprAddCollateString(pParse,pNewExpr2,zCollSeqName),

@@ -155079,6 +159823,7 @@ pStr2);

transferJoinMarkings(pNewExpr2, pExpr); idxNew2 = whereClauseInsert(pWC, pNewExpr2, wtFlags); testcase( idxNew2==0 ); + exprAnalyze(pSrc, pWC, idxNew1); exprAnalyze(pSrc, pWC, idxNew2); pTerm = &pWC->a[idxTerm]; if( isComplete ){

@@ -155135,7 +159880,7 @@ else if( pExpr->op==TK_IN

&& pTerm->u.x.iField==0 && pExpr->pLeft->op==TK_VECTOR && ALWAYS( ExprUseXSelect(pExpr) ) - && pExpr->x.pSelect->pPrior==0 + && (pExpr->x.pSelect->pPrior==0 || (pExpr->x.pSelect->selFlags & SF_Values)) #ifndef SQLITE_OMIT_WINDOWFUNC && pExpr->x.pSelect->pWin==0 #endif

@@ -155323,6 +160068,13 @@ assert( pWC->a[ii].wtFlags & TERM_VIRTUAL );

assert( pWC->a[ii].eOperator==WO_ROWVAL ); continue; } + if( pWC->a[ii].nChild ){ + /* If this term has child terms, then they are also part of the + ** pWC->a[] array. So this term can be ignored, as a LIMIT clause + ** will only be added if each of the child terms passes the + ** (leftCursor==iCsr) test below. */ + continue; + } if( pWC->a[ii].leftCursor!=iCsr ) return; }

@@ -155542,9 +160294,12 @@ pItem->colUsed |= sqlite3ExprColUsed(pColRef);

pRhs = sqlite3PExpr(pParse, TK_UPLUS, sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0), 0); pTerm = sqlite3PExpr(pParse, TK_EQ, pColRef, pRhs); - if( pItem->fg.jointype & (JT_LEFT|JT_LTORJ) ){ + if( pItem->fg.jointype & (JT_LEFT|JT_RIGHT) ){ + testcase( pItem->fg.jointype & JT_LEFT ); /* testtag-20230227a */ + testcase( pItem->fg.jointype & JT_RIGHT ); /* testtag-20230227b */ joinType = EP_OuterON; }else{ + testcase( pItem->fg.jointype & JT_LTORJ ); /* testtag-20230227c */ joinType = EP_InnerON; } sqlite3SetJoinExpr(pTerm, pItem->iCursor, joinType);

@@ -155623,7 +160378,7 @@ ** is positive but less than the number of ORDER BY terms means that

** block sorting is required. */ SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo *pWInfo){ - return pWInfo->nOBSat; + return pWInfo->nOBSat<0 ? 0 : pWInfo->nOBSat; } /*

@@ -156261,7 +161016,7 @@ */

#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(WHERETRACE_ENABLED) static void whereTraceIndexInfoInputs(sqlite3_index_info *p){ int i; - if( !sqlite3WhereTrace ) return; + if( (sqlite3WhereTrace & 0x10)==0 ) return; for(i=0; i<p->nConstraint; i++){ sqlite3DebugPrintf( " constraint[%d]: col=%d termid=%d op=%d usabled=%d collseq=%s\n",

@@ -156281,7 +161036,7 @@ }

} static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ int i; - if( !sqlite3WhereTrace ) return; + if( (sqlite3WhereTrace & 0x10)==0 ) return; for(i=0; i<p->nConstraint; i++){ sqlite3DebugPrintf(" usage[%d]: argvIdx=%d omit=%d\n", i,

@@ -156368,6 +161123,57 @@ #endif

#ifndef SQLITE_OMIT_AUTOMATIC_INDEX + +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS +/* +** Argument pIdx represents an automatic index that the current statement +** will create and populate. Add an OP_Explain with text of the form: +** +** CREATE AUTOMATIC INDEX ON <table>(<cols>) [WHERE <expr>] +** +** This is only required if sqlite3_stmt_scanstatus() is enabled, to +** associate an SQLITE_SCANSTAT_NCYCLE and SQLITE_SCANSTAT_NLOOP +** values with. In order to avoid breaking legacy code and test cases, +** the OP_Explain is not added if this is an EXPLAIN QUERY PLAN command. +*/ +static void explainAutomaticIndex( + Parse *pParse, + Index *pIdx, /* Automatic index to explain */ + int bPartial, /* True if pIdx is a partial index */ + int *pAddrExplain /* OUT: Address of OP_Explain */ +){ + if( IS_STMT_SCANSTATUS(pParse->db) && pParse->explain!=2 ){ + Table *pTab = pIdx->pTable; + const char *zSep = ""; + char *zText = 0; + int ii = 0; + sqlite3_str *pStr = sqlite3_str_new(pParse->db); + sqlite3_str_appendf(pStr,"CREATE AUTOMATIC INDEX ON %s(", pTab->zName); + assert( pIdx->nColumn>1 ); + assert( pIdx->aiColumn[pIdx->nColumn-1]==XN_ROWID ); + for(ii=0; ii<(pIdx->nColumn-1); ii++){ + const char *zName = 0; + int iCol = pIdx->aiColumn[ii]; + + zName = pTab->aCol[iCol].zCnName; + sqlite3_str_appendf(pStr, "%s%s", zSep, zName); + zSep = ", "; + } + zText = sqlite3_str_finish(pStr); + if( zText==0 ){ + sqlite3OomFault(pParse->db); + }else{ + *pAddrExplain = sqlite3VdbeExplain( + pParse, 0, "%s)%s", zText, (bPartial ? " WHERE <expr>" : "") + ); + sqlite3_free(zText); + } + } +} +#else +# define explainAutomaticIndex(a,b,c,d) +#endif + /* ** Generate code to construct the Index object for an automatic index ** and to set up the WhereLevel object pLevel so that the code generator

@@ -156375,8 +161181,7 @@ ** makes use of the automatic index.

*/ static SQLITE_NOINLINE void constructAutomaticIndex( Parse *pParse, /* The parsing context */ - const WhereClause *pWC, /* The WHERE clause */ - const SrcItem *pSrc, /* The FROM clause term to get the next index */ + WhereClause *pWC, /* The WHERE clause */ const Bitmask notReady, /* Mask of cursors that are not available */ WhereLevel *pLevel /* Write new index here */ ){

@@ -156397,12 +161202,17 @@ WhereLoop *pLoop; /* The Loop object */

char *zNotUsed; /* Extra space on the end of pIdx */ Bitmask idxCols; /* Bitmap of columns used for indexing */ Bitmask extraCols; /* Bitmap of additional columns */ - u8 sentWarning = 0; /* True if a warnning has been issued */ + u8 sentWarning = 0; /* True if a warning has been issued */ + u8 useBloomFilter = 0; /* True to also add a Bloom filter */ Expr *pPartial = 0; /* Partial Index Expression */ int iContinue = 0; /* Jump here to skip excluded rows */ - SrcItem *pTabItem; /* FROM clause term being indexed */ + SrcList *pTabList; /* The complete FROM clause */ + SrcItem *pSrc; /* The FROM clause term to get the next index */ int addrCounter = 0; /* Address where integer counter is initialized */ int regBase; /* Array of registers where record is assembled */ +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrExp = 0; /* Address of OP_Explain */ +#endif /* Generate code to skip over the creation and initialization of the ** transient index on 2nd and subsequent iterations of the loop. */

@@ -156413,6 +161223,8 @@

/* Count the number of columns that will be added to the index ** and used to match WHERE clause constraints */ nKeyCol = 0; + pTabList = pWC->pWInfo->pTabList; + pSrc = &pTabList->a[pLevel->iFrom]; pTable = pSrc->pTab; pWCEnd = &pWC->a[pWC->nTerm]; pLoop = pLevel->pWLoop;

@@ -156423,7 +161235,7 @@ /* Make the automatic index a partial index if there are terms in the

** WHERE clause (or the ON clause of a LEFT join) that constrain which ** rows of the target table (pSrc) that can be used. */ if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsTableConstraint(pExpr, pSrc) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, pLevel->iFrom) ){ pPartial = sqlite3ExprAnd(pParse, pPartial, sqlite3ExprDup(pParse->db, pExpr, 0));

@@ -156464,7 +161276,11 @@ ** be a covering index because the index will not be updated if the

** original table changes and the index and table cannot both be used ** if they go out of sync. */ - extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); + if( IsView(pTable) ){ + extraCols = ALLBITS; + }else{ + extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); + } mxBitCol = MIN(BMS-1,pTable->nCol); testcase( pTable->nCol==BMS-1 ); testcase( pTable->nCol==BMS-2 );

@@ -156500,6 +161316,16 @@ pColl = sqlite3ExprCompareCollSeq(pParse, pX);

assert( pColl!=0 || pParse->nErr>0 ); /* TH3 collate01.800 */ pIdx->azColl[n] = pColl ? pColl->zName : sqlite3StrBINARY; n++; + if( ALWAYS(pX->pLeft!=0) + && sqlite3ExprAffinity(pX->pLeft)!=SQLITE_AFF_TEXT + ){ + /* TUNING: only use a Bloom filter on an automatic index + ** if one or more key columns has the ability to hold numeric + ** values, since strings all have the same hash in the Bloom + ** filter implementation and hence a Bloom filter on a text column + ** is not usually helpful. */ + useBloomFilter = 1; + } } } }

@@ -156526,25 +161352,27 @@ pIdx->aiColumn[n] = XN_ROWID;

pIdx->azColl[n] = sqlite3StrBINARY; /* Create the automatic index */ + explainAutomaticIndex(pParse, pIdx, pPartial!=0, &addrExp); assert( pLevel->iIdxCur>=0 ); pLevel->iIdxCur = pParse->nTab++; sqlite3VdbeAddOp2(v, OP_OpenAutoindex, pLevel->iIdxCur, nKeyCol+1); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "for %s", pTable->zName)); - if( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ){ + if( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) && useBloomFilter ){ + sqlite3WhereExplainBloomFilter(pParse, pWC->pWInfo, pLevel); pLevel->regFilter = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Blob, 10000, pLevel->regFilter); } /* Fill the automatic index with content */ - pTabItem = &pWC->pWInfo->pTabList->a[pLevel->iFrom]; - if( pTabItem->fg.viaCoroutine ){ - int regYield = pTabItem->regReturn; + assert( pSrc == &pWC->pWInfo->pTabList->a[pLevel->iFrom] ); + if( pSrc->fg.viaCoroutine ){ + int regYield = pSrc->regReturn; addrCounter = sqlite3VdbeAddOp2(v, OP_Integer, 0, 0); - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub); + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSrc->addrFillSub); addrTop = sqlite3VdbeAddOp1(v, OP_Yield, regYield); VdbeCoverage(v); - VdbeComment((v, "next row of %s", pTabItem->pTab->zName)); + VdbeComment((v, "next row of %s", pSrc->pTab->zName)); }else{ addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v); }

@@ -156561,17 +161389,18 @@ if( pLevel->regFilter ){

sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 0, regBase, pLoop->u.btree.nEq); } + sqlite3VdbeScanStatusCounters(v, addrExp, addrExp, sqlite3VdbeCurrentAddr(v)); sqlite3VdbeAddOp2(v, OP_IdxInsert, pLevel->iIdxCur, regRecord); sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue); - if( pTabItem->fg.viaCoroutine ){ + if( pSrc->fg.viaCoroutine ){ sqlite3VdbeChangeP2(v, addrCounter, regBase+n); testcase( pParse->db->mallocFailed ); assert( pLevel->iIdxCur>0 ); translateColumnToCopy(pParse, addrTop, pLevel->iTabCur, - pTabItem->regResult, pLevel->iIdxCur); + pSrc->regResult, pLevel->iIdxCur); sqlite3VdbeGoto(v, addrTop); - pTabItem->fg.viaCoroutine = 0; + pSrc->fg.viaCoroutine = 0; }else{ sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1); VdbeCoverage(v); sqlite3VdbeChangeP5(v, SQLITE_STMTSTATUS_AUTOINDEX);

@@ -156581,6 +161410,7 @@ sqlite3ReleaseTempReg(pParse, regRecord);

/* Jump here when skipping the initialization */ sqlite3VdbeJumpHere(v, addrInit); + sqlite3VdbeScanStatusRange(v, addrExp, addrExp, -1); end_auto_index_create: sqlite3ExprDelete(pParse->db, pPartial);

@@ -156622,16 +161452,26 @@ Parse *pParse = pWInfo->pParse; /* Parsing context */

Vdbe *v = pParse->pVdbe; /* VDBE under construction */ WhereLoop *pLoop = pLevel->pWLoop; /* The loop being coded */ int iCur; /* Cursor for table getting the filter */ + IndexedExpr *saved_pIdxEpr; /* saved copy of Parse.pIdxEpr */ + IndexedExpr *saved_pIdxPartExpr; /* saved copy of Parse.pIdxPartExpr */ + + saved_pIdxEpr = pParse->pIdxEpr; + saved_pIdxPartExpr = pParse->pIdxPartExpr; + pParse->pIdxEpr = 0; + pParse->pIdxPartExpr = 0; assert( pLoop!=0 ); assert( v!=0 ); assert( pLoop->wsFlags & WHERE_BLOOMFILTER ); + assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 ); addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); do{ + const SrcList *pTabList; const SrcItem *pItem; const Table *pTab; u64 sz; + int iSrc; sqlite3WhereExplainBloomFilter(pParse, pWInfo, pLevel); addrCont = sqlite3VdbeMakeLabel(pParse); iCur = pLevel->iTabCur;

@@ -156645,7 +161485,9 @@ ** P3==1 and use that value to initialize the blob. But that makes

** testing complicated. By basing the blob size on the value in the ** sqlite_stat1 table, testing is much easier. */ - pItem = &pWInfo->pTabList->a[pLevel->iFrom]; + pTabList = pWInfo->pTabList; + iSrc = pLevel->iFrom; + pItem = &pTabList->a[iSrc]; assert( pItem!=0 ); pTab = pItem->pTab; assert( pTab!=0 );

@@ -156662,7 +161504,7 @@ pWCEnd = &pWInfo->sWC.a[pWInfo->sWC.nTerm];

for(pTerm=pWInfo->sWC.a; pTerm<pWCEnd; pTerm++){ Expr *pExpr = pTerm->pExpr; if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsTableConstraint(pExpr, pItem) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, iSrc) ){ sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL); }

@@ -156678,9 +161520,8 @@ int n = pLoop->u.btree.nEq;

int r1 = sqlite3GetTempRange(pParse, n); int jj; for(jj=0; jj<n; jj++){ - int iCol = pIdx->aiColumn[jj]; assert( pIdx->pTable==pItem->pTab ); - sqlite3ExprCodeGetColumnOfTable(v, pIdx->pTable, iCur, iCol,r1+jj); + sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iCur, jj, r1+jj); } sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 0, r1, n); sqlite3ReleaseTempRange(pParse, r1, n);

@@ -156711,6 +161552,8 @@ }

} }while( iLevel < pWInfo->nLevel ); sqlite3VdbeJumpHere(v, addrOnce); + pParse->pIdxEpr = saved_pIdxEpr; + pParse->pIdxPartExpr = saved_pIdxPartExpr; }

@@ -156966,6 +161809,9 @@ }else{

sqlite3ErrorMsg(pParse, "%s", pVtab->zErrMsg); } } + if( pTab->u.vtab.p->bAllSchemas ){ + sqlite3VtabUsesAllSchemas(pParse); + } sqlite3_free(pVtab->zErrMsg); pVtab->zErrMsg = 0; return rc;

@@ -157009,6 +161855,7 @@ #endif

assert( pRec!=0 ); assert( pIdx->nSample>0 ); assert( pRec->nField>0 ); + /* Do a binary search to find the first sample greater than or equal ** to pRec. If pRec contains a single field, the set of samples to search

@@ -157054,7 +161901,12 @@ ** smaller than sample 1, so the binary search would not work. As a result,

** it is extended to two fields. The duplicates that this creates do not ** cause any problems. */ - nField = MIN(pRec->nField, pIdx->nSample); + if( !HasRowid(pIdx->pTable) && IsPrimaryKeyIndex(pIdx) ){ + nField = pIdx->nKeyCol; + }else{ + nField = pIdx->nColumn; + } + nField = MIN(pRec->nField, nField); iCol = 0; iSample = pIdx->nSample * nField; do{

@@ -157120,12 +161972,12 @@ ** If (i>0), then pRec must also be greater than sample (i-1). */

if( iCol>0 ){ pRec->nField = iCol; assert( sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)<=0 - || pParse->db->mallocFailed ); + || pParse->db->mallocFailed || CORRUPT_DB ); } if( i>0 ){ pRec->nField = nField; assert( sqlite3VdbeRecordCompare(aSample[i-1].n, aSample[i-1].p, pRec)<0 - || pParse->db->mallocFailed ); + || pParse->db->mallocFailed || CORRUPT_DB ); } } }

@@ -157217,7 +162069,7 @@ **

** Value pLoop->nOut is currently set to the estimated number of rows ** visited for scanning (a=? AND b=?). This function reduces that estimate ** by some factor to account for the (c BETWEEN ? AND ?) expression based -** on the stat4 data for the index. this scan will be peformed multiple +** on the stat4 data for the index. this scan will be performed multiple ** times (once for each (a,b) combination that matches a=?) is dealt with ** by the caller. **

@@ -157298,7 +162150,7 @@ if( nDiff!=1 || pUpper==0 || pLower==0 ){

int nAdjust = (sqlite3LogEst(p->nSample) - sqlite3LogEst(nDiff)); pLoop->nOut -= nAdjust; *pbDone = 1; - WHERETRACE(0x10, ("range skip-scan regions: %u..%u adjust=%d est=%d\n", + WHERETRACE(0x20, ("range skip-scan regions: %u..%u adjust=%d est=%d\n", nLower, nUpper, nAdjust*-1, pLoop->nOut)); }

@@ -157476,7 +162328,7 @@ }

if( nNew<nOut ){ nOut = nNew; } - WHERETRACE(0x10, ("STAT4 range scan: %u..%u est=%d\n", + WHERETRACE(0x20, ("STAT4 range scan: %u..%u est=%d\n", (u32)iLower, (u32)iUpper, nOut)); } }else{

@@ -157490,7 +162342,7 @@ UNUSED_PARAMETER(pParse);

UNUSED_PARAMETER(pBuilder); assert( pLower || pUpper ); #endif - assert( pUpper==0 || (pUpper->wtFlags & TERM_VNULL)==0 ); + assert( pUpper==0 || (pUpper->wtFlags & TERM_VNULL)==0 || pParse->nErr>0 ); nNew = whereRangeAdjust(pLower, nOut); nNew = whereRangeAdjust(pUpper, nNew);

@@ -157509,7 +162361,7 @@ if( nNew<10 ) nNew = 10;

if( nNew<nOut ) nOut = nNew; #if defined(WHERETRACE_ENABLED) if( pLoop->nOut>nOut ){ - WHERETRACE(0x10,("Range scan lowers nOut from %d to %d\n", + WHERETRACE(0x20,("Range scan lowers nOut from %d to %d\n", pLoop->nOut, nOut)); } #endif

@@ -157574,7 +162426,7 @@ if( bOk==0 ) return SQLITE_NOTFOUND;

pBuilder->nRecValid = nEq; whereKeyStats(pParse, p, pRec, 0, a); - WHERETRACE(0x10,("equality scan regions %s(%d): %d\n", + WHERETRACE(0x20,("equality scan regions %s(%d): %d\n", p->zName, nEq-1, (int)a[1])); *pnRow = a[1];

@@ -157622,9 +162474,9 @@ pBuilder->nRecValid = nRecValid;

} if( rc==SQLITE_OK ){ - if( nRowEst > nRow0 ) nRowEst = nRow0; + if( nRowEst > (tRowcnt)nRow0 ) nRowEst = nRow0; *pnRow = nRowEst; - WHERETRACE(0x10,("IN row estimate: est=%d\n", nRowEst)); + WHERETRACE(0x20,("IN row estimate: est=%d\n", nRowEst)); } assert( pBuilder->nRecValid==nRecValid ); return rc;

@@ -157733,7 +162585,7 @@ }else{

sqlite3DebugPrintf(" f %06x N %d", p->wsFlags, p->nLTerm); } sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut); - if( p->nLTerm && (sqlite3WhereTrace & 0x100)!=0 ){ + if( p->nLTerm && (sqlite3WhereTrace & 0x4000)!=0 ){ int i; for(i=0; i<p->nLTerm; i++){ sqlite3WhereTermPrint(p->aLTerm[i], i);

@@ -157972,7 +162824,7 @@ ** case first. Hence compatible candidate WhereLoops never have a larger

** rSetup. Call this SETUP-INVARIANT */ assert( p->rSetup>=pTemplate->rSetup ); - /* Any loop using an appliation-defined index (or PRIMARY KEY or + /* Any loop using an application-defined index (or PRIMARY KEY or ** UNIQUE constraint) with one or more == constraints is better ** than an automatic index. Unless it is a skip-scan. */ if( (p->wsFlags & WHERE_AUTO_INDEX)!=0

@@ -157999,7 +162851,7 @@ }

/* If pTemplate is always better than p, then cause p to be overwritten ** with pTemplate. pTemplate is better than p if: - ** (1) pTemplate has no more dependences than p, and + ** (1) pTemplate has no more dependencies than p, and ** (2) pTemplate has an equal or lower cost than p. */ if( (p->prereq & pTemplate->prereq)==pTemplate->prereq /* (1) */

@@ -158117,7 +162969,7 @@ p->pNextLoop = 0;

}else{ /* We will be overwriting WhereLoop p[]. But before we do, first ** go through the rest of the list and delete any other entries besides - ** p[] that are also supplated by pTemplate */ + ** p[] that are also supplanted by pTemplate */ WhereLoop **ppTail = &p->pNextLoop; WhereLoop *pToDel; while( *ppTail ){

@@ -158197,6 +163049,7 @@ if( pX==pTerm ) break;

if( pX->iParent>=0 && (&pWC->a[pX->iParent])==pTerm ) break; } if( j<0 ){ + sqlite3ProgressCheck(pWC->pWInfo->pParse); if( pLoop->maskSelf==pTerm->prereqAll ){ /* If there are extra terms in the WHERE clause not used by an index ** that depend only on the table being scanned, and that will tend to

@@ -158316,7 +163169,7 @@ return i;

} /* -** Adjust the cost C by the costMult facter T. This only occurs if +** Adjust the cost C by the costMult factor T. This only occurs if ** compiled with -DSQLITE_ENABLE_COSTMULT */ #ifdef SQLITE_ENABLE_COSTMULT

@@ -158343,7 +163196,7 @@ SrcItem *pSrc, /* FROM clause term being analyzed */

Index *pProbe, /* An index on pSrc */ LogEst nInMul /* log(Number of iterations due to IN) */ ){ - WhereInfo *pWInfo = pBuilder->pWInfo; /* WHERE analyse context */ + WhereInfo *pWInfo = pBuilder->pWInfo; /* WHERE analyze context */ Parse *pParse = pWInfo->pParse; /* Parsing context */ sqlite3 *db = pParse->db; /* Database connection malloc context */ WhereLoop *pNew; /* Template WhereLoop under construction */

@@ -158364,7 +163217,10 @@ LogEst rLogSize; /* Logarithm of table size */

WhereTerm *pTop = 0, *pBtm = 0; /* Top and bottom range constraints */ pNew = pBuilder->pNew; - if( db->mallocFailed ) return SQLITE_NOMEM_BKPT; + assert( db->mallocFailed==0 || pParse->nErr>0 ); + if( pParse->nErr ){ + return pParse->rc; + } WHERETRACE(0x800, ("BEGIN %s.addBtreeIdx(%s), nEq=%d, nSkip=%d, rRun=%d\n", pProbe->pTable->zName,pProbe->zName, pNew->u.btree.nEq, pNew->nSkip, pNew->rRun));

@@ -158611,7 +163467,7 @@ ** See tag-202002240-1 */

&& pNew->nOut+10 > pProbe->aiRowLogEst[0] ){ #if WHERETRACE_ENABLED /* 0x01 */ - if( sqlite3WhereTrace & 0x01 ){ + if( sqlite3WhereTrace & 0x20 ){ sqlite3DebugPrintf( "STAT4 determines term has low selectivity:\n"); sqlite3WhereTermPrint(pTerm, 999);

@@ -158648,9 +163504,17 @@ ** it to pNew->rRun, which is currently set to the cost of the index

** seek only. Then, if this is a non-covering index, add the cost of ** visiting the rows in the main table. */ assert( pSrc->pTab->szTabRow>0 ); - rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; + if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){ + /* The pProbe->szIdxRow is low for an IPK table since the interior + ** pages are small. Thus szIdxRow gives a good estimate of seek cost. + ** But the leaf pages are full-size, so pProbe->szIdxRow would badly + ** under-estimate the scanning cost. */ + rCostIdx = pNew->nOut + 16; + }else{ + rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; + } pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx); - if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK))==0 ){ + if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK|WHERE_EXPRIDX))==0 ){ pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut + 16); } ApplyCostMultiplier(pNew->rRun, pProbe->pTable->costMult);

@@ -158672,6 +163536,9 @@ && pNew->u.btree.nEq<pProbe->nColumn

&& (pNew->u.btree.nEq<pProbe->nKeyCol || pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY) ){ + if( pNew->u.btree.nEq>3 ){ + sqlite3ProgressCheck(pParse); + } whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nInMul+nIn); } pNew->nOut = saved_nOut;

@@ -158804,15 +163671,38 @@ return 0;

} /* +** pIdx is an index containing expressions. Check it see if any of the +** expressions in the index match the pExpr expression. +*/ +static int exprIsCoveredByIndex( + const Expr *pExpr, + const Index *pIdx, + int iTabCur +){ + int i; + for(i=0; i<pIdx->nColumn; i++){ + if( pIdx->aiColumn[i]==XN_EXPR + && sqlite3ExprCompare(0, pExpr, pIdx->aColExpr->a[i].pExpr, iTabCur)==0 + ){ + return 1; + } + } + return 0; +} + +/* ** Structure passed to the whereIsCoveringIndex Walker callback. */ +typedef struct CoveringIndexCheck CoveringIndexCheck; struct CoveringIndexCheck { Index *pIdx; /* The index */ int iTabCur; /* Cursor number for the corresponding table */ + u8 bExpr; /* Uses an indexed expression */ + u8 bUnidx; /* Uses an unindexed column not within an indexed expr */ }; /* -** Information passed in is pWalk->u.pCovIdxCk. Call is pCk. +** Information passed in is pWalk->u.pCovIdxCk. Call it pCk. ** ** If the Expr node references the table with cursor pCk->iTabCur, then ** make sure that column is covered by the index pCk->pIdx. We know that
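A small illustration of the case this is aimed at, using an invented schema: an index on an expression can now be judged covering when the query only needs that expression plus other indexed columns.

#include "sqlite3.h"

/* Illustrative only: i2 indexes the expression y||z, so the query below
** references nothing outside the index. exprIsCoveredByIndex() is what
** lets the walker recognize the y||z term as covered, allowing the
** planner to score i2 as a (likely) covering index. */
static int demo_expr_covering(sqlite3 *db){
  int rc = sqlite3_exec(db,
      "CREATE TABLE t2(x,y,z);"
      "CREATE INDEX i2 ON t2(x, y||z);", 0, 0, 0);
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db, "SELECT y||z FROM t2 WHERE x='key';", 0, 0, 0);
  }
  return rc;
}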

@@ -158824,72 +163714,198 @@ ** non-zero and return WRC_Abort to stop the search.

** ** If this node does not disprove that the index can be a covering index, ** then just return WRC_Continue, to continue the search. +** +** If pCk->pIdx contains indexed expressions and one of those expressions +** matches pExpr, then prune the search. */ static int whereIsCoveringIndexWalkCallback(Walker *pWalk, Expr *pExpr){ - int i; /* Loop counter */ - const Index *pIdx; /* The index of interest */ - const i16 *aiColumn; /* Columns contained in the index */ - u16 nColumn; /* Number of columns in the index */ - if( pExpr->op!=TK_COLUMN && pExpr->op!=TK_AGG_COLUMN ) return WRC_Continue; - if( pExpr->iColumn<(BMS-1) ) return WRC_Continue; - if( pExpr->iTable!=pWalk->u.pCovIdxCk->iTabCur ) return WRC_Continue; - pIdx = pWalk->u.pCovIdxCk->pIdx; - aiColumn = pIdx->aiColumn; - nColumn = pIdx->nColumn; - for(i=0; i<nColumn; i++){ - if( aiColumn[i]==pExpr->iColumn ) return WRC_Continue; + int i; /* Loop counter */ + const Index *pIdx; /* The index of interest */ + const i16 *aiColumn; /* Columns contained in the index */ + u16 nColumn; /* Number of columns in the index */ + CoveringIndexCheck *pCk; /* Info about this search */ + + pCk = pWalk->u.pCovIdxCk; + pIdx = pCk->pIdx; + if( (pExpr->op==TK_COLUMN || pExpr->op==TK_AGG_COLUMN) ){ + /* if( pExpr->iColumn<(BMS-1) && pIdx->bHasExpr==0 ) return WRC_Continue;*/ + if( pExpr->iTable!=pCk->iTabCur ) return WRC_Continue; + pIdx = pWalk->u.pCovIdxCk->pIdx; + aiColumn = pIdx->aiColumn; + nColumn = pIdx->nColumn; + for(i=0; i<nColumn; i++){ + if( aiColumn[i]==pExpr->iColumn ) return WRC_Continue; + } + pCk->bUnidx = 1; + return WRC_Abort; + }else if( pIdx->bHasExpr + && exprIsCoveredByIndex(pExpr, pIdx, pWalk->u.pCovIdxCk->iTabCur) ){ + pCk->bExpr = 1; + return WRC_Prune; } - pWalk->eCode = 1; - return WRC_Abort; + return WRC_Continue; } /* ** pIdx is an index that covers all of the low-number columns used by -** pWInfo->pSelect (columns from 0 through 62). But there are columns -** in pWInfo->pSelect beyond 62. This routine tries to answer the question -** of whether pIdx covers *all* columns in the query. +** pWInfo->pSelect (columns from 0 through 62) or an index that has +** expressions terms. Hence, we cannot determine whether or not it is +** a covering index by using the colUsed bitmasks. We have to do a search +** to see if the index is covering. This routine does that search. +** +** The return value is one of these: ** -** Return 0 if pIdx is a covering index. Return non-zero if pIdx is -** not a covering index or if we are unable to determine if pIdx is a -** covering index. +** 0 The index is definitely not a covering index +** +** WHERE_IDX_ONLY The index is definitely a covering index +** +** WHERE_EXPRIDX The index is likely a covering index, but it is +** difficult to determine precisely because of the +** expressions that are indexed. Score it as a +** covering index, but still keep the main table open +** just in case we need it. ** -** This routine is an optimization. It is always safe to return non-zero. -** But returning zero when non-zero should have been returned can lead to -** incorrect bytecode and assertion faults. +** This routine is an optimization. It is always safe to return zero. +** But returning one of the other two values when zero should have been +** returned can lead to incorrect bytecode and assertion faults. 
*/ static SQLITE_NOINLINE u32 whereIsCoveringIndex( WhereInfo *pWInfo, /* The WHERE clause context */ Index *pIdx, /* Index that is being tested */ int iTabCur /* Cursor for the table being indexed */ ){ - int i; + int i, rc; struct CoveringIndexCheck ck; Walker w; if( pWInfo->pSelect==0 ){ /* We don't have access to the full query, so we cannot check to see ** if pIdx is covering. Assume it is not. */ - return 1; + return 0; } - for(i=0; i<pIdx->nColumn; i++){ - if( pIdx->aiColumn[i]>=BMS-1 ) break; - } - if( i>=pIdx->nColumn ){ - /* pIdx does not index any columns greater than 62, but we know from - ** colMask that columns greater than 62 are used, so this is not a - ** covering index */ - return 1; + if( pIdx->bHasExpr==0 ){ + for(i=0; i<pIdx->nColumn; i++){ + if( pIdx->aiColumn[i]>=BMS-1 ) break; + } + if( i>=pIdx->nColumn ){ + /* pIdx does not index any columns greater than 62, but we know from + ** colMask that columns greater than 62 are used, so this is not a + ** covering index */ + return 0; + } } ck.pIdx = pIdx; ck.iTabCur = iTabCur; + ck.bExpr = 0; + ck.bUnidx = 0; memset(&w, 0, sizeof(w)); w.xExprCallback = whereIsCoveringIndexWalkCallback; w.xSelectCallback = sqlite3SelectWalkNoop; w.u.pCovIdxCk = &ck; - w.eCode = 0; sqlite3WalkSelect(&w, pWInfo->pSelect); - return w.eCode; + if( ck.bUnidx ){ + rc = 0; + }else if( ck.bExpr ){ + rc = WHERE_EXPRIDX; + }else{ + rc = WHERE_IDX_ONLY; + } + return rc; +} + +/* +** This is an sqlite3ParserAddCleanup() callback that is invoked to +** free the Parse->pIdxEpr list when the Parse object is destroyed. +*/ +static void whereIndexedExprCleanup(sqlite3 *db, void *pObject){ + IndexedExpr **pp = (IndexedExpr**)pObject; + while( *pp!=0 ){ + IndexedExpr *p = *pp; + *pp = p->pIENext; + sqlite3ExprDelete(db, p->pExpr); + sqlite3DbFreeNN(db, p); + } +} + +/* +** This function is called for a partial index - one with a WHERE clause - in +** two scenarios. In both cases, it determines whether or not the WHERE +** clause on the index implies that a column of the table may be safely +** replaced by a constant expression. For example, in the following +** SELECT: +** +** CREATE INDEX i1 ON t1(b, c) WHERE a=<expr>; +** SELECT a, b, c FROM t1 WHERE a=<expr> AND b=?; +** +** The "a" in the select-list may be replaced by <expr>, iff: +** +** (a) <expr> is a constant expression, and +** (b) The (a=<expr>) comparison uses the BINARY collation sequence, and +** (c) Column "a" has an affinity other than NONE or BLOB. +** +** If argument pItem is NULL, then pMask must not be NULL. In this case this +** function is being called as part of determining whether or not pIdx +** is a covering index. This function clears any bits in (*pMask) +** corresponding to columns that may be replaced by constants as described +** above. +** +** Otherwise, if pItem is not NULL, then this function is being called +** as part of coding a loop that uses index pIdx. In this case, add entries +** to the Parse.pIdxPartExpr list for each column that can be replaced +** by a constant. 
+*/ +static void wherePartIdxExpr( + Parse *pParse, /* Parse context */ + Index *pIdx, /* Partial index being processed */ + Expr *pPart, /* WHERE clause being processed */ + Bitmask *pMask, /* Mask to clear bits in */ + int iIdxCur, /* Cursor number for index */ + SrcItem *pItem /* The FROM clause entry for the table */ +){ + assert( pItem==0 || (pItem->fg.jointype & JT_RIGHT)==0 ); + assert( (pItem==0 || pMask==0) && (pMask!=0 || pItem!=0) ); + + if( pPart->op==TK_AND ){ + wherePartIdxExpr(pParse, pIdx, pPart->pRight, pMask, iIdxCur, pItem); + pPart = pPart->pLeft; + } + + if( (pPart->op==TK_EQ || pPart->op==TK_IS) ){ + Expr *pLeft = pPart->pLeft; + Expr *pRight = pPart->pRight; + u8 aff; + + if( pLeft->op!=TK_COLUMN ) return; + if( !sqlite3ExprIsConstant(pRight) ) return; + if( !sqlite3IsBinary(sqlite3ExprCompareCollSeq(pParse, pPart)) ) return; + if( pLeft->iColumn<0 ) return; + aff = pIdx->pTable->aCol[pLeft->iColumn].affinity; + if( aff>=SQLITE_AFF_TEXT ){ + if( pItem ){ + sqlite3 *db = pParse->db; + IndexedExpr *p = (IndexedExpr*)sqlite3DbMallocRaw(db, sizeof(*p)); + if( p ){ + int bNullRow = (pItem->fg.jointype&(JT_LEFT|JT_LTORJ))!=0; + p->pExpr = sqlite3ExprDup(db, pRight, 0); + p->iDataCur = pItem->iCursor; + p->iIdxCur = iIdxCur; + p->iIdxCol = pLeft->iColumn; + p->bMaybeNullRow = bNullRow; + p->pIENext = pParse->pIdxPartExpr; + p->aff = aff; + pParse->pIdxPartExpr = p; + if( p->pIENext==0 ){ + void *pArg = (void*)&pParse->pIdxPartExpr; + sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pArg); + } + } + }else if( pLeft->iColumn<(BMS-1) ){ + *pMask &= ~((Bitmask)1 << pLeft->iColumn); + } + } + } } + /* ** Add all WhereLoop objects for a single table of the join where the table
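
Editor's note: whereIsCoveringIndex() now reports three outcomes instead of a boolean, driven by the two flags the walker collects. A minimal sketch of that mapping under the assumptions stated in the comments above (the TOY_ constants are arbitrary non-zero codes chosen for this sketch, not the real WHERE_IDX_ONLY / WHERE_EXPRIDX flag bits):

#include <stdio.h>

/* Arbitrary codes standing in for the real wsFlags bits. */
#define TOY_WHERE_IDX_ONLY 1
#define TOY_WHERE_EXPRIDX  2

/* Map the two flags gathered by whereIsCoveringIndexWalkCallback() onto the
** tri-state result described in the comment above whereIsCoveringIndex(). */
static unsigned toyCoveringResult(int bUnidx, int bExpr){
  if( bUnidx ) return 0;                 /* definitely not a covering index */
  if( bExpr )  return TOY_WHERE_EXPRIDX; /* likely covering, keep the table open */
  return TOY_WHERE_IDX_ONLY;             /* definitely a covering index */
}

int main(void){
  printf("plain columns only  -> %u\n", toyCoveringResult(0, 0));
  printf("indexed expression  -> %u\n", toyCoveringResult(0, 1));
  printf("unindexed column    -> %u\n", toyCoveringResult(1, 0));
  return 0;
}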

@@ -158929,7 +163945,7 @@ ** of a full table scan.

*/ static int whereLoopAddBtree( WhereLoopBuilder *pBuilder, /* WHERE clause information */ - Bitmask mPrereq /* Extra prerequesites for using this table */ + Bitmask mPrereq /* Extra prerequisites for using this table */ ){ WhereInfo *pWInfo; /* WHERE analysis context */ Index *pProbe; /* An index we are evaluating */

@@ -158973,7 +163989,7 @@ sPk.aiColumn = &aiColumnPk;

sPk.aiRowLogEst = aiRowEstPk; sPk.onError = OE_Replace; sPk.pTable = pTab; - sPk.szIdxRow = pTab->szTabRow; + sPk.szIdxRow = 3; /* TUNING: Interior rows of IPK table are very small */ sPk.idxType = SQLITE_IDXTYPE_IPK; aiRowEstPk[0] = pTab->nRowLogEst; aiRowEstPk[1] = 0;

@@ -159024,7 +164040,8 @@ pNew->rSetup = rLogSize + rSize;

if( !IsView(pTab) && (pTab->tabFlags & TF_Ephemeral)==0 ){ pNew->rSetup += 28; }else{ - pNew->rSetup -= 10; + pNew->rSetup -= 25; /* Greatly reduced setup cost for auto indexes + ** on ephemeral materializations of views */ } ApplyCostMultiplier(pNew->rSetup, pTab->costMult); if( pNew->rSetup<0 ) pNew->rSetup = 0;

@@ -159093,9 +164110,6 @@ pNew->rRun = rSize + 16 - 2*((pTab->tabFlags & TF_HasStat4)!=0);

#else pNew->rRun = rSize + 16; #endif - if( IsView(pTab) || (pTab->tabFlags & TF_Ephemeral)!=0 ){ - pNew->wsFlags |= WHERE_VIEWSCAN; - } ApplyCostMultiplier(pNew->rRun, pTab->costMult); whereLoopOutputAdjust(pWC, pNew, rSize); rc = whereLoopInsert(pBuilder, pNew);

@@ -159104,14 +164118,43 @@ if( rc ) break;

}else{ Bitmask m; if( pProbe->isCovering ){ - pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED; m = 0; + pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED; }else{ m = pSrc->colUsed & pProbe->colNotIdxed; - if( m==TOPBIT ){ - m = whereIsCoveringIndex(pWInfo, pProbe, pSrc->iCursor); + if( pProbe->pPartIdxWhere ){ + wherePartIdxExpr( + pWInfo->pParse, pProbe, pProbe->pPartIdxWhere, &m, 0, 0 + ); } - pNew->wsFlags = (m==0) ? (WHERE_IDX_ONLY|WHERE_INDEXED) : WHERE_INDEXED; + pNew->wsFlags = WHERE_INDEXED; + if( m==TOPBIT || (pProbe->bHasExpr && !pProbe->bHasVCol && m!=0) ){ + u32 isCov = whereIsCoveringIndex(pWInfo, pProbe, pSrc->iCursor); + if( isCov==0 ){ + WHERETRACE(0x200, + ("-> %s is not a covering index" + " according to whereIsCoveringIndex()\n", pProbe->zName)); + assert( m!=0 ); + }else{ + m = 0; + pNew->wsFlags |= isCov; + if( isCov & WHERE_IDX_ONLY ){ + WHERETRACE(0x200, + ("-> %s is a covering expression index" + " according to whereIsCoveringIndex()\n", pProbe->zName)); + }else{ + assert( isCov==WHERE_EXPRIDX ); + WHERETRACE(0x200, + ("-> %s might be a covering expression index" + " according to whereIsCoveringIndex()\n", pProbe->zName)); + } + } + }else if( m==0 ){ + WHERETRACE(0x200, + ("-> %s a covering index according to bitmasks\n", + pProbe->zName, m==0 ? "is" : "is not")); + pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED; + } } /* Full scan via index */
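
Editor's note: the m==TOPBIT test above relies on SQLite's 64-bit column Bitmask convention, in which columns 0 through 62 get individual bits and every column beyond 62 is folded into the single top bit, so the colUsed mask alone cannot prove coverage for wide tables or expression indexes and the whereIsCoveringIndex() walk is required. A small self-contained sketch of that folding (the table layouts below are made up; BMS and TOPBIT follow the convention described in the surrounding comments):

#include <stdio.h>
#include <stdint.h>

#define BMS 64                               /* bits in a Bitmask, as in sqlite3.c */
#define TOPBIT (((uint64_t)1) << (BMS - 1))  /* shared bit for columns >= BMS-1 */

/* Build a colUsed-style mask for a list of referenced column numbers. */
static uint64_t toyColUsed(const int *aiCol, int nCol){
  uint64_t m = 0;
  int i;
  for(i=0; i<nCol; i++){
    int x = aiCol[i];
    m |= (x >= BMS-1) ? TOPBIT : (((uint64_t)1) << x);
  }
  return m;
}

int main(void){
  int aiNarrow[] = { 0, 3, 7 };    /* all below column 62: the mask is exact */
  int aiWide[]   = { 0, 70, 95 };  /* columns beyond 62 collapse into TOPBIT */
  printf("narrow mask = %016llx\n", (unsigned long long)toyColUsed(aiNarrow, 3));
  printf("wide mask   = %016llx\n", (unsigned long long)toyColUsed(aiWide, 3));
  return 0;
}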

@@ -159284,7 +164327,7 @@ /* If the xBestIndex method returns SQLITE_CONSTRAINT, that means

** that the particular combination of parameters provided is unusable. ** Make no entries in the loop table. */ - WHERETRACE(0xffff, (" ^^^^--- non-viable plan rejected!\n")); + WHERETRACE(0xffffffff, (" ^^^^--- non-viable plan rejected!\n")); return SQLITE_OK; } return rc;

@@ -159395,7 +164438,7 @@ if( pNew->u.vtab.needFree ){

sqlite3_free(pNew->u.vtab.idxStr); pNew->u.vtab.needFree = 0; } - WHERETRACE(0xffff, (" bIn=%d prereqIn=%04llx prereqOut=%04llx\n", + WHERETRACE(0xffffffff, (" bIn=%d prereqIn=%04llx prereqOut=%04llx\n", *pbIn, (sqlite3_uint64)mPrereq, (sqlite3_uint64)(pNew->prereq & ~mPrereq)));

@@ -159411,7 +164454,7 @@ ** following the sqlite3_index_info structure.

** ** Return a pointer to the collation name: ** -** 1. If there is an explicit COLLATE operator on the constaint, return it. +** 1. If there is an explicit COLLATE operator on the constraint, return it. ** ** 2. Else, if the column has an alternative collation, return that. **

@@ -159466,7 +164509,7 @@ HiddenIndexInfo *pH = (HiddenIndexInfo*)&pIdxInfo[1];

sqlite3_value *pVal = 0; int rc = SQLITE_OK; if( iCons<0 || iCons>=pIdxInfo->nConstraint ){ - rc = SQLITE_MISUSE; /* EV: R-30545-25046 */ + rc = SQLITE_MISUSE_BKPT; /* EV: R-30545-25046 */ }else{ if( pH->aRhs[iCons]==0 ){ WhereTerm *pTerm = &pH->pWC->a[pIdxInfo->aConstraint[iCons].iTermOffset];

@@ -159496,32 +164539,27 @@ assert( pHidden->eDistinct>=0 && pHidden->eDistinct<=3 );

return pHidden->eDistinct; } -#if (defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)) \ - && !defined(SQLITE_OMIT_VIRTUALTABLE) /* ** Cause the prepared statement that is associated with a call to -** xBestIndex to potentiall use all schemas. If the statement being +** xBestIndex to potentially use all schemas. If the statement being ** prepared is read-only, then just start read transactions on all ** schemas. But if this is a write operation, start writes on all ** schemas. ** ** This is used by the (built-in) sqlite_dbpage virtual table. */ -SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(sqlite3_index_info *pIdxInfo){ - HiddenIndexInfo *pHidden = (HiddenIndexInfo*)&pIdxInfo[1]; - Parse *pParse = pHidden->pParse; +SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(Parse *pParse){ int nDb = pParse->db->nDb; int i; for(i=0; i<nDb; i++){ sqlite3CodeVerifySchema(pParse, i); } - if( pParse->writeMask ){ + if( DbMaskNonZero(pParse->writeMask) ){ for(i=0; i<nDb; i++){ sqlite3BeginWriteOperation(pParse, 0, i); } } } -#endif /* ** Add all WhereLoop objects for a table of the join identified by

@@ -159587,7 +164625,7 @@ }

/* First call xBestIndex() with all constraints usable. */ WHERETRACE(0x800, ("BEGIN %s.addVirtual()\n", pSrc->pTab->zName)); - WHERETRACE(0x40, (" VirtualOne: all usable\n")); + WHERETRACE(0x800, (" VirtualOne: all usable\n")); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn, &bRetry );

@@ -159612,7 +164650,7 @@

/* If the plan produced by the earlier call uses an IN(...) term, call ** xBestIndex again, this time with IN(...) terms disabled. */ if( bIn ){ - WHERETRACE(0x40, (" VirtualOne: all usable w/o IN\n")); + WHERETRACE(0x800, (" VirtualOne: all usable w/o IN\n")); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, ALLBITS, WO_IN, p, mNoOmit, &bIn, 0); assert( bIn==0 );

@@ -159638,7 +164676,7 @@ }

mPrev = mNext; if( mNext==ALLBITS ) break; if( mNext==mBest || mNext==mBestNoIn ) continue; - WHERETRACE(0x40, (" VirtualOne: mPrev=%04llx mNext=%04llx\n", + WHERETRACE(0x800, (" VirtualOne: mPrev=%04llx mNext=%04llx\n", (sqlite3_uint64)mPrev, (sqlite3_uint64)mNext)); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, mNext|mPrereq, 0, p, mNoOmit, &bIn, 0);

@@ -159652,7 +164690,7 @@ /* If the calls to xBestIndex() in the above loop did not find a plan

** that requires no source tables at all (i.e. one guaranteed to be ** usable), make a call here with all source tables disabled */ if( rc==SQLITE_OK && seenZero==0 ){ - WHERETRACE(0x40, (" VirtualOne: all disabled\n")); + WHERETRACE(0x800, (" VirtualOne: all disabled\n")); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, mPrereq, 0, p, mNoOmit, &bIn, 0); if( bIn==0 ) seenZeroNoIN = 1;

@@ -159662,7 +164700,7 @@ /* If the calls to xBestIndex() have so far failed to find a plan

** that requires no source tables at all and does not use an IN(...) ** operator, make a final call to obtain one here. */ if( rc==SQLITE_OK && seenZeroNoIN==0 ){ - WHERETRACE(0x40, (" VirtualOne: all disabled and w/o IN\n")); + WHERETRACE(0x800, (" VirtualOne: all disabled and w/o IN\n")); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, mPrereq, WO_IN, p, mNoOmit, &bIn, 0); }

@@ -159718,7 +164756,7 @@

sSubBuild = *pBuilder; sSubBuild.pOrSet = &sCur; - WHERETRACE(0x200, ("Begin processing OR-clause %p\n", pTerm)); + WHERETRACE(0x400, ("Begin processing OR-clause %p\n", pTerm)); for(pOrTerm=pOrWC->a; pOrTerm<pOrWCEnd; pOrTerm++){ if( (pOrTerm->eOperator & WO_AND)!=0 ){ sSubBuild.pWC = &pOrTerm->u.pAndInfo->wc;

@@ -159735,9 +164773,9 @@ continue;

} sCur.n = 0; #ifdef WHERETRACE_ENABLED - WHERETRACE(0x200, ("OR-term %d of %p has %d subterms:\n", + WHERETRACE(0x400, ("OR-term %d of %p has %d subterms:\n", (int)(pOrTerm-pOrWC->a), pTerm, sSubBuild.pWC->nTerm)); - if( sqlite3WhereTrace & 0x400 ){ + if( sqlite3WhereTrace & 0x20000 ){ sqlite3WhereClausePrint(sSubBuild.pWC); } #endif

@@ -159752,8 +164790,6 @@ }

if( rc==SQLITE_OK ){ rc = whereLoopAddOr(&sSubBuild, mPrereq, mUnusable); } - assert( rc==SQLITE_OK || rc==SQLITE_DONE || sCur.n==0 - || rc==SQLITE_NOMEM ); testcase( rc==SQLITE_NOMEM && sCur.n>0 ); testcase( rc==SQLITE_DONE ); if( sCur.n==0 ){

@@ -159799,7 +164835,7 @@ pNew->nOut = sSum.a[i].nOut;

pNew->prereq = sSum.a[i].prereq; rc = whereLoopInsert(pBuilder, pNew); } - WHERETRACE(0x200, ("End processing OR-clause %p\n", pTerm)); + WHERETRACE(0x400, ("End processing OR-clause %p\n", pTerm)); } } return rc;

@@ -160147,8 +165183,8 @@ if( pOBExpr->op!=TK_COLUMN && pOBExpr->op!=TK_AGG_COLUMN ) continue;

if( pOBExpr->iTable!=iCur ) continue; if( pOBExpr->iColumn!=iColumn ) continue; }else{ - Expr *pIdxExpr = pIndex->aColExpr->a[j].pExpr; - if( sqlite3ExprCompareSkip(pOBExpr, pIdxExpr, iCur) ){ + Expr *pIxExpr = pIndex->aColExpr->a[j].pExpr; + if( sqlite3ExprCompareSkip(pOBExpr, pIxExpr, iCur) ){ continue; } }

@@ -160280,37 +165316,56 @@ ** nOrderby columns and that the first nSorted columns are already in

|| sqlite3GetInt32(pToken->z, &iValue)==0 ){ */ static LogEst whereSortingCost( - WhereInfo *pWInfo, - LogEst nRow, - int nOrderBy, - int nSorted + WhereInfo *pWInfo, /* Query planning context */ + LogEst nRow, /* Estimated number of rows to sort */ + int nOrderBy, /* Number of ORDER BY clause terms */ + int nSorted /* Number of initial ORDER BY terms naturally in order */ ){ - /* TUNING: Estimated cost of a full external sort, where N is + /* Estimated cost of a full external sort, where N is ** the number of rows to sort is: ** - ** cost = (3.0 * N * log(N)). + ** cost = (K * N * log(N)). ** ** Or, if the order-by clause has X terms but only the last Y ** terms are out of order, then block-sorting will reduce the ** sorting cost to: ** - ** cost = (3.0 * N * log(N)) * (Y/X) + ** cost = (K * N * log(N)) * (Y/X) + ** + ** The constant K is at least 2.0 but will be larger if there are a + ** large number of columns to be sorted, as the sorting time is + ** proportional to the amount of content to be sorted. The algorithm + ** does not currently distinguish between fat columns (BLOBs and TEXTs) + ** and skinny columns (INTs). It just uses the number of columns as + ** an approximation for the row width. ** - ** The (Y/X) term is implemented using stack variable rScale - ** below. + ** And extra factor of 2.0 or 3.0 is added to the sorting cost if the sort + ** is built using OP_IdxInsert and OP_Sort rather than with OP_SorterInsert. */ - LogEst rScale, rSortCost; - assert( nOrderBy>0 && 66==sqlite3LogEst(100) ); - rScale = sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66; - rSortCost = nRow + rScale + 16; + LogEst rSortCost, nCol; + assert( pWInfo->pSelect!=0 ); + assert( pWInfo->pSelect->pEList!=0 ); + /* TUNING: sorting cost proportional to the number of output columns: */ + nCol = sqlite3LogEst((pWInfo->pSelect->pEList->nExpr+59)/30); + rSortCost = nRow + nCol; + if( nSorted>0 ){ + /* Scale the result by (Y/X) */ + rSortCost += sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66; + } /* Multiple by log(M) where M is the number of output rows. ** Use the LIMIT for M if it is smaller. Or if this sort is for ** a DISTINCT operator, M will be the number of distinct output ** rows, so fudge it downwards a bit. */ - if( (pWInfo->wctrlFlags & WHERE_USE_LIMIT)!=0 && pWInfo->iLimit<nRow ){ - nRow = pWInfo->iLimit; + if( (pWInfo->wctrlFlags & WHERE_USE_LIMIT)!=0 ){ + rSortCost += 10; /* TUNING: Extra 2.0x if using LIMIT */ + if( nSorted!=0 ){ + rSortCost += 6; /* TUNING: Extra 1.5x if also using partial sort */ + } + if( pWInfo->iLimit<nRow ){ + nRow = pWInfo->iLimit; + } }else if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT) ){ /* TUNING: In the sort for a DISTINCT operator, assume that the DISTINCT ** reduces the number of output rows by a factor of 2 */
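
Editor's note: the rewritten whereSortingCost() works entirely in SQLite's LogEst units, roughly 10 times the base-2 logarithm, so additions act as multiplications and the +10 and +6 adjustments correspond to factors of about 2.0 and 1.5. A rough standalone rendering of the portion of the formula shown in this hunk, using a floating-point approximation instead of the integer sqlite3LogEst() routine (the constants are copied from the diff; everything else is a simplification):

#include <stdio.h>
#include <math.h>

/* Rough stand-in for sqlite3LogEst(): a LogEst is ~ 10*log2(x). */
static int toyLogEst(double x){
  return (int)lround(10.0 * log2(x));
}

/* Approximate the new arithmetic:
**   cost = nRow + logEst((nOutputCols+59)/30), scaled by (Y/X) when the first
**   nSorted ORDER BY terms are already in order, plus 10 (~2.0x) when a LIMIT
**   is present and 6 more (~1.5x) when the LIMIT is combined with a partial sort. */
static int toySortingCost(int nRowLogEst, int nOutputCols,
                          int nOrderBy, int nSorted, int hasLimit){
  int cost = nRowLogEst + toyLogEst((nOutputCols + 59) / 30.0);
  if( nSorted > 0 ){
    cost += toyLogEst((nOrderBy - nSorted) * 100.0 / nOrderBy) - 66;  /* Y/X */
  }
  if( hasLimit ){
    cost += 10;
    if( nSorted != 0 ) cost += 6;
  }
  return cost;
}

int main(void){
  /* ~1,000,000 rows (LogEst about 200), 5 output columns, 2 ORDER BY terms. */
  printf("full external sort : %d\n", toySortingCost(200, 5, 2, 0, 0));
  printf("partial sort, LIMIT: %d\n", toySortingCost(200, 5, 2, 1, 1));
  return 0;
}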

@@ -160360,7 +165415,8 @@ ** For 2-way joins, the 5 best paths are followed.

** For joins of 3 or more tables, track the 10 best paths */ mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10); assert( nLoop<=pWInfo->pTabList->nSrc ); - WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d)\n", nRowEst)); + WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d, nQueryLoop=%d)\n", + nRowEst, pParse->nQueryLoop)); /* If nRowEst is zero and there is an ORDER BY clause, ignore it. In this ** case the purpose of this call is to estimate the number of rows returned

@@ -160462,11 +165518,11 @@ aSortCost[isOrdered] = whereSortingCost(

pWInfo, nRowEst, nOrderBy, isOrdered ); } - /* TUNING: Add a small extra penalty (5) to sorting as an - ** extra encouragment to the query planner to select a plan + /* TUNING: Add a small extra penalty (3) to sorting as an + ** extra encouragement to the query planner to select a plan ** where the rows emerge in the correct order without any sorting ** required. */ - rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 5; + rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 3; WHERETRACE(0x002, ("---- sort cost=%-3d (%d/%d) increases cost %3d to %-3d\n",

@@ -160477,13 +165533,6 @@ rCost = rUnsorted;

rUnsorted -= 2; /* TUNING: Slight bias in favor of no-sort plans */ } - /* TUNING: A full-scan of a VIEW or subquery in the outer loop - ** is not so bad. */ - if( iLoop==0 && (pWLoop->wsFlags & WHERE_VIEWSCAN)!=0 ){ - rCost += -10; - nOut += -30; - } - /* Check to see if pWLoop should be added to the set of ** mxChoice best-so-far paths. **

@@ -160670,6 +165719,10 @@ if( pWInfo->wctrlFlags & WHERE_DISTINCTBY ){

if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){ pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; } + if( pWInfo->pSelect->pOrderBy + && pWInfo->nOBSat > pWInfo->pSelect->pOrderBy->nExpr ){ + pWInfo->nOBSat = pWInfo->pSelect->pOrderBy->nExpr; + } }else{ pWInfo->revMask = pFrom->revLoop; if( pWInfo->nOBSat<=0 ){

@@ -160814,7 +165867,7 @@ #ifdef SQLITE_DEBUG

pLoop->cId = '0'; #endif #ifdef WHERETRACE_ENABLED - if( sqlite3WhereTrace ){ + if( sqlite3WhereTrace & 0x02 ){ sqlite3DebugPrintf("whereShortCut() used to compute solution\n"); } #endif

@@ -160881,6 +165934,13 @@ ** must contain a constraint that limits the scan of the table to

** at most a single row. ** 4) The table must not be referenced by any part of the query apart ** from its own USING or ON clause. +** 5) The table must not have an inner-join ON or USING clause if there is +** a RIGHT JOIN anywhere in the query. Otherwise the ON/USING clause +** might move from the right side to the left side of the RIGHT JOIN. +** Note: Due to (2), this condition can only arise if the table is +** the right-most table of a subquery that was flattened into the +** main query and that subquery was the right-hand operand of an +** inner join that held an ON or USING clause. ** ** For example, given: **

@@ -160906,6 +165966,7 @@ Bitmask notReady

){ int i; Bitmask tabUsed; + int hasRightJoin; /* Preconditions checked by the caller */ assert( pWInfo->nLevel>=2 );

@@ -160920,6 +165981,7 @@ tabUsed = sqlite3WhereExprListUsage(&pWInfo->sMaskSet, pWInfo->pResultSet);

if( pWInfo->pOrderBy ){ tabUsed |= sqlite3WhereExprListUsage(&pWInfo->sMaskSet, pWInfo->pOrderBy); } + hasRightJoin = (pWInfo->pTabList->a[0].fg.jointype & JT_LTORJ)!=0; for(i=pWInfo->nLevel-1; i>=1; i--){ WhereTerm *pTerm, *pEnd; SrcItem *pItem;

@@ -160942,9 +166004,15 @@ ){

break; } } + if( hasRightJoin + && ExprHasProperty(pTerm->pExpr, EP_InnerON) + && pTerm->pExpr->w.iJoin==pItem->iCursor + ){ + break; /* restriction (5) */ + } } if( pTerm<pEnd ) continue; - WHERETRACE(0xffff, ("-> drop loop %c not used\n", pLoop->cId)); + WHERETRACE(0xffffffff, ("-> drop loop %c not used\n", pLoop->cId)); notReady &= ~pLoop->maskSelf; for(pTerm=pWInfo->sWC.a; pTerm<pEnd; pTerm++){ if( (pTerm->prereqAll & pLoop->maskSelf)!=0 ){

@@ -160983,28 +166051,27 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful(

const WhereInfo *pWInfo ){ int i; - LogEst nSearch; + LogEst nSearch = 0; assert( pWInfo->nLevel>=2 ); assert( OptimizationEnabled(pWInfo->pParse->db, SQLITE_BloomFilter) ); - nSearch = pWInfo->a[0].pWLoop->nOut; - for(i=1; i<pWInfo->nLevel; i++){ + for(i=0; i<pWInfo->nLevel; i++){ WhereLoop *pLoop = pWInfo->a[i].pWLoop; const unsigned int reqFlags = (WHERE_SELFCULL|WHERE_COLUMN_EQ); - if( (pLoop->wsFlags & reqFlags)==reqFlags + SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab]; + Table *pTab = pItem->pTab; + if( (pTab->tabFlags & TF_HasStat1)==0 ) break; + pTab->tabFlags |= TF_StatsUsed; + if( i>=1 + && (pLoop->wsFlags & reqFlags)==reqFlags /* vvvvvv--- Always the case if WHERE_COLUMN_EQ is defined */ && ALWAYS((pLoop->wsFlags & (WHERE_IPK|WHERE_INDEXED))!=0) ){ - SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab]; - Table *pTab = pItem->pTab; - pTab->tabFlags |= TF_StatsUsed; - if( nSearch > pTab->nRowLogEst - && (pTab->tabFlags & TF_HasStat1)!=0 - ){ + if( nSearch > pTab->nRowLogEst ){ testcase( pItem->fg.jointype & JT_LEFT ); pLoop->wsFlags |= WHERE_BLOOMFILTER; pLoop->wsFlags &= ~WHERE_IDX_ONLY; - WHERETRACE(0xffff, ( + WHERETRACE(0xffffffff, ( "-> use Bloom-filter on loop %c because there are ~%.1e " "lookups into %s which has only ~%.1e rows\n", pLoop->cId, (double)sqlite3LogEstToInt(nSearch), pTab->zName,

@@ -161016,32 +166083,18 @@ }

} /* -** This is an sqlite3ParserAddCleanup() callback that is invoked to -** free the Parse->pIdxExpr list when the Parse object is destroyed. -*/ -static void whereIndexedExprCleanup(sqlite3 *db, void *pObject){ - Parse *pParse = (Parse*)pObject; - while( pParse->pIdxExpr!=0 ){ - IndexedExpr *p = pParse->pIdxExpr; - pParse->pIdxExpr = p->pIENext; - sqlite3ExprDelete(db, p->pExpr); - sqlite3DbFreeNN(db, p); - } -} - -/* ** The index pIdx is used by a query and contains one or more expressions. ** In other words pIdx is an index on an expression. iIdxCur is the cursor ** number for the index and iDataCur is the cursor number for the corresponding ** table. ** -** This routine adds IndexedExpr entries to the Parse->pIdxExpr field for +** This routine adds IndexedExpr entries to the Parse->pIdxEpr field for ** each of the expressions in the index so that the expression code generator ** will know to replace occurrences of the indexed expression with ** references to the corresponding column of the index. */ static SQLITE_NOINLINE void whereAddIndexedExpr( - Parse *pParse, /* Add IndexedExpr entries to pParse->pIdxExpr */ + Parse *pParse, /* Add IndexedExpr entries to pParse->pIdxEpr */ Index *pIdx, /* The index-on-expression that contains the expressions */ int iIdxCur, /* Cursor number for pIdx */ SrcItem *pTabItem /* The FROM clause entry for the table */

@@ -161068,20 +166121,66 @@ }else{

continue; } if( sqlite3ExprIsConstant(pExpr) ) continue; + if( pExpr->op==TK_FUNCTION ){ + /* Functions that might set a subtype should not be replaced by the + ** value taken from an expression index since the index omits the + ** subtype. https://sqlite.org/forum/forumpost/68d284c86b082c3e */ + int n; + FuncDef *pDef; + sqlite3 *db = pParse->db; + assert( ExprUseXList(pExpr) ); + n = pExpr->x.pList ? pExpr->x.pList->nExpr : 0; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ + continue; + } + } p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr)); if( p==0 ) break; - p->pIENext = pParse->pIdxExpr; + p->pIENext = pParse->pIdxEpr; +#ifdef WHERETRACE_ENABLED + if( sqlite3WhereTrace & 0x200 ){ + sqlite3DebugPrintf("New pParse->pIdxEpr term {%d,%d}\n", iIdxCur, i); + if( sqlite3WhereTrace & 0x5000 ) sqlite3ShowExpr(pExpr); + } +#endif p->pExpr = sqlite3ExprDup(pParse->db, pExpr, 0); p->iDataCur = pTabItem->iCursor; p->iIdxCur = iIdxCur; p->iIdxCol = i; p->bMaybeNullRow = bMaybeNullRow; + if( sqlite3IndexAffinityStr(pParse->db, pIdx) ){ + p->aff = pIdx->zColAff[i]; + } #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS p->zIdxName = pIdx->zName; #endif - pParse->pIdxExpr = p; + pParse->pIdxEpr = p; if( p->pIENext==0 ){ - sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pParse); + void *pArg = (void*)&pParse->pIdxEpr; + sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pArg); + } + } +} + +/* +** Set the reverse-scan order mask to one for all tables in the query +** with the exception of MATERIALIZED common table expressions that have +** their own internal ORDER BY clauses. +** +** This implements the PRAGMA reverse_unordered_selects=ON setting. +** (Also SQLITE_DBCONFIG_REVERSE_SCANORDER). +*/ +static SQLITE_NOINLINE void whereReverseScanOrder(WhereInfo *pWInfo){ + int ii; + for(ii=0; ii<pWInfo->pTabList->nSrc; ii++){ + SrcItem *pItem = &pWInfo->pTabList->a[ii]; + if( !pItem->fg.isCte + || pItem->u2.pCteUse->eM10d!=M10d_Yes + || NEVER(pItem->pSelect==0) + || pItem->pSelect->pOrderBy==0 + ){ + pWInfo->revMask |= MASKBIT(ii); } } }
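
Editor's note: the new whereReverseScanOrder() replaces the old blanket revMask = ALLBITS for PRAGMA reverse_unordered_selects; every table gets its reverse bit except a MATERIALIZED common table expression that carries its own ORDER BY. A simplified standalone sketch of that mask construction (the ToySrc struct is invented for illustration; MASKBIT mirrors the macro used in the diff):

#include <stdio.h>
#include <stdint.h>

#define MASKBIT(n) (((uint64_t)1) << (n))  /* same convention as sqlite3.c */

/* Invented, minimal stand-in for the fields whereReverseScanOrder() inspects. */
typedef struct ToySrc {
  int isMaterializedCte;   /* fg.isCte && u2.pCteUse->eM10d==M10d_Yes */
  int hasInternalOrderBy;  /* pSelect->pOrderBy != 0 */
} ToySrc;

static uint64_t toyReverseScanMask(const ToySrc *a, int nSrc){
  uint64_t revMask = 0;
  int ii;
  for(ii=0; ii<nSrc; ii++){
    /* Skip only materialized CTEs that impose their own ORDER BY. */
    if( !(a[ii].isMaterializedCte && a[ii].hasInternalOrderBy) ){
      revMask |= MASKBIT(ii);
    }
  }
  return revMask;
}

int main(void){
  ToySrc src[3] = { {0,0}, {1,1}, {1,0} };  /* plain table, ordered CTE, unordered CTE */
  printf("revMask = %llx\n", (unsigned long long)toyReverseScanMask(src, 3));
  /* Expect bits 0 and 2 set (0x5): the ordered materialized CTE keeps its own order. */
  return 0;
}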

@@ -161144,7 +166243,7 @@ ** most loop)

** ** OUTER JOINS ** -** An outer join of tables t1 and t2 is conceptally coded as follows: +** An outer join of tables t1 and t2 is conceptually coded as follows: ** ** foreach row1 in t1 do ** flag = 0

@@ -161299,7 +166398,7 @@ /* Assign a bit from the bitmask to every term in the FROM clause.

** ** The N-th term of the FROM clause is assigned a bitmask of 1<<N. ** - ** The rule of the previous sentence ensures thta if X is the bitmask for + ** The rule of the previous sentence ensures that if X is the bitmask for ** a table T, then X-1 is the bitmask for all other tables to the left of T. ** Knowing the bitmask for all tables to the left of a left join is ** important. Ticket #3015.

@@ -161333,22 +166432,45 @@ sqlite3WhereAddLimit(&pWInfo->sWC, pSelect);

} if( pParse->nErr ) goto whereBeginError; - /* Special case: WHERE terms that do not refer to any tables in the join - ** (constant expressions). Evaluate each such term, and jump over all the - ** generated code if the result is not true. + /* The False-WHERE-Term-Bypass optimization: + ** + ** If there are WHERE terms that are false, then no rows will be output, + ** so skip over all of the code generated here. + ** + ** Conditions: + ** + ** (1) The WHERE term must not refer to any tables in the join. + ** (2) The term must not come from an ON clause on the + ** right-hand side of a LEFT or FULL JOIN. + ** (3) The term must not come from an ON clause, or there must be + ** no RIGHT or FULL OUTER joins in pTabList. + ** (4) If the expression contains non-deterministic functions + ** that are not within a sub-select. This is not required + ** for correctness but rather to preserves SQLite's legacy + ** behaviour in the following two cases: ** - ** Do not do this if the expression contains non-deterministic functions - ** that are not within a sub-select. This is not strictly required, but - ** preserves SQLite's legacy behaviour in the following two cases: + ** WHERE random()>0; -- eval random() once per row + ** WHERE (SELECT random())>0; -- eval random() just once overall ** - ** FROM ... WHERE random()>0; -- eval random() once per row - ** FROM ... WHERE (SELECT random())>0; -- eval random() once overall + ** Note that the Where term need not be a constant in order for this + ** optimization to apply, though it does need to be constant relative to + ** the current subquery (condition 1). The term might include variables + ** from outer queries so that the value of the term changes from one + ** invocation of the current subquery to the next. */ for(ii=0; ii<sWLB.pWC->nBase; ii++){ - WhereTerm *pT = &sWLB.pWC->a[ii]; + WhereTerm *pT = &sWLB.pWC->a[ii]; /* A term of the WHERE clause */ + Expr *pX; /* The expression of pT */ if( pT->wtFlags & TERM_VIRTUAL ) continue; - if( pT->prereqAll==0 && (nTabList==0 || exprIsDeterministic(pT->pExpr)) ){ - sqlite3ExprIfFalse(pParse, pT->pExpr, pWInfo->iBreak, SQLITE_JUMPIFNULL); + pX = pT->pExpr; + assert( pX!=0 ); + assert( pT->prereqAll!=0 || !ExprHasProperty(pX, EP_OuterON) ); + if( pT->prereqAll==0 /* Conditions (1) and (2) */ + && (nTabList==0 || exprIsDeterministic(pX)) /* Condition (4) */ + && !(ExprHasProperty(pX, EP_InnerON) /* Condition (3) */ + && (pTabList->a[0].fg.jointype & JT_LTORJ)!=0 ) + ){ + sqlite3ExprIfFalse(pParse, pX, pWInfo->iBreak, SQLITE_JUMPIFNULL); pT->wtFlags |= TERM_CODED; } }
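
Editor's note: condition (4) above preserves the long-standing difference between a bare non-deterministic function and one wrapped in a scalar subquery. The small program below, written against SQLite's public C API, simply runs the two WHERE clauses quoted in the comment so the difference can be observed from the outside; it illustrates the documented behaviour, not the planner internals themselves.

#include <stdio.h>
#include <sqlite3.h>

/* Count the rows returned by a query. */
static int countRows(void *pCount, int nCol, char **azVal, char **azName){
  (void)nCol; (void)azVal; (void)azName;
  (*(int*)pCount)++;
  return 0;
}

int main(void){
  sqlite3 *db;
  int n1 = 0, n2 = 0;
  if( sqlite3_open(":memory:", &db) != SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES (1),(2),(3),(4);", 0, 0, 0);
  /* Per the comment in the diff: random() is re-evaluated for each row... */
  sqlite3_exec(db, "SELECT x FROM t WHERE random()>0;", countRows, &n1, 0);
  /* ...while the scalar-subquery form is evaluated just once for the whole scan. */
  sqlite3_exec(db, "SELECT x FROM t WHERE (SELECT random())>0;", countRows, &n2, 0);
  printf("per-row random(): %d rows, subquery random(): %d rows\n", n1, n2);
  sqlite3_close(db);
  return 0;
}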

@@ -161371,13 +166493,13 @@ }

/* Construct the WhereLoop objects */ #if defined(WHERETRACE_ENABLED) - if( sqlite3WhereTrace & 0xffff ){ + if( sqlite3WhereTrace & 0xffffffff ){ sqlite3DebugPrintf("*** Optimizer Start *** (wctrlFlags: 0x%x",wctrlFlags); if( wctrlFlags & WHERE_USE_LIMIT ){ sqlite3DebugPrintf(", limit: %d", iAuxArg); } sqlite3DebugPrintf(")\n"); - if( sqlite3WhereTrace & 0x100 ){ + if( sqlite3WhereTrace & 0x8000 ){ Select sSelect; memset(&sSelect, 0, sizeof(sSelect)); sSelect.selFlags = SF_WhereBegin;

@@ -161387,10 +166509,10 @@ sSelect.pOrderBy = pOrderBy;

sSelect.pEList = pResultSet; sqlite3TreeViewSelect(0, &sSelect, 0); } - } - if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */ - sqlite3DebugPrintf("---- WHERE clause at start of analysis:\n"); - sqlite3WhereClausePrint(sWLB.pWC); + if( sqlite3WhereTrace & 0x4000 ){ /* Display all WHERE clause terms */ + sqlite3DebugPrintf("---- WHERE clause at start of analysis:\n"); + sqlite3WhereClausePrint(sWLB.pWC); + } } #endif

@@ -161406,7 +166528,7 @@ ** then we need to rerun the whole loop building process so that all

** loops will be built using the revised truthProb values. */ if( sWLB.bldFlags2 & SQLITE_BLDF2_2NDPASS ){ WHERETRACE_ALL_LOOPS(pWInfo, sWLB.pWC); - WHERETRACE(0xffff, + WHERETRACE(0xffffffff, ("**** Redo all loop computations due to" " TERM_HIGHTRUTH changes ****\n")); while( pWInfo->pLoops ){

@@ -161426,9 +166548,20 @@ if( pWInfo->pOrderBy ){

wherePathSolver(pWInfo, pWInfo->nRowOut+1); if( db->mallocFailed ) goto whereBeginError; } + + /* TUNING: Assume that a DISTINCT clause on a subquery reduces + ** the output size by a factor of 8 (LogEst -30). + */ + if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)!=0 ){ + WHERETRACE(0x0080,("nRowOut reduced from %d to %d due to DISTINCT\n", + pWInfo->nRowOut, pWInfo->nRowOut-30)); + pWInfo->nRowOut -= 30; + } + } + assert( pWInfo->pTabList!=0 ); if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){ - pWInfo->revMask = ALLBITS; + whereReverseScanOrder(pWInfo); } if( pParse->nErr ){ goto whereBeginError;
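
Editor's note: the LogEst adjustment of -30 in the new DISTINCT tuning corresponds to dividing the row estimate by roughly 8, since a LogEst of x represents about 2^(x/10) rows. A two-line check of that arithmetic (this is just the unit conversion, not sqlite3LogEst() itself):

#include <stdio.h>
#include <math.h>

int main(void){
  /* A LogEst delta of -30 scales the estimate by 1/2^(30/10) = 1/8. */
  printf("factor for LogEst -30: %.1f\n", pow(2.0, 30.0/10.0));  /* prints 8.0 */
  return 0;
}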

@@ -161492,11 +166625,11 @@ whereCheckIfBloomFilterIsUseful(pWInfo);

} #if defined(WHERETRACE_ENABLED) - if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */ + if( sqlite3WhereTrace & 0x4000 ){ /* Display all terms of the WHERE clause */ sqlite3DebugPrintf("---- WHERE clause at end of analysis:\n"); sqlite3WhereClausePrint(sWLB.pWC); } - WHERETRACE(0xffff,("*** Optimizer Finished ***\n")); + WHERETRACE(0xffffffff,("*** Optimizer Finished ***\n")); #endif pWInfo->pParse->nQueryLoop += pWInfo->nRowOut;

@@ -161528,6 +166661,7 @@ if( bOnerow || (

0!=(wctrlFlags & WHERE_ONEPASS_MULTIROW) && !IsVirtual(pTabList->a[0].pTab) && (0==(wsFlags & WHERE_MULTI_OR) || (wctrlFlags & WHERE_DUPLICATES_OK)) + && OptimizationEnabled(db, SQLITE_OnePass) )){ pWInfo->eOnePass = bOnerow ? ONEPASS_SINGLE : ONEPASS_MULTI; if( HasRowid(pTabList->a[0].pTab) && (wsFlags & WHERE_IDX_ONLY) ){

@@ -161591,7 +166725,7 @@ sqlite3VdbeChangeP4(v, -1, SQLITE_INT_TO_PTR(n), P4_INT32);

assert( n<=pTab->nCol ); } #ifdef SQLITE_ENABLE_CURSOR_HINTS - if( pLoop->u.btree.pIndex!=0 ){ + if( pLoop->u.btree.pIndex!=0 && (pTab->tabFlags & TF_WithoutRowid)==0 ){ sqlite3VdbeChangeP5(v, OPFLAG_SEEKEQ|bFordelete); }else #endif

@@ -161636,6 +166770,11 @@ iIndexCur = pParse->nTab++;

if( pIx->bHasExpr && OptimizationEnabled(db, SQLITE_IndexedExpr) ){ whereAddIndexedExpr(pParse, pIx, iIndexCur, pTabItem); } + if( pIx->pPartIdxWhere && (pTabItem->fg.jointype & JT_RIGHT)==0 ){ + wherePartIdxExpr( + pParse, pIx, pIx->pPartIdxWhere, 0, iIndexCur, pTabItem + ); + } } pLevel->iIdxCur = iIndexCur; assert( pIx!=0 );

@@ -161728,11 +166867,11 @@ sqlite3VdbeAddOp2(v, OP_Gosub, pSrc->regReturn, pSrc->addrFillSub);

sqlite3VdbeJumpHere(v, iOnce); } } + assert( pTabList == pWInfo->pTabList ); if( (wsFlags & (WHERE_AUTO_INDEX|WHERE_BLOOMFILTER))!=0 ){ if( (wsFlags & WHERE_AUTO_INDEX)!=0 ){ #ifndef SQLITE_OMIT_AUTOMATIC_INDEX - constructAutomaticIndex(pParse, &pWInfo->sWC, - &pTabList->a[pLevel->iFrom], notReady, pLevel); + constructAutomaticIndex(pParse, &pWInfo->sWC, notReady, pLevel); #endif }else{ sqlite3ConstructBloomFilter(pWInfo, ii, pLevel, notReady);

@@ -162030,9 +167169,16 @@ }else{

last = pWInfo->iEndWhere; } if( pIdx->bHasExpr ){ - IndexedExpr *p = pParse->pIdxExpr; + IndexedExpr *p = pParse->pIdxEpr; while( p ){ if( p->iIdxCur==pLevel->iIdxCur ){ +#ifdef WHERETRACE_ENABLED + if( sqlite3WhereTrace & 0x200 ){ + sqlite3DebugPrintf("Disable pParse->pIdxEpr term {%d,%d}\n", + p->iIdxCur, p->iIdxCol); + if( sqlite3WhereTrace & 0x5000 ) sqlite3ShowExpr(p->pExpr); + } +#endif p->iDataCur = -1; p->iIdxCur = -1; }

@@ -162042,7 +167188,8 @@ }

k = pLevel->addrBody + 1; #ifdef SQLITE_DEBUG if( db->flags & SQLITE_VdbeAddopTrace ){ - printf("TRANSLATE opcodes in range %d..%d\n", k, last-1); + printf("TRANSLATE cursor %d->%d in opcode range %d..%d\n", + pLevel->iTabCur, pLevel->iIdxCur, k, last-1); } /* Proof that the "+1" on the k value above is safe */ pOp = sqlite3VdbeGetOp(v, k - 1);

@@ -162249,7 +167396,7 @@ ** nth_value(expr, N)

** ** These are the same built-in window functions supported by Postgres. ** Although the behaviour of aggregate window functions (functions that -** can be used as either aggregates or window funtions) allows them to +** can be used as either aggregates or window functions) allows them to ** be implemented using an API, built-in window functions are much more ** esoteric. Additionally, some window functions (e.g. nth_value()) ** may only be implemented by caching the entire partition in memory.

@@ -162779,7 +167926,7 @@ ** Argument pFunc is the function definition just resolved and pWin

** is the Window object representing the associated OVER clause. This ** function updates the contents of pWin as follows: ** -** * If the OVER clause refered to a named window (as in "max(x) OVER win"), +** * If the OVER clause referred to a named window (as in "max(x) OVER win"), ** search list pList for a matching WINDOW definition, and update pWin ** accordingly. If no such WINDOW clause can be found, leave an error ** in pParse.

@@ -162917,6 +168064,7 @@ }

} /* no break */ deliberate_fall_through + case TK_IF_NULL_ROW: case TK_AGG_FUNCTION: case TK_COLUMN: { int iCol = -1;

@@ -163169,7 +168317,7 @@ ExprList *pArgs;

assert( ExprUseXList(pWin->pOwner) ); assert( pWin->pWFunc!=0 ); pArgs = pWin->pOwner->x.pList; - if( pWin->pWFunc->funcFlags & SQLITE_FUNC_SUBTYPE ){ + if( pWin->pWFunc->funcFlags & SQLITE_SUBTYPE ){ selectWindowRewriteEList(pParse, pMWin, pSrc, pArgs, pTab, &pSublist); pWin->iArgCol = (pSublist ? pSublist->nExpr : 0); pWin->bExprArgs = 1;

@@ -163201,7 +168349,7 @@

pSub = sqlite3SelectNew( pParse, pSublist, pSrc, pWhere, pGroupBy, pHaving, pSort, 0, 0 ); - SELECTTRACE(1,pParse,pSub, + TREETRACE(0x40,pParse,pSub, ("New window-function subquery in FROM clause of (%u/%p)\n", p->selId, p)); p->pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0);

@@ -163211,6 +168359,7 @@ ** sqlite3SrcListAppend() */

if( p->pSrc ){ Table *pTab2; p->pSrc->a[0].pSelect = pSub; + p->pSrc->a[0].fg.isCorrelated = 1; sqlite3SrcListAssignCursors(pParse, p->pSrc); pSub->selFlags |= SF_Expanded|SF_OrderByReqd; pTab2 = sqlite3ResultSetOfSelect(pParse, pSub, SQLITE_AFF_NONE);

@@ -163398,7 +168547,7 @@ return pWin;

} /* -** Window *pWin has just been created from a WINDOW clause. Tokne pBase +** Window *pWin has just been created from a WINDOW clause. Token pBase ** is the base window. Earlier windows from the same WINDOW clause are ** stored in the linked list starting at pWin->pNextWin. This function ** either updates *pWin according to the base specification, or else

@@ -163442,8 +168591,9 @@ SQLITE_PRIVATE void sqlite3WindowAttach(Parse *pParse, Expr *p, Window *pWin){

if( p ){ assert( p->op==TK_FUNCTION ); assert( pWin ); + assert( ExprIsFullSize(p) ); p->y.pWin = pWin; - ExprSetProperty(p, EP_WinFunc); + ExprSetProperty(p, EP_WinFunc|EP_FullSize); pWin->pOwner = p; if( (p->flags & EP_Distinct) && pWin->eFrmType!=TK_FILTER ){ sqlite3ErrorMsg(pParse,

@@ -163704,7 +168854,7 @@ ** Consider a window-frame similar to the following:

** ** (ORDER BY a, b GROUPS BETWEEN 2 PRECEDING AND 2 FOLLOWING) ** -** The windows functions implmentation caches the input rows in a temp +** The windows functions implementation caches the input rows in a temp ** table, sorted by "a, b" (it actually populates the cache lazily, and ** aggressively removes rows once they are no longer required, but that's ** a mere detail). It keeps three cursors open on the temp table. One

@@ -164713,7 +169863,7 @@ ** }

** ** For the most part, the patterns above are adapted to support UNBOUNDED by ** assuming that it is equivalent to "infinity PRECEDING/FOLLOWING" and -** CURRENT ROW by assuming that it is equivilent to "0 PRECEDING/FOLLOWING". +** CURRENT ROW by assuming that it is equivalent to "0 PRECEDING/FOLLOWING". ** This is optimized of course - branches that will never be taken and ** conditions that are always true are omitted from the VM code. The only ** exceptional case is:

@@ -164992,7 +170142,7 @@ break;

} /* Allocate registers for the array of values from the sub-query, the - ** samve values in record form, and the rowid used to insert said record + ** same values in record form, and the rowid used to insert said record ** into the ephemeral table. */ regNew = pParse->nMem+1; pParse->nMem += nInput;

@@ -165076,8 +170226,7 @@ int addrGe = sqlite3VdbeAddOp3(v, op, regStart, 0, regEnd);

VdbeCoverageNeverNullIf(v, op==OP_Ge); /* NeverNull because bound <expr> */ VdbeCoverageNeverNullIf(v, op==OP_Le); /* values previously checked */ windowAggFinal(&s, 0); - sqlite3VdbeAddOp2(v, OP_Rewind, s.current.csr, 1); - VdbeCoverageNeverTaken(v); + sqlite3VdbeAddOp1(v, OP_Rewind, s.current.csr); windowReturnOneRow(&s); sqlite3VdbeAddOp1(v, OP_ResetSorter, s.current.csr); sqlite3VdbeAddOp2(v, OP_Goto, 0, lblWhereEnd);

@@ -165089,13 +170238,10 @@ sqlite3VdbeAddOp3(v, OP_Subtract, regStart, regEnd, regStart);

} if( pMWin->eStart!=TK_UNBOUNDED ){ - sqlite3VdbeAddOp2(v, OP_Rewind, s.start.csr, 1); - VdbeCoverageNeverTaken(v); + sqlite3VdbeAddOp1(v, OP_Rewind, s.start.csr); } - sqlite3VdbeAddOp2(v, OP_Rewind, s.current.csr, 1); - VdbeCoverageNeverTaken(v); - sqlite3VdbeAddOp2(v, OP_Rewind, s.end.csr, 1); - VdbeCoverageNeverTaken(v); + sqlite3VdbeAddOp1(v, OP_Rewind, s.current.csr); + sqlite3VdbeAddOp1(v, OP_Rewind, s.end.csr); if( regPeer && pOrderBy ){ sqlite3VdbeAddOp3(v, OP_Copy, regNewPeer, regPeer, pOrderBy->nExpr-1); sqlite3VdbeAddOp3(v, OP_Copy, regPeer, s.start.reg, pOrderBy->nExpr-1);

@@ -165237,7 +170383,8 @@

/************** End of window.c **********************************************/ /************** Begin file parse.c *******************************************/ /* This file is automatically generated by Lemon from input grammar -** source file "parse.y". */ +** source file "parse.y". +*/ /* ** 2001-09-15 **

@@ -165254,7 +170401,7 @@ **

** The canonical source code to this file ("parse.y") is a Lemon grammar ** file that specifies the input grammar and actions to take while parsing. ** That input file is processed by Lemon to generate a C-language -** implementation of a parser for the given grammer. You might be reading +** implementation of a parser for the given grammar. You might be reading ** this comment as part of the translated C-code. Edits should be made ** to the original parse.y sources. */

@@ -165748,18 +170895,18 @@ #define sqlite3ParserCTX_PARAM ,pParse

#define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse; #define sqlite3ParserCTX_STORE yypParser->pParse=pParse; #define YYFALLBACK 1 -#define YYNSTATE 576 +#define YYNSTATE 579 #define YYNRULE 405 -#define YYNRULE_WITH_ACTION 342 +#define YYNRULE_WITH_ACTION 340 #define YYNTOKEN 185 -#define YY_MAX_SHIFT 575 -#define YY_MIN_SHIFTREDUCE 835 -#define YY_MAX_SHIFTREDUCE 1239 -#define YY_ERROR_ACTION 1240 -#define YY_ACCEPT_ACTION 1241 -#define YY_NO_ACTION 1242 -#define YY_MIN_REDUCE 1243 -#define YY_MAX_REDUCE 1647 +#define YY_MAX_SHIFT 578 +#define YY_MIN_SHIFTREDUCE 838 +#define YY_MAX_SHIFTREDUCE 1242 +#define YY_ERROR_ACTION 1243 +#define YY_ACCEPT_ACTION 1244 +#define YY_NO_ACTION 1245 +#define YY_MIN_REDUCE 1246 +#define YY_MAX_REDUCE 1650 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))

@@ -165826,218 +170973,218 @@ ** shifting non-terminals after a reduce.

** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2098) +#define YY_ACTTAB_COUNT (2100) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 568, 208, 568, 118, 115, 229, 568, 118, 115, 229, - /* 10 */ 568, 1314, 377, 1293, 408, 562, 562, 562, 568, 409, - /* 20 */ 378, 1314, 1276, 41, 41, 41, 41, 208, 1526, 71, - /* 30 */ 71, 971, 419, 41, 41, 491, 303, 279, 303, 972, - /* 40 */ 397, 71, 71, 125, 126, 80, 1217, 1217, 1050, 1053, - /* 50 */ 1040, 1040, 123, 123, 124, 124, 124, 124, 476, 409, - /* 60 */ 1241, 1, 1, 575, 2, 1245, 550, 118, 115, 229, - /* 70 */ 317, 480, 146, 480, 524, 118, 115, 229, 529, 1327, - /* 80 */ 417, 523, 142, 125, 126, 80, 1217, 1217, 1050, 1053, - /* 90 */ 1040, 1040, 123, 123, 124, 124, 124, 124, 118, 115, - /* 100 */ 229, 327, 122, 122, 122, 122, 121, 121, 120, 120, - /* 110 */ 120, 119, 116, 444, 284, 284, 284, 284, 442, 442, - /* 120 */ 442, 1567, 376, 1569, 1192, 375, 1163, 565, 1163, 565, - /* 130 */ 409, 1567, 537, 259, 226, 444, 101, 145, 449, 316, - /* 140 */ 559, 240, 122, 122, 122, 122, 121, 121, 120, 120, - /* 150 */ 120, 119, 116, 444, 125, 126, 80, 1217, 1217, 1050, - /* 160 */ 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, 142, - /* 170 */ 294, 1192, 339, 448, 120, 120, 120, 119, 116, 444, - /* 180 */ 127, 1192, 1193, 1194, 148, 441, 440, 568, 119, 116, - /* 190 */ 444, 124, 124, 124, 124, 117, 122, 122, 122, 122, - /* 200 */ 121, 121, 120, 120, 120, 119, 116, 444, 454, 113, - /* 210 */ 13, 13, 546, 122, 122, 122, 122, 121, 121, 120, - /* 220 */ 120, 120, 119, 116, 444, 422, 316, 559, 1192, 1193, - /* 230 */ 1194, 149, 1224, 409, 1224, 124, 124, 124, 124, 122, - /* 240 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116, - /* 250 */ 444, 465, 342, 1037, 1037, 1051, 1054, 125, 126, 80, - /* 260 */ 1217, 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, - /* 270 */ 124, 124, 1279, 522, 222, 1192, 568, 409, 224, 514, - /* 280 */ 175, 82, 83, 122, 122, 122, 122, 121, 121, 120, - /* 290 */ 120, 120, 119, 116, 444, 1007, 16, 16, 1192, 133, - /* 300 */ 133, 125, 126, 80, 1217, 1217, 1050, 1053, 1040, 1040, - /* 310 */ 123, 123, 124, 124, 124, 124, 122, 122, 122, 122, - /* 320 */ 121, 121, 120, 120, 120, 119, 116, 444, 1041, 546, - /* 330 */ 1192, 373, 1192, 1193, 1194, 252, 1434, 399, 504, 501, - /* 340 */ 500, 111, 560, 566, 4, 926, 926, 433, 499, 340, - /* 350 */ 460, 328, 360, 394, 1237, 1192, 1193, 1194, 563, 568, - /* 360 */ 122, 122, 122, 122, 121, 121, 120, 120, 120, 119, - /* 370 */ 116, 444, 284, 284, 369, 1580, 1607, 441, 440, 154, - /* 380 */ 409, 445, 71, 71, 1286, 565, 1221, 1192, 1193, 1194, - /* 390 */ 85, 1223, 271, 557, 543, 515, 1561, 568, 98, 1222, - /* 400 */ 6, 1278, 472, 142, 125, 126, 80, 1217, 1217, 1050, - /* 410 */ 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, 550, - /* 420 */ 13, 13, 1027, 507, 1224, 1192, 1224, 549, 109, 109, - /* 430 */ 222, 568, 1238, 175, 568, 427, 110, 197, 445, 570, - /* 440 */ 569, 430, 1552, 1017, 325, 551, 1192, 270, 287, 368, - /* 450 */ 510, 363, 509, 257, 71, 71, 543, 71, 71, 359, - /* 460 */ 316, 559, 1613, 122, 122, 122, 122, 121, 121, 120, - /* 470 */ 120, 120, 119, 116, 444, 1017, 1017, 1019, 1020, 27, - /* 480 */ 284, 284, 1192, 1193, 1194, 1158, 568, 1612, 409, 901, - /* 490 */ 190, 550, 356, 565, 550, 937, 533, 517, 1158, 516, - /* 500 */ 413, 1158, 552, 1192, 1193, 1194, 568, 544, 1554, 51, - /* 510 */ 51, 214, 125, 126, 80, 1217, 1217, 1050, 1053, 1040, - /* 520 */ 1040, 123, 123, 124, 
124, 124, 124, 1192, 474, 135, - /* 530 */ 135, 409, 284, 284, 1490, 505, 121, 121, 120, 120, - /* 540 */ 120, 119, 116, 444, 1007, 565, 518, 217, 541, 1561, - /* 550 */ 316, 559, 142, 6, 532, 125, 126, 80, 1217, 1217, - /* 560 */ 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, - /* 570 */ 1555, 122, 122, 122, 122, 121, 121, 120, 120, 120, - /* 580 */ 119, 116, 444, 485, 1192, 1193, 1194, 482, 281, 1267, - /* 590 */ 957, 252, 1192, 373, 504, 501, 500, 1192, 340, 571, - /* 600 */ 1192, 571, 409, 292, 499, 957, 876, 191, 480, 316, - /* 610 */ 559, 384, 290, 380, 122, 122, 122, 122, 121, 121, - /* 620 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1217, - /* 630 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 640 */ 124, 409, 394, 1136, 1192, 869, 100, 284, 284, 1192, - /* 650 */ 1193, 1194, 373, 1093, 1192, 1193, 1194, 1192, 1193, 1194, - /* 660 */ 565, 455, 32, 373, 233, 125, 126, 80, 1217, 1217, - /* 670 */ 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, - /* 680 */ 1433, 959, 568, 228, 958, 122, 122, 122, 122, 121, - /* 690 */ 121, 120, 120, 120, 119, 116, 444, 1158, 228, 1192, - /* 700 */ 157, 1192, 1193, 1194, 1553, 13, 13, 301, 957, 1232, - /* 710 */ 1158, 153, 409, 1158, 373, 1583, 1176, 5, 369, 1580, - /* 720 */ 429, 1238, 3, 957, 122, 122, 122, 122, 121, 121, - /* 730 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1217, - /* 740 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 750 */ 124, 409, 208, 567, 1192, 1028, 1192, 1193, 1194, 1192, - /* 760 */ 388, 852, 155, 1552, 286, 402, 1098, 1098, 488, 568, - /* 770 */ 465, 342, 1319, 1319, 1552, 125, 126, 80, 1217, 1217, - /* 780 */ 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, - /* 790 */ 129, 568, 13, 13, 374, 122, 122, 122, 122, 121, - /* 800 */ 121, 120, 120, 120, 119, 116, 444, 302, 568, 453, - /* 810 */ 528, 1192, 1193, 1194, 13, 13, 1192, 1193, 1194, 1297, - /* 820 */ 463, 1267, 409, 1317, 1317, 1552, 1012, 453, 452, 200, - /* 830 */ 299, 71, 71, 1265, 122, 122, 122, 122, 121, 121, - /* 840 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1217, - /* 850 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 860 */ 124, 409, 227, 1073, 1158, 284, 284, 419, 312, 278, - /* 870 */ 278, 285, 285, 1419, 406, 405, 382, 1158, 565, 568, - /* 880 */ 1158, 1196, 565, 1600, 565, 125, 126, 80, 1217, 1217, - /* 890 */ 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, - /* 900 */ 453, 1482, 13, 13, 1536, 122, 122, 122, 122, 121, - /* 910 */ 121, 120, 120, 120, 119, 116, 444, 201, 568, 354, - /* 920 */ 1586, 575, 2, 1245, 840, 841, 842, 1562, 317, 1212, - /* 930 */ 146, 6, 409, 255, 254, 253, 206, 1327, 9, 1196, - /* 940 */ 262, 71, 71, 424, 122, 122, 122, 122, 121, 121, - /* 950 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1217, - /* 960 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 970 */ 124, 568, 284, 284, 568, 1213, 409, 574, 313, 1245, - /* 980 */ 349, 1296, 352, 419, 317, 565, 146, 491, 525, 1643, - /* 990 */ 395, 371, 491, 1327, 70, 70, 1295, 71, 71, 240, - /* 1000 */ 1325, 104, 80, 1217, 1217, 1050, 1053, 1040, 1040, 123, - /* 1010 */ 123, 124, 124, 124, 124, 122, 122, 122, 122, 121, - /* 1020 */ 121, 120, 120, 120, 119, 116, 444, 1114, 284, 284, - /* 1030 */ 428, 448, 1525, 1213, 439, 284, 284, 1489, 1352, 311, - /* 1040 */ 474, 565, 1115, 971, 491, 491, 217, 1263, 565, 1538, - /* 1050 */ 568, 972, 207, 568, 1027, 240, 383, 1116, 519, 122, - /* 1060 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116, - /* 1070 */ 444, 1018, 107, 71, 71, 1017, 13, 13, 912, 
568, - /* 1080 */ 1495, 568, 284, 284, 97, 526, 491, 448, 913, 1326, - /* 1090 */ 1322, 545, 409, 284, 284, 565, 151, 209, 1495, 1497, - /* 1100 */ 262, 450, 55, 55, 56, 56, 565, 1017, 1017, 1019, - /* 1110 */ 443, 332, 409, 527, 12, 295, 125, 126, 80, 1217, - /* 1120 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 1130 */ 124, 347, 409, 864, 1534, 1213, 125, 126, 80, 1217, - /* 1140 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 1150 */ 124, 1137, 1641, 474, 1641, 371, 125, 114, 80, 1217, - /* 1160 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 1170 */ 124, 1495, 329, 474, 331, 122, 122, 122, 122, 121, - /* 1180 */ 121, 120, 120, 120, 119, 116, 444, 203, 1419, 568, - /* 1190 */ 1294, 864, 464, 1213, 436, 122, 122, 122, 122, 121, - /* 1200 */ 121, 120, 120, 120, 119, 116, 444, 553, 1137, 1642, - /* 1210 */ 539, 1642, 15, 15, 892, 122, 122, 122, 122, 121, - /* 1220 */ 121, 120, 120, 120, 119, 116, 444, 568, 298, 538, - /* 1230 */ 1135, 1419, 1559, 1560, 1331, 409, 6, 6, 1169, 1268, - /* 1240 */ 415, 320, 284, 284, 1419, 508, 565, 525, 300, 457, - /* 1250 */ 43, 43, 568, 893, 12, 565, 330, 478, 425, 407, - /* 1260 */ 126, 80, 1217, 1217, 1050, 1053, 1040, 1040, 123, 123, - /* 1270 */ 124, 124, 124, 124, 568, 57, 57, 288, 1192, 1419, - /* 1280 */ 496, 458, 392, 392, 391, 273, 389, 1135, 1558, 849, - /* 1290 */ 1169, 407, 6, 568, 321, 1158, 470, 44, 44, 1557, - /* 1300 */ 1114, 426, 234, 6, 323, 256, 540, 256, 1158, 431, - /* 1310 */ 568, 1158, 322, 17, 487, 1115, 58, 58, 122, 122, - /* 1320 */ 122, 122, 121, 121, 120, 120, 120, 119, 116, 444, - /* 1330 */ 1116, 216, 481, 59, 59, 1192, 1193, 1194, 111, 560, - /* 1340 */ 324, 4, 236, 456, 526, 568, 237, 456, 568, 437, - /* 1350 */ 168, 556, 420, 141, 479, 563, 568, 293, 568, 1095, - /* 1360 */ 568, 293, 568, 1095, 531, 568, 872, 8, 60, 60, - /* 1370 */ 235, 61, 61, 568, 414, 568, 414, 568, 445, 62, - /* 1380 */ 62, 45, 45, 46, 46, 47, 47, 199, 49, 49, - /* 1390 */ 557, 568, 359, 568, 100, 486, 50, 50, 63, 63, - /* 1400 */ 64, 64, 561, 415, 535, 410, 568, 1027, 568, 534, - /* 1410 */ 316, 559, 316, 559, 65, 65, 14, 14, 568, 1027, - /* 1420 */ 568, 512, 932, 872, 1018, 109, 109, 931, 1017, 66, - /* 1430 */ 66, 131, 131, 110, 451, 445, 570, 569, 416, 177, - /* 1440 */ 1017, 132, 132, 67, 67, 568, 467, 568, 932, 471, - /* 1450 */ 1364, 283, 226, 931, 315, 1363, 407, 568, 459, 407, - /* 1460 */ 1017, 1017, 1019, 239, 407, 86, 213, 1350, 52, 52, - /* 1470 */ 68, 68, 1017, 1017, 1019, 1020, 27, 1585, 1180, 447, - /* 1480 */ 69, 69, 288, 97, 108, 1541, 106, 392, 392, 391, - /* 1490 */ 273, 389, 568, 879, 849, 883, 568, 111, 560, 466, - /* 1500 */ 4, 568, 152, 30, 38, 568, 1132, 234, 396, 323, - /* 1510 */ 111, 560, 527, 4, 563, 53, 53, 322, 568, 163, - /* 1520 */ 163, 568, 337, 468, 164, 164, 333, 563, 76, 76, - /* 1530 */ 568, 289, 1514, 568, 31, 1513, 568, 445, 338, 483, - /* 1540 */ 100, 54, 54, 344, 72, 72, 296, 236, 1080, 557, - /* 1550 */ 445, 879, 1360, 134, 134, 168, 73, 73, 141, 161, - /* 1560 */ 161, 1574, 557, 535, 568, 319, 568, 348, 536, 1009, - /* 1570 */ 473, 261, 261, 891, 890, 235, 535, 568, 1027, 568, - /* 1580 */ 475, 534, 261, 367, 109, 109, 521, 136, 136, 130, - /* 1590 */ 130, 1027, 110, 366, 445, 570, 569, 109, 109, 1017, - /* 1600 */ 162, 162, 156, 156, 568, 110, 1080, 445, 570, 569, - /* 1610 */ 410, 351, 1017, 568, 353, 316, 559, 568, 343, 568, - /* 1620 */ 100, 497, 357, 258, 100, 898, 899, 140, 140, 355, - /* 1630 */ 1310, 1017, 1017, 1019, 1020, 27, 139, 139, 362, 451, - /* 
1640 */ 137, 137, 138, 138, 1017, 1017, 1019, 1020, 27, 1180, - /* 1650 */ 447, 568, 372, 288, 111, 560, 1021, 4, 392, 392, - /* 1660 */ 391, 273, 389, 568, 1141, 849, 568, 1076, 568, 258, - /* 1670 */ 492, 563, 568, 211, 75, 75, 555, 962, 234, 261, - /* 1680 */ 323, 111, 560, 929, 4, 113, 77, 77, 322, 74, - /* 1690 */ 74, 42, 42, 1373, 445, 48, 48, 1418, 563, 974, - /* 1700 */ 975, 1092, 1091, 1092, 1091, 862, 557, 150, 930, 1346, - /* 1710 */ 113, 1358, 554, 1424, 1021, 1275, 1266, 1254, 236, 1253, - /* 1720 */ 1255, 445, 1593, 1343, 308, 276, 168, 309, 11, 141, - /* 1730 */ 393, 310, 232, 557, 1405, 1027, 335, 291, 1400, 219, - /* 1740 */ 336, 109, 109, 936, 297, 1410, 235, 341, 477, 110, - /* 1750 */ 502, 445, 570, 569, 1393, 1409, 1017, 400, 1293, 365, - /* 1760 */ 223, 1486, 1027, 1485, 1355, 1356, 1354, 1353, 109, 109, - /* 1770 */ 204, 1596, 1232, 558, 265, 218, 110, 205, 445, 570, - /* 1780 */ 569, 410, 387, 1017, 1533, 179, 316, 559, 1017, 1017, - /* 1790 */ 1019, 1020, 27, 230, 1531, 1229, 79, 560, 85, 4, - /* 1800 */ 418, 215, 548, 81, 84, 188, 1406, 173, 181, 461, - /* 1810 */ 451, 35, 462, 563, 183, 1017, 1017, 1019, 1020, 27, - /* 1820 */ 184, 1491, 185, 186, 495, 242, 98, 398, 1412, 36, - /* 1830 */ 1411, 484, 91, 469, 401, 1414, 445, 192, 1480, 246, - /* 1840 */ 1502, 490, 346, 277, 248, 196, 493, 511, 557, 350, - /* 1850 */ 1256, 249, 250, 403, 1313, 1312, 111, 560, 432, 4, - /* 1860 */ 1311, 1304, 93, 1611, 883, 1610, 224, 404, 434, 520, - /* 1870 */ 263, 435, 1579, 563, 1283, 1282, 364, 1027, 306, 1281, - /* 1880 */ 264, 1609, 1565, 109, 109, 370, 1303, 307, 1564, 438, - /* 1890 */ 128, 110, 1378, 445, 570, 569, 445, 546, 1017, 10, - /* 1900 */ 1466, 105, 381, 1377, 34, 572, 99, 1336, 557, 314, - /* 1910 */ 1186, 530, 272, 274, 379, 210, 1335, 547, 385, 386, - /* 1920 */ 275, 573, 1251, 1246, 411, 412, 1518, 165, 178, 1519, - /* 1930 */ 1017, 1017, 1019, 1020, 27, 1517, 1516, 1027, 78, 147, - /* 1940 */ 166, 220, 221, 109, 109, 836, 304, 167, 446, 212, - /* 1950 */ 318, 110, 231, 445, 570, 569, 144, 1090, 1017, 1088, - /* 1960 */ 326, 180, 169, 1212, 182, 334, 238, 915, 241, 1104, - /* 1970 */ 187, 170, 171, 421, 87, 88, 423, 189, 89, 90, - /* 1980 */ 172, 1107, 243, 1103, 244, 158, 18, 245, 345, 247, - /* 1990 */ 1017, 1017, 1019, 1020, 27, 261, 1096, 193, 1226, 489, - /* 2000 */ 194, 37, 366, 851, 494, 251, 195, 506, 92, 19, - /* 2010 */ 498, 358, 20, 503, 881, 361, 94, 894, 305, 159, - /* 2020 */ 513, 39, 95, 1174, 160, 1056, 966, 1143, 96, 174, - /* 2030 */ 1142, 225, 280, 282, 198, 960, 113, 1164, 1160, 260, - /* 2040 */ 21, 22, 23, 1162, 1168, 1167, 1148, 24, 33, 25, - /* 2050 */ 202, 542, 26, 100, 1071, 102, 1057, 103, 7, 1055, - /* 2060 */ 1059, 1113, 1060, 1112, 266, 267, 28, 40, 390, 1022, - /* 2070 */ 863, 112, 29, 564, 1182, 1181, 268, 176, 143, 925, - /* 2080 */ 1242, 1242, 1242, 1242, 1242, 1242, 1242, 1242, 1242, 1242, - /* 2090 */ 1242, 1242, 1242, 1242, 269, 1602, 1242, 1601, + /* 0 */ 572, 210, 572, 119, 116, 231, 572, 119, 116, 231, + /* 10 */ 572, 1317, 379, 1296, 410, 566, 566, 566, 572, 411, + /* 20 */ 380, 1317, 1279, 42, 42, 42, 42, 210, 1529, 72, + /* 30 */ 72, 974, 421, 42, 42, 495, 305, 281, 305, 975, + /* 40 */ 399, 72, 72, 126, 127, 81, 1217, 1217, 1054, 1057, + /* 50 */ 1044, 1044, 124, 124, 125, 125, 125, 125, 480, 411, + /* 60 */ 1244, 1, 1, 578, 2, 1248, 554, 119, 116, 231, + /* 70 */ 319, 484, 147, 484, 528, 119, 116, 231, 533, 1330, + /* 80 */ 419, 527, 143, 126, 127, 81, 1217, 1217, 1054, 1057, + /* 90 */ 1044, 1044, 124, 124, 125, 
125, 125, 125, 119, 116, + /* 100 */ 231, 329, 123, 123, 123, 123, 122, 122, 121, 121, + /* 110 */ 121, 120, 117, 448, 286, 286, 286, 286, 446, 446, + /* 120 */ 446, 1568, 378, 1570, 1193, 377, 1164, 569, 1164, 569, + /* 130 */ 411, 1568, 541, 261, 228, 448, 102, 146, 453, 318, + /* 140 */ 563, 242, 123, 123, 123, 123, 122, 122, 121, 121, + /* 150 */ 121, 120, 117, 448, 126, 127, 81, 1217, 1217, 1054, + /* 160 */ 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, 143, + /* 170 */ 296, 1193, 341, 452, 121, 121, 121, 120, 117, 448, + /* 180 */ 128, 1193, 1194, 1193, 149, 445, 444, 572, 120, 117, + /* 190 */ 448, 125, 125, 125, 125, 118, 123, 123, 123, 123, + /* 200 */ 122, 122, 121, 121, 121, 120, 117, 448, 458, 114, + /* 210 */ 13, 13, 550, 123, 123, 123, 123, 122, 122, 121, + /* 220 */ 121, 121, 120, 117, 448, 424, 318, 563, 1193, 1194, + /* 230 */ 1193, 150, 1225, 411, 1225, 125, 125, 125, 125, 123, + /* 240 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117, + /* 250 */ 448, 469, 344, 1041, 1041, 1055, 1058, 126, 127, 81, + /* 260 */ 1217, 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, + /* 270 */ 125, 125, 1282, 526, 224, 1193, 572, 411, 226, 519, + /* 280 */ 177, 83, 84, 123, 123, 123, 123, 122, 122, 121, + /* 290 */ 121, 121, 120, 117, 448, 1010, 16, 16, 1193, 134, + /* 300 */ 134, 126, 127, 81, 1217, 1217, 1054, 1057, 1044, 1044, + /* 310 */ 124, 124, 125, 125, 125, 125, 123, 123, 123, 123, + /* 320 */ 122, 122, 121, 121, 121, 120, 117, 448, 1045, 550, + /* 330 */ 1193, 375, 1193, 1194, 1193, 254, 1438, 401, 508, 505, + /* 340 */ 504, 112, 564, 570, 4, 929, 929, 435, 503, 342, + /* 350 */ 464, 330, 362, 396, 1238, 1193, 1194, 1193, 567, 572, + /* 360 */ 123, 123, 123, 123, 122, 122, 121, 121, 121, 120, + /* 370 */ 117, 448, 286, 286, 371, 1581, 1607, 445, 444, 155, + /* 380 */ 411, 449, 72, 72, 1289, 569, 1222, 1193, 1194, 1193, + /* 390 */ 86, 1224, 273, 561, 547, 520, 520, 572, 99, 1223, + /* 400 */ 6, 1281, 476, 143, 126, 127, 81, 1217, 1217, 1054, + /* 410 */ 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, 554, + /* 420 */ 13, 13, 1031, 511, 1225, 1193, 1225, 553, 110, 110, + /* 430 */ 224, 572, 1239, 177, 572, 429, 111, 199, 449, 573, + /* 440 */ 449, 432, 1555, 1019, 327, 555, 1193, 272, 289, 370, + /* 450 */ 514, 365, 513, 259, 72, 72, 547, 72, 72, 361, + /* 460 */ 318, 563, 1613, 123, 123, 123, 123, 122, 122, 121, + /* 470 */ 121, 121, 120, 117, 448, 1019, 1019, 1021, 1022, 28, + /* 480 */ 286, 286, 1193, 1194, 1193, 1159, 572, 1612, 411, 904, + /* 490 */ 192, 554, 358, 569, 554, 940, 537, 521, 1159, 437, + /* 500 */ 415, 1159, 556, 1193, 1194, 1193, 572, 548, 548, 52, + /* 510 */ 52, 216, 126, 127, 81, 1217, 1217, 1054, 1057, 1044, + /* 520 */ 1044, 124, 124, 125, 125, 125, 125, 1193, 478, 136, + /* 530 */ 136, 411, 286, 286, 1493, 509, 122, 122, 121, 121, + /* 540 */ 121, 120, 117, 448, 1010, 569, 522, 219, 545, 545, + /* 550 */ 318, 563, 143, 6, 536, 126, 127, 81, 1217, 1217, + /* 560 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, + /* 570 */ 1557, 123, 123, 123, 123, 122, 122, 121, 121, 121, + /* 580 */ 120, 117, 448, 489, 1193, 1194, 1193, 486, 283, 1270, + /* 590 */ 960, 254, 1193, 375, 508, 505, 504, 1193, 342, 574, + /* 600 */ 1193, 574, 411, 294, 503, 960, 879, 193, 484, 318, + /* 610 */ 563, 386, 292, 382, 123, 123, 123, 123, 122, 122, + /* 620 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, + /* 630 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 640 */ 125, 411, 396, 1139, 1193, 872, 101, 286, 286, 1193, + /* 650 */ 1194, 1193, 375, 
1096, 1193, 1194, 1193, 1193, 1194, 1193, + /* 660 */ 569, 459, 33, 375, 235, 126, 127, 81, 1217, 1217, + /* 670 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, + /* 680 */ 1437, 962, 572, 230, 961, 123, 123, 123, 123, 122, + /* 690 */ 122, 121, 121, 121, 120, 117, 448, 1159, 230, 1193, + /* 700 */ 158, 1193, 1194, 1193, 1556, 13, 13, 303, 960, 1233, + /* 710 */ 1159, 154, 411, 1159, 375, 1584, 1177, 5, 371, 1581, + /* 720 */ 431, 1239, 3, 960, 123, 123, 123, 123, 122, 122, + /* 730 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, + /* 740 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 750 */ 125, 411, 210, 571, 1193, 1032, 1193, 1194, 1193, 1193, + /* 760 */ 390, 855, 156, 1555, 376, 404, 1101, 1101, 492, 572, + /* 770 */ 469, 344, 1322, 1322, 1555, 126, 127, 81, 1217, 1217, + /* 780 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, + /* 790 */ 130, 572, 13, 13, 532, 123, 123, 123, 123, 122, + /* 800 */ 122, 121, 121, 121, 120, 117, 448, 304, 572, 457, + /* 810 */ 229, 1193, 1194, 1193, 13, 13, 1193, 1194, 1193, 1300, + /* 820 */ 467, 1270, 411, 1320, 1320, 1555, 1015, 457, 456, 436, + /* 830 */ 301, 72, 72, 1268, 123, 123, 123, 123, 122, 122, + /* 840 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, + /* 850 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 860 */ 125, 411, 384, 1076, 1159, 286, 286, 421, 314, 280, + /* 870 */ 280, 287, 287, 461, 408, 407, 1539, 1159, 569, 572, + /* 880 */ 1159, 1196, 569, 409, 569, 126, 127, 81, 1217, 1217, + /* 890 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, + /* 900 */ 457, 1485, 13, 13, 1541, 123, 123, 123, 123, 122, + /* 910 */ 122, 121, 121, 121, 120, 117, 448, 202, 572, 462, + /* 920 */ 1587, 578, 2, 1248, 843, 844, 845, 1563, 319, 409, + /* 930 */ 147, 6, 411, 257, 256, 255, 208, 1330, 9, 1196, + /* 940 */ 264, 72, 72, 1436, 123, 123, 123, 123, 122, 122, + /* 950 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, + /* 960 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 970 */ 125, 572, 286, 286, 572, 1213, 411, 577, 315, 1248, + /* 980 */ 421, 371, 1581, 356, 319, 569, 147, 495, 529, 1644, + /* 990 */ 397, 935, 495, 1330, 71, 71, 934, 72, 72, 242, + /* 1000 */ 1328, 105, 81, 1217, 1217, 1054, 1057, 1044, 1044, 124, + /* 1010 */ 124, 125, 125, 125, 125, 123, 123, 123, 123, 122, + /* 1020 */ 122, 121, 121, 121, 120, 117, 448, 1117, 286, 286, + /* 1030 */ 1422, 452, 1528, 1213, 443, 286, 286, 1492, 1355, 313, + /* 1040 */ 478, 569, 1118, 454, 351, 495, 354, 1266, 569, 209, + /* 1050 */ 572, 418, 179, 572, 1031, 242, 385, 1119, 523, 123, + /* 1060 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117, + /* 1070 */ 448, 1020, 108, 72, 72, 1019, 13, 13, 915, 572, + /* 1080 */ 1498, 572, 286, 286, 98, 530, 1537, 452, 916, 1334, + /* 1090 */ 1329, 203, 411, 286, 286, 569, 152, 211, 1498, 1500, + /* 1100 */ 426, 569, 56, 56, 57, 57, 569, 1019, 1019, 1021, + /* 1110 */ 447, 572, 411, 531, 12, 297, 126, 127, 81, 1217, + /* 1120 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 1130 */ 125, 572, 411, 867, 15, 15, 126, 127, 81, 1217, + /* 1140 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 1150 */ 125, 373, 529, 264, 44, 44, 126, 115, 81, 1217, + /* 1160 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 1170 */ 125, 1498, 478, 1271, 417, 123, 123, 123, 123, 122, + /* 1180 */ 122, 121, 121, 121, 120, 117, 448, 205, 1213, 495, + /* 1190 */ 430, 867, 468, 322, 495, 123, 123, 123, 123, 122, + /* 1200 */ 122, 121, 121, 121, 120, 117, 448, 572, 
557, 1140, + /* 1210 */ 1642, 1422, 1642, 543, 572, 123, 123, 123, 123, 122, + /* 1220 */ 122, 121, 121, 121, 120, 117, 448, 572, 1422, 572, + /* 1230 */ 13, 13, 542, 323, 1325, 411, 334, 58, 58, 349, + /* 1240 */ 1422, 1170, 326, 286, 286, 549, 1213, 300, 895, 530, + /* 1250 */ 45, 45, 59, 59, 1140, 1643, 569, 1643, 565, 417, + /* 1260 */ 127, 81, 1217, 1217, 1054, 1057, 1044, 1044, 124, 124, + /* 1270 */ 125, 125, 125, 125, 1367, 373, 500, 290, 1193, 512, + /* 1280 */ 1366, 427, 394, 394, 393, 275, 391, 896, 1138, 852, + /* 1290 */ 478, 258, 1422, 1170, 463, 1159, 12, 331, 428, 333, + /* 1300 */ 1117, 460, 236, 258, 325, 460, 544, 1544, 1159, 1098, + /* 1310 */ 491, 1159, 324, 1098, 440, 1118, 335, 516, 123, 123, + /* 1320 */ 123, 123, 122, 122, 121, 121, 121, 120, 117, 448, + /* 1330 */ 1119, 318, 563, 1138, 572, 1193, 1194, 1193, 112, 564, + /* 1340 */ 201, 4, 238, 433, 935, 490, 285, 228, 1517, 934, + /* 1350 */ 170, 560, 572, 142, 1516, 567, 572, 60, 60, 572, + /* 1360 */ 416, 572, 441, 572, 535, 302, 875, 8, 487, 572, + /* 1370 */ 237, 572, 416, 572, 485, 61, 61, 572, 449, 62, + /* 1380 */ 62, 332, 63, 63, 46, 46, 47, 47, 361, 572, + /* 1390 */ 561, 572, 48, 48, 50, 50, 51, 51, 572, 295, + /* 1400 */ 64, 64, 482, 295, 539, 412, 471, 1031, 572, 538, + /* 1410 */ 318, 563, 65, 65, 66, 66, 409, 475, 572, 1031, + /* 1420 */ 572, 14, 14, 875, 1020, 110, 110, 409, 1019, 572, + /* 1430 */ 474, 67, 67, 111, 455, 449, 573, 449, 98, 317, + /* 1440 */ 1019, 132, 132, 133, 133, 572, 1561, 572, 974, 409, + /* 1450 */ 6, 1562, 68, 68, 1560, 6, 975, 572, 6, 1559, + /* 1460 */ 1019, 1019, 1021, 6, 346, 218, 101, 531, 53, 53, + /* 1470 */ 69, 69, 1019, 1019, 1021, 1022, 28, 1586, 1181, 451, + /* 1480 */ 70, 70, 290, 87, 215, 31, 1363, 394, 394, 393, + /* 1490 */ 275, 391, 350, 109, 852, 107, 572, 112, 564, 483, + /* 1500 */ 4, 1212, 572, 239, 153, 572, 39, 236, 1299, 325, + /* 1510 */ 112, 564, 1298, 4, 567, 572, 32, 324, 572, 54, + /* 1520 */ 54, 572, 1135, 353, 398, 165, 165, 567, 166, 166, + /* 1530 */ 572, 291, 355, 572, 17, 357, 572, 449, 77, 77, + /* 1540 */ 1313, 55, 55, 1297, 73, 73, 572, 238, 470, 561, + /* 1550 */ 449, 472, 364, 135, 135, 170, 74, 74, 142, 163, + /* 1560 */ 163, 374, 561, 539, 572, 321, 572, 886, 540, 137, + /* 1570 */ 137, 339, 1353, 422, 298, 237, 539, 572, 1031, 572, + /* 1580 */ 340, 538, 101, 369, 110, 110, 162, 131, 131, 164, + /* 1590 */ 164, 1031, 111, 368, 449, 573, 449, 110, 110, 1019, + /* 1600 */ 157, 157, 141, 141, 572, 111, 572, 449, 573, 449, + /* 1610 */ 412, 288, 1019, 572, 882, 318, 563, 572, 219, 572, + /* 1620 */ 241, 1012, 477, 263, 263, 894, 893, 140, 140, 138, + /* 1630 */ 138, 1019, 1019, 1021, 1022, 28, 139, 139, 525, 455, + /* 1640 */ 76, 76, 78, 78, 1019, 1019, 1021, 1022, 28, 1181, + /* 1650 */ 451, 572, 1083, 290, 112, 564, 1575, 4, 394, 394, + /* 1660 */ 393, 275, 391, 572, 1023, 852, 572, 479, 345, 263, + /* 1670 */ 101, 567, 882, 1376, 75, 75, 1421, 501, 236, 260, + /* 1680 */ 325, 112, 564, 359, 4, 101, 43, 43, 324, 49, + /* 1690 */ 49, 901, 902, 161, 449, 101, 977, 978, 567, 1079, + /* 1700 */ 1349, 260, 965, 932, 263, 114, 561, 1095, 517, 1095, + /* 1710 */ 1083, 1094, 865, 1094, 151, 933, 1144, 114, 238, 1361, + /* 1720 */ 558, 449, 1023, 559, 1426, 1278, 170, 1269, 1257, 142, + /* 1730 */ 1601, 1256, 1258, 561, 1594, 1031, 496, 278, 213, 1346, + /* 1740 */ 310, 110, 110, 939, 311, 312, 237, 11, 234, 111, + /* 1750 */ 221, 449, 573, 449, 293, 395, 1019, 1408, 337, 1403, + /* 1760 */ 1396, 338, 1031, 299, 343, 1413, 1412, 481, 110, 110, + 
/* 1770 */ 506, 402, 225, 1296, 206, 367, 111, 1358, 449, 573, + /* 1780 */ 449, 412, 1359, 1019, 1489, 1488, 318, 563, 1019, 1019, + /* 1790 */ 1021, 1022, 28, 562, 207, 220, 80, 564, 389, 4, + /* 1800 */ 1597, 1357, 552, 1356, 1233, 181, 267, 232, 1536, 1534, + /* 1810 */ 455, 1230, 420, 567, 82, 1019, 1019, 1021, 1022, 28, + /* 1820 */ 86, 217, 85, 1494, 190, 175, 183, 465, 185, 466, + /* 1830 */ 36, 1409, 186, 187, 188, 499, 449, 244, 37, 99, + /* 1840 */ 400, 1415, 1414, 488, 1417, 194, 473, 403, 561, 1483, + /* 1850 */ 248, 92, 1505, 494, 198, 279, 112, 564, 250, 4, + /* 1860 */ 348, 497, 405, 352, 1259, 251, 252, 515, 1316, 434, + /* 1870 */ 1315, 1314, 94, 567, 1307, 886, 1306, 1031, 226, 406, + /* 1880 */ 1611, 1610, 438, 110, 110, 1580, 1286, 524, 439, 308, + /* 1890 */ 266, 111, 1285, 449, 573, 449, 449, 309, 1019, 366, + /* 1900 */ 1284, 1609, 265, 1566, 1565, 442, 372, 1381, 561, 129, + /* 1910 */ 550, 1380, 10, 1470, 383, 106, 316, 551, 100, 35, + /* 1920 */ 534, 575, 212, 1339, 381, 387, 1187, 1338, 274, 276, + /* 1930 */ 1019, 1019, 1021, 1022, 28, 277, 413, 1031, 576, 1254, + /* 1940 */ 388, 1521, 1249, 110, 110, 167, 1522, 168, 148, 1520, + /* 1950 */ 1519, 111, 306, 449, 573, 449, 222, 223, 1019, 839, + /* 1960 */ 169, 79, 450, 214, 414, 233, 320, 145, 1093, 1091, + /* 1970 */ 328, 182, 171, 1212, 918, 184, 240, 336, 243, 1107, + /* 1980 */ 189, 172, 173, 423, 425, 88, 180, 191, 89, 90, + /* 1990 */ 1019, 1019, 1021, 1022, 28, 91, 174, 1110, 245, 1106, + /* 2000 */ 246, 159, 18, 247, 347, 1099, 263, 195, 1227, 493, + /* 2010 */ 249, 196, 38, 854, 498, 368, 253, 360, 897, 197, + /* 2020 */ 502, 93, 19, 20, 507, 884, 363, 510, 95, 307, + /* 2030 */ 160, 96, 518, 97, 1175, 1060, 1146, 40, 21, 227, + /* 2040 */ 176, 1145, 282, 284, 969, 200, 963, 114, 262, 1165, + /* 2050 */ 22, 23, 24, 1161, 1169, 25, 1163, 1150, 34, 26, + /* 2060 */ 1168, 546, 27, 204, 101, 103, 104, 1074, 7, 1061, + /* 2070 */ 1059, 1063, 1116, 1064, 1115, 268, 269, 29, 41, 270, + /* 2080 */ 1024, 866, 113, 30, 568, 392, 1183, 144, 178, 1182, + /* 2090 */ 271, 928, 1245, 1245, 1245, 1245, 1245, 1245, 1245, 1602, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 193, 193, 193, 274, 275, 276, 193, 274, 275, 276,

@@ -166116,7 +171263,7 @@ /* 720 */ 231, 101, 22, 143, 102, 103, 104, 105, 106, 107,

/* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, /* 750 */ 57, 19, 193, 193, 59, 23, 116, 117, 118, 59, - /* 760 */ 201, 21, 241, 304, 22, 206, 127, 128, 129, 193, + /* 760 */ 201, 21, 241, 304, 193, 206, 127, 128, 129, 193, /* 770 */ 128, 129, 235, 236, 304, 43, 44, 45, 46, 47, /* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, /* 790 */ 22, 193, 216, 217, 193, 102, 103, 104, 105, 106,

@@ -166127,129 +171274,129 @@ /* 830 */ 204, 216, 217, 205, 102, 103, 104, 105, 106, 107,

/* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, /* 860 */ 57, 19, 193, 123, 76, 239, 240, 193, 253, 239, - /* 870 */ 240, 239, 240, 193, 106, 107, 193, 89, 252, 193, - /* 880 */ 92, 59, 252, 141, 252, 43, 44, 45, 46, 47, + /* 870 */ 240, 239, 240, 244, 106, 107, 193, 89, 252, 193, + /* 880 */ 92, 59, 252, 254, 252, 43, 44, 45, 46, 47, /* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, /* 900 */ 284, 161, 216, 217, 193, 102, 103, 104, 105, 106, - /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 16, - /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 25, + /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 244, + /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 254, /* 930 */ 197, 313, 19, 127, 128, 129, 262, 204, 22, 117, - /* 940 */ 24, 216, 217, 263, 102, 103, 104, 105, 106, 107, + /* 940 */ 24, 216, 217, 273, 102, 103, 104, 105, 106, 107, /* 950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, /* 970 */ 57, 193, 239, 240, 193, 59, 19, 188, 253, 190, - /* 980 */ 77, 226, 79, 193, 195, 252, 197, 193, 19, 301, - /* 990 */ 302, 193, 193, 204, 216, 217, 226, 216, 217, 266, + /* 980 */ 193, 311, 312, 16, 195, 252, 197, 193, 19, 301, + /* 990 */ 302, 135, 193, 204, 216, 217, 140, 216, 217, 266, /* 1000 */ 204, 159, 45, 46, 47, 48, 49, 50, 51, 52, /* 1010 */ 53, 54, 55, 56, 57, 102, 103, 104, 105, 106, /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 12, 239, 240, - /* 1030 */ 232, 298, 238, 117, 253, 239, 240, 238, 259, 260, - /* 1040 */ 193, 252, 27, 31, 193, 193, 142, 204, 252, 193, - /* 1050 */ 193, 39, 262, 193, 100, 266, 278, 42, 204, 102, + /* 1030 */ 193, 298, 238, 117, 253, 239, 240, 238, 259, 260, + /* 1040 */ 193, 252, 27, 193, 77, 193, 79, 204, 252, 262, + /* 1050 */ 193, 299, 300, 193, 100, 266, 278, 42, 204, 102, /* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, /* 1070 */ 113, 117, 159, 216, 217, 121, 216, 217, 63, 193, - /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 238, + /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 240, /* 1090 */ 238, 231, 19, 239, 240, 252, 22, 24, 211, 212, - /* 1100 */ 24, 193, 216, 217, 216, 217, 252, 153, 154, 155, - /* 1110 */ 253, 16, 19, 144, 213, 268, 43, 44, 45, 46, + /* 1100 */ 263, 252, 216, 217, 216, 217, 252, 153, 154, 155, + /* 1110 */ 253, 193, 19, 144, 213, 268, 43, 44, 45, 46, /* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1130 */ 57, 238, 19, 59, 193, 59, 43, 44, 45, 46, + /* 1130 */ 57, 193, 19, 59, 216, 217, 43, 44, 45, 46, /* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1150 */ 57, 22, 23, 193, 25, 193, 43, 44, 45, 46, + /* 1150 */ 57, 193, 19, 24, 216, 217, 43, 44, 45, 46, /* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1170 */ 57, 284, 77, 193, 79, 102, 103, 104, 105, 106, - /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 193, 193, - /* 1190 */ 193, 117, 291, 117, 232, 102, 103, 104, 105, 106, - /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 204, 22, 23, - /* 1210 */ 66, 25, 216, 217, 35, 102, 103, 104, 105, 106, - /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 268, 85, - /* 1230 */ 101, 193, 309, 309, 240, 19, 313, 313, 94, 208, - /* 1240 */ 209, 193, 239, 240, 193, 66, 252, 19, 268, 244, - /* 1250 */ 216, 217, 193, 74, 213, 252, 161, 19, 263, 254, + /* 1170 */ 57, 284, 193, 208, 209, 102, 103, 104, 105, 106, + /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 59, 193, + /* 1190 */ 232, 117, 291, 193, 193, 102, 103, 104, 105, 106, + /* 1200 */ 107, 108, 109, 110, 
111, 112, 113, 193, 204, 22, + /* 1210 */ 23, 193, 25, 66, 193, 102, 103, 104, 105, 106, + /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 193, 193, + /* 1230 */ 216, 217, 85, 193, 238, 19, 16, 216, 217, 238, + /* 1240 */ 193, 94, 193, 239, 240, 231, 117, 268, 35, 116, + /* 1250 */ 216, 217, 216, 217, 22, 23, 252, 25, 208, 209, /* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 1270 */ 54, 55, 56, 57, 193, 216, 217, 5, 59, 193, - /* 1280 */ 19, 244, 10, 11, 12, 13, 14, 101, 309, 17, - /* 1290 */ 146, 254, 313, 193, 193, 76, 115, 216, 217, 309, - /* 1300 */ 12, 263, 30, 313, 32, 46, 87, 46, 89, 130, - /* 1310 */ 193, 92, 40, 22, 263, 27, 216, 217, 102, 103, + /* 1270 */ 54, 55, 56, 57, 193, 193, 19, 5, 59, 66, + /* 1280 */ 193, 263, 10, 11, 12, 13, 14, 74, 101, 17, + /* 1290 */ 193, 46, 193, 146, 193, 76, 213, 77, 263, 79, + /* 1300 */ 12, 260, 30, 46, 32, 264, 87, 193, 89, 29, + /* 1310 */ 263, 92, 40, 33, 232, 27, 193, 108, 102, 103, /* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - /* 1330 */ 42, 150, 291, 216, 217, 116, 117, 118, 19, 20, - /* 1340 */ 193, 22, 70, 260, 116, 193, 24, 264, 193, 263, - /* 1350 */ 78, 63, 61, 81, 116, 36, 193, 260, 193, 29, - /* 1360 */ 193, 264, 193, 33, 145, 193, 59, 48, 216, 217, - /* 1370 */ 98, 216, 217, 193, 115, 193, 115, 193, 59, 216, - /* 1380 */ 217, 216, 217, 216, 217, 216, 217, 255, 216, 217, - /* 1390 */ 71, 193, 131, 193, 25, 65, 216, 217, 216, 217, - /* 1400 */ 216, 217, 208, 209, 85, 133, 193, 100, 193, 90, - /* 1410 */ 138, 139, 138, 139, 216, 217, 216, 217, 193, 100, - /* 1420 */ 193, 108, 135, 116, 117, 106, 107, 140, 121, 216, - /* 1430 */ 217, 216, 217, 114, 162, 116, 117, 118, 299, 300, - /* 1440 */ 121, 216, 217, 216, 217, 193, 244, 193, 135, 244, - /* 1450 */ 193, 256, 257, 140, 244, 193, 254, 193, 193, 254, - /* 1460 */ 153, 154, 155, 141, 254, 149, 150, 258, 216, 217, + /* 1330 */ 42, 138, 139, 101, 193, 116, 117, 118, 19, 20, + /* 1340 */ 255, 22, 70, 130, 135, 65, 256, 257, 193, 140, + /* 1350 */ 78, 63, 193, 81, 193, 36, 193, 216, 217, 193, + /* 1360 */ 115, 193, 263, 193, 145, 268, 59, 48, 193, 193, + /* 1370 */ 98, 193, 115, 193, 291, 216, 217, 193, 59, 216, + /* 1380 */ 217, 161, 216, 217, 216, 217, 216, 217, 131, 193, + /* 1390 */ 71, 193, 216, 217, 216, 217, 216, 217, 193, 260, + /* 1400 */ 216, 217, 19, 264, 85, 133, 244, 100, 193, 90, + /* 1410 */ 138, 139, 216, 217, 216, 217, 254, 244, 193, 100, + /* 1420 */ 193, 216, 217, 116, 117, 106, 107, 254, 121, 193, + /* 1430 */ 115, 216, 217, 114, 162, 116, 117, 118, 115, 244, + /* 1440 */ 121, 216, 217, 216, 217, 193, 309, 193, 31, 254, + /* 1450 */ 313, 309, 216, 217, 309, 313, 39, 193, 313, 309, + /* 1460 */ 153, 154, 155, 313, 193, 150, 25, 144, 216, 217, /* 1470 */ 216, 217, 153, 154, 155, 156, 157, 0, 1, 2, - /* 1480 */ 216, 217, 5, 115, 158, 193, 160, 10, 11, 12, - /* 1490 */ 13, 14, 193, 59, 17, 126, 193, 19, 20, 129, - /* 1500 */ 22, 193, 22, 22, 24, 193, 23, 30, 25, 32, - /* 1510 */ 19, 20, 144, 22, 36, 216, 217, 40, 193, 216, - /* 1520 */ 217, 193, 152, 129, 216, 217, 193, 36, 216, 217, - /* 1530 */ 193, 99, 193, 193, 53, 193, 193, 59, 23, 193, - /* 1540 */ 25, 216, 217, 193, 216, 217, 152, 70, 59, 71, - /* 1550 */ 59, 117, 193, 216, 217, 78, 216, 217, 81, 216, - /* 1560 */ 217, 318, 71, 85, 193, 133, 193, 193, 90, 23, - /* 1570 */ 23, 25, 25, 120, 121, 98, 85, 193, 100, 193, - /* 1580 */ 23, 90, 25, 121, 106, 107, 19, 216, 217, 216, + /* 1480 */ 216, 217, 5, 149, 150, 22, 193, 10, 11, 12, + /* 1490 */ 13, 14, 193, 158, 17, 160, 193, 19, 20, 116, + 
/* 1500 */ 22, 25, 193, 24, 22, 193, 24, 30, 226, 32, + /* 1510 */ 19, 20, 226, 22, 36, 193, 53, 40, 193, 216, + /* 1520 */ 217, 193, 23, 193, 25, 216, 217, 36, 216, 217, + /* 1530 */ 193, 99, 193, 193, 22, 193, 193, 59, 216, 217, + /* 1540 */ 193, 216, 217, 193, 216, 217, 193, 70, 129, 71, + /* 1550 */ 59, 129, 193, 216, 217, 78, 216, 217, 81, 216, + /* 1560 */ 217, 193, 71, 85, 193, 133, 193, 126, 90, 216, + /* 1570 */ 217, 152, 258, 61, 152, 98, 85, 193, 100, 193, + /* 1580 */ 23, 90, 25, 121, 106, 107, 23, 216, 217, 216, /* 1590 */ 217, 100, 114, 131, 116, 117, 118, 106, 107, 121, - /* 1600 */ 216, 217, 216, 217, 193, 114, 117, 116, 117, 118, - /* 1610 */ 133, 193, 121, 193, 193, 138, 139, 193, 23, 193, - /* 1620 */ 25, 23, 23, 25, 25, 7, 8, 216, 217, 193, - /* 1630 */ 193, 153, 154, 155, 156, 157, 216, 217, 193, 162, + /* 1600 */ 216, 217, 216, 217, 193, 114, 193, 116, 117, 118, + /* 1610 */ 133, 22, 121, 193, 59, 138, 139, 193, 142, 193, + /* 1620 */ 141, 23, 23, 25, 25, 120, 121, 216, 217, 216, + /* 1630 */ 217, 153, 154, 155, 156, 157, 216, 217, 19, 162, /* 1640 */ 216, 217, 216, 217, 153, 154, 155, 156, 157, 1, - /* 1650 */ 2, 193, 193, 5, 19, 20, 59, 22, 10, 11, - /* 1660 */ 12, 13, 14, 193, 97, 17, 193, 23, 193, 25, - /* 1670 */ 288, 36, 193, 242, 216, 217, 236, 23, 30, 25, + /* 1650 */ 2, 193, 59, 5, 19, 20, 318, 22, 10, 11, + /* 1660 */ 12, 13, 14, 193, 59, 17, 193, 23, 23, 25, + /* 1670 */ 25, 36, 117, 193, 216, 217, 193, 23, 30, 25, /* 1680 */ 32, 19, 20, 23, 22, 25, 216, 217, 40, 216, - /* 1690 */ 217, 216, 217, 193, 59, 216, 217, 193, 36, 83, - /* 1700 */ 84, 153, 153, 155, 155, 23, 71, 25, 23, 193, - /* 1710 */ 25, 193, 193, 193, 117, 193, 193, 193, 70, 193, - /* 1720 */ 193, 59, 193, 255, 255, 287, 78, 255, 243, 81, - /* 1730 */ 191, 255, 297, 71, 271, 100, 293, 245, 267, 214, - /* 1740 */ 246, 106, 107, 108, 246, 271, 98, 245, 293, 114, - /* 1750 */ 220, 116, 117, 118, 267, 271, 121, 271, 225, 219, - /* 1760 */ 229, 219, 100, 219, 259, 259, 259, 259, 106, 107, - /* 1770 */ 249, 196, 60, 280, 141, 243, 114, 249, 116, 117, - /* 1780 */ 118, 133, 245, 121, 200, 297, 138, 139, 153, 154, - /* 1790 */ 155, 156, 157, 297, 200, 38, 19, 20, 151, 22, - /* 1800 */ 200, 150, 140, 294, 294, 22, 272, 43, 234, 18, - /* 1810 */ 162, 270, 200, 36, 237, 153, 154, 155, 156, 157, - /* 1820 */ 237, 283, 237, 237, 18, 199, 149, 246, 272, 270, - /* 1830 */ 272, 200, 158, 246, 246, 234, 59, 234, 246, 199, - /* 1840 */ 290, 62, 289, 200, 199, 22, 221, 115, 71, 200, - /* 1850 */ 200, 199, 199, 221, 218, 218, 19, 20, 64, 22, - /* 1860 */ 218, 227, 22, 224, 126, 224, 165, 221, 24, 305, - /* 1870 */ 200, 113, 312, 36, 218, 220, 218, 100, 282, 218, - /* 1880 */ 91, 218, 317, 106, 107, 221, 227, 282, 317, 82, - /* 1890 */ 148, 114, 265, 116, 117, 118, 59, 145, 121, 22, - /* 1900 */ 277, 158, 200, 265, 25, 202, 147, 250, 71, 279, - /* 1910 */ 13, 146, 194, 194, 249, 248, 250, 140, 247, 246, - /* 1920 */ 6, 192, 192, 192, 303, 303, 213, 207, 300, 213, - /* 1930 */ 153, 154, 155, 156, 157, 213, 213, 100, 213, 222, - /* 1940 */ 207, 214, 214, 106, 107, 4, 222, 207, 3, 22, - /* 1950 */ 163, 114, 15, 116, 117, 118, 16, 23, 121, 23, - /* 1960 */ 139, 151, 130, 25, 142, 16, 24, 20, 144, 1, - /* 1970 */ 142, 130, 130, 61, 53, 53, 37, 151, 53, 53, - /* 1980 */ 130, 116, 34, 1, 141, 5, 22, 115, 161, 141, - /* 1990 */ 153, 154, 155, 156, 157, 25, 68, 68, 75, 41, - /* 2000 */ 115, 24, 131, 20, 19, 125, 22, 96, 22, 22, - /* 2010 */ 67, 23, 22, 67, 59, 24, 22, 28, 67, 23, - /* 2020 */ 22, 22, 149, 23, 23, 23, 116, 
23, 25, 37, - /* 2030 */ 97, 141, 23, 23, 22, 143, 25, 75, 88, 34, - /* 2040 */ 34, 34, 34, 86, 75, 93, 23, 34, 22, 34, - /* 2050 */ 25, 24, 34, 25, 23, 142, 23, 142, 44, 23, - /* 2060 */ 23, 23, 11, 23, 25, 22, 22, 22, 15, 23, - /* 2070 */ 23, 22, 22, 25, 1, 1, 141, 25, 23, 135, - /* 2080 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2090 */ 319, 319, 319, 319, 141, 141, 319, 141, 319, 319, + /* 1690 */ 217, 7, 8, 23, 59, 25, 83, 84, 36, 23, + /* 1700 */ 193, 25, 23, 23, 25, 25, 71, 153, 145, 155, + /* 1710 */ 117, 153, 23, 155, 25, 23, 97, 25, 70, 193, + /* 1720 */ 193, 59, 117, 236, 193, 193, 78, 193, 193, 81, + /* 1730 */ 141, 193, 193, 71, 193, 100, 288, 287, 242, 255, + /* 1740 */ 255, 106, 107, 108, 255, 255, 98, 243, 297, 114, + /* 1750 */ 214, 116, 117, 118, 245, 191, 121, 271, 293, 267, + /* 1760 */ 267, 246, 100, 246, 245, 271, 271, 293, 106, 107, + /* 1770 */ 220, 271, 229, 225, 249, 219, 114, 259, 116, 117, + /* 1780 */ 118, 133, 259, 121, 219, 219, 138, 139, 153, 154, + /* 1790 */ 155, 156, 157, 280, 249, 243, 19, 20, 245, 22, + /* 1800 */ 196, 259, 140, 259, 60, 297, 141, 297, 200, 200, + /* 1810 */ 162, 38, 200, 36, 294, 153, 154, 155, 156, 157, + /* 1820 */ 151, 150, 294, 283, 22, 43, 234, 18, 237, 200, + /* 1830 */ 270, 272, 237, 237, 237, 18, 59, 199, 270, 149, + /* 1840 */ 246, 272, 272, 200, 234, 234, 246, 246, 71, 246, + /* 1850 */ 199, 158, 290, 62, 22, 200, 19, 20, 199, 22, + /* 1860 */ 289, 221, 221, 200, 200, 199, 199, 115, 218, 64, + /* 1870 */ 218, 218, 22, 36, 227, 126, 227, 100, 165, 221, + /* 1880 */ 224, 224, 24, 106, 107, 312, 218, 305, 113, 282, + /* 1890 */ 91, 114, 220, 116, 117, 118, 59, 282, 121, 218, + /* 1900 */ 218, 218, 200, 317, 317, 82, 221, 265, 71, 148, + /* 1910 */ 145, 265, 22, 277, 200, 158, 279, 140, 147, 25, + /* 1920 */ 146, 202, 248, 250, 249, 247, 13, 250, 194, 194, + /* 1930 */ 153, 154, 155, 156, 157, 6, 303, 100, 192, 192, + /* 1940 */ 246, 213, 192, 106, 107, 207, 213, 207, 222, 213, + /* 1950 */ 213, 114, 222, 116, 117, 118, 214, 214, 121, 4, + /* 1960 */ 207, 213, 3, 22, 303, 15, 163, 16, 23, 23, + /* 1970 */ 139, 151, 130, 25, 20, 142, 24, 16, 144, 1, + /* 1980 */ 142, 130, 130, 61, 37, 53, 300, 151, 53, 53, + /* 1990 */ 153, 154, 155, 156, 157, 53, 130, 116, 34, 1, + /* 2000 */ 141, 5, 22, 115, 161, 68, 25, 68, 75, 41, + /* 2010 */ 141, 115, 24, 20, 19, 131, 125, 23, 28, 22, + /* 2020 */ 67, 22, 22, 22, 67, 59, 24, 96, 22, 67, + /* 2030 */ 23, 149, 22, 25, 23, 23, 23, 22, 34, 141, + /* 2040 */ 37, 97, 23, 23, 116, 22, 143, 25, 34, 75, + /* 2050 */ 34, 34, 34, 88, 75, 34, 86, 23, 22, 34, + /* 2060 */ 93, 24, 34, 25, 25, 142, 142, 23, 44, 23, + /* 2070 */ 23, 23, 23, 11, 23, 25, 22, 22, 22, 141, + /* 2080 */ 23, 23, 22, 22, 25, 15, 1, 23, 25, 1, + /* 2090 */ 141, 135, 319, 319, 319, 319, 319, 319, 319, 141, /* 2100 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2110 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2120 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,

@@ -166268,176 +171415,177 @@ /* 2240 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,

/* 2250 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2260 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2270 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2280 */ 319, 319, 319, + /* 2280 */ 319, 319, 319, 319, 319, }; -#define YY_SHIFT_COUNT (575) +#define YY_SHIFT_COUNT (578) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2074) +#define YY_SHIFT_MAX (2088) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 1648, 1477, 1272, 322, 322, 1, 1319, 1478, 1491, 1837, /* 10 */ 1837, 1837, 471, 0, 0, 214, 1093, 1837, 1837, 1837, /* 20 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 30 */ 271, 271, 1219, 1219, 216, 88, 1, 1, 1, 1, - /* 40 */ 1, 40, 111, 258, 361, 469, 512, 583, 622, 693, - /* 50 */ 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, 1093, + /* 30 */ 1837, 271, 271, 1219, 1219, 216, 88, 1, 1, 1, + /* 40 */ 1, 1, 40, 111, 258, 361, 469, 512, 583, 622, + /* 50 */ 693, 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, /* 60 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, - /* 70 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635, 1662, - /* 80 */ 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, + /* 70 */ 1093, 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635, + /* 80 */ 1662, 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, /* 90 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, /* 100 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, /* 110 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, /* 120 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 130 */ 137, 181, 181, 181, 181, 181, 181, 181, 94, 430, - /* 140 */ 66, 65, 112, 366, 533, 533, 740, 1261, 533, 533, - /* 150 */ 79, 79, 533, 412, 412, 412, 77, 412, 123, 113, - /* 160 */ 113, 22, 22, 2098, 2098, 328, 328, 328, 239, 468, - /* 170 */ 468, 468, 468, 1015, 1015, 409, 366, 1129, 1186, 533, - /* 180 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 969, - /* 200 */ 621, 621, 533, 642, 788, 788, 1228, 1228, 822, 822, - /* 210 */ 67, 1274, 2098, 2098, 2098, 2098, 2098, 2098, 2098, 1307, - /* 220 */ 954, 954, 585, 472, 640, 387, 695, 538, 541, 700, - /* 230 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 240 */ 222, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 250 */ 533, 533, 533, 1179, 1179, 1179, 533, 533, 533, 565, - /* 260 */ 533, 533, 533, 916, 1144, 533, 533, 1288, 533, 533, - /* 270 */ 533, 533, 533, 533, 533, 533, 639, 1330, 209, 1076, - /* 280 */ 1076, 1076, 1076, 580, 209, 209, 1313, 768, 917, 649, - /* 290 */ 1181, 1316, 405, 1316, 1238, 249, 1181, 1181, 249, 1181, - /* 300 */ 405, 1238, 1369, 464, 1259, 1012, 1012, 1012, 1368, 1368, - /* 310 */ 1368, 1368, 184, 184, 1326, 904, 1287, 1480, 1712, 1712, - /* 320 */ 1633, 1633, 1757, 1757, 1633, 1647, 1651, 1783, 1764, 1791, - /* 330 */ 1791, 1791, 1791, 1633, 1806, 1677, 1651, 1651, 1677, 1783, - /* 340 */ 1764, 1677, 1764, 1677, 1633, 1806, 1674, 1779, 1633, 1806, - /* 350 */ 1823, 1633, 1806, 1633, 1806, 1823, 1732, 1732, 1732, 1794, - /* 360 */ 1840, 1840, 1823, 1732, 1738, 1732, 1794, 1732, 1732, 1701, - /* 370 */ 1844, 1758, 1758, 1823, 1633, 1789, 1789, 1807, 1807, 1742, - /* 380 */ 1752, 1877, 1633, 1743, 1742, 1759, 1765, 1677, 1879, 1897, - /* 390 */ 1897, 1914, 1914, 1914, 2098, 2098, 2098, 2098, 2098, 2098, - /* 400 */ 2098, 2098, 2098, 2098, 2098, 2098, 2098, 2098, 2098, 207, - /* 410 */ 1095, 331, 620, 903, 806, 1074, 1483, 1432, 1481, 
1322, - /* 420 */ 1370, 1394, 1515, 1291, 1546, 1547, 1557, 1595, 1598, 1599, - /* 430 */ 1434, 1453, 1618, 1462, 1567, 1489, 1644, 1654, 1616, 1660, - /* 440 */ 1548, 1549, 1682, 1685, 1597, 742, 1941, 1945, 1927, 1787, - /* 450 */ 1937, 1940, 1934, 1936, 1821, 1810, 1832, 1938, 1938, 1942, - /* 460 */ 1822, 1947, 1824, 1949, 1968, 1828, 1841, 1938, 1842, 1912, - /* 470 */ 1939, 1938, 1826, 1921, 1922, 1925, 1926, 1850, 1865, 1948, - /* 480 */ 1843, 1982, 1980, 1964, 1872, 1827, 1928, 1970, 1929, 1923, - /* 490 */ 1958, 1848, 1885, 1977, 1983, 1985, 1871, 1880, 1984, 1943, - /* 500 */ 1986, 1987, 1988, 1990, 1946, 1955, 1991, 1911, 1989, 1994, - /* 510 */ 1951, 1992, 1996, 1873, 1998, 2000, 2001, 2002, 2003, 2004, - /* 520 */ 1999, 1933, 1890, 2009, 2010, 1910, 2005, 2012, 1892, 2011, - /* 530 */ 2006, 2007, 2008, 2013, 1950, 1962, 1957, 2014, 1969, 1952, - /* 540 */ 2015, 2023, 2026, 2027, 2025, 2028, 2018, 1913, 1915, 2031, - /* 550 */ 2011, 2033, 2036, 2037, 2038, 2039, 2040, 2043, 2051, 2044, - /* 560 */ 2045, 2046, 2047, 2049, 2050, 2048, 1944, 1935, 1953, 1954, - /* 570 */ 1956, 2052, 2055, 2053, 2073, 2074, + /* 130 */ 1837, 137, 181, 181, 181, 181, 181, 181, 181, 94, + /* 140 */ 430, 66, 65, 112, 366, 533, 533, 740, 1257, 533, + /* 150 */ 533, 79, 79, 533, 412, 412, 412, 77, 412, 123, + /* 160 */ 113, 113, 113, 22, 22, 2100, 2100, 328, 328, 328, + /* 170 */ 239, 468, 468, 468, 468, 1015, 1015, 409, 366, 1187, + /* 180 */ 1232, 533, 533, 533, 533, 533, 533, 533, 533, 533, + /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, + /* 200 */ 533, 969, 621, 621, 533, 642, 788, 788, 1133, 1133, + /* 210 */ 822, 822, 67, 1193, 2100, 2100, 2100, 2100, 2100, 2100, + /* 220 */ 2100, 1307, 954, 954, 585, 472, 640, 387, 695, 538, + /* 230 */ 541, 700, 533, 533, 533, 533, 533, 533, 533, 533, + /* 240 */ 533, 533, 222, 533, 533, 533, 533, 533, 533, 533, + /* 250 */ 533, 533, 533, 533, 533, 1213, 1213, 1213, 533, 533, + /* 260 */ 533, 565, 533, 533, 533, 916, 1147, 533, 533, 1288, + /* 270 */ 533, 533, 533, 533, 533, 533, 533, 533, 639, 1280, + /* 280 */ 209, 1129, 1129, 1129, 1129, 580, 209, 209, 1209, 768, + /* 290 */ 917, 649, 1315, 1334, 405, 1334, 1383, 249, 1315, 1315, + /* 300 */ 249, 1315, 405, 1383, 1441, 464, 1245, 1417, 1417, 1417, + /* 310 */ 1323, 1323, 1323, 1323, 184, 184, 1335, 1476, 856, 1482, + /* 320 */ 1744, 1744, 1665, 1665, 1773, 1773, 1665, 1669, 1671, 1802, + /* 330 */ 1782, 1809, 1809, 1809, 1809, 1665, 1817, 1690, 1671, 1671, + /* 340 */ 1690, 1802, 1782, 1690, 1782, 1690, 1665, 1817, 1693, 1791, + /* 350 */ 1665, 1817, 1832, 1665, 1817, 1665, 1817, 1832, 1752, 1752, + /* 360 */ 1752, 1805, 1850, 1850, 1832, 1752, 1749, 1752, 1805, 1752, + /* 370 */ 1752, 1713, 1858, 1775, 1775, 1832, 1665, 1799, 1799, 1823, + /* 380 */ 1823, 1761, 1765, 1890, 1665, 1757, 1761, 1771, 1774, 1690, + /* 390 */ 1894, 1913, 1913, 1929, 1929, 1929, 2100, 2100, 2100, 2100, + /* 400 */ 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, + /* 410 */ 2100, 207, 1220, 331, 620, 967, 806, 1074, 1499, 1432, + /* 420 */ 1463, 1479, 1419, 1422, 1557, 1512, 1598, 1599, 1644, 1645, + /* 430 */ 1654, 1660, 1555, 1505, 1684, 1462, 1670, 1563, 1619, 1593, + /* 440 */ 1676, 1679, 1613, 1680, 1554, 1558, 1689, 1692, 1605, 1589, + /* 450 */ 1955, 1959, 1941, 1803, 1950, 1951, 1945, 1946, 1831, 1820, + /* 460 */ 1842, 1948, 1948, 1952, 1833, 1954, 1834, 1961, 1978, 1838, + /* 470 */ 1851, 1948, 1852, 1922, 1947, 1948, 1836, 1932, 1935, 1936, + /* 480 */ 1942, 1866, 1881, 1964, 1859, 1998, 1996, 
1980, 1888, 1843, + /* 490 */ 1937, 1981, 1939, 1933, 1968, 1869, 1896, 1988, 1993, 1995, + /* 500 */ 1884, 1891, 1997, 1953, 1999, 2000, 1994, 2001, 1957, 1966, + /* 510 */ 2002, 1931, 1990, 2006, 1962, 2003, 2007, 2004, 1882, 2010, + /* 520 */ 2011, 2012, 2008, 2013, 2015, 1944, 1898, 2019, 2020, 1928, + /* 530 */ 2014, 2023, 1903, 2022, 2016, 2017, 2018, 2021, 1965, 1974, + /* 540 */ 1970, 2024, 1979, 1967, 2025, 2034, 2036, 2037, 2038, 2039, + /* 550 */ 2028, 1923, 1924, 2044, 2022, 2046, 2047, 2048, 2049, 2050, + /* 560 */ 2051, 2054, 2062, 2055, 2056, 2057, 2058, 2060, 2061, 2059, + /* 570 */ 1956, 1938, 1949, 1958, 2063, 2064, 2070, 2085, 2088, }; -#define YY_REDUCE_COUNT (408) +#define YY_REDUCE_COUNT (410) #define YY_REDUCE_MIN (-271) -#define YY_REDUCE_MAX (1740) +#define YY_REDUCE_MAX (1753) static const short yy_reduce_ofst[] = { /* 0 */ -125, 733, 789, 241, 293, -123, -193, -191, -183, -187, /* 10 */ 166, 238, 133, -207, -199, -267, -176, -6, 204, 489, - /* 20 */ 576, -175, 598, 686, 615, 725, 860, 778, 781, 857, - /* 30 */ 616, 887, 87, 240, -192, 408, 626, 796, 843, 854, - /* 40 */ 1003, -271, -271, -271, -271, -271, -271, -271, -271, -271, + /* 20 */ 576, 598, -175, 686, 860, 615, 725, 1014, 778, 781, + /* 30 */ 857, 616, 887, 87, 240, -192, 408, 626, 796, 843, + /* 40 */ 854, 1004, -271, -271, -271, -271, -271, -271, -271, -271, /* 50 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, /* 60 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, 80, 83, - /* 80 */ 313, 886, 888, 996, 1034, 1059, 1081, 1100, 1117, 1152, - /* 90 */ 1155, 1163, 1165, 1167, 1169, 1172, 1180, 1182, 1184, 1198, - /* 100 */ 1200, 1213, 1215, 1225, 1227, 1252, 1254, 1264, 1299, 1303, - /* 110 */ 1308, 1312, 1325, 1328, 1337, 1340, 1343, 1371, 1373, 1384, - /* 120 */ 1386, 1411, 1420, 1424, 1426, 1458, 1470, 1473, 1475, 1479, - /* 130 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 140 */ -271, 138, 459, 396, -158, 470, 302, -212, 521, 201, - /* 150 */ -195, -92, 559, 630, 632, 630, -271, 632, 901, 63, - /* 160 */ 407, -271, -271, -271, -271, 161, 161, 161, 251, 335, - /* 170 */ 847, 960, 980, 537, 588, 618, 628, 688, 688, -166, - /* 180 */ -161, 674, 790, 794, 799, 851, 852, -122, 680, -120, - /* 190 */ 995, 1038, 415, 1051, 893, 798, 962, 400, 1086, 779, - /* 200 */ 923, 924, 263, 1041, 979, 990, 1083, 1097, 1031, 1194, - /* 210 */ 362, 994, 1139, 1005, 1037, 1202, 1205, 1195, 1210, -194, - /* 220 */ 56, 185, -135, 232, 522, 560, 601, 617, 669, 683, - /* 230 */ 711, 856, 908, 941, 1048, 1101, 1147, 1257, 1262, 1265, - /* 240 */ 392, 1292, 1333, 1339, 1342, 1346, 1350, 1359, 1374, 1418, - /* 250 */ 1421, 1436, 1437, 593, 755, 770, 997, 1445, 1459, 1209, - /* 260 */ 1500, 1504, 1516, 1132, 1243, 1518, 1519, 1440, 1520, 560, - /* 270 */ 1522, 1523, 1524, 1526, 1527, 1529, 1382, 1438, 1431, 1468, - /* 280 */ 1469, 1472, 1476, 1209, 1431, 1431, 1485, 1525, 1539, 1435, - /* 290 */ 1463, 1471, 1492, 1487, 1443, 1494, 1474, 1484, 1498, 1486, - /* 300 */ 1502, 1455, 1530, 1531, 1533, 1540, 1542, 1544, 1505, 1506, - /* 310 */ 1507, 1508, 1521, 1528, 1493, 1537, 1532, 1575, 1488, 1496, - /* 320 */ 1584, 1594, 1509, 1510, 1600, 1538, 1534, 1541, 1574, 1577, - /* 330 */ 1583, 1585, 1586, 1612, 1626, 1581, 1556, 1558, 1587, 1559, - /* 340 */ 1601, 1588, 1603, 1592, 1631, 1640, 1550, 1553, 1643, 1645, - /* 350 */ 1625, 1649, 1652, 1650, 1653, 1632, 1636, 1637, 1642, 1634, - /* 360 */ 1639, 1641, 1646, 1656, 1655, 
1658, 1659, 1661, 1663, 1560, - /* 370 */ 1564, 1596, 1605, 1664, 1670, 1565, 1571, 1627, 1638, 1657, - /* 380 */ 1665, 1623, 1702, 1630, 1666, 1667, 1671, 1673, 1703, 1718, - /* 390 */ 1719, 1729, 1730, 1731, 1621, 1622, 1628, 1720, 1713, 1716, - /* 400 */ 1722, 1723, 1733, 1717, 1724, 1727, 1728, 1725, 1740, + /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, 80, + /* 80 */ 83, 313, 886, 888, 918, 938, 1021, 1034, 1036, 1141, + /* 90 */ 1159, 1163, 1166, 1168, 1170, 1176, 1178, 1180, 1184, 1196, + /* 100 */ 1198, 1205, 1215, 1225, 1227, 1236, 1252, 1254, 1264, 1303, + /* 110 */ 1309, 1312, 1322, 1325, 1328, 1337, 1340, 1343, 1353, 1371, + /* 120 */ 1373, 1384, 1386, 1411, 1413, 1420, 1424, 1426, 1458, 1470, + /* 130 */ 1473, -271, -271, -271, -271, -271, -271, -271, -271, -271, + /* 140 */ -271, -271, 138, 459, 396, -158, 470, 302, -212, 521, + /* 150 */ 201, -195, -92, 559, 630, 632, 630, -271, 632, 901, + /* 160 */ 63, 407, 670, -271, -271, -271, -271, 161, 161, 161, + /* 170 */ 251, 335, 847, 979, 1097, 537, 588, 618, 628, 688, + /* 180 */ 688, -166, -161, 674, 787, 794, 799, 852, 996, -122, + /* 190 */ 837, -120, 1018, 1035, 415, 1047, 1001, 958, 1082, 400, + /* 200 */ 1099, 779, 1137, 1142, 263, 1083, 1145, 1150, 1041, 1139, + /* 210 */ 965, 1050, 362, 849, 752, 629, 675, 1162, 1173, 1090, + /* 220 */ 1195, -194, 56, 185, -135, 232, 522, 560, 571, 601, + /* 230 */ 617, 669, 683, 711, 850, 893, 1000, 1040, 1049, 1081, + /* 240 */ 1087, 1101, 392, 1114, 1123, 1155, 1161, 1175, 1271, 1293, + /* 250 */ 1299, 1330, 1339, 1342, 1347, 593, 1282, 1286, 1350, 1359, + /* 260 */ 1368, 1314, 1480, 1483, 1507, 1085, 1338, 1526, 1527, 1487, + /* 270 */ 1531, 560, 1532, 1534, 1535, 1538, 1539, 1541, 1448, 1450, + /* 280 */ 1496, 1484, 1485, 1489, 1490, 1314, 1496, 1496, 1504, 1536, + /* 290 */ 1564, 1451, 1486, 1492, 1509, 1493, 1465, 1515, 1494, 1495, + /* 300 */ 1517, 1500, 1519, 1474, 1550, 1543, 1548, 1556, 1565, 1566, + /* 310 */ 1518, 1523, 1542, 1544, 1525, 1545, 1513, 1553, 1552, 1604, + /* 320 */ 1508, 1510, 1608, 1609, 1520, 1528, 1612, 1540, 1559, 1560, + /* 330 */ 1592, 1591, 1595, 1596, 1597, 1629, 1638, 1594, 1569, 1570, + /* 340 */ 1600, 1568, 1610, 1601, 1611, 1603, 1643, 1651, 1562, 1571, + /* 350 */ 1655, 1659, 1640, 1663, 1666, 1664, 1667, 1641, 1650, 1652, + /* 360 */ 1653, 1647, 1656, 1657, 1658, 1668, 1672, 1681, 1649, 1682, + /* 370 */ 1683, 1573, 1582, 1607, 1615, 1685, 1702, 1586, 1587, 1642, + /* 380 */ 1646, 1673, 1675, 1636, 1714, 1637, 1677, 1674, 1678, 1694, + /* 390 */ 1719, 1734, 1735, 1746, 1747, 1750, 1633, 1661, 1686, 1738, + /* 400 */ 1728, 1733, 1736, 1737, 1740, 1726, 1730, 1742, 1743, 1748, + /* 410 */ 1753, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1647, 1647, 1647, 1475, 1240, 1351, 1240, 1240, 1240, 1475, - /* 10 */ 1475, 1475, 1240, 1381, 1381, 1528, 1273, 1240, 1240, 1240, - /* 20 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1474, 1240, 1240, - /* 30 */ 1240, 1240, 1563, 1563, 1240, 1240, 1240, 1240, 1240, 1240, - /* 40 */ 1240, 1240, 1390, 1240, 1397, 1240, 1240, 1240, 1240, 1240, - /* 50 */ 1476, 1477, 1240, 1240, 1240, 1527, 1529, 1492, 1404, 1403, - /* 60 */ 1402, 1401, 1510, 1369, 1395, 1388, 1392, 1470, 1471, 1469, - /* 70 */ 1473, 1477, 1476, 1240, 1391, 1438, 1454, 1437, 1240, 1240, - /* 80 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 90 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 100 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 110 */ 1240, 1240, 
1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 120 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 130 */ 1446, 1453, 1452, 1451, 1460, 1450, 1447, 1440, 1439, 1441, - /* 140 */ 1442, 1240, 1240, 1264, 1240, 1240, 1261, 1315, 1240, 1240, - /* 150 */ 1240, 1240, 1240, 1547, 1546, 1240, 1443, 1240, 1273, 1432, - /* 160 */ 1431, 1457, 1444, 1456, 1455, 1535, 1599, 1598, 1493, 1240, - /* 170 */ 1240, 1240, 1240, 1240, 1240, 1563, 1240, 1240, 1240, 1240, - /* 180 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 190 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1371, - /* 200 */ 1563, 1563, 1240, 1273, 1563, 1563, 1372, 1372, 1269, 1269, - /* 210 */ 1375, 1240, 1542, 1342, 1342, 1342, 1342, 1351, 1342, 1240, - /* 220 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 230 */ 1240, 1240, 1240, 1240, 1532, 1530, 1240, 1240, 1240, 1240, - /* 240 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 250 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 260 */ 1240, 1240, 1240, 1347, 1240, 1240, 1240, 1240, 1240, 1240, - /* 270 */ 1240, 1240, 1240, 1240, 1240, 1592, 1240, 1505, 1329, 1347, - /* 280 */ 1347, 1347, 1347, 1349, 1330, 1328, 1341, 1274, 1247, 1639, - /* 290 */ 1407, 1396, 1348, 1396, 1636, 1394, 1407, 1407, 1394, 1407, - /* 300 */ 1348, 1636, 1290, 1615, 1285, 1381, 1381, 1381, 1371, 1371, - /* 310 */ 1371, 1371, 1375, 1375, 1472, 1348, 1341, 1240, 1639, 1639, - /* 320 */ 1357, 1357, 1638, 1638, 1357, 1493, 1623, 1416, 1318, 1324, - /* 330 */ 1324, 1324, 1324, 1357, 1258, 1394, 1623, 1623, 1394, 1416, - /* 340 */ 1318, 1394, 1318, 1394, 1357, 1258, 1509, 1633, 1357, 1258, - /* 350 */ 1483, 1357, 1258, 1357, 1258, 1483, 1316, 1316, 1316, 1305, - /* 360 */ 1240, 1240, 1483, 1316, 1290, 1316, 1305, 1316, 1316, 1581, - /* 370 */ 1240, 1487, 1487, 1483, 1357, 1573, 1573, 1384, 1384, 1389, - /* 380 */ 1375, 1478, 1357, 1240, 1389, 1387, 1385, 1394, 1308, 1595, - /* 390 */ 1595, 1591, 1591, 1591, 1644, 1644, 1542, 1608, 1273, 1273, - /* 400 */ 1273, 1273, 1608, 1292, 1292, 1274, 1274, 1273, 1608, 1240, - /* 410 */ 1240, 1240, 1240, 1240, 1240, 1603, 1240, 1537, 1494, 1361, - /* 420 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 430 */ 1240, 1240, 1240, 1240, 1548, 1240, 1240, 1240, 1240, 1240, - /* 440 */ 1240, 1240, 1240, 1240, 1240, 1421, 1240, 1243, 1539, 1240, - /* 450 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1398, 1399, 1362, - /* 460 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1413, 1240, 1240, - /* 470 */ 1240, 1408, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 480 */ 1635, 1240, 1240, 1240, 1240, 1240, 1240, 1508, 1507, 1240, - /* 490 */ 1240, 1359, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 500 */ 1240, 1240, 1240, 1240, 1240, 1288, 1240, 1240, 1240, 1240, - /* 510 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 520 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1386, - /* 530 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 540 */ 1240, 1240, 1240, 1240, 1578, 1376, 1240, 1240, 1240, 1240, - /* 550 */ 1626, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 560 */ 1240, 1240, 1240, 1240, 1240, 1619, 1332, 1423, 1240, 1422, - /* 570 */ 1426, 1262, 1240, 1252, 1240, 1240, + /* 0 */ 1648, 1648, 1648, 1478, 1243, 1354, 1243, 1243, 1243, 1478, + /* 10 */ 1478, 1478, 1243, 1384, 1384, 1531, 1276, 1243, 1243, 1243, + /* 20 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1477, 1243, + /* 
30 */ 1243, 1243, 1243, 1564, 1564, 1243, 1243, 1243, 1243, 1243, + /* 40 */ 1243, 1243, 1243, 1393, 1243, 1400, 1243, 1243, 1243, 1243, + /* 50 */ 1243, 1479, 1480, 1243, 1243, 1243, 1530, 1532, 1495, 1407, + /* 60 */ 1406, 1405, 1404, 1513, 1372, 1398, 1391, 1395, 1474, 1475, + /* 70 */ 1473, 1626, 1480, 1479, 1243, 1394, 1442, 1458, 1441, 1243, + /* 80 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 90 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 100 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 110 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 120 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 130 */ 1243, 1450, 1457, 1456, 1455, 1464, 1454, 1451, 1444, 1443, + /* 140 */ 1445, 1446, 1243, 1243, 1267, 1243, 1243, 1264, 1318, 1243, + /* 150 */ 1243, 1243, 1243, 1243, 1550, 1549, 1243, 1447, 1243, 1276, + /* 160 */ 1435, 1434, 1433, 1461, 1448, 1460, 1459, 1538, 1600, 1599, + /* 170 */ 1496, 1243, 1243, 1243, 1243, 1243, 1243, 1564, 1243, 1243, + /* 180 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 190 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 200 */ 1243, 1374, 1564, 1564, 1243, 1276, 1564, 1564, 1375, 1375, + /* 210 */ 1272, 1272, 1378, 1243, 1545, 1345, 1345, 1345, 1345, 1354, + /* 220 */ 1345, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 230 */ 1243, 1243, 1243, 1243, 1243, 1243, 1535, 1533, 1243, 1243, + /* 240 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 250 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 260 */ 1243, 1243, 1243, 1243, 1243, 1350, 1243, 1243, 1243, 1243, + /* 270 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1593, 1243, 1508, + /* 280 */ 1332, 1350, 1350, 1350, 1350, 1352, 1333, 1331, 1344, 1277, + /* 290 */ 1250, 1640, 1410, 1399, 1351, 1399, 1637, 1397, 1410, 1410, + /* 300 */ 1397, 1410, 1351, 1637, 1293, 1615, 1288, 1384, 1384, 1384, + /* 310 */ 1374, 1374, 1374, 1374, 1378, 1378, 1476, 1351, 1344, 1243, + /* 320 */ 1640, 1640, 1360, 1360, 1639, 1639, 1360, 1496, 1623, 1419, + /* 330 */ 1321, 1327, 1327, 1327, 1327, 1360, 1261, 1397, 1623, 1623, + /* 340 */ 1397, 1419, 1321, 1397, 1321, 1397, 1360, 1261, 1512, 1634, + /* 350 */ 1360, 1261, 1486, 1360, 1261, 1360, 1261, 1486, 1319, 1319, + /* 360 */ 1319, 1308, 1243, 1243, 1486, 1319, 1293, 1319, 1308, 1319, + /* 370 */ 1319, 1582, 1243, 1490, 1490, 1486, 1360, 1574, 1574, 1387, + /* 380 */ 1387, 1392, 1378, 1481, 1360, 1243, 1392, 1390, 1388, 1397, + /* 390 */ 1311, 1596, 1596, 1592, 1592, 1592, 1645, 1645, 1545, 1608, + /* 400 */ 1276, 1276, 1276, 1276, 1608, 1295, 1295, 1277, 1277, 1276, + /* 410 */ 1608, 1243, 1243, 1243, 1243, 1243, 1243, 1603, 1243, 1540, + /* 420 */ 1497, 1364, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 430 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1551, 1243, + /* 440 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1424, + /* 450 */ 1243, 1246, 1542, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 460 */ 1243, 1401, 1402, 1365, 1243, 1243, 1243, 1243, 1243, 1243, + /* 470 */ 1243, 1416, 1243, 1243, 1243, 1411, 1243, 1243, 1243, 1243, + /* 480 */ 1243, 1243, 1243, 1243, 1636, 1243, 1243, 1243, 1243, 1243, + /* 490 */ 1243, 1511, 1510, 1243, 1243, 1362, 1243, 1243, 1243, 1243, + /* 500 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1291, + /* 510 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 520 */ 1243, 1243, 1243, 1243, 
1243, 1243, 1243, 1243, 1243, 1243, + /* 530 */ 1243, 1243, 1243, 1389, 1243, 1243, 1243, 1243, 1243, 1243, + /* 540 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1579, 1379, + /* 550 */ 1243, 1243, 1243, 1243, 1627, 1243, 1243, 1243, 1243, 1243, + /* 560 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1619, + /* 570 */ 1335, 1425, 1243, 1428, 1265, 1243, 1255, 1243, 1243, }; /********** End of lemon-generated parsing tables *****************************/

@@ -167234,59 +172382,59 @@ /* 174 */ "idlist_opt ::= LP idlist RP",

/* 175 */ "idlist ::= idlist COMMA nm", /* 176 */ "idlist ::= nm", /* 177 */ "expr ::= LP expr RP", - /* 178 */ "expr ::= ID|INDEXED", - /* 179 */ "expr ::= JOIN_KW", - /* 180 */ "expr ::= nm DOT nm", - /* 181 */ "expr ::= nm DOT nm DOT nm", - /* 182 */ "term ::= NULL|FLOAT|BLOB", - /* 183 */ "term ::= STRING", - /* 184 */ "term ::= INTEGER", - /* 185 */ "expr ::= VARIABLE", - /* 186 */ "expr ::= expr COLLATE ID|STRING", - /* 187 */ "expr ::= CAST LP expr AS typetoken RP", - /* 188 */ "expr ::= ID|INDEXED LP distinct exprlist RP", - /* 189 */ "expr ::= ID|INDEXED LP STAR RP", - /* 190 */ "expr ::= ID|INDEXED LP distinct exprlist RP filter_over", - /* 191 */ "expr ::= ID|INDEXED LP STAR RP filter_over", - /* 192 */ "term ::= CTIME_KW", - /* 193 */ "expr ::= LP nexprlist COMMA expr RP", - /* 194 */ "expr ::= expr AND expr", - /* 195 */ "expr ::= expr OR expr", - /* 196 */ "expr ::= expr LT|GT|GE|LE expr", - /* 197 */ "expr ::= expr EQ|NE expr", - /* 198 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", - /* 199 */ "expr ::= expr PLUS|MINUS expr", - /* 200 */ "expr ::= expr STAR|SLASH|REM expr", - /* 201 */ "expr ::= expr CONCAT expr", - /* 202 */ "likeop ::= NOT LIKE_KW|MATCH", - /* 203 */ "expr ::= expr likeop expr", - /* 204 */ "expr ::= expr likeop expr ESCAPE expr", - /* 205 */ "expr ::= expr ISNULL|NOTNULL", - /* 206 */ "expr ::= expr NOT NULL", - /* 207 */ "expr ::= expr IS expr", - /* 208 */ "expr ::= expr IS NOT expr", - /* 209 */ "expr ::= expr IS NOT DISTINCT FROM expr", - /* 210 */ "expr ::= expr IS DISTINCT FROM expr", - /* 211 */ "expr ::= NOT expr", - /* 212 */ "expr ::= BITNOT expr", - /* 213 */ "expr ::= PLUS|MINUS expr", - /* 214 */ "expr ::= expr PTR expr", - /* 215 */ "between_op ::= BETWEEN", - /* 216 */ "between_op ::= NOT BETWEEN", - /* 217 */ "expr ::= expr between_op expr AND expr", - /* 218 */ "in_op ::= IN", - /* 219 */ "in_op ::= NOT IN", - /* 220 */ "expr ::= expr in_op LP exprlist RP", - /* 221 */ "expr ::= LP select RP", - /* 222 */ "expr ::= expr in_op LP select RP", - /* 223 */ "expr ::= expr in_op nm dbnm paren_exprlist", - /* 224 */ "expr ::= EXISTS LP select RP", - /* 225 */ "expr ::= CASE case_operand case_exprlist case_else END", - /* 226 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", - /* 227 */ "case_exprlist ::= WHEN expr THEN expr", - /* 228 */ "case_else ::= ELSE expr", - /* 229 */ "case_else ::=", - /* 230 */ "case_operand ::= expr", + /* 178 */ "expr ::= ID|INDEXED|JOIN_KW", + /* 179 */ "expr ::= nm DOT nm", + /* 180 */ "expr ::= nm DOT nm DOT nm", + /* 181 */ "term ::= NULL|FLOAT|BLOB", + /* 182 */ "term ::= STRING", + /* 183 */ "term ::= INTEGER", + /* 184 */ "expr ::= VARIABLE", + /* 185 */ "expr ::= expr COLLATE ID|STRING", + /* 186 */ "expr ::= CAST LP expr AS typetoken RP", + /* 187 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP", + /* 188 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP", + /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP", + /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over", + /* 191 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over", + /* 192 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over", + /* 193 */ "term ::= CTIME_KW", + /* 194 */ "expr ::= LP nexprlist COMMA expr RP", + /* 195 */ "expr ::= expr AND expr", + /* 196 */ "expr ::= expr OR expr", + /* 197 */ "expr ::= expr LT|GT|GE|LE expr", + /* 198 */ "expr ::= expr EQ|NE expr", + /* 199 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", + /* 200 */ 
"expr ::= expr PLUS|MINUS expr", + /* 201 */ "expr ::= expr STAR|SLASH|REM expr", + /* 202 */ "expr ::= expr CONCAT expr", + /* 203 */ "likeop ::= NOT LIKE_KW|MATCH", + /* 204 */ "expr ::= expr likeop expr", + /* 205 */ "expr ::= expr likeop expr ESCAPE expr", + /* 206 */ "expr ::= expr ISNULL|NOTNULL", + /* 207 */ "expr ::= expr NOT NULL", + /* 208 */ "expr ::= expr IS expr", + /* 209 */ "expr ::= expr IS NOT expr", + /* 210 */ "expr ::= expr IS NOT DISTINCT FROM expr", + /* 211 */ "expr ::= expr IS DISTINCT FROM expr", + /* 212 */ "expr ::= NOT expr", + /* 213 */ "expr ::= BITNOT expr", + /* 214 */ "expr ::= PLUS|MINUS expr", + /* 215 */ "expr ::= expr PTR expr", + /* 216 */ "between_op ::= BETWEEN", + /* 217 */ "between_op ::= NOT BETWEEN", + /* 218 */ "expr ::= expr between_op expr AND expr", + /* 219 */ "in_op ::= IN", + /* 220 */ "in_op ::= NOT IN", + /* 221 */ "expr ::= expr in_op LP exprlist RP", + /* 222 */ "expr ::= LP select RP", + /* 223 */ "expr ::= expr in_op LP select RP", + /* 224 */ "expr ::= expr in_op nm dbnm paren_exprlist", + /* 225 */ "expr ::= EXISTS LP select RP", + /* 226 */ "expr ::= CASE case_operand case_exprlist case_else END", + /* 227 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", + /* 228 */ "case_exprlist ::= WHEN expr THEN expr", + /* 229 */ "case_else ::= ELSE expr", + /* 230 */ "case_else ::=", /* 231 */ "case_operand ::=", /* 232 */ "exprlist ::=", /* 233 */ "nexprlist ::= nexprlist COMMA expr",

@@ -167367,100 +172515,100 @@ /* 307 */ "wqas ::= AS NOT MATERIALIZED",

/* 308 */ "wqitem ::= nm eidlist_opt wqas LP select RP", /* 309 */ "wqlist ::= wqitem", /* 310 */ "wqlist ::= wqlist COMMA wqitem", - /* 311 */ "windowdefn_list ::= windowdefn", - /* 312 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", - /* 313 */ "windowdefn ::= nm AS LP window RP", - /* 314 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", - /* 315 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", - /* 316 */ "window ::= ORDER BY sortlist frame_opt", - /* 317 */ "window ::= nm ORDER BY sortlist frame_opt", - /* 318 */ "window ::= frame_opt", - /* 319 */ "window ::= nm frame_opt", - /* 320 */ "frame_opt ::=", - /* 321 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", - /* 322 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", - /* 323 */ "range_or_rows ::= RANGE|ROWS|GROUPS", - /* 324 */ "frame_bound_s ::= frame_bound", - /* 325 */ "frame_bound_s ::= UNBOUNDED PRECEDING", - /* 326 */ "frame_bound_e ::= frame_bound", - /* 327 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", - /* 328 */ "frame_bound ::= expr PRECEDING|FOLLOWING", - /* 329 */ "frame_bound ::= CURRENT ROW", - /* 330 */ "frame_exclude_opt ::=", - /* 331 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", - /* 332 */ "frame_exclude ::= NO OTHERS", - /* 333 */ "frame_exclude ::= CURRENT ROW", - /* 334 */ "frame_exclude ::= GROUP|TIES", - /* 335 */ "window_clause ::= WINDOW windowdefn_list", - /* 336 */ "filter_over ::= filter_clause over_clause", - /* 337 */ "filter_over ::= over_clause", - /* 338 */ "filter_over ::= filter_clause", - /* 339 */ "over_clause ::= OVER LP window RP", - /* 340 */ "over_clause ::= OVER nm", - /* 341 */ "filter_clause ::= FILTER LP WHERE expr RP", - /* 342 */ "input ::= cmdlist", - /* 343 */ "cmdlist ::= cmdlist ecmd", - /* 344 */ "cmdlist ::= ecmd", - /* 345 */ "ecmd ::= SEMI", - /* 346 */ "ecmd ::= cmdx SEMI", - /* 347 */ "ecmd ::= explain cmdx SEMI", - /* 348 */ "trans_opt ::=", - /* 349 */ "trans_opt ::= TRANSACTION", - /* 350 */ "trans_opt ::= TRANSACTION nm", - /* 351 */ "savepoint_opt ::= SAVEPOINT", - /* 352 */ "savepoint_opt ::=", - /* 353 */ "cmd ::= create_table create_table_args", - /* 354 */ "table_option_set ::= table_option", - /* 355 */ "columnlist ::= columnlist COMMA columnname carglist", - /* 356 */ "columnlist ::= columnname carglist", - /* 357 */ "nm ::= ID|INDEXED", - /* 358 */ "nm ::= STRING", - /* 359 */ "nm ::= JOIN_KW", - /* 360 */ "typetoken ::= typename", - /* 361 */ "typename ::= ID|STRING", - /* 362 */ "signed ::= plus_num", - /* 363 */ "signed ::= minus_num", - /* 364 */ "carglist ::= carglist ccons", - /* 365 */ "carglist ::=", - /* 366 */ "ccons ::= NULL onconf", - /* 367 */ "ccons ::= GENERATED ALWAYS AS generated", - /* 368 */ "ccons ::= AS generated", - /* 369 */ "conslist_opt ::= COMMA conslist", - /* 370 */ "conslist ::= conslist tconscomma tcons", - /* 371 */ "conslist ::= tcons", - /* 372 */ "tconscomma ::=", - /* 373 */ "defer_subclause_opt ::= defer_subclause", - /* 374 */ "resolvetype ::= raisetype", - /* 375 */ "selectnowith ::= oneselect", - /* 376 */ "oneselect ::= values", - /* 377 */ "sclp ::= selcollist COMMA", - /* 378 */ "as ::= ID|STRING", - /* 379 */ "indexed_opt ::= indexed_by", - /* 380 */ "returning ::=", - /* 381 */ "expr ::= term", - /* 382 */ "likeop ::= LIKE_KW|MATCH", - /* 383 */ "exprlist ::= nexprlist", - /* 384 */ "nmnum ::= plus_num", - /* 385 */ "nmnum ::= nm", - /* 386 */ "nmnum ::= ON", - /* 387 */ "nmnum ::= DELETE", - /* 388 */ "nmnum ::= 
DEFAULT", - /* 389 */ "plus_num ::= INTEGER|FLOAT", - /* 390 */ "foreach_clause ::=", - /* 391 */ "foreach_clause ::= FOR EACH ROW", - /* 392 */ "trnm ::= nm", - /* 393 */ "tridxby ::=", - /* 394 */ "database_kw_opt ::= DATABASE", - /* 395 */ "database_kw_opt ::=", - /* 396 */ "kwcolumn_opt ::=", - /* 397 */ "kwcolumn_opt ::= COLUMNKW", - /* 398 */ "vtabarglist ::= vtabarg", - /* 399 */ "vtabarglist ::= vtabarglist COMMA vtabarg", - /* 400 */ "vtabarg ::= vtabarg vtabargtoken", - /* 401 */ "anylist ::=", - /* 402 */ "anylist ::= anylist LP anylist RP", - /* 403 */ "anylist ::= anylist ANY", - /* 404 */ "with ::=", + /* 311 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", + /* 312 */ "windowdefn ::= nm AS LP window RP", + /* 313 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", + /* 314 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", + /* 315 */ "window ::= ORDER BY sortlist frame_opt", + /* 316 */ "window ::= nm ORDER BY sortlist frame_opt", + /* 317 */ "window ::= nm frame_opt", + /* 318 */ "frame_opt ::=", + /* 319 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", + /* 320 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", + /* 321 */ "range_or_rows ::= RANGE|ROWS|GROUPS", + /* 322 */ "frame_bound_s ::= frame_bound", + /* 323 */ "frame_bound_s ::= UNBOUNDED PRECEDING", + /* 324 */ "frame_bound_e ::= frame_bound", + /* 325 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", + /* 326 */ "frame_bound ::= expr PRECEDING|FOLLOWING", + /* 327 */ "frame_bound ::= CURRENT ROW", + /* 328 */ "frame_exclude_opt ::=", + /* 329 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", + /* 330 */ "frame_exclude ::= NO OTHERS", + /* 331 */ "frame_exclude ::= CURRENT ROW", + /* 332 */ "frame_exclude ::= GROUP|TIES", + /* 333 */ "window_clause ::= WINDOW windowdefn_list", + /* 334 */ "filter_over ::= filter_clause over_clause", + /* 335 */ "filter_over ::= over_clause", + /* 336 */ "filter_over ::= filter_clause", + /* 337 */ "over_clause ::= OVER LP window RP", + /* 338 */ "over_clause ::= OVER nm", + /* 339 */ "filter_clause ::= FILTER LP WHERE expr RP", + /* 340 */ "input ::= cmdlist", + /* 341 */ "cmdlist ::= cmdlist ecmd", + /* 342 */ "cmdlist ::= ecmd", + /* 343 */ "ecmd ::= SEMI", + /* 344 */ "ecmd ::= cmdx SEMI", + /* 345 */ "ecmd ::= explain cmdx SEMI", + /* 346 */ "trans_opt ::=", + /* 347 */ "trans_opt ::= TRANSACTION", + /* 348 */ "trans_opt ::= TRANSACTION nm", + /* 349 */ "savepoint_opt ::= SAVEPOINT", + /* 350 */ "savepoint_opt ::=", + /* 351 */ "cmd ::= create_table create_table_args", + /* 352 */ "table_option_set ::= table_option", + /* 353 */ "columnlist ::= columnlist COMMA columnname carglist", + /* 354 */ "columnlist ::= columnname carglist", + /* 355 */ "nm ::= ID|INDEXED|JOIN_KW", + /* 356 */ "nm ::= STRING", + /* 357 */ "typetoken ::= typename", + /* 358 */ "typename ::= ID|STRING", + /* 359 */ "signed ::= plus_num", + /* 360 */ "signed ::= minus_num", + /* 361 */ "carglist ::= carglist ccons", + /* 362 */ "carglist ::=", + /* 363 */ "ccons ::= NULL onconf", + /* 364 */ "ccons ::= GENERATED ALWAYS AS generated", + /* 365 */ "ccons ::= AS generated", + /* 366 */ "conslist_opt ::= COMMA conslist", + /* 367 */ "conslist ::= conslist tconscomma tcons", + /* 368 */ "conslist ::= tcons", + /* 369 */ "tconscomma ::=", + /* 370 */ "defer_subclause_opt ::= defer_subclause", + /* 371 */ "resolvetype ::= raisetype", + /* 372 */ "selectnowith ::= oneselect", + /* 373 */ "oneselect ::= values", + /* 374 */ "sclp 
::= selcollist COMMA", + /* 375 */ "as ::= ID|STRING", + /* 376 */ "indexed_opt ::= indexed_by", + /* 377 */ "returning ::=", + /* 378 */ "expr ::= term", + /* 379 */ "likeop ::= LIKE_KW|MATCH", + /* 380 */ "case_operand ::= expr", + /* 381 */ "exprlist ::= nexprlist", + /* 382 */ "nmnum ::= plus_num", + /* 383 */ "nmnum ::= nm", + /* 384 */ "nmnum ::= ON", + /* 385 */ "nmnum ::= DELETE", + /* 386 */ "nmnum ::= DEFAULT", + /* 387 */ "plus_num ::= INTEGER|FLOAT", + /* 388 */ "foreach_clause ::=", + /* 389 */ "foreach_clause ::= FOR EACH ROW", + /* 390 */ "trnm ::= nm", + /* 391 */ "tridxby ::=", + /* 392 */ "database_kw_opt ::= DATABASE", + /* 393 */ "database_kw_opt ::=", + /* 394 */ "kwcolumn_opt ::=", + /* 395 */ "kwcolumn_opt ::= COLUMNKW", + /* 396 */ "vtabarglist ::= vtabarg", + /* 397 */ "vtabarglist ::= vtabarglist COMMA vtabarg", + /* 398 */ "vtabarg ::= vtabarg vtabargtoken", + /* 399 */ "anylist ::=", + /* 400 */ "anylist ::= anylist LP anylist RP", + /* 401 */ "anylist ::= anylist ANY", + /* 402 */ "with ::=", + /* 403 */ "windowdefn_list ::= windowdefn", + /* 404 */ "window ::= frame_opt", }; #endif /* NDEBUG */

@@ -168145,59 +173293,59 @@ 270, /* (174) idlist_opt ::= LP idlist RP */

263, /* (175) idlist ::= idlist COMMA nm */ 263, /* (176) idlist ::= nm */ 217, /* (177) expr ::= LP expr RP */ - 217, /* (178) expr ::= ID|INDEXED */ - 217, /* (179) expr ::= JOIN_KW */ - 217, /* (180) expr ::= nm DOT nm */ - 217, /* (181) expr ::= nm DOT nm DOT nm */ - 216, /* (182) term ::= NULL|FLOAT|BLOB */ - 216, /* (183) term ::= STRING */ - 216, /* (184) term ::= INTEGER */ - 217, /* (185) expr ::= VARIABLE */ - 217, /* (186) expr ::= expr COLLATE ID|STRING */ - 217, /* (187) expr ::= CAST LP expr AS typetoken RP */ - 217, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP */ - 217, /* (189) expr ::= ID|INDEXED LP STAR RP */ - 217, /* (190) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ - 217, /* (191) expr ::= ID|INDEXED LP STAR RP filter_over */ - 216, /* (192) term ::= CTIME_KW */ - 217, /* (193) expr ::= LP nexprlist COMMA expr RP */ - 217, /* (194) expr ::= expr AND expr */ - 217, /* (195) expr ::= expr OR expr */ - 217, /* (196) expr ::= expr LT|GT|GE|LE expr */ - 217, /* (197) expr ::= expr EQ|NE expr */ - 217, /* (198) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - 217, /* (199) expr ::= expr PLUS|MINUS expr */ - 217, /* (200) expr ::= expr STAR|SLASH|REM expr */ - 217, /* (201) expr ::= expr CONCAT expr */ - 274, /* (202) likeop ::= NOT LIKE_KW|MATCH */ - 217, /* (203) expr ::= expr likeop expr */ - 217, /* (204) expr ::= expr likeop expr ESCAPE expr */ - 217, /* (205) expr ::= expr ISNULL|NOTNULL */ - 217, /* (206) expr ::= expr NOT NULL */ - 217, /* (207) expr ::= expr IS expr */ - 217, /* (208) expr ::= expr IS NOT expr */ - 217, /* (209) expr ::= expr IS NOT DISTINCT FROM expr */ - 217, /* (210) expr ::= expr IS DISTINCT FROM expr */ - 217, /* (211) expr ::= NOT expr */ - 217, /* (212) expr ::= BITNOT expr */ - 217, /* (213) expr ::= PLUS|MINUS expr */ - 217, /* (214) expr ::= expr PTR expr */ - 275, /* (215) between_op ::= BETWEEN */ - 275, /* (216) between_op ::= NOT BETWEEN */ - 217, /* (217) expr ::= expr between_op expr AND expr */ - 276, /* (218) in_op ::= IN */ - 276, /* (219) in_op ::= NOT IN */ - 217, /* (220) expr ::= expr in_op LP exprlist RP */ - 217, /* (221) expr ::= LP select RP */ - 217, /* (222) expr ::= expr in_op LP select RP */ - 217, /* (223) expr ::= expr in_op nm dbnm paren_exprlist */ - 217, /* (224) expr ::= EXISTS LP select RP */ - 217, /* (225) expr ::= CASE case_operand case_exprlist case_else END */ - 279, /* (226) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - 279, /* (227) case_exprlist ::= WHEN expr THEN expr */ - 280, /* (228) case_else ::= ELSE expr */ - 280, /* (229) case_else ::= */ - 278, /* (230) case_operand ::= expr */ + 217, /* (178) expr ::= ID|INDEXED|JOIN_KW */ + 217, /* (179) expr ::= nm DOT nm */ + 217, /* (180) expr ::= nm DOT nm DOT nm */ + 216, /* (181) term ::= NULL|FLOAT|BLOB */ + 216, /* (182) term ::= STRING */ + 216, /* (183) term ::= INTEGER */ + 217, /* (184) expr ::= VARIABLE */ + 217, /* (185) expr ::= expr COLLATE ID|STRING */ + 217, /* (186) expr ::= CAST LP expr AS typetoken RP */ + 217, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + 217, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + 217, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + 217, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + 217, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + 217, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + 216, /* (193) term ::= CTIME_KW */ + 
217, /* (194) expr ::= LP nexprlist COMMA expr RP */ + 217, /* (195) expr ::= expr AND expr */ + 217, /* (196) expr ::= expr OR expr */ + 217, /* (197) expr ::= expr LT|GT|GE|LE expr */ + 217, /* (198) expr ::= expr EQ|NE expr */ + 217, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + 217, /* (200) expr ::= expr PLUS|MINUS expr */ + 217, /* (201) expr ::= expr STAR|SLASH|REM expr */ + 217, /* (202) expr ::= expr CONCAT expr */ + 274, /* (203) likeop ::= NOT LIKE_KW|MATCH */ + 217, /* (204) expr ::= expr likeop expr */ + 217, /* (205) expr ::= expr likeop expr ESCAPE expr */ + 217, /* (206) expr ::= expr ISNULL|NOTNULL */ + 217, /* (207) expr ::= expr NOT NULL */ + 217, /* (208) expr ::= expr IS expr */ + 217, /* (209) expr ::= expr IS NOT expr */ + 217, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */ + 217, /* (211) expr ::= expr IS DISTINCT FROM expr */ + 217, /* (212) expr ::= NOT expr */ + 217, /* (213) expr ::= BITNOT expr */ + 217, /* (214) expr ::= PLUS|MINUS expr */ + 217, /* (215) expr ::= expr PTR expr */ + 275, /* (216) between_op ::= BETWEEN */ + 275, /* (217) between_op ::= NOT BETWEEN */ + 217, /* (218) expr ::= expr between_op expr AND expr */ + 276, /* (219) in_op ::= IN */ + 276, /* (220) in_op ::= NOT IN */ + 217, /* (221) expr ::= expr in_op LP exprlist RP */ + 217, /* (222) expr ::= LP select RP */ + 217, /* (223) expr ::= expr in_op LP select RP */ + 217, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */ + 217, /* (225) expr ::= EXISTS LP select RP */ + 217, /* (226) expr ::= CASE case_operand case_exprlist case_else END */ + 279, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + 279, /* (228) case_exprlist ::= WHEN expr THEN expr */ + 280, /* (229) case_else ::= ELSE expr */ + 280, /* (230) case_else ::= */ 278, /* (231) case_operand ::= */ 261, /* (232) exprlist ::= */ 253, /* (233) nexprlist ::= nexprlist COMMA expr */

@@ -168278,100 +173426,100 @@ 305, /* (307) wqas ::= AS NOT MATERIALIZED */

304, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */ 241, /* (309) wqlist ::= wqitem */ 241, /* (310) wqlist ::= wqlist COMMA wqitem */ - 306, /* (311) windowdefn_list ::= windowdefn */ - 306, /* (312) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - 307, /* (313) windowdefn ::= nm AS LP window RP */ - 308, /* (314) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - 308, /* (315) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - 308, /* (316) window ::= ORDER BY sortlist frame_opt */ - 308, /* (317) window ::= nm ORDER BY sortlist frame_opt */ - 308, /* (318) window ::= frame_opt */ - 308, /* (319) window ::= nm frame_opt */ - 309, /* (320) frame_opt ::= */ - 309, /* (321) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - 309, /* (322) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - 313, /* (323) range_or_rows ::= RANGE|ROWS|GROUPS */ - 315, /* (324) frame_bound_s ::= frame_bound */ - 315, /* (325) frame_bound_s ::= UNBOUNDED PRECEDING */ - 316, /* (326) frame_bound_e ::= frame_bound */ - 316, /* (327) frame_bound_e ::= UNBOUNDED FOLLOWING */ - 314, /* (328) frame_bound ::= expr PRECEDING|FOLLOWING */ - 314, /* (329) frame_bound ::= CURRENT ROW */ - 317, /* (330) frame_exclude_opt ::= */ - 317, /* (331) frame_exclude_opt ::= EXCLUDE frame_exclude */ - 318, /* (332) frame_exclude ::= NO OTHERS */ - 318, /* (333) frame_exclude ::= CURRENT ROW */ - 318, /* (334) frame_exclude ::= GROUP|TIES */ - 251, /* (335) window_clause ::= WINDOW windowdefn_list */ - 273, /* (336) filter_over ::= filter_clause over_clause */ - 273, /* (337) filter_over ::= over_clause */ - 273, /* (338) filter_over ::= filter_clause */ - 312, /* (339) over_clause ::= OVER LP window RP */ - 312, /* (340) over_clause ::= OVER nm */ - 311, /* (341) filter_clause ::= FILTER LP WHERE expr RP */ - 185, /* (342) input ::= cmdlist */ - 186, /* (343) cmdlist ::= cmdlist ecmd */ - 186, /* (344) cmdlist ::= ecmd */ - 187, /* (345) ecmd ::= SEMI */ - 187, /* (346) ecmd ::= cmdx SEMI */ - 187, /* (347) ecmd ::= explain cmdx SEMI */ - 192, /* (348) trans_opt ::= */ - 192, /* (349) trans_opt ::= TRANSACTION */ - 192, /* (350) trans_opt ::= TRANSACTION nm */ - 194, /* (351) savepoint_opt ::= SAVEPOINT */ - 194, /* (352) savepoint_opt ::= */ - 190, /* (353) cmd ::= create_table create_table_args */ - 203, /* (354) table_option_set ::= table_option */ - 201, /* (355) columnlist ::= columnlist COMMA columnname carglist */ - 201, /* (356) columnlist ::= columnname carglist */ - 193, /* (357) nm ::= ID|INDEXED */ - 193, /* (358) nm ::= STRING */ - 193, /* (359) nm ::= JOIN_KW */ - 208, /* (360) typetoken ::= typename */ - 209, /* (361) typename ::= ID|STRING */ - 210, /* (362) signed ::= plus_num */ - 210, /* (363) signed ::= minus_num */ - 207, /* (364) carglist ::= carglist ccons */ - 207, /* (365) carglist ::= */ - 215, /* (366) ccons ::= NULL onconf */ - 215, /* (367) ccons ::= GENERATED ALWAYS AS generated */ - 215, /* (368) ccons ::= AS generated */ - 202, /* (369) conslist_opt ::= COMMA conslist */ - 228, /* (370) conslist ::= conslist tconscomma tcons */ - 228, /* (371) conslist ::= tcons */ - 229, /* (372) tconscomma ::= */ - 233, /* (373) defer_subclause_opt ::= defer_subclause */ - 235, /* (374) resolvetype ::= raisetype */ - 239, /* (375) selectnowith ::= oneselect */ - 240, /* (376) oneselect ::= values */ - 254, /* (377) sclp ::= selcollist COMMA */ - 255, /* (378) as ::= ID|STRING */ - 264, /* (379) indexed_opt ::= indexed_by */ - 
272, /* (380) returning ::= */ - 217, /* (381) expr ::= term */ - 274, /* (382) likeop ::= LIKE_KW|MATCH */ - 261, /* (383) exprlist ::= nexprlist */ - 284, /* (384) nmnum ::= plus_num */ - 284, /* (385) nmnum ::= nm */ - 284, /* (386) nmnum ::= ON */ - 284, /* (387) nmnum ::= DELETE */ - 284, /* (388) nmnum ::= DEFAULT */ - 211, /* (389) plus_num ::= INTEGER|FLOAT */ - 289, /* (390) foreach_clause ::= */ - 289, /* (391) foreach_clause ::= FOR EACH ROW */ - 292, /* (392) trnm ::= nm */ - 293, /* (393) tridxby ::= */ - 294, /* (394) database_kw_opt ::= DATABASE */ - 294, /* (395) database_kw_opt ::= */ - 297, /* (396) kwcolumn_opt ::= */ - 297, /* (397) kwcolumn_opt ::= COLUMNKW */ - 299, /* (398) vtabarglist ::= vtabarg */ - 299, /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */ - 300, /* (400) vtabarg ::= vtabarg vtabargtoken */ - 303, /* (401) anylist ::= */ - 303, /* (402) anylist ::= anylist LP anylist RP */ - 303, /* (403) anylist ::= anylist ANY */ - 266, /* (404) with ::= */ + 306, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + 307, /* (312) windowdefn ::= nm AS LP window RP */ + 308, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + 308, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + 308, /* (315) window ::= ORDER BY sortlist frame_opt */ + 308, /* (316) window ::= nm ORDER BY sortlist frame_opt */ + 308, /* (317) window ::= nm frame_opt */ + 309, /* (318) frame_opt ::= */ + 309, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + 309, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + 313, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ + 315, /* (322) frame_bound_s ::= frame_bound */ + 315, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ + 316, /* (324) frame_bound_e ::= frame_bound */ + 316, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ + 314, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ + 314, /* (327) frame_bound ::= CURRENT ROW */ + 317, /* (328) frame_exclude_opt ::= */ + 317, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ + 318, /* (330) frame_exclude ::= NO OTHERS */ + 318, /* (331) frame_exclude ::= CURRENT ROW */ + 318, /* (332) frame_exclude ::= GROUP|TIES */ + 251, /* (333) window_clause ::= WINDOW windowdefn_list */ + 273, /* (334) filter_over ::= filter_clause over_clause */ + 273, /* (335) filter_over ::= over_clause */ + 273, /* (336) filter_over ::= filter_clause */ + 312, /* (337) over_clause ::= OVER LP window RP */ + 312, /* (338) over_clause ::= OVER nm */ + 311, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ + 185, /* (340) input ::= cmdlist */ + 186, /* (341) cmdlist ::= cmdlist ecmd */ + 186, /* (342) cmdlist ::= ecmd */ + 187, /* (343) ecmd ::= SEMI */ + 187, /* (344) ecmd ::= cmdx SEMI */ + 187, /* (345) ecmd ::= explain cmdx SEMI */ + 192, /* (346) trans_opt ::= */ + 192, /* (347) trans_opt ::= TRANSACTION */ + 192, /* (348) trans_opt ::= TRANSACTION nm */ + 194, /* (349) savepoint_opt ::= SAVEPOINT */ + 194, /* (350) savepoint_opt ::= */ + 190, /* (351) cmd ::= create_table create_table_args */ + 203, /* (352) table_option_set ::= table_option */ + 201, /* (353) columnlist ::= columnlist COMMA columnname carglist */ + 201, /* (354) columnlist ::= columnname carglist */ + 193, /* (355) nm ::= ID|INDEXED|JOIN_KW */ + 193, /* (356) nm ::= STRING */ + 208, /* (357) typetoken ::= typename */ + 209, /* (358) typename ::= ID|STRING */ + 210, /* (359) signed ::= plus_num */ + 210, /* (360) 
signed ::= minus_num */ + 207, /* (361) carglist ::= carglist ccons */ + 207, /* (362) carglist ::= */ + 215, /* (363) ccons ::= NULL onconf */ + 215, /* (364) ccons ::= GENERATED ALWAYS AS generated */ + 215, /* (365) ccons ::= AS generated */ + 202, /* (366) conslist_opt ::= COMMA conslist */ + 228, /* (367) conslist ::= conslist tconscomma tcons */ + 228, /* (368) conslist ::= tcons */ + 229, /* (369) tconscomma ::= */ + 233, /* (370) defer_subclause_opt ::= defer_subclause */ + 235, /* (371) resolvetype ::= raisetype */ + 239, /* (372) selectnowith ::= oneselect */ + 240, /* (373) oneselect ::= values */ + 254, /* (374) sclp ::= selcollist COMMA */ + 255, /* (375) as ::= ID|STRING */ + 264, /* (376) indexed_opt ::= indexed_by */ + 272, /* (377) returning ::= */ + 217, /* (378) expr ::= term */ + 274, /* (379) likeop ::= LIKE_KW|MATCH */ + 278, /* (380) case_operand ::= expr */ + 261, /* (381) exprlist ::= nexprlist */ + 284, /* (382) nmnum ::= plus_num */ + 284, /* (383) nmnum ::= nm */ + 284, /* (384) nmnum ::= ON */ + 284, /* (385) nmnum ::= DELETE */ + 284, /* (386) nmnum ::= DEFAULT */ + 211, /* (387) plus_num ::= INTEGER|FLOAT */ + 289, /* (388) foreach_clause ::= */ + 289, /* (389) foreach_clause ::= FOR EACH ROW */ + 292, /* (390) trnm ::= nm */ + 293, /* (391) tridxby ::= */ + 294, /* (392) database_kw_opt ::= DATABASE */ + 294, /* (393) database_kw_opt ::= */ + 297, /* (394) kwcolumn_opt ::= */ + 297, /* (395) kwcolumn_opt ::= COLUMNKW */ + 299, /* (396) vtabarglist ::= vtabarg */ + 299, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ + 300, /* (398) vtabarg ::= vtabarg vtabargtoken */ + 303, /* (399) anylist ::= */ + 303, /* (400) anylist ::= anylist LP anylist RP */ + 303, /* (401) anylist ::= anylist ANY */ + 266, /* (402) with ::= */ + 306, /* (403) windowdefn_list ::= windowdefn */ + 308, /* (404) window ::= frame_opt */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number

@@ -168555,59 +173703,59 @@ -3, /* (174) idlist_opt ::= LP idlist RP */

-3, /* (175) idlist ::= idlist COMMA nm */ -1, /* (176) idlist ::= nm */ -3, /* (177) expr ::= LP expr RP */ - -1, /* (178) expr ::= ID|INDEXED */ - -1, /* (179) expr ::= JOIN_KW */ - -3, /* (180) expr ::= nm DOT nm */ - -5, /* (181) expr ::= nm DOT nm DOT nm */ - -1, /* (182) term ::= NULL|FLOAT|BLOB */ - -1, /* (183) term ::= STRING */ - -1, /* (184) term ::= INTEGER */ - -1, /* (185) expr ::= VARIABLE */ - -3, /* (186) expr ::= expr COLLATE ID|STRING */ - -6, /* (187) expr ::= CAST LP expr AS typetoken RP */ - -5, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP */ - -4, /* (189) expr ::= ID|INDEXED LP STAR RP */ - -6, /* (190) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ - -5, /* (191) expr ::= ID|INDEXED LP STAR RP filter_over */ - -1, /* (192) term ::= CTIME_KW */ - -5, /* (193) expr ::= LP nexprlist COMMA expr RP */ - -3, /* (194) expr ::= expr AND expr */ - -3, /* (195) expr ::= expr OR expr */ - -3, /* (196) expr ::= expr LT|GT|GE|LE expr */ - -3, /* (197) expr ::= expr EQ|NE expr */ - -3, /* (198) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - -3, /* (199) expr ::= expr PLUS|MINUS expr */ - -3, /* (200) expr ::= expr STAR|SLASH|REM expr */ - -3, /* (201) expr ::= expr CONCAT expr */ - -2, /* (202) likeop ::= NOT LIKE_KW|MATCH */ - -3, /* (203) expr ::= expr likeop expr */ - -5, /* (204) expr ::= expr likeop expr ESCAPE expr */ - -2, /* (205) expr ::= expr ISNULL|NOTNULL */ - -3, /* (206) expr ::= expr NOT NULL */ - -3, /* (207) expr ::= expr IS expr */ - -4, /* (208) expr ::= expr IS NOT expr */ - -6, /* (209) expr ::= expr IS NOT DISTINCT FROM expr */ - -5, /* (210) expr ::= expr IS DISTINCT FROM expr */ - -2, /* (211) expr ::= NOT expr */ - -2, /* (212) expr ::= BITNOT expr */ - -2, /* (213) expr ::= PLUS|MINUS expr */ - -3, /* (214) expr ::= expr PTR expr */ - -1, /* (215) between_op ::= BETWEEN */ - -2, /* (216) between_op ::= NOT BETWEEN */ - -5, /* (217) expr ::= expr between_op expr AND expr */ - -1, /* (218) in_op ::= IN */ - -2, /* (219) in_op ::= NOT IN */ - -5, /* (220) expr ::= expr in_op LP exprlist RP */ - -3, /* (221) expr ::= LP select RP */ - -5, /* (222) expr ::= expr in_op LP select RP */ - -5, /* (223) expr ::= expr in_op nm dbnm paren_exprlist */ - -4, /* (224) expr ::= EXISTS LP select RP */ - -5, /* (225) expr ::= CASE case_operand case_exprlist case_else END */ - -5, /* (226) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - -4, /* (227) case_exprlist ::= WHEN expr THEN expr */ - -2, /* (228) case_else ::= ELSE expr */ - 0, /* (229) case_else ::= */ - -1, /* (230) case_operand ::= expr */ + -1, /* (178) expr ::= ID|INDEXED|JOIN_KW */ + -3, /* (179) expr ::= nm DOT nm */ + -5, /* (180) expr ::= nm DOT nm DOT nm */ + -1, /* (181) term ::= NULL|FLOAT|BLOB */ + -1, /* (182) term ::= STRING */ + -1, /* (183) term ::= INTEGER */ + -1, /* (184) expr ::= VARIABLE */ + -3, /* (185) expr ::= expr COLLATE ID|STRING */ + -6, /* (186) expr ::= CAST LP expr AS typetoken RP */ + -5, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + -8, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + -4, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + -6, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + -9, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + -5, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + -1, /* (193) term ::= CTIME_KW */ + -5, /* (194) expr ::= LP nexprlist COMMA expr RP */ + -3, /* (195) expr 
::= expr AND expr */ + -3, /* (196) expr ::= expr OR expr */ + -3, /* (197) expr ::= expr LT|GT|GE|LE expr */ + -3, /* (198) expr ::= expr EQ|NE expr */ + -3, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + -3, /* (200) expr ::= expr PLUS|MINUS expr */ + -3, /* (201) expr ::= expr STAR|SLASH|REM expr */ + -3, /* (202) expr ::= expr CONCAT expr */ + -2, /* (203) likeop ::= NOT LIKE_KW|MATCH */ + -3, /* (204) expr ::= expr likeop expr */ + -5, /* (205) expr ::= expr likeop expr ESCAPE expr */ + -2, /* (206) expr ::= expr ISNULL|NOTNULL */ + -3, /* (207) expr ::= expr NOT NULL */ + -3, /* (208) expr ::= expr IS expr */ + -4, /* (209) expr ::= expr IS NOT expr */ + -6, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */ + -5, /* (211) expr ::= expr IS DISTINCT FROM expr */ + -2, /* (212) expr ::= NOT expr */ + -2, /* (213) expr ::= BITNOT expr */ + -2, /* (214) expr ::= PLUS|MINUS expr */ + -3, /* (215) expr ::= expr PTR expr */ + -1, /* (216) between_op ::= BETWEEN */ + -2, /* (217) between_op ::= NOT BETWEEN */ + -5, /* (218) expr ::= expr between_op expr AND expr */ + -1, /* (219) in_op ::= IN */ + -2, /* (220) in_op ::= NOT IN */ + -5, /* (221) expr ::= expr in_op LP exprlist RP */ + -3, /* (222) expr ::= LP select RP */ + -5, /* (223) expr ::= expr in_op LP select RP */ + -5, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */ + -4, /* (225) expr ::= EXISTS LP select RP */ + -5, /* (226) expr ::= CASE case_operand case_exprlist case_else END */ + -5, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + -4, /* (228) case_exprlist ::= WHEN expr THEN expr */ + -2, /* (229) case_else ::= ELSE expr */ + 0, /* (230) case_else ::= */ 0, /* (231) case_operand ::= */ 0, /* (232) exprlist ::= */ -3, /* (233) nexprlist ::= nexprlist COMMA expr */

@@ -168688,100 +173836,100 @@ -3, /* (307) wqas ::= AS NOT MATERIALIZED */

-6, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */ -1, /* (309) wqlist ::= wqitem */ -3, /* (310) wqlist ::= wqlist COMMA wqitem */ - -1, /* (311) windowdefn_list ::= windowdefn */ - -3, /* (312) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - -5, /* (313) windowdefn ::= nm AS LP window RP */ - -5, /* (314) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - -6, /* (315) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - -4, /* (316) window ::= ORDER BY sortlist frame_opt */ - -5, /* (317) window ::= nm ORDER BY sortlist frame_opt */ - -1, /* (318) window ::= frame_opt */ - -2, /* (319) window ::= nm frame_opt */ - 0, /* (320) frame_opt ::= */ - -3, /* (321) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - -6, /* (322) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - -1, /* (323) range_or_rows ::= RANGE|ROWS|GROUPS */ - -1, /* (324) frame_bound_s ::= frame_bound */ - -2, /* (325) frame_bound_s ::= UNBOUNDED PRECEDING */ - -1, /* (326) frame_bound_e ::= frame_bound */ - -2, /* (327) frame_bound_e ::= UNBOUNDED FOLLOWING */ - -2, /* (328) frame_bound ::= expr PRECEDING|FOLLOWING */ - -2, /* (329) frame_bound ::= CURRENT ROW */ - 0, /* (330) frame_exclude_opt ::= */ - -2, /* (331) frame_exclude_opt ::= EXCLUDE frame_exclude */ - -2, /* (332) frame_exclude ::= NO OTHERS */ - -2, /* (333) frame_exclude ::= CURRENT ROW */ - -1, /* (334) frame_exclude ::= GROUP|TIES */ - -2, /* (335) window_clause ::= WINDOW windowdefn_list */ - -2, /* (336) filter_over ::= filter_clause over_clause */ - -1, /* (337) filter_over ::= over_clause */ - -1, /* (338) filter_over ::= filter_clause */ - -4, /* (339) over_clause ::= OVER LP window RP */ - -2, /* (340) over_clause ::= OVER nm */ - -5, /* (341) filter_clause ::= FILTER LP WHERE expr RP */ - -1, /* (342) input ::= cmdlist */ - -2, /* (343) cmdlist ::= cmdlist ecmd */ - -1, /* (344) cmdlist ::= ecmd */ - -1, /* (345) ecmd ::= SEMI */ - -2, /* (346) ecmd ::= cmdx SEMI */ - -3, /* (347) ecmd ::= explain cmdx SEMI */ - 0, /* (348) trans_opt ::= */ - -1, /* (349) trans_opt ::= TRANSACTION */ - -2, /* (350) trans_opt ::= TRANSACTION nm */ - -1, /* (351) savepoint_opt ::= SAVEPOINT */ - 0, /* (352) savepoint_opt ::= */ - -2, /* (353) cmd ::= create_table create_table_args */ - -1, /* (354) table_option_set ::= table_option */ - -4, /* (355) columnlist ::= columnlist COMMA columnname carglist */ - -2, /* (356) columnlist ::= columnname carglist */ - -1, /* (357) nm ::= ID|INDEXED */ - -1, /* (358) nm ::= STRING */ - -1, /* (359) nm ::= JOIN_KW */ - -1, /* (360) typetoken ::= typename */ - -1, /* (361) typename ::= ID|STRING */ - -1, /* (362) signed ::= plus_num */ - -1, /* (363) signed ::= minus_num */ - -2, /* (364) carglist ::= carglist ccons */ - 0, /* (365) carglist ::= */ - -2, /* (366) ccons ::= NULL onconf */ - -4, /* (367) ccons ::= GENERATED ALWAYS AS generated */ - -2, /* (368) ccons ::= AS generated */ - -2, /* (369) conslist_opt ::= COMMA conslist */ - -3, /* (370) conslist ::= conslist tconscomma tcons */ - -1, /* (371) conslist ::= tcons */ - 0, /* (372) tconscomma ::= */ - -1, /* (373) defer_subclause_opt ::= defer_subclause */ - -1, /* (374) resolvetype ::= raisetype */ - -1, /* (375) selectnowith ::= oneselect */ - -1, /* (376) oneselect ::= values */ - -2, /* (377) sclp ::= selcollist COMMA */ - -1, /* (378) as ::= ID|STRING */ - -1, /* (379) indexed_opt ::= indexed_by */ - 0, /* (380) returning ::= */ - -1, /* (381) expr ::= term */ - -1, /* (382) 
likeop ::= LIKE_KW|MATCH */ - -1, /* (383) exprlist ::= nexprlist */ - -1, /* (384) nmnum ::= plus_num */ - -1, /* (385) nmnum ::= nm */ - -1, /* (386) nmnum ::= ON */ - -1, /* (387) nmnum ::= DELETE */ - -1, /* (388) nmnum ::= DEFAULT */ - -1, /* (389) plus_num ::= INTEGER|FLOAT */ - 0, /* (390) foreach_clause ::= */ - -3, /* (391) foreach_clause ::= FOR EACH ROW */ - -1, /* (392) trnm ::= nm */ - 0, /* (393) tridxby ::= */ - -1, /* (394) database_kw_opt ::= DATABASE */ - 0, /* (395) database_kw_opt ::= */ - 0, /* (396) kwcolumn_opt ::= */ - -1, /* (397) kwcolumn_opt ::= COLUMNKW */ - -1, /* (398) vtabarglist ::= vtabarg */ - -3, /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */ - -2, /* (400) vtabarg ::= vtabarg vtabargtoken */ - 0, /* (401) anylist ::= */ - -4, /* (402) anylist ::= anylist LP anylist RP */ - -2, /* (403) anylist ::= anylist ANY */ - 0, /* (404) with ::= */ + -3, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + -5, /* (312) windowdefn ::= nm AS LP window RP */ + -5, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + -6, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + -4, /* (315) window ::= ORDER BY sortlist frame_opt */ + -5, /* (316) window ::= nm ORDER BY sortlist frame_opt */ + -2, /* (317) window ::= nm frame_opt */ + 0, /* (318) frame_opt ::= */ + -3, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + -6, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + -1, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ + -1, /* (322) frame_bound_s ::= frame_bound */ + -2, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ + -1, /* (324) frame_bound_e ::= frame_bound */ + -2, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ + -2, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ + -2, /* (327) frame_bound ::= CURRENT ROW */ + 0, /* (328) frame_exclude_opt ::= */ + -2, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ + -2, /* (330) frame_exclude ::= NO OTHERS */ + -2, /* (331) frame_exclude ::= CURRENT ROW */ + -1, /* (332) frame_exclude ::= GROUP|TIES */ + -2, /* (333) window_clause ::= WINDOW windowdefn_list */ + -2, /* (334) filter_over ::= filter_clause over_clause */ + -1, /* (335) filter_over ::= over_clause */ + -1, /* (336) filter_over ::= filter_clause */ + -4, /* (337) over_clause ::= OVER LP window RP */ + -2, /* (338) over_clause ::= OVER nm */ + -5, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ + -1, /* (340) input ::= cmdlist */ + -2, /* (341) cmdlist ::= cmdlist ecmd */ + -1, /* (342) cmdlist ::= ecmd */ + -1, /* (343) ecmd ::= SEMI */ + -2, /* (344) ecmd ::= cmdx SEMI */ + -3, /* (345) ecmd ::= explain cmdx SEMI */ + 0, /* (346) trans_opt ::= */ + -1, /* (347) trans_opt ::= TRANSACTION */ + -2, /* (348) trans_opt ::= TRANSACTION nm */ + -1, /* (349) savepoint_opt ::= SAVEPOINT */ + 0, /* (350) savepoint_opt ::= */ + -2, /* (351) cmd ::= create_table create_table_args */ + -1, /* (352) table_option_set ::= table_option */ + -4, /* (353) columnlist ::= columnlist COMMA columnname carglist */ + -2, /* (354) columnlist ::= columnname carglist */ + -1, /* (355) nm ::= ID|INDEXED|JOIN_KW */ + -1, /* (356) nm ::= STRING */ + -1, /* (357) typetoken ::= typename */ + -1, /* (358) typename ::= ID|STRING */ + -1, /* (359) signed ::= plus_num */ + -1, /* (360) signed ::= minus_num */ + -2, /* (361) carglist ::= carglist ccons */ + 0, /* (362) carglist ::= */ + -2, /* (363) ccons ::= NULL onconf */ + -4, /* (364) ccons ::= 
GENERATED ALWAYS AS generated */ + -2, /* (365) ccons ::= AS generated */ + -2, /* (366) conslist_opt ::= COMMA conslist */ + -3, /* (367) conslist ::= conslist tconscomma tcons */ + -1, /* (368) conslist ::= tcons */ + 0, /* (369) tconscomma ::= */ + -1, /* (370) defer_subclause_opt ::= defer_subclause */ + -1, /* (371) resolvetype ::= raisetype */ + -1, /* (372) selectnowith ::= oneselect */ + -1, /* (373) oneselect ::= values */ + -2, /* (374) sclp ::= selcollist COMMA */ + -1, /* (375) as ::= ID|STRING */ + -1, /* (376) indexed_opt ::= indexed_by */ + 0, /* (377) returning ::= */ + -1, /* (378) expr ::= term */ + -1, /* (379) likeop ::= LIKE_KW|MATCH */ + -1, /* (380) case_operand ::= expr */ + -1, /* (381) exprlist ::= nexprlist */ + -1, /* (382) nmnum ::= plus_num */ + -1, /* (383) nmnum ::= nm */ + -1, /* (384) nmnum ::= ON */ + -1, /* (385) nmnum ::= DELETE */ + -1, /* (386) nmnum ::= DEFAULT */ + -1, /* (387) plus_num ::= INTEGER|FLOAT */ + 0, /* (388) foreach_clause ::= */ + -3, /* (389) foreach_clause ::= FOR EACH ROW */ + -1, /* (390) trnm ::= nm */ + 0, /* (391) tridxby ::= */ + -1, /* (392) database_kw_opt ::= DATABASE */ + 0, /* (393) database_kw_opt ::= */ + 0, /* (394) kwcolumn_opt ::= */ + -1, /* (395) kwcolumn_opt ::= COLUMNKW */ + -1, /* (396) vtabarglist ::= vtabarg */ + -3, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ + -2, /* (398) vtabarg ::= vtabarg vtabargtoken */ + 0, /* (399) anylist ::= */ + -4, /* (400) anylist ::= anylist LP anylist RP */ + -2, /* (401) anylist ::= anylist ANY */ + 0, /* (402) with ::= */ + -1, /* (403) windowdefn_list ::= windowdefn */ + -1, /* (404) window ::= frame_opt */ }; static void yy_accept(yyParser*); /* Forward Declaration */

@@ -168824,10 +173972,10 @@ */

/********** Begin reduce actions **********************************************/ YYMINORTYPE yylhsminor; case 0: /* explain ::= EXPLAIN */ -{ pParse->explain = 1; } +{ if( pParse->pReprepare==0 ) pParse->explain = 1; } break; case 1: /* explain ::= EXPLAIN QUERY PLAN */ -{ pParse->explain = 2; } +{ if( pParse->pReprepare==0 ) pParse->explain = 2; } break; case 2: /* cmdx ::= cmd */ { sqlite3FinishCoding(pParse); }

@@ -168841,7 +173989,7 @@ break;

case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); - case 323: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==323); + case 321: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==321); {yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */

@@ -169062,8 +174210,8 @@ {yymsp[-1].minor.yy394 = yymsp[0].minor.yy394;}

break; case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80); - case 216: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==216); - case 219: /* in_op ::= NOT IN */ yytestcase(yyruleno==219); + case 217: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==217); + case 220: /* in_op ::= NOT IN */ yytestcase(yyruleno==220); case 245: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==245); {yymsp[-1].minor.yy394 = 1;} break;

@@ -169137,7 +174285,6 @@ Select *p = yymsp[0].minor.yy47;

if( p ){ parserDoubleLinkSelect(pParse, p); } - yymsp[0].minor.yy47 = p; /*A-overwrites-X*/ } break; case 88: /* selectnowith ::= selectnowith multiselect_op oneselect */

@@ -169229,14 +174376,17 @@ break;

case 101: /* selcollist ::= sclp scanpt STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); + sqlite3ExprSetErrorOffset(p, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy322, p); } break; case 102: /* selcollist ::= sclp scanpt nm DOT STAR */ { - Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0); - Expr *pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0); - Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); + Expr *pRight, *pLeft, *pDot; + pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0); + sqlite3ExprSetErrorOffset(pRight, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); + pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0); + pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, pDot); } break;

@@ -169287,7 +174437,7 @@ case 113: /* seltablist ::= stl_prefix LP seltablist RP as on_using */

{ if( yymsp[-5].minor.yy131==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy561.pOn==0 && yymsp[0].minor.yy561.pUsing==0 ){ yymsp[-5].minor.yy131 = yymsp[-3].minor.yy131; - }else if( yymsp[-3].minor.yy131->nSrc==1 ){ + }else if( ALWAYS(yymsp[-3].minor.yy131!=0) && yymsp[-3].minor.yy131->nSrc==1 ){ yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561); if( yymsp[-5].minor.yy131 ){ SrcItem *pNew = &yymsp[-5].minor.yy131->a[yymsp[-5].minor.yy131->nSrc-1];

@@ -169415,7 +174565,7 @@ case 144: /* having_opt ::= */

case 146: /* limit_opt ::= */ yytestcase(yyruleno==146); case 151: /* where_opt ::= */ yytestcase(yyruleno==151); case 153: /* where_opt_ret ::= */ yytestcase(yyruleno==153); - case 229: /* case_else ::= */ yytestcase(yyruleno==229); + case 230: /* case_else ::= */ yytestcase(yyruleno==230); case 231: /* case_operand ::= */ yytestcase(yyruleno==231); case 250: /* vinto ::= */ yytestcase(yyruleno==250); {yymsp[1].minor.yy528 = 0;}

@@ -169423,7 +174573,7 @@ break;

case 145: /* having_opt ::= HAVING expr */ case 152: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==152); case 154: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==154); - case 228: /* case_else ::= ELSE expr */ yytestcase(yyruleno==228); + case 229: /* case_else ::= ELSE expr */ yytestcase(yyruleno==229); case 249: /* vinto ::= INTO expr */ yytestcase(yyruleno==249); {yymsp[-1].minor.yy528 = yymsp[0].minor.yy528;} break;

@@ -169536,11 +174686,10 @@ break;

case 177: /* expr ::= LP expr RP */ {yymsp[-2].minor.yy528 = yymsp[-1].minor.yy528;} break; - case 178: /* expr ::= ID|INDEXED */ - case 179: /* expr ::= JOIN_KW */ yytestcase(yyruleno==179); + case 178: /* expr ::= ID|INDEXED|JOIN_KW */ {yymsp[0].minor.yy528=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 180: /* expr ::= nm DOT nm */ + case 179: /* expr ::= nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0);

@@ -169548,7 +174697,7 @@ yylhsminor.yy528 = sqlite3PExpr(pParse, TK_DOT, temp1, temp2);

} yymsp[-2].minor.yy528 = yylhsminor.yy528; break; - case 181: /* expr ::= nm DOT nm DOT nm */ + case 180: /* expr ::= nm DOT nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-4].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0);

@@ -169561,18 +174710,18 @@ yylhsminor.yy528 = sqlite3PExpr(pParse, TK_DOT, temp1, temp4);

} yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 182: /* term ::= NULL|FLOAT|BLOB */ - case 183: /* term ::= STRING */ yytestcase(yyruleno==183); + case 181: /* term ::= NULL|FLOAT|BLOB */ + case 182: /* term ::= STRING */ yytestcase(yyruleno==182); {yymsp[0].minor.yy528=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 184: /* term ::= INTEGER */ + case 183: /* term ::= INTEGER */ { yylhsminor.yy528 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); if( yylhsminor.yy528 ) yylhsminor.yy528->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); } yymsp[0].minor.yy528 = yylhsminor.yy528; break; - case 185: /* expr ::= VARIABLE */ + case 184: /* expr ::= VARIABLE */ { if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ u32 n = yymsp[0].minor.yy0.n;

@@ -169594,50 +174743,65 @@ }

} } break; - case 186: /* expr ::= expr COLLATE ID|STRING */ + case 185: /* expr ::= expr COLLATE ID|STRING */ { yymsp[-2].minor.yy528 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy528, &yymsp[0].minor.yy0, 1); } break; - case 187: /* expr ::= CAST LP expr AS typetoken RP */ + case 186: /* expr ::= CAST LP expr AS typetoken RP */ { yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy528, yymsp[-3].minor.yy528, 0); } break; - case 188: /* expr ::= ID|INDEXED LP distinct exprlist RP */ + case 187: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy394); } yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 189: /* expr ::= ID|INDEXED LP STAR RP */ + case 188: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ +{ + yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy322, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy394); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-1].minor.yy322); +} + yymsp[-7].minor.yy528 = yylhsminor.yy528; + break; + case 189: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); } yymsp[-3].minor.yy528 = yylhsminor.yy528; break; - case 190: /* expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy322, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy394); sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); } yymsp[-5].minor.yy528 = yylhsminor.yy528; break; - case 191: /* expr ::= ID|INDEXED LP STAR RP filter_over */ + case 191: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ +{ + yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy322, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy394); + sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-2].minor.yy322); +} + yymsp[-8].minor.yy528 = yylhsminor.yy528; + break; + case 192: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); } yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 192: /* term ::= CTIME_KW */ + case 193: /* term ::= CTIME_KW */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); } yymsp[0].minor.yy528 = yylhsminor.yy528; break; - case 193: /* expr ::= LP nexprlist COMMA expr RP */ + case 194: /* expr ::= LP nexprlist COMMA expr RP */ { ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528); yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);

@@ -169651,22 +174815,22 @@ sqlite3ExprListDelete(pParse->db, pList);

} } break; - case 194: /* expr ::= expr AND expr */ + case 195: /* expr ::= expr AND expr */ {yymsp[-2].minor.yy528=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} break; - case 195: /* expr ::= expr OR expr */ - case 196: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==196); - case 197: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==197); - case 198: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==198); - case 199: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==199); - case 200: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==200); - case 201: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==201); + case 196: /* expr ::= expr OR expr */ + case 197: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==197); + case 198: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==198); + case 199: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==199); + case 200: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==200); + case 201: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==201); + case 202: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==202); {yymsp[-2].minor.yy528=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} break; - case 202: /* likeop ::= NOT LIKE_KW|MATCH */ + case 203: /* likeop ::= NOT LIKE_KW|MATCH */ {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} break; - case 203: /* expr ::= expr likeop expr */ + case 204: /* expr ::= expr likeop expr */ { ExprList *pList; int bNot = yymsp[-1].minor.yy0.n & 0x80000000;

@@ -169678,7 +174842,7 @@ if( bNot ) yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-2].minor.yy528, 0);

if( yymsp[-2].minor.yy528 ) yymsp[-2].minor.yy528->flags |= EP_InfixFunc; } break; - case 204: /* expr ::= expr likeop expr ESCAPE expr */ + case 205: /* expr ::= expr likeop expr ESCAPE expr */ { ExprList *pList; int bNot = yymsp[-3].minor.yy0.n & 0x80000000;

@@ -169691,47 +174855,47 @@ if( bNot ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);

if( yymsp[-4].minor.yy528 ) yymsp[-4].minor.yy528->flags |= EP_InfixFunc; } break; - case 205: /* expr ::= expr ISNULL|NOTNULL */ + case 206: /* expr ::= expr ISNULL|NOTNULL */ {yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy528,0);} break; - case 206: /* expr ::= expr NOT NULL */ + case 207: /* expr ::= expr NOT NULL */ {yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy528,0);} break; - case 207: /* expr ::= expr IS expr */ + case 208: /* expr ::= expr IS expr */ { yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-2].minor.yy528, TK_ISNULL); } break; - case 208: /* expr ::= expr IS NOT expr */ + case 209: /* expr ::= expr IS NOT expr */ { yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-3].minor.yy528, TK_NOTNULL); } break; - case 209: /* expr ::= expr IS NOT DISTINCT FROM expr */ + case 210: /* expr ::= expr IS NOT DISTINCT FROM expr */ { yymsp[-5].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-5].minor.yy528, TK_ISNULL); } break; - case 210: /* expr ::= expr IS DISTINCT FROM expr */ + case 211: /* expr ::= expr IS DISTINCT FROM expr */ { yymsp[-4].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-4].minor.yy528, TK_NOTNULL); } break; - case 211: /* expr ::= NOT expr */ - case 212: /* expr ::= BITNOT expr */ yytestcase(yyruleno==212); + case 212: /* expr ::= NOT expr */ + case 213: /* expr ::= BITNOT expr */ yytestcase(yyruleno==213); {yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy528, 0);/*A-overwrites-B*/} break; - case 213: /* expr ::= PLUS|MINUS expr */ + case 214: /* expr ::= PLUS|MINUS expr */ { yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy528, 0); /*A-overwrites-B*/ } break; - case 214: /* expr ::= expr PTR expr */ + case 215: /* expr ::= expr PTR expr */ { ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy528); pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy528);

@@ -169739,11 +174903,11 @@ yylhsminor.yy528 = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0, 0);

} yymsp[-2].minor.yy528 = yylhsminor.yy528; break; - case 215: /* between_op ::= BETWEEN */ - case 218: /* in_op ::= IN */ yytestcase(yyruleno==218); + case 216: /* between_op ::= BETWEEN */ + case 219: /* in_op ::= IN */ yytestcase(yyruleno==219); {yymsp[0].minor.yy394 = 0;} break; - case 217: /* expr ::= expr between_op expr AND expr */ + case 218: /* expr ::= expr between_op expr AND expr */ { ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528);

@@ -169756,7 +174920,7 @@ }

if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 220: /* expr ::= expr in_op LP exprlist RP */ + case 221: /* expr ::= expr in_op LP exprlist RP */ { if( yymsp[-1].minor.yy322==0 ){ /* Expressions of the form

@@ -169777,6 +174941,11 @@ yymsp[-1].minor.yy322->a[0].pExpr = 0;

sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0); yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy528, pRHS); + }else if( yymsp[-1].minor.yy322->nExpr==1 && pRHS->op==TK_SELECT ){ + yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pRHS->x.pSelect); + pRHS->x.pSelect = 0; + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); }else{ yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); if( yymsp[-4].minor.yy528==0 ){

@@ -169797,20 +174966,20 @@ if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);

} } break; - case 221: /* expr ::= LP select RP */ + case 222: /* expr ::= LP select RP */ { yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy528, yymsp[-1].minor.yy47); } break; - case 222: /* expr ::= expr in_op LP select RP */ + case 223: /* expr ::= expr in_op LP select RP */ { yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, yymsp[-1].minor.yy47); if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 223: /* expr ::= expr in_op nm dbnm paren_exprlist */ + case 224: /* expr ::= expr in_op nm dbnm paren_exprlist */ { SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0);

@@ -169820,14 +174989,14 @@ sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pSelect);

if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 224: /* expr ::= EXISTS LP select RP */ + case 225: /* expr ::= EXISTS LP select RP */ { Expr *p; p = yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy47); } break; - case 225: /* expr ::= CASE case_operand case_exprlist case_else END */ + case 226: /* expr ::= CASE case_operand case_exprlist case_else END */ { yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy528, 0); if( yymsp[-4].minor.yy528 ){

@@ -169839,20 +175008,17 @@ sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy528);

} } break; - case 226: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ + case 227: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[-2].minor.yy528); yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[0].minor.yy528); } break; - case 227: /* case_exprlist ::= WHEN expr THEN expr */ + case 228: /* case_exprlist ::= WHEN expr THEN expr */ { yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322, yymsp[0].minor.yy528); } - break; - case 230: /* case_operand ::= expr */ -{yymsp[0].minor.yy528 = yymsp[0].minor.yy528; /*A-overwrites-X*/} break; case 233: /* nexprlist ::= nexprlist COMMA expr */ {yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[0].minor.yy528);}

@@ -170129,11 +175295,7 @@ {

yymsp[-2].minor.yy521 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy521, yymsp[0].minor.yy385); } break; - case 311: /* windowdefn_list ::= windowdefn */ -{ yylhsminor.yy41 = yymsp[0].minor.yy41; } - yymsp[0].minor.yy41 = yylhsminor.yy41; - break; - case 312: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ + case 311: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ { assert( yymsp[0].minor.yy41!=0 ); sqlite3WindowChain(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy41);

@@ -170142,7 +175304,7 @@ yylhsminor.yy41 = yymsp[0].minor.yy41;

} yymsp[-2].minor.yy41 = yylhsminor.yy41; break; - case 313: /* windowdefn ::= nm AS LP window RP */ + case 312: /* windowdefn ::= nm AS LP window RP */ { if( ALWAYS(yymsp[-1].minor.yy41) ){ yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n);

@@ -170151,90 +175313,83 @@ yylhsminor.yy41 = yymsp[-1].minor.yy41;

} yymsp[-4].minor.yy41 = yylhsminor.yy41; break; - case 314: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + case 313: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ { yymsp[-4].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, 0); } break; - case 315: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + case 314: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ { yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, &yymsp[-5].minor.yy0); } yymsp[-5].minor.yy41 = yylhsminor.yy41; break; - case 316: /* window ::= ORDER BY sortlist frame_opt */ + case 315: /* window ::= ORDER BY sortlist frame_opt */ { yymsp[-3].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, 0); } break; - case 317: /* window ::= nm ORDER BY sortlist frame_opt */ + case 316: /* window ::= nm ORDER BY sortlist frame_opt */ { yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0); } yymsp[-4].minor.yy41 = yylhsminor.yy41; break; - case 318: /* window ::= frame_opt */ - case 337: /* filter_over ::= over_clause */ yytestcase(yyruleno==337); -{ - yylhsminor.yy41 = yymsp[0].minor.yy41; -} - yymsp[0].minor.yy41 = yylhsminor.yy41; - break; - case 319: /* window ::= nm frame_opt */ + case 317: /* window ::= nm frame_opt */ { yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, 0, &yymsp[-1].minor.yy0); } yymsp[-1].minor.yy41 = yylhsminor.yy41; break; - case 320: /* frame_opt ::= */ + case 318: /* frame_opt ::= */ { yymsp[1].minor.yy41 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); } break; - case 321: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + case 319: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ { yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy394, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy516); } yymsp[-2].minor.yy41 = yylhsminor.yy41; break; - case 322: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + case 320: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ { yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy394, yymsp[-3].minor.yy595.eType, yymsp[-3].minor.yy595.pExpr, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, yymsp[0].minor.yy516); } yymsp[-5].minor.yy41 = yylhsminor.yy41; break; - case 324: /* frame_bound_s ::= frame_bound */ - case 326: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==326); + case 322: /* frame_bound_s ::= frame_bound */ + case 324: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==324); {yylhsminor.yy595 = yymsp[0].minor.yy595;} yymsp[0].minor.yy595 = yylhsminor.yy595; break; - case 325: /* frame_bound_s ::= UNBOUNDED PRECEDING */ - case 327: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==327); - case 329: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==329); + case 323: /* frame_bound_s ::= UNBOUNDED PRECEDING */ + case 325: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==325); + case 327: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==327); {yylhsminor.yy595.eType = yymsp[-1].major; yylhsminor.yy595.pExpr = 0;} yymsp[-1].minor.yy595 = yylhsminor.yy595; break; - case 328: /* frame_bound ::= expr 
PRECEDING|FOLLOWING */ + case 326: /* frame_bound ::= expr PRECEDING|FOLLOWING */ {yylhsminor.yy595.eType = yymsp[0].major; yylhsminor.yy595.pExpr = yymsp[-1].minor.yy528;} yymsp[-1].minor.yy595 = yylhsminor.yy595; break; - case 330: /* frame_exclude_opt ::= */ + case 328: /* frame_exclude_opt ::= */ {yymsp[1].minor.yy516 = 0;} break; - case 331: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ + case 329: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ {yymsp[-1].minor.yy516 = yymsp[0].minor.yy516;} break; - case 332: /* frame_exclude ::= NO OTHERS */ - case 333: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==333); + case 330: /* frame_exclude ::= NO OTHERS */ + case 331: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==331); {yymsp[-1].minor.yy516 = yymsp[-1].major; /*A-overwrites-X*/} break; - case 334: /* frame_exclude ::= GROUP|TIES */ + case 332: /* frame_exclude ::= GROUP|TIES */ {yymsp[0].minor.yy516 = yymsp[0].major; /*A-overwrites-X*/} break; - case 335: /* window_clause ::= WINDOW windowdefn_list */ + case 333: /* window_clause ::= WINDOW windowdefn_list */ { yymsp[-1].minor.yy41 = yymsp[0].minor.yy41; } break; - case 336: /* filter_over ::= filter_clause over_clause */ + case 334: /* filter_over ::= filter_clause over_clause */ { if( yymsp[0].minor.yy41 ){ yymsp[0].minor.yy41->pFilter = yymsp[-1].minor.yy528;

@@ -170245,7 +175400,13 @@ yylhsminor.yy41 = yymsp[0].minor.yy41;

} yymsp[-1].minor.yy41 = yylhsminor.yy41; break; - case 338: /* filter_over ::= filter_clause */ + case 335: /* filter_over ::= over_clause */ +{ + yylhsminor.yy41 = yymsp[0].minor.yy41; +} + yymsp[0].minor.yy41 = yylhsminor.yy41; + break; + case 336: /* filter_over ::= filter_clause */ { yylhsminor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); if( yylhsminor.yy41 ){

@@ -170257,13 +175418,13 @@ }

} yymsp[0].minor.yy41 = yylhsminor.yy41; break; - case 339: /* over_clause ::= OVER LP window RP */ + case 337: /* over_clause ::= OVER LP window RP */ { yymsp[-3].minor.yy41 = yymsp[-1].minor.yy41; assert( yymsp[-3].minor.yy41!=0 ); } break; - case 340: /* over_clause ::= OVER nm */ + case 338: /* over_clause ::= OVER nm */ { yymsp[-1].minor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); if( yymsp[-1].minor.yy41 ){

@@ -170271,73 +175432,75 @@ yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[0].minor.yy0.z, yymsp[0].minor.yy0.n);

} } break; - case 341: /* filter_clause ::= FILTER LP WHERE expr RP */ + case 339: /* filter_clause ::= FILTER LP WHERE expr RP */ { yymsp[-4].minor.yy528 = yymsp[-1].minor.yy528; } break; default: - /* (342) input ::= cmdlist */ yytestcase(yyruleno==342); - /* (343) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==343); - /* (344) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=344); - /* (345) ecmd ::= SEMI */ yytestcase(yyruleno==345); - /* (346) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==346); - /* (347) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=347); - /* (348) trans_opt ::= */ yytestcase(yyruleno==348); - /* (349) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==349); - /* (350) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==350); - /* (351) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==351); - /* (352) savepoint_opt ::= */ yytestcase(yyruleno==352); - /* (353) cmd ::= create_table create_table_args */ yytestcase(yyruleno==353); - /* (354) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=354); - /* (355) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==355); - /* (356) columnlist ::= columnname carglist */ yytestcase(yyruleno==356); - /* (357) nm ::= ID|INDEXED */ yytestcase(yyruleno==357); - /* (358) nm ::= STRING */ yytestcase(yyruleno==358); - /* (359) nm ::= JOIN_KW */ yytestcase(yyruleno==359); - /* (360) typetoken ::= typename */ yytestcase(yyruleno==360); - /* (361) typename ::= ID|STRING */ yytestcase(yyruleno==361); - /* (362) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=362); - /* (363) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=363); - /* (364) carglist ::= carglist ccons */ yytestcase(yyruleno==364); - /* (365) carglist ::= */ yytestcase(yyruleno==365); - /* (366) ccons ::= NULL onconf */ yytestcase(yyruleno==366); - /* (367) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==367); - /* (368) ccons ::= AS generated */ yytestcase(yyruleno==368); - /* (369) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==369); - /* (370) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==370); - /* (371) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=371); - /* (372) tconscomma ::= */ yytestcase(yyruleno==372); - /* (373) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=373); - /* (374) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=374); - /* (375) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=375); - /* (376) oneselect ::= values */ yytestcase(yyruleno==376); - /* (377) sclp ::= selcollist COMMA */ yytestcase(yyruleno==377); - /* (378) as ::= ID|STRING */ yytestcase(yyruleno==378); - /* (379) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=379); - /* (380) returning ::= */ yytestcase(yyruleno==380); - /* (381) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=381); - /* (382) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==382); - /* (383) exprlist ::= nexprlist */ yytestcase(yyruleno==383); - /* (384) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=384); - /* (385) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=385); - /* (386) nmnum ::= ON */ yytestcase(yyruleno==386); - /* (387) nmnum ::= DELETE */ yytestcase(yyruleno==387); - /* (388) nmnum ::= DEFAULT */ yytestcase(yyruleno==388); - /* (389) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==389); - /* (390) foreach_clause ::= */ yytestcase(yyruleno==390); - /* (391) foreach_clause ::= FOR EACH ROW */ 
yytestcase(yyruleno==391); - /* (392) trnm ::= nm */ yytestcase(yyruleno==392); - /* (393) tridxby ::= */ yytestcase(yyruleno==393); - /* (394) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==394); - /* (395) database_kw_opt ::= */ yytestcase(yyruleno==395); - /* (396) kwcolumn_opt ::= */ yytestcase(yyruleno==396); - /* (397) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==397); - /* (398) vtabarglist ::= vtabarg */ yytestcase(yyruleno==398); - /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==399); - /* (400) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==400); - /* (401) anylist ::= */ yytestcase(yyruleno==401); - /* (402) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==402); - /* (403) anylist ::= anylist ANY */ yytestcase(yyruleno==403); - /* (404) with ::= */ yytestcase(yyruleno==404); + /* (340) input ::= cmdlist */ yytestcase(yyruleno==340); + /* (341) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==341); + /* (342) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=342); + /* (343) ecmd ::= SEMI */ yytestcase(yyruleno==343); + /* (344) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==344); + /* (345) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=345); + /* (346) trans_opt ::= */ yytestcase(yyruleno==346); + /* (347) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==347); + /* (348) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==348); + /* (349) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==349); + /* (350) savepoint_opt ::= */ yytestcase(yyruleno==350); + /* (351) cmd ::= create_table create_table_args */ yytestcase(yyruleno==351); + /* (352) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=352); + /* (353) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==353); + /* (354) columnlist ::= columnname carglist */ yytestcase(yyruleno==354); + /* (355) nm ::= ID|INDEXED|JOIN_KW */ yytestcase(yyruleno==355); + /* (356) nm ::= STRING */ yytestcase(yyruleno==356); + /* (357) typetoken ::= typename */ yytestcase(yyruleno==357); + /* (358) typename ::= ID|STRING */ yytestcase(yyruleno==358); + /* (359) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=359); + /* (360) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=360); + /* (361) carglist ::= carglist ccons */ yytestcase(yyruleno==361); + /* (362) carglist ::= */ yytestcase(yyruleno==362); + /* (363) ccons ::= NULL onconf */ yytestcase(yyruleno==363); + /* (364) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==364); + /* (365) ccons ::= AS generated */ yytestcase(yyruleno==365); + /* (366) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==366); + /* (367) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==367); + /* (368) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=368); + /* (369) tconscomma ::= */ yytestcase(yyruleno==369); + /* (370) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=370); + /* (371) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=371); + /* (372) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=372); + /* (373) oneselect ::= values */ yytestcase(yyruleno==373); + /* (374) sclp ::= selcollist COMMA */ yytestcase(yyruleno==374); + /* (375) as ::= ID|STRING */ yytestcase(yyruleno==375); + /* (376) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=376); + /* (377) returning ::= */ yytestcase(yyruleno==377); + /* (378) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=378); + /* 
(379) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==379); + /* (380) case_operand ::= expr */ yytestcase(yyruleno==380); + /* (381) exprlist ::= nexprlist */ yytestcase(yyruleno==381); + /* (382) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=382); + /* (383) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=383); + /* (384) nmnum ::= ON */ yytestcase(yyruleno==384); + /* (385) nmnum ::= DELETE */ yytestcase(yyruleno==385); + /* (386) nmnum ::= DEFAULT */ yytestcase(yyruleno==386); + /* (387) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==387); + /* (388) foreach_clause ::= */ yytestcase(yyruleno==388); + /* (389) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==389); + /* (390) trnm ::= nm */ yytestcase(yyruleno==390); + /* (391) tridxby ::= */ yytestcase(yyruleno==391); + /* (392) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==392); + /* (393) database_kw_opt ::= */ yytestcase(yyruleno==393); + /* (394) kwcolumn_opt ::= */ yytestcase(yyruleno==394); + /* (395) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==395); + /* (396) vtabarglist ::= vtabarg */ yytestcase(yyruleno==396); + /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==397); + /* (398) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==398); + /* (399) anylist ::= */ yytestcase(yyruleno==399); + /* (400) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==400); + /* (401) anylist ::= anylist ANY */ yytestcase(yyruleno==401); + /* (402) with ::= */ yytestcase(yyruleno==402); + /* (403) windowdefn_list ::= windowdefn (OPTIMIZED OUT) */ assert(yyruleno!=403); + /* (404) window ::= frame_opt (OPTIMIZED OUT) */ assert(yyruleno!=404); break; /********** End reduce actions ************************************************/ };

@@ -170913,7 +176076,7 @@ };

/* aKWNext[] forms the hash collision chain. If aKWHash[i]==0 ** then the i-th keyword has no more hash collisions. Otherwise, ** the next keyword with the same hash is aKWHash[i]-1. */ -static const unsigned char aKWNext[147] = { +static const unsigned char aKWNext[148] = {0, 0, 0, 0, 0, 4, 0, 43, 0, 0, 106, 114, 0, 0, 0, 2, 0, 0, 143, 0, 0, 0, 13, 0, 0, 0, 0, 141, 0, 0, 119, 52, 0, 0, 137, 12, 0, 0, 62, 0,

@@ -170928,7 +176091,7 @@ 99, 44, 0, 55, 0, 76, 0, 95, 32, 33, 57, 25, 0,

102, 0, 0, 87, }; /* aKWLen[i] is the length (in bytes) of the i-th keyword */ -static const unsigned char aKWLen[147] = { +static const unsigned char aKWLen[148] = {0, 7, 7, 5, 4, 6, 4, 5, 3, 6, 7, 3, 6, 6, 7, 7, 3, 8, 2, 6, 5, 4, 4, 3, 10, 4, 7, 6, 9, 4, 2, 6, 5, 9, 9, 4, 7, 3, 2, 4,

@@ -170944,7 +176107,7 @@ 2, 9, 3, 7,

}; /* aKWOffset[i] is the index into zKWText[] of the start of ** the text for the i-th keyword. */ -static const unsigned short int aKWOffset[147] = { +static const unsigned short int aKWOffset[148] = {0, 0, 2, 2, 8, 9, 14, 16, 20, 23, 25, 25, 29, 33, 36, 41, 46, 48, 53, 54, 59, 62, 65, 67, 69, 78, 81, 86, 90, 90, 94, 99, 101, 105, 111, 119, 123, 123, 123, 126,

@@ -170959,7 +176122,7 @@ 585, 588, 597, 602, 610, 610, 614, 623, 628, 633, 639, 642, 645,

648, 650, 655, 659, }; /* aKWCode[i] is the parser symbol code for the i-th keyword */ -static const unsigned char aKWCode[147] = { +static const unsigned char aKWCode[148] = {0, TK_REINDEX, TK_INDEXED, TK_INDEX, TK_DESC, TK_ESCAPE, TK_EACH, TK_CHECK, TK_KEY, TK_BEFORE, TK_FOREIGN, TK_FOR, TK_IGNORE, TK_LIKE_KW, TK_EXPLAIN, TK_INSTEAD,

@@ -171126,185 +176289,185 @@ ** return the integer n (the length of the token). */

static int keywordCode(const char *z, int n, int *pType){ int i, j; const char *zKW; - if( n>=2 ){ - i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127; - for(i=((int)aKWHash[i])-1; i>=0; i=((int)aKWNext[i])-1){ - if( aKWLen[i]!=n ) continue; - zKW = &zKWText[aKWOffset[i]]; + assert( n>=2 ); + i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127; + for(i=(int)aKWHash[i]; i>0; i=aKWNext[i]){ + if( aKWLen[i]!=n ) continue; + zKW = &zKWText[aKWOffset[i]]; #ifdef SQLITE_ASCII - if( (z[0]&~0x20)!=zKW[0] ) continue; - if( (z[1]&~0x20)!=zKW[1] ) continue; - j = 2; - while( j<n && (z[j]&~0x20)==zKW[j] ){ j++; } + if( (z[0]&~0x20)!=zKW[0] ) continue; + if( (z[1]&~0x20)!=zKW[1] ) continue; + j = 2; + while( j<n && (z[j]&~0x20)==zKW[j] ){ j++; } #endif #ifdef SQLITE_EBCDIC - if( toupper(z[0])!=zKW[0] ) continue; - if( toupper(z[1])!=zKW[1] ) continue; - j = 2; - while( j<n && toupper(z[j])==zKW[j] ){ j++; } + if( toupper(z[0])!=zKW[0] ) continue; + if( toupper(z[1])!=zKW[1] ) continue; + j = 2; + while( j<n && toupper(z[j])==zKW[j] ){ j++; } #endif - if( j<n ) continue; - testcase( i==0 ); /* REINDEX */ - testcase( i==1 ); /* INDEXED */ - testcase( i==2 ); /* INDEX */ - testcase( i==3 ); /* DESC */ - testcase( i==4 ); /* ESCAPE */ - testcase( i==5 ); /* EACH */ - testcase( i==6 ); /* CHECK */ - testcase( i==7 ); /* KEY */ - testcase( i==8 ); /* BEFORE */ - testcase( i==9 ); /* FOREIGN */ - testcase( i==10 ); /* FOR */ - testcase( i==11 ); /* IGNORE */ - testcase( i==12 ); /* REGEXP */ - testcase( i==13 ); /* EXPLAIN */ - testcase( i==14 ); /* INSTEAD */ - testcase( i==15 ); /* ADD */ - testcase( i==16 ); /* DATABASE */ - testcase( i==17 ); /* AS */ - testcase( i==18 ); /* SELECT */ - testcase( i==19 ); /* TABLE */ - testcase( i==20 ); /* LEFT */ - testcase( i==21 ); /* THEN */ - testcase( i==22 ); /* END */ - testcase( i==23 ); /* DEFERRABLE */ - testcase( i==24 ); /* ELSE */ - testcase( i==25 ); /* EXCLUDE */ - testcase( i==26 ); /* DELETE */ - testcase( i==27 ); /* TEMPORARY */ - testcase( i==28 ); /* TEMP */ - testcase( i==29 ); /* OR */ - testcase( i==30 ); /* ISNULL */ - testcase( i==31 ); /* NULLS */ - testcase( i==32 ); /* SAVEPOINT */ - testcase( i==33 ); /* INTERSECT */ - testcase( i==34 ); /* TIES */ - testcase( i==35 ); /* NOTNULL */ - testcase( i==36 ); /* NOT */ - testcase( i==37 ); /* NO */ - testcase( i==38 ); /* NULL */ - testcase( i==39 ); /* LIKE */ - testcase( i==40 ); /* EXCEPT */ - testcase( i==41 ); /* TRANSACTION */ - testcase( i==42 ); /* ACTION */ - testcase( i==43 ); /* ON */ - testcase( i==44 ); /* NATURAL */ - testcase( i==45 ); /* ALTER */ - testcase( i==46 ); /* RAISE */ - testcase( i==47 ); /* EXCLUSIVE */ - testcase( i==48 ); /* EXISTS */ - testcase( i==49 ); /* CONSTRAINT */ - testcase( i==50 ); /* INTO */ - testcase( i==51 ); /* OFFSET */ - testcase( i==52 ); /* OF */ - testcase( i==53 ); /* SET */ - testcase( i==54 ); /* TRIGGER */ - testcase( i==55 ); /* RANGE */ - testcase( i==56 ); /* GENERATED */ - testcase( i==57 ); /* DETACH */ - testcase( i==58 ); /* HAVING */ - testcase( i==59 ); /* GLOB */ - testcase( i==60 ); /* BEGIN */ - testcase( i==61 ); /* INNER */ - testcase( i==62 ); /* REFERENCES */ - testcase( i==63 ); /* UNIQUE */ - testcase( i==64 ); /* QUERY */ - testcase( i==65 ); /* WITHOUT */ - testcase( i==66 ); /* WITH */ - testcase( i==67 ); /* OUTER */ - testcase( i==68 ); /* RELEASE */ - testcase( i==69 ); /* ATTACH */ - testcase( i==70 ); /* BETWEEN */ - testcase( i==71 ); /* NOTHING */ - testcase( i==72 ); /* GROUPS */ - testcase( i==73 ); 
/* GROUP */ - testcase( i==74 ); /* CASCADE */ - testcase( i==75 ); /* ASC */ - testcase( i==76 ); /* DEFAULT */ - testcase( i==77 ); /* CASE */ - testcase( i==78 ); /* COLLATE */ - testcase( i==79 ); /* CREATE */ - testcase( i==80 ); /* CURRENT_DATE */ - testcase( i==81 ); /* IMMEDIATE */ - testcase( i==82 ); /* JOIN */ - testcase( i==83 ); /* INSERT */ - testcase( i==84 ); /* MATCH */ - testcase( i==85 ); /* PLAN */ - testcase( i==86 ); /* ANALYZE */ - testcase( i==87 ); /* PRAGMA */ - testcase( i==88 ); /* MATERIALIZED */ - testcase( i==89 ); /* DEFERRED */ - testcase( i==90 ); /* DISTINCT */ - testcase( i==91 ); /* IS */ - testcase( i==92 ); /* UPDATE */ - testcase( i==93 ); /* VALUES */ - testcase( i==94 ); /* VIRTUAL */ - testcase( i==95 ); /* ALWAYS */ - testcase( i==96 ); /* WHEN */ - testcase( i==97 ); /* WHERE */ - testcase( i==98 ); /* RECURSIVE */ - testcase( i==99 ); /* ABORT */ - testcase( i==100 ); /* AFTER */ - testcase( i==101 ); /* RENAME */ - testcase( i==102 ); /* AND */ - testcase( i==103 ); /* DROP */ - testcase( i==104 ); /* PARTITION */ - testcase( i==105 ); /* AUTOINCREMENT */ - testcase( i==106 ); /* TO */ - testcase( i==107 ); /* IN */ - testcase( i==108 ); /* CAST */ - testcase( i==109 ); /* COLUMN */ - testcase( i==110 ); /* COMMIT */ - testcase( i==111 ); /* CONFLICT */ - testcase( i==112 ); /* CROSS */ - testcase( i==113 ); /* CURRENT_TIMESTAMP */ - testcase( i==114 ); /* CURRENT_TIME */ - testcase( i==115 ); /* CURRENT */ - testcase( i==116 ); /* PRECEDING */ - testcase( i==117 ); /* FAIL */ - testcase( i==118 ); /* LAST */ - testcase( i==119 ); /* FILTER */ - testcase( i==120 ); /* REPLACE */ - testcase( i==121 ); /* FIRST */ - testcase( i==122 ); /* FOLLOWING */ - testcase( i==123 ); /* FROM */ - testcase( i==124 ); /* FULL */ - testcase( i==125 ); /* LIMIT */ - testcase( i==126 ); /* IF */ - testcase( i==127 ); /* ORDER */ - testcase( i==128 ); /* RESTRICT */ - testcase( i==129 ); /* OTHERS */ - testcase( i==130 ); /* OVER */ - testcase( i==131 ); /* RETURNING */ - testcase( i==132 ); /* RIGHT */ - testcase( i==133 ); /* ROLLBACK */ - testcase( i==134 ); /* ROWS */ - testcase( i==135 ); /* ROW */ - testcase( i==136 ); /* UNBOUNDED */ - testcase( i==137 ); /* UNION */ - testcase( i==138 ); /* USING */ - testcase( i==139 ); /* VACUUM */ - testcase( i==140 ); /* VIEW */ - testcase( i==141 ); /* WINDOW */ - testcase( i==142 ); /* DO */ - testcase( i==143 ); /* BY */ - testcase( i==144 ); /* INITIALLY */ - testcase( i==145 ); /* ALL */ - testcase( i==146 ); /* PRIMARY */ - *pType = aKWCode[i]; - break; - } + if( j<n ) continue; + testcase( i==1 ); /* REINDEX */ + testcase( i==2 ); /* INDEXED */ + testcase( i==3 ); /* INDEX */ + testcase( i==4 ); /* DESC */ + testcase( i==5 ); /* ESCAPE */ + testcase( i==6 ); /* EACH */ + testcase( i==7 ); /* CHECK */ + testcase( i==8 ); /* KEY */ + testcase( i==9 ); /* BEFORE */ + testcase( i==10 ); /* FOREIGN */ + testcase( i==11 ); /* FOR */ + testcase( i==12 ); /* IGNORE */ + testcase( i==13 ); /* REGEXP */ + testcase( i==14 ); /* EXPLAIN */ + testcase( i==15 ); /* INSTEAD */ + testcase( i==16 ); /* ADD */ + testcase( i==17 ); /* DATABASE */ + testcase( i==18 ); /* AS */ + testcase( i==19 ); /* SELECT */ + testcase( i==20 ); /* TABLE */ + testcase( i==21 ); /* LEFT */ + testcase( i==22 ); /* THEN */ + testcase( i==23 ); /* END */ + testcase( i==24 ); /* DEFERRABLE */ + testcase( i==25 ); /* ELSE */ + testcase( i==26 ); /* EXCLUDE */ + testcase( i==27 ); /* DELETE */ + testcase( i==28 ); /* TEMPORARY */ + testcase( i==29 ); 
/* TEMP */ + testcase( i==30 ); /* OR */ + testcase( i==31 ); /* ISNULL */ + testcase( i==32 ); /* NULLS */ + testcase( i==33 ); /* SAVEPOINT */ + testcase( i==34 ); /* INTERSECT */ + testcase( i==35 ); /* TIES */ + testcase( i==36 ); /* NOTNULL */ + testcase( i==37 ); /* NOT */ + testcase( i==38 ); /* NO */ + testcase( i==39 ); /* NULL */ + testcase( i==40 ); /* LIKE */ + testcase( i==41 ); /* EXCEPT */ + testcase( i==42 ); /* TRANSACTION */ + testcase( i==43 ); /* ACTION */ + testcase( i==44 ); /* ON */ + testcase( i==45 ); /* NATURAL */ + testcase( i==46 ); /* ALTER */ + testcase( i==47 ); /* RAISE */ + testcase( i==48 ); /* EXCLUSIVE */ + testcase( i==49 ); /* EXISTS */ + testcase( i==50 ); /* CONSTRAINT */ + testcase( i==51 ); /* INTO */ + testcase( i==52 ); /* OFFSET */ + testcase( i==53 ); /* OF */ + testcase( i==54 ); /* SET */ + testcase( i==55 ); /* TRIGGER */ + testcase( i==56 ); /* RANGE */ + testcase( i==57 ); /* GENERATED */ + testcase( i==58 ); /* DETACH */ + testcase( i==59 ); /* HAVING */ + testcase( i==60 ); /* GLOB */ + testcase( i==61 ); /* BEGIN */ + testcase( i==62 ); /* INNER */ + testcase( i==63 ); /* REFERENCES */ + testcase( i==64 ); /* UNIQUE */ + testcase( i==65 ); /* QUERY */ + testcase( i==66 ); /* WITHOUT */ + testcase( i==67 ); /* WITH */ + testcase( i==68 ); /* OUTER */ + testcase( i==69 ); /* RELEASE */ + testcase( i==70 ); /* ATTACH */ + testcase( i==71 ); /* BETWEEN */ + testcase( i==72 ); /* NOTHING */ + testcase( i==73 ); /* GROUPS */ + testcase( i==74 ); /* GROUP */ + testcase( i==75 ); /* CASCADE */ + testcase( i==76 ); /* ASC */ + testcase( i==77 ); /* DEFAULT */ + testcase( i==78 ); /* CASE */ + testcase( i==79 ); /* COLLATE */ + testcase( i==80 ); /* CREATE */ + testcase( i==81 ); /* CURRENT_DATE */ + testcase( i==82 ); /* IMMEDIATE */ + testcase( i==83 ); /* JOIN */ + testcase( i==84 ); /* INSERT */ + testcase( i==85 ); /* MATCH */ + testcase( i==86 ); /* PLAN */ + testcase( i==87 ); /* ANALYZE */ + testcase( i==88 ); /* PRAGMA */ + testcase( i==89 ); /* MATERIALIZED */ + testcase( i==90 ); /* DEFERRED */ + testcase( i==91 ); /* DISTINCT */ + testcase( i==92 ); /* IS */ + testcase( i==93 ); /* UPDATE */ + testcase( i==94 ); /* VALUES */ + testcase( i==95 ); /* VIRTUAL */ + testcase( i==96 ); /* ALWAYS */ + testcase( i==97 ); /* WHEN */ + testcase( i==98 ); /* WHERE */ + testcase( i==99 ); /* RECURSIVE */ + testcase( i==100 ); /* ABORT */ + testcase( i==101 ); /* AFTER */ + testcase( i==102 ); /* RENAME */ + testcase( i==103 ); /* AND */ + testcase( i==104 ); /* DROP */ + testcase( i==105 ); /* PARTITION */ + testcase( i==106 ); /* AUTOINCREMENT */ + testcase( i==107 ); /* TO */ + testcase( i==108 ); /* IN */ + testcase( i==109 ); /* CAST */ + testcase( i==110 ); /* COLUMN */ + testcase( i==111 ); /* COMMIT */ + testcase( i==112 ); /* CONFLICT */ + testcase( i==113 ); /* CROSS */ + testcase( i==114 ); /* CURRENT_TIMESTAMP */ + testcase( i==115 ); /* CURRENT_TIME */ + testcase( i==116 ); /* CURRENT */ + testcase( i==117 ); /* PRECEDING */ + testcase( i==118 ); /* FAIL */ + testcase( i==119 ); /* LAST */ + testcase( i==120 ); /* FILTER */ + testcase( i==121 ); /* REPLACE */ + testcase( i==122 ); /* FIRST */ + testcase( i==123 ); /* FOLLOWING */ + testcase( i==124 ); /* FROM */ + testcase( i==125 ); /* FULL */ + testcase( i==126 ); /* LIMIT */ + testcase( i==127 ); /* IF */ + testcase( i==128 ); /* ORDER */ + testcase( i==129 ); /* RESTRICT */ + testcase( i==130 ); /* OTHERS */ + testcase( i==131 ); /* OVER */ + testcase( i==132 ); /* RETURNING */ + 
testcase( i==133 ); /* RIGHT */ + testcase( i==134 ); /* ROLLBACK */ + testcase( i==135 ); /* ROWS */ + testcase( i==136 ); /* ROW */ + testcase( i==137 ); /* UNBOUNDED */ + testcase( i==138 ); /* UNION */ + testcase( i==139 ); /* USING */ + testcase( i==140 ); /* VACUUM */ + testcase( i==141 ); /* VIEW */ + testcase( i==142 ); /* WINDOW */ + testcase( i==143 ); /* DO */ + testcase( i==144 ); /* BY */ + testcase( i==145 ); /* INITIALLY */ + testcase( i==146 ); /* ALL */ + testcase( i==147 ); /* PRIMARY */ + *pType = aKWCode[i]; + break; + } return n; } SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){ int id = TK_ID; - keywordCode((char*)z, n, &id); + if( n>=2 ) keywordCode((char*)z, n, &id); return id; } #define SQLITE_N_KEYWORD 147 SQLITE_API int sqlite3_keyword_name(int i,const char **pzName,int *pnName){ if( i<0 || i>=SQLITE_N_KEYWORD ) return SQLITE_ERROR; + i++; *pzName = zKWText + aKWOffset[i]; *pnName = aKWLen[i]; return SQLITE_OK;
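The hunk above moves SQLite's keyword tables to 1-based indexing (148 slots with a dummy entry 0) and compensates inside sqlite3_keyword_name(). As a minimal illustration — not part of the commit — the public keyword API is unaffected by the reindexing; sqlite3_keyword_count() and sqlite3_keyword_name() are standard SQLite interfaces:

```c
/* Minimal sketch: enumerate SQLite's keyword list through the public API.
** The 1-based internal tables introduced above are invisible at this level. */
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  int i, n = sqlite3_keyword_count();
  for(i=0; i<n; i++){
    const char *zName;
    int nName;
    if( sqlite3_keyword_name(i, &zName, &nName)==SQLITE_OK ){
      printf("%.*s\n", nName, zName);   /* keyword text is not NUL-terminated */
    }
  }
  return 0;
}
```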

@@ -171603,7 +176766,7 @@ case CC_DIGIT: {

testcase( z[0]=='0' ); testcase( z[0]=='1' ); testcase( z[0]=='2' ); testcase( z[0]=='3' ); testcase( z[0]=='4' ); testcase( z[0]=='5' ); testcase( z[0]=='6' ); testcase( z[0]=='7' ); testcase( z[0]=='8' ); - testcase( z[0]=='9' ); + testcase( z[0]=='9' ); testcase( z[0]=='.' ); *tokenType = TK_INTEGER; #ifndef SQLITE_OMIT_HEX_INTEGER if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){

@@ -171675,7 +176838,8 @@ if( n==0 ) *tokenType = TK_ILLEGAL;

return i; } case CC_KYWD0: { - for(i=1; aiClass[z[i]]<=CC_KYWD; i++){} + if( aiClass[z[1]]>CC_KYWD ){ i = 1; break; } + for(i=2; aiClass[z[i]]<=CC_KYWD; i++){} if( IdChar(z[i]) ){ /* This token started out using characters that can appear in keywords, ** but z[i] is a character not allowed within keywords, so this must

@@ -172454,30 +177618,20 @@ /*

** Forward declarations of external module initializer functions ** for modules that need them. */ -#ifdef SQLITE_ENABLE_FTS1 -SQLITE_PRIVATE int sqlite3Fts1Init(sqlite3*); -#endif -#ifdef SQLITE_ENABLE_FTS2 -SQLITE_PRIVATE int sqlite3Fts2Init(sqlite3*); -#endif #ifdef SQLITE_ENABLE_FTS5 SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3*); #endif #ifdef SQLITE_ENABLE_STMTVTAB SQLITE_PRIVATE int sqlite3StmtVtabInit(sqlite3*); #endif - +#ifdef SQLITE_EXTRA_AUTOEXT +int SQLITE_EXTRA_AUTOEXT(sqlite3*); +#endif /* ** An array of pointers to extension initializer functions for ** built-in extensions. */ static int (*const sqlite3BuiltinExtensions[])(sqlite3*) = { -#ifdef SQLITE_ENABLE_FTS1 - sqlite3Fts1Init, -#endif -#ifdef SQLITE_ENABLE_FTS2 - sqlite3Fts2Init, -#endif #ifdef SQLITE_ENABLE_FTS3 sqlite3Fts3Init, #endif

@@ -172505,6 +177659,9 @@ sqlite3StmtVtabInit,

#endif #ifdef SQLITE_ENABLE_BYTECODE_VTAB sqlite3VdbeBytecodeVtabInit, +#endif +#ifdef SQLITE_EXTRA_AUTOEXT + SQLITE_EXTRA_AUTOEXT, #endif };

@@ -172580,6 +177737,32 @@ */

SQLITE_API char *sqlite3_data_directory = 0; /* +** Determine whether or not high-precision (long double) floating point +** math works correctly on CPU currently running. +*/ +static SQLITE_NOINLINE int hasHighPrecisionDouble(int rc){ + if( sizeof(LONGDOUBLE_TYPE)<=8 ){ + /* If the size of "long double" is not more than 8, then + ** high-precision math is not possible. */ + return 0; + }else{ + /* Just because sizeof(long double)>8 does not mean that the underlying + ** hardware actually supports high-precision floating point. For example, + ** clearing the 0x100 bit in the floating-point control word on Intel + ** processors will make long double work like double, even though long + ** double takes up more space. The only way to determine if long double + ** actually works is to run an experiment. */ + LONGDOUBLE_TYPE a, b, c; + rc++; + a = 1.0+rc*0.1; + b = 1.0e+18+rc*25.0; + c = a+b; + return b!=c; + } +} + + +/* ** Initialize SQLite. ** ** This routine must be called to initialize the memory allocation,

@@ -172774,6 +177957,12 @@ rc = SQLITE_EXTRA_INIT(0);

} #endif + /* Experimentally determine if high-precision floating point is + ** available. */ +#ifndef SQLITE_OMIT_WSD + sqlite3Config.bUseLongDouble = hasHighPrecisionDouble(rc); +#endif + return rc; }
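The new hasHighPrecisionDouble() helper decides at run time whether long double really carries extra mantissa bits, since sizeof(long double)>8 alone proves nothing on x87-style hardware. A standalone sketch of the same experiment (illustrative only, not part of the commit):

```c
/* Sketch of the experiment hasHighPrecisionDouble() performs: add a small
** value to a huge one; only a mantissa wider than double's 53 bits can
** register the difference. */
#include <stdio.h>

int main(void){
  if( sizeof(long double)<=8 ){
    printf("long double is no wider than double\n");
    return 0;
  }
  long double a = 1.0L + 0.1L;       /* small perturbation */
  long double b = 1.0e+18L + 25.0L;  /* huge base value */
  long double c = a + b;
  printf("high-precision long double %s\n", b!=c ? "works" : "does not work");
  return 0;
}
```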

@@ -172843,9 +178032,21 @@ SQLITE_API int sqlite3_config(int op, ...){

va_list ap; int rc = SQLITE_OK; - /* sqlite3_config() shall return SQLITE_MISUSE if it is invoked while - ** the SQLite library is in use. */ - if( sqlite3GlobalConfig.isInit ) return SQLITE_MISUSE_BKPT; + /* sqlite3_config() normally returns SQLITE_MISUSE if it is invoked while + ** the SQLite library is in use. Except, a few selected opcodes + ** are allowed. + */ + if( sqlite3GlobalConfig.isInit ){ + static const u64 mAnytimeConfigOption = 0 + | MASKBIT64( SQLITE_CONFIG_LOG ) + | MASKBIT64( SQLITE_CONFIG_PCACHE_HDRSZ ) + ; + if( op<0 || op>63 || (MASKBIT64(op) & mAnytimeConfigOption)==0 ){ + return SQLITE_MISUSE_BKPT; + } + testcase( op==SQLITE_CONFIG_LOG ); + testcase( op==SQLITE_CONFIG_PCACHE_HDRSZ ); + } va_start(ap, op); switch( op ){
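This hunk relaxes sqlite3_config() so that a short whitelist of opcodes — currently SQLITE_CONFIG_LOG and SQLITE_CONFIG_PCACHE_HDRSZ — remains usable after the library has been initialized. A hedged sketch of what that permits (illustrative, not part of the commit):

```c
/* Sketch: install an error-log callback even though sqlite3_initialize()
** has already run; previously this call returned SQLITE_MISUSE. */
#include <stdio.h>
#include "sqlite3.h"

static void logCallback(void *pArg, int iErrCode, const char *zMsg){
  (void)pArg;
  fprintf(stderr, "sqlite3 log (%d): %s\n", iErrCode, zMsg);
}

int main(void){
  sqlite3_initialize();                 /* library is now "in use" */
  int rc = sqlite3_config(SQLITE_CONFIG_LOG, logCallback, (void*)0);
  printf("sqlite3_config(SQLITE_CONFIG_LOG) -> %d\n", rc);
  return 0;
}
```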

@@ -172914,6 +178115,7 @@ *va_arg(ap, sqlite3_mem_methods*) = sqlite3GlobalConfig.m;

break; } case SQLITE_CONFIG_MEMSTATUS: { + assert( !sqlite3GlobalConfig.isInit ); /* Cannot change at runtime */ /* EVIDENCE-OF: R-61275-35157 The SQLITE_CONFIG_MEMSTATUS option takes ** single argument of type int, interpreted as a boolean, which enables ** or disables the collection of memory allocation statistics. */

@@ -173037,8 +178239,10 @@ ** http://support.microsoft.com/kb/47961

** sqlite3GlobalConfig.xLog = va_arg(ap, void(*)(void*,int,const char*)); */ typedef void(*LOGFUNC_t)(void*,int,const char*); - sqlite3GlobalConfig.xLog = va_arg(ap, LOGFUNC_t); - sqlite3GlobalConfig.pLogArg = va_arg(ap, void*); + LOGFUNC_t xLog = va_arg(ap, LOGFUNC_t); + void *pLogArg = va_arg(ap, void*); + AtomicStore(&sqlite3GlobalConfig.xLog, xLog); + AtomicStore(&sqlite3GlobalConfig.pLogArg, pLogArg); break; }

@@ -173052,7 +178256,8 @@ /* EVIDENCE-OF: R-25451-61125 The SQLITE_CONFIG_URI option takes a single

** argument of type int. If non-zero, then URI handling is globally ** enabled. If the parameter is zero, then URI handling is globally ** disabled. */ - sqlite3GlobalConfig.bOpenUri = va_arg(ap, int); + int bOpenUri = va_arg(ap, int); + AtomicStore(&sqlite3GlobalConfig.bOpenUri, bOpenUri); break; }

@@ -173328,6 +178533,10 @@ */

SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ va_list ap; int rc; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif sqlite3_mutex_enter(db->mutex); va_start(ap, op); switch( op ){

@@ -173367,6 +178576,8 @@ { SQLITE_DBCONFIG_DQS_DDL, SQLITE_DqsDDL },

{ SQLITE_DBCONFIG_DQS_DML, SQLITE_DqsDML }, { SQLITE_DBCONFIG_LEGACY_FILE_FORMAT, SQLITE_LegacyFileFmt }, { SQLITE_DBCONFIG_TRUSTED_SCHEMA, SQLITE_TrustedSchema }, + { SQLITE_DBCONFIG_STMT_SCANSTATUS, SQLITE_StmtScanStatus }, + { SQLITE_DBCONFIG_REVERSE_SCANORDER, SQLITE_ReverseOrder }, }; unsigned int i; rc = SQLITE_ERROR; /* IMP: R-42790-23372 */

@@ -173654,6 +178865,14 @@ /* Closing the handle. Fourth parameter is passed the value 2. */

sqlite3GlobalConfig.xSqllog(sqlite3GlobalConfig.pSqllogArg, db, 0, 2); } #endif + + while( db->pDbData ){ + DbClientData *p = db->pDbData; + db->pDbData = p->pNext; + assert( p->pData!=0 ); + if( p->xDestructor ) p->xDestructor(p->pData); + sqlite3_free(p); + } /* Convert the connection into a zombie and then close it. */

@@ -173979,6 +179198,7 @@ case SQLITE_NOTICE: zName = "SQLITE_NOTICE"; break;

case SQLITE_NOTICE_RECOVER_WAL: zName = "SQLITE_NOTICE_RECOVER_WAL";break; case SQLITE_NOTICE_RECOVER_ROLLBACK: zName = "SQLITE_NOTICE_RECOVER_ROLLBACK"; break; + case SQLITE_NOTICE_RBU: zName = "SQLITE_NOTICE_RBU"; break; case SQLITE_WARNING: zName = "SQLITE_WARNING"; break; case SQLITE_WARNING_AUTOINDEX: zName = "SQLITE_WARNING_AUTOINDEX"; break; case SQLITE_DONE: zName = "SQLITE_DONE"; break;

@@ -174071,9 +179291,9 @@ static int sqliteDefaultBusyCallback(

void *ptr, /* Database connection */ int count /* Number of times table has been busy */ ){ -#if SQLITE_OS_WIN || HAVE_USLEEP +#if SQLITE_OS_WIN || !defined(HAVE_NANOSLEEP) || HAVE_NANOSLEEP /* This case is for systems that have support for sleeping for fractions of - ** a second. Examples: All windows systems, unix systems with usleep() */ + ** a second. Examples: All windows systems, unix systems with nanosleep() */ static const u8 delays[] = { 1, 2, 5, 10, 15, 20, 25, 25, 25, 50, 50, 100 }; static const u8 totals[] =

@@ -174208,7 +179428,9 @@ ** Cause any pending operation to stop at its earliest opportunity.

*/ SQLITE_API void sqlite3_interrupt(sqlite3 *db){ #ifdef SQLITE_ENABLE_API_ARMOR - if( !sqlite3SafetyCheckOk(db) && (db==0 || db->eOpenState!=SQLITE_STATE_ZOMBIE) ){ + if( !sqlite3SafetyCheckOk(db) + && (db==0 || db->eOpenState!=SQLITE_STATE_ZOMBIE) + ){ (void)SQLITE_MISUSE_BKPT; return; }

@@ -174216,6 +179438,21 @@ #endif

AtomicStore(&db->u1.isInterrupted, 1); } +/* +** Return true or false depending on whether or not an interrupt is +** pending on connection db. +*/ +SQLITE_API int sqlite3_is_interrupted(sqlite3 *db){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) + && (db==0 || db->eOpenState!=SQLITE_STATE_ZOMBIE) + ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + return AtomicLoad(&db->u1.isInterrupted)!=0; +} /* ** This function is exactly the same as sqlite3_create_function(), except
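The new sqlite3_is_interrupted() interface lets long-running application code poll whether sqlite3_interrupt() has been called on the connection. A hedged usage sketch; do_one_batch() is a hypothetical application routine, not part of the commit:

```c
/* Sketch: cooperate with sqlite3_interrupt() from a long-running loop.
** Another thread is assumed to call sqlite3_interrupt(db) to stop it. */
#include "sqlite3.h"

int do_one_batch(sqlite3 *db);   /* hypothetical unit of work */

int run_until_interrupted(sqlite3 *db){
  int rc = SQLITE_OK;
  while( rc==SQLITE_OK && !sqlite3_is_interrupted(db) ){
    rc = do_one_batch(db);       /* e.g. step a prepared statement a few times */
  }
  return rc;
}
```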

@@ -174254,13 +179491,13 @@

assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC ); assert( SQLITE_FUNC_DIRECT==SQLITE_DIRECTONLY ); extraFlags = enc & (SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY| - SQLITE_SUBTYPE|SQLITE_INNOCUOUS); + SQLITE_SUBTYPE|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE); enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY); /* The SQLITE_INNOCUOUS flag is the same bit as SQLITE_FUNC_UNSAFE. But ** the meaning is inverted. So flip the bit. */ assert( SQLITE_FUNC_UNSAFE==SQLITE_INNOCUOUS ); - extraFlags ^= SQLITE_FUNC_UNSAFE; + extraFlags ^= SQLITE_FUNC_UNSAFE; /* tag-20230109-1 */ #ifndef SQLITE_OMIT_UTF16

@@ -174278,11 +179515,11 @@ break;

case SQLITE_ANY: { int rc; rc = sqlite3CreateFunc(db, zFunctionName, nArg, - (SQLITE_UTF8|extraFlags)^SQLITE_FUNC_UNSAFE, + (SQLITE_UTF8|extraFlags)^SQLITE_FUNC_UNSAFE, /* tag-20230109-1 */ pUserData, xSFunc, xStep, xFinal, xValue, xInverse, pDestructor); if( rc==SQLITE_OK ){ rc = sqlite3CreateFunc(db, zFunctionName, nArg, - (SQLITE_UTF16LE|extraFlags)^SQLITE_FUNC_UNSAFE, + (SQLITE_UTF16LE|extraFlags)^SQLITE_FUNC_UNSAFE, /* tag-20230109-1*/ pUserData, xSFunc, xStep, xFinal, xValue, xInverse, pDestructor); } if( rc!=SQLITE_OK ){

@@ -174531,7 +179768,7 @@ sqlite3_mutex_enter(db->mutex);

rc = sqlite3FindFunction(db, zName, nArg, SQLITE_UTF8, 0)!=0; sqlite3_mutex_leave(db->mutex); if( rc ) return SQLITE_OK; - zCopy = sqlite3_mprintf(zName); + zCopy = sqlite3_mprintf("%s", zName); if( zCopy==0 ) return SQLITE_NOMEM; return sqlite3_create_function_v2(db, zName, nArg, SQLITE_UTF8, zCopy, sqlite3InvalidFunction, 0, 0, sqlite3_free);

@@ -174711,6 +179948,12 @@ void*,sqlite3*,int,char const*,char const*,sqlite3_int64,sqlite3_int64),

void *pArg /* First callback argument */ ){ void *pRet; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( db==0 ){ + return 0; + } +#endif sqlite3_mutex_enter(db->mutex); pRet = db->pPreUpdateArg; db->xPreUpdateCallback = xCallback;

@@ -174857,7 +180100,7 @@ assert( SQLITE_CHECKPOINT_TRUNCATE==3 );

if( eMode<SQLITE_CHECKPOINT_PASSIVE || eMode>SQLITE_CHECKPOINT_TRUNCATE ){ /* EVIDENCE-OF: R-03996-12088 The M parameter must be a valid checkpoint ** mode: */ - return SQLITE_MISUSE; + return SQLITE_MISUSE_BKPT; } sqlite3_mutex_enter(db->mutex);

@@ -175334,9 +180577,9 @@ int nUri = sqlite3Strlen30(zUri);

assert( *pzErrMsg==0 ); - if( ((flags & SQLITE_OPEN_URI) /* IMP: R-48725-32206 */ - || sqlite3GlobalConfig.bOpenUri) /* IMP: R-51689-46548 */ - && nUri>=5 && memcmp(zUri, "file:", 5)==0 /* IMP: R-57884-37496 */ + if( ((flags & SQLITE_OPEN_URI) /* IMP: R-48725-32206 */ + || AtomicLoad(&sqlite3GlobalConfig.bOpenUri)) /* IMP: R-51689-46548 */ + && nUri>=5 && memcmp(zUri, "file:", 5)==0 /* IMP: R-57884-37496 */ ){ char *zOpt; int eState; /* Parser state when parsing URI */

@@ -175694,7 +180937,7 @@ ** 1 off on

** 0 off off ** ** Legacy behavior is 3 (double-quoted string literals are allowed anywhere) -** and so that is the default. But developers are encouranged to use +** and so that is the default. But developers are encouraged to use ** -DSQLITE_DQS=0 (best) or -DSQLITE_DQS=1 (second choice) if possible. */ #if !defined(SQLITE_DQS)

@@ -175743,6 +180986,9 @@ #endif

#if defined(SQLITE_DEFAULT_LEGACY_ALTER_TABLE) | SQLITE_LegacyAlter #endif +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) + | SQLITE_StmtScanStatus +#endif ; sqlite3HashInit(&db->aCollSeq); #ifndef SQLITE_OMIT_VIRTUALTABLE

@@ -176091,6 +181337,69 @@ return SQLITE_OK;

} #endif /* SQLITE_OMIT_UTF16 */ +/* +** Find existing client data. +*/ +SQLITE_API void *sqlite3_get_clientdata(sqlite3 *db, const char *zName){ + DbClientData *p; + sqlite3_mutex_enter(db->mutex); + for(p=db->pDbData; p; p=p->pNext){ + if( strcmp(p->zName, zName)==0 ){ + void *pResult = p->pData; + sqlite3_mutex_leave(db->mutex); + return pResult; + } + } + sqlite3_mutex_leave(db->mutex); + return 0; +} + +/* +** Add new client data to a database connection. +*/ +SQLITE_API int sqlite3_set_clientdata( + sqlite3 *db, /* Attach client data to this connection */ + const char *zName, /* Name of the client data */ + void *pData, /* The client data itself */ + void (*xDestructor)(void*) /* Destructor */ +){ + DbClientData *p, **pp; + sqlite3_mutex_enter(db->mutex); + pp = &db->pDbData; + for(p=db->pDbData; p && strcmp(p->zName,zName); p=p->pNext){ + pp = &p->pNext; + } + if( p ){ + assert( p->pData!=0 ); + if( p->xDestructor ) p->xDestructor(p->pData); + if( pData==0 ){ + *pp = p->pNext; + sqlite3_free(p); + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; + } + }else if( pData==0 ){ + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; + }else{ + size_t n = strlen(zName); + p = sqlite3_malloc64( sizeof(DbClientData)+n+1 ); + if( p==0 ){ + if( xDestructor ) xDestructor(pData); + sqlite3_mutex_leave(db->mutex); + return SQLITE_NOMEM; + } + memcpy(p->zName, zName, n+1); + p->pNext = db->pDbData; + db->pDbData = p; + } + p->pData = pData; + p->xDestructor = xDestructor; + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + + #ifndef SQLITE_OMIT_DEPRECATED /* ** This function is now an anachronism. It used to be used to recover from a
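The sqlite3_set_clientdata()/sqlite3_get_clientdata() pair added here attaches named, destructor-managed pointers to a connection; the sqlite3_close() hunk earlier frees whatever is still attached. A hedged usage sketch — the "app-cache" key and AppCache type are illustrative, not part of the commit:

```c
/* Sketch: attach an application object to a connection and fetch it later. */
#include <stdlib.h>
#include "sqlite3.h"

typedef struct AppCache { int nHit; } AppCache;   /* hypothetical app object */

static void appCacheFree(void *p){ free(p); }

int attach_cache(sqlite3 *db){
  AppCache *p = calloc(1, sizeof(*p));
  if( p==0 ) return SQLITE_NOMEM;
  /* Ownership passes to the connection: appCacheFree runs when the entry is
  ** overwritten, cleared with a NULL pData, or when sqlite3_close() runs. */
  return sqlite3_set_clientdata(db, "app-cache", p, appCacheFree);
}

AppCache *get_cache(sqlite3 *db){
  return (AppCache*)sqlite3_get_clientdata(db, "app-cache");
}
```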

@@ -176226,7 +181535,7 @@ }

/* Find the column for which info is requested */ if( zColumnName==0 ){ - /* Query for existance of table only */ + /* Query for existence of table only */ }else{ for(iCol=0; iCol<pTab->nCol; iCol++){ pCol = &pTab->aCol[iCol];

@@ -176307,7 +181616,7 @@

/* This function works in milliseconds, but the underlying OsSleep() ** API uses microseconds. Hence the 1000's. */ - rc = (sqlite3OsSleep(pVfs, 1000*ms)/1000); + rc = (sqlite3OsSleep(pVfs, ms<0 ? 0 : 1000*ms)/1000); return rc; }

@@ -176363,6 +181672,9 @@ if( iNew>=0 && iNew<=255 ){

sqlite3BtreeSetPageSize(pBtree, 0, iNew, 0); } rc = SQLITE_OK; + }else if( op==SQLITE_FCNTL_RESET_CACHE ){ + sqlite3BtreeClearCache(pBtree); + rc = SQLITE_OK; }else{ int nSave = db->busyHandler.nBusy; rc = sqlite3OsFileControl(fd, op, pArg);
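The added branch routes the SQLITE_FCNTL_RESET_CACHE file control to sqlite3BtreeClearCache(). A hedged one-liner showing how an application could issue it (illustrative; the fourth argument is unused here):

```c
/* Sketch: ask SQLite to drop the in-memory page cache of the "main" database. */
#include "sqlite3.h"

int reset_page_cache(sqlite3 *db){
  return sqlite3_file_control(db, "main", SQLITE_FCNTL_RESET_CACHE, 0);
}
```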

@@ -176437,6 +181749,28 @@ break;

} #endif + /* sqlite3_test_control(SQLITE_TESTCTRL_FK_NO_ACTION, sqlite3 *db, int b); + ** + ** If b is true, then activate the SQLITE_FkNoAction setting. If b is + ** false then clear that setting. If the SQLITE_FkNoAction setting is + ** enabled, all foreign key ON DELETE and ON UPDATE actions behave as if + ** they were NO ACTION, regardless of how they are defined. + ** + ** NB: One must usually run "PRAGMA writable_schema=RESET" after + ** using this test-control, before it will take full effect. Failing + ** to reset the schema can result in some unexpected behavior. + */ + case SQLITE_TESTCTRL_FK_NO_ACTION: { + sqlite3 *db = va_arg(ap, sqlite3*); + int b = va_arg(ap, int); + if( b ){ + db->flags |= SQLITE_FkNoAction; + }else{ + db->flags &= ~SQLITE_FkNoAction; + } + break; + } + /* ** sqlite3_test_control(BITVEC_TEST, size, program) **

@@ -176543,10 +181877,12 @@ sqlite3ShowIdList(0);

sqlite3ShowSrcList(0); sqlite3ShowWith(0); sqlite3ShowUpsert(0); +#ifndef SQLITE_OMIT_TRIGGER sqlite3ShowTriggerStep(0); sqlite3ShowTriggerStepList(0); sqlite3ShowTrigger(0); sqlite3ShowTriggerList(0); +#endif #ifndef SQLITE_OMIT_WINDOWFUNC sqlite3ShowWindow(0); sqlite3ShowWinFunc(0);

@@ -176663,7 +181999,7 @@ ** Set or clear a flag that indicates that the database file is always well-

** formed and never corrupt. This flag is clear by default, indicating that ** database files might have arbitrary corruption. Setting the flag during ** testing causes certain assert() statements in the code to be activated - ** that demonstrat invariants on well-formed database files. + ** that demonstrate invariants on well-formed database files. */ case SQLITE_TESTCTRL_NEVER_CORRUPT: { sqlite3GlobalConfig.neverCorrupt = va_arg(ap, int);

@@ -176817,7 +182153,7 @@ ** "ptr" is a pointer to a u32.

** ** op==0 Store the current sqlite3TreeTrace in *ptr ** op==1 Set sqlite3TreeTrace to the value *ptr - ** op==3 Store the current sqlite3WhereTrace in *ptr + ** op==2 Store the current sqlite3WhereTrace in *ptr ** op==3 Set sqlite3WhereTrace to the value *ptr */ case SQLITE_TESTCTRL_TRACEFLAGS: {

@@ -176853,6 +182189,23 @@ *pI2 = sqlite3LogEst(*pU64);

break; } +#if !defined(SQLITE_OMIT_WSD) + /* sqlite3_test_control(SQLITE_TESTCTRL_USELONGDOUBLE, int X); + ** + ** X<0 Make no changes to the bUseLongDouble. Just report value. + ** X==0 Disable bUseLongDouble + ** X==1 Enable bUseLongDouble + ** X>=2 Set bUseLongDouble to its default value for this platform + */ + case SQLITE_TESTCTRL_USELONGDOUBLE: { + int b = va_arg(ap, int); + if( b>=2 ) b = hasHighPrecisionDouble(b); + if( b>=0 ) sqlite3Config.bUseLongDouble = b>0; + rc = sqlite3Config.bUseLongDouble!=0; + break; + } +#endif + #if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD) /* sqlite3_test_control(SQLITE_TESTCTRL_TUNE, id, *piValue)

@@ -177153,7 +182506,7 @@ return rc;

} /* -** Open a read-transaction on the snapshot idendified by pSnapshot. +** Open a read-transaction on the snapshot identified by pSnapshot. */ SQLITE_API int sqlite3_snapshot_open( sqlite3 *db,

@@ -177260,7 +182613,7 @@ int i, n;

int nOpt; const char **azCompileOpt; -#if SQLITE_ENABLE_API_ARMOR +#ifdef SQLITE_ENABLE_API_ARMOR if( zOptName==0 ){ (void)SQLITE_MISUSE_BKPT; return 0;

@@ -177455,6 +182808,9 @@ void *pArg

){ int rc = SQLITE_OK; +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif sqlite3_mutex_enter(db->mutex); enterMutex();

@@ -178476,6 +183832,7 @@ u8 bIgnoreSavepoint; /* True to ignore xSavepoint invocations */

int nPgsz; /* Page size for host database */ char *zSegmentsTbl; /* Name of %_segments table */ sqlite3_blob *pSegments; /* Blob handle open on %_segments table */ + int iSavepoint; /* ** The following array of hash tables is used to buffer pending index

@@ -178861,6 +184218,8 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int);

SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int); #endif +SQLITE_PRIVATE int sqlite3Fts3ExprIterate(Fts3Expr*, int (*x)(Fts3Expr*,int,void*), void*); + #endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */ #endif /* _FTSINT_H */

@@ -179217,6 +184576,7 @@ const char *zLanguageid;

zLanguageid = (p->zLanguageid ? p->zLanguageid : "__langid"); sqlite3_vtab_config(p->db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); + sqlite3_vtab_config(p->db, SQLITE_VTAB_INNOCUOUS); /* Create a list of user columns for the virtual table */ zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]);

@@ -182466,6 +187826,8 @@ if( rc==SQLITE_OK ){

rc = sqlite3Fts3PendingTermsFlush(p); } + p->bIgnoreSavepoint = 1; + if( p->zContentTbl==0 ){ fts3DbExec(&rc, db, "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';",

@@ -182493,6 +187855,8 @@ fts3DbExec(&rc, db,

"ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';", p->zDb, p->zName, zName ); + + p->bIgnoreSavepoint = 0; return rc; }

@@ -182503,12 +187867,28 @@ ** Flush the contents of the pending-terms table to disk.

*/ static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ int rc = SQLITE_OK; - UNUSED_PARAMETER(iSavepoint); - assert( ((Fts3Table *)pVtab)->inTransaction ); - assert( ((Fts3Table *)pVtab)->mxSavepoint <= iSavepoint ); - TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint ); - if( ((Fts3Table *)pVtab)->bIgnoreSavepoint==0 ){ - rc = fts3SyncMethod(pVtab); + Fts3Table *pTab = (Fts3Table*)pVtab; + assert( pTab->inTransaction ); + assert( pTab->mxSavepoint<=iSavepoint ); + TESTONLY( pTab->mxSavepoint = iSavepoint ); + + if( pTab->bIgnoreSavepoint==0 ){ + if( fts3HashCount(&pTab->aIndex[0].hPending)>0 ){ + char *zSql = sqlite3_mprintf("INSERT INTO %Q.%Q(%Q) VALUES('flush')", + pTab->zDb, pTab->zName, pTab->zName + ); + if( zSql ){ + pTab->bIgnoreSavepoint = 1; + rc = sqlite3_exec(pTab->db, zSql, 0, 0, 0); + pTab->bIgnoreSavepoint = 0; + sqlite3_free(zSql); + }else{ + rc = SQLITE_NOMEM; + } + } + if( rc==SQLITE_OK ){ + pTab->iSavepoint = iSavepoint+1; + } } return rc; }

@@ -182519,12 +187899,11 @@ **

** This is a no-op. */ static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ - TESTONLY( Fts3Table *p = (Fts3Table*)pVtab ); - UNUSED_PARAMETER(iSavepoint); - UNUSED_PARAMETER(pVtab); - assert( p->inTransaction ); - assert( p->mxSavepoint >= iSavepoint ); - TESTONLY( p->mxSavepoint = iSavepoint-1 ); + Fts3Table *pTab = (Fts3Table*)pVtab; + assert( pTab->inTransaction ); + assert( pTab->mxSavepoint >= iSavepoint ); + TESTONLY( pTab->mxSavepoint = iSavepoint-1 ); + pTab->iSavepoint = iSavepoint; return SQLITE_OK; }

@@ -182534,11 +187913,13 @@ **

** Discard the contents of the pending terms table. */ static int fts3RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ - Fts3Table *p = (Fts3Table*)pVtab; + Fts3Table *pTab = (Fts3Table*)pVtab; UNUSED_PARAMETER(iSavepoint); - assert( p->inTransaction ); - TESTONLY( p->mxSavepoint = iSavepoint ); - sqlite3Fts3PendingTermsClear(p); + assert( pTab->inTransaction ); + TESTONLY( pTab->mxSavepoint = iSavepoint ); + if( (iSavepoint+1)<=pTab->iSavepoint ){ + sqlite3Fts3PendingTermsClear(pTab); + } return SQLITE_OK; }

@@ -182557,8 +187938,49 @@ }

return 0; } +/* +** Implementation of the xIntegrity() method on the FTS3/FTS4 virtual +** table. +*/ +static int fts3Integrity( + sqlite3_vtab *pVtab, /* The virtual table to be checked */ + const char *zSchema, /* Name of schema in which pVtab lives */ + const char *zTabname, /* Name of the pVTab table */ + int isQuick, /* True if this is a quick_check */ + char **pzErr /* Write error message here */ +){ + Fts3Table *p = (Fts3Table*)pVtab; + char *zSql; + int rc; + char *zErr = 0; + + assert( pzErr!=0 ); + assert( *pzErr==0 ); + UNUSED_PARAMETER(isQuick); + zSql = sqlite3_mprintf( + "INSERT INTO \"%w\".\"%w\"(\"%w\") VALUES('integrity-check');", + zSchema, zTabname, zTabname); + if( zSql==0 ){ + return SQLITE_NOMEM; + } + rc = sqlite3_exec(p->db, zSql, 0, 0, &zErr); + sqlite3_free(zSql); + if( (rc&0xff)==SQLITE_CORRUPT ){ + *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s", + p->bFts4 ? 4 : 3, zSchema, zTabname); + }else if( rc!=SQLITE_OK ){ + *pzErr = sqlite3_mprintf("unable to validate the inverted index for" + " FTS%d table %s.%s: %s", + p->bFts4 ? 4 : 3, zSchema, zTabname, zErr); + } + sqlite3_free(zErr); + return SQLITE_OK; +} + + + static const sqlite3_module fts3Module = { - /* iVersion */ 3, + /* iVersion */ 4, /* xCreate */ fts3CreateMethod, /* xConnect */ fts3ConnectMethod, /* xBestIndex */ fts3BestIndexMethod,
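fts3Integrity() implements the new xIntegrity virtual-table method by running the long-standing FTS3/FTS4 'integrity-check' special command, so PRAGMA integrity_check can now flag a corrupt inverted index. A hedged sketch issuing the same command directly; "ft" is a hypothetical FTS table name:

```c
/* Sketch: run the FTS 'integrity-check' special command, mirroring the SQL
** that fts3Integrity() builds internally. */
#include <stdio.h>
#include "sqlite3.h"

int check_fts_index(sqlite3 *db){
  char *zErr = 0;
  int rc = sqlite3_exec(db,
      "INSERT INTO ft(ft) VALUES('integrity-check');", 0, 0, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "FTS index problem: %s\n", zErr ? zErr : "unknown error");
  }
  sqlite3_free(zErr);
  return rc;
}
```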

@@ -182582,6 +188004,7 @@ /* xSavepoint */ fts3SavepointMethod,

/* xRelease */ fts3ReleaseMethod, /* xRollbackTo */ fts3RollbackToMethod, /* xShadowName */ fts3ShadowName, + /* xIntegrity */ fts3Integrity, }; /*

@@ -183864,9 +189287,8 @@ Fts3Cursor *pCsr, /* FTS Cursor handle */

Fts3Expr *pExpr, /* Expr. to advance to next matching row */ int *pRc /* IN/OUT: Error code */ ){ - if( *pRc==SQLITE_OK ){ + if( *pRc==SQLITE_OK && pExpr->bEof==0 ){ int bDescDoclist = pCsr->bDesc; /* Used by DOCID_CMP() macro */ - assert( pExpr->bEof==0 ); pExpr->bStart = 1; switch( pExpr->eType ){

@@ -184343,6 +189765,22 @@ }

} /* +** This is an sqlite3Fts3ExprIterate() callback. If the Fts3Expr.aMI[] array +** has not yet been allocated, allocate and zero it. Otherwise, just zero +** it. +*/ +static int fts3AllocateMSI(Fts3Expr *pExpr, int iPhrase, void *pCtx){ + Fts3Table *pTab = (Fts3Table*)pCtx; + UNUSED_PARAMETER(iPhrase); + if( pExpr->aMI==0 ){ + pExpr->aMI = (u32 *)sqlite3_malloc64(pTab->nColumn * 3 * sizeof(u32)); + if( pExpr->aMI==0 ) return SQLITE_NOMEM; + } + memset(pExpr->aMI, 0, pTab->nColumn * 3 * sizeof(u32)); + return SQLITE_OK; +} + +/* ** Expression pExpr must be of type FTSQUERY_PHRASE. ** ** If it is not already allocated and populated, this function allocates and

@@ -184363,7 +189801,6 @@ assert( pExpr->eType==FTSQUERY_PHRASE );

if( pExpr->aMI==0 ){ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; Fts3Expr *pRoot; /* Root of NEAR expression */ - Fts3Expr *p; /* Iterator used for several purposes */ sqlite3_int64 iPrevId = pCsr->iPrevId; sqlite3_int64 iDocid;

@@ -184371,7 +189808,9 @@ u8 bEof;

/* Find the root of the NEAR expression */ pRoot = pExpr; - while( pRoot->pParent && pRoot->pParent->eType==FTSQUERY_NEAR ){ + while( pRoot->pParent + && (pRoot->pParent->eType==FTSQUERY_NEAR || pRoot->bDeferred) + ){ pRoot = pRoot->pParent; } iDocid = pRoot->iDocid;

@@ -184379,14 +189818,8 @@ bEof = pRoot->bEof;

assert( pRoot->bStart ); /* Allocate space for the aMSI[] array of each FTSQUERY_PHRASE node */ - for(p=pRoot; p; p=p->pLeft){ - Fts3Expr *pE = (p->eType==FTSQUERY_PHRASE?p:p->pRight); - assert( pE->aMI==0 ); - pE->aMI = (u32 *)sqlite3_malloc64(pTab->nColumn * 3 * sizeof(u32)); - if( !pE->aMI ) return SQLITE_NOMEM; - memset(pE->aMI, 0, pTab->nColumn * 3 * sizeof(u32)); - } - + rc = sqlite3Fts3ExprIterate(pRoot, fts3AllocateMSI, (void*)pTab); + if( rc!=SQLITE_OK ) return rc; fts3EvalRestart(pCsr, pRoot, &rc); while( pCsr->isEof==0 && rc==SQLITE_OK ){

@@ -184542,6 +189975,7 @@ int bOr = 0;

u8 bTreeEof = 0; Fts3Expr *p; /* Used to iterate from pExpr to root */ Fts3Expr *pNear; /* Most senior NEAR ancestor (or pExpr) */ + Fts3Expr *pRun; /* Closest non-deferred ancestor of pNear */ int bMatch; /* Check if this phrase descends from an OR expression node. If not,

@@ -184556,25 +189990,30 @@ if( p->eType==FTSQUERY_NEAR ) pNear = p;

if( p->bEof ) bTreeEof = 1; } if( bOr==0 ) return SQLITE_OK; + pRun = pNear; + while( pRun->bDeferred ){ + assert( pRun->pParent ); + pRun = pRun->pParent; + } /* This is the descendent of an OR node. In this case we cannot use ** an incremental phrase. Load the entire doclist for the phrase ** into memory in this case. */ if( pPhrase->bIncr ){ - int bEofSave = pNear->bEof; - fts3EvalRestart(pCsr, pNear, &rc); - while( rc==SQLITE_OK && !pNear->bEof ){ - fts3EvalNextRow(pCsr, pNear, &rc); - if( bEofSave==0 && pNear->iDocid==iDocid ) break; + int bEofSave = pRun->bEof; + fts3EvalRestart(pCsr, pRun, &rc); + while( rc==SQLITE_OK && !pRun->bEof ){ + fts3EvalNextRow(pCsr, pRun, &rc); + if( bEofSave==0 && pRun->iDocid==iDocid ) break; } assert( rc!=SQLITE_OK || pPhrase->bIncr==0 ); - if( rc==SQLITE_OK && pNear->bEof!=bEofSave ){ + if( rc==SQLITE_OK && pRun->bEof!=bEofSave ){ rc = FTS_CORRUPT_VTAB; } } if( bTreeEof ){ - while( rc==SQLITE_OK && !pNear->bEof ){ - fts3EvalNextRow(pCsr, pNear, &rc); + while( rc==SQLITE_OK && !pRun->bEof ){ + fts3EvalNextRow(pCsr, pRun, &rc); } } if( rc!=SQLITE_OK ) return rc;

@@ -185241,7 +190680,8 @@ 0, /* xRename */

0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; int rc; /* Return code */

@@ -188807,7 +194247,8 @@ 0, /* xRename */

0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; int rc; /* Return code */

@@ -191490,16 +196931,18 @@ Fts3MultiSegReader *pMsr, /* Multi-segment-reader handle */

char *pList, i64 nList ){ - if( nList>pMsr->nBuffer ){ + if( (nList+FTS3_NODE_PADDING)>pMsr->nBuffer ){ char *pNew; - pMsr->nBuffer = nList*2; - pNew = (char *)sqlite3_realloc64(pMsr->aBuffer, pMsr->nBuffer); + int nNew = nList*2 + FTS3_NODE_PADDING; + pNew = (char *)sqlite3_realloc64(pMsr->aBuffer, nNew); if( !pNew ) return SQLITE_NOMEM; pMsr->aBuffer = pNew; + pMsr->nBuffer = nNew; } assert( nList>0 ); memcpy(pMsr->aBuffer, pList, nList); + memset(&pMsr->aBuffer[nList], 0, FTS3_NODE_PADDING); return SQLITE_OK; }

@@ -192146,7 +197589,6 @@ for(i=0; rc==SQLITE_OK && i<p->nIndex; i++){

rc = fts3SegmentMerge(p, p->iPrevLangid, i, FTS3_SEGCURSOR_PENDING); if( rc==SQLITE_DONE ) rc = SQLITE_OK; } - sqlite3Fts3PendingTermsClear(p); /* Determine the auto-incr-merge setting if unknown. If enabled, ** estimate the number of leaf blocks of content to be written

@@ -192168,6 +197610,10 @@ }

rc = sqlite3_reset(pStmt); } } + + if( rc==SQLITE_OK ){ + sqlite3Fts3PendingTermsClear(p); + } return rc; }

@@ -192799,6 +198245,8 @@ assert_fts3_nc( (pNode->a[0]=='\0')==(aDoclist!=0) );

blobGrowBuffer(pPrev, nTerm, &rc); if( rc!=SQLITE_OK ) return rc; + assert( pPrev!=0 ); + assert( pPrev->a!=0 ); nPrefix = fts3PrefixCompress(pPrev->a, pPrev->n, zTerm, nTerm); nSuffix = nTerm - nPrefix;

@@ -192855,9 +198303,13 @@ nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix;

nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist; /* If the current block is not empty, and if adding this term/doclist - ** to the current block would make it larger than Fts3Table.nNodeSize - ** bytes, write this block out to the database. */ - if( pLeaf->block.n>0 && (pLeaf->block.n + nSpace)>p->nNodeSize ){ + ** to the current block would make it larger than Fts3Table.nNodeSize bytes, + ** and if there is still room for another leaf page, write this block out to + ** the database. */ + if( pLeaf->block.n>0 + && (pLeaf->block.n + nSpace)>p->nNodeSize + && pLeaf->iBlock < (pWriter->iStart + pWriter->nLeafEst) + ){ rc = fts3WriteSegment(p, pLeaf->iBlock, pLeaf->block.a, pLeaf->block.n); pWriter->nWork++;

@@ -193168,6 +198620,7 @@ }

for(i=nHeight; i>=0 && rc==SQLITE_OK; i--){ NodeReader reader; + memset(&reader, 0, sizeof(reader)); pNode = &pWriter->aNodeWriter[i]; if( pNode->block.a){

@@ -193188,7 +198641,7 @@ pNode->iBlock = reader.iChild;

rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock,0); blobGrowBuffer(&pNode->block, MAX(nBlock, p->nNodeSize)+FTS3_NODE_PADDING, &rc - ); + ); if( rc==SQLITE_OK ){ memcpy(pNode->block.a, aBlock, nBlock); pNode->block.n = nBlock;

@@ -194038,7 +199491,7 @@ Fts3MultiSegReader csr;

int rc; u64 cksum = 0; - assert( *pRc==SQLITE_OK ); + if( *pRc ) return 0; memset(&filter, 0, sizeof(filter)); memset(&csr, 0, sizeof(csr));

@@ -194253,8 +199706,11 @@ }else if( nVal>6 && 0==sqlite3_strnicmp(zVal, "merge=", 6) ){

rc = fts3DoIncrmerge(p, &zVal[6]); }else if( nVal>10 && 0==sqlite3_strnicmp(zVal, "automerge=", 10) ){ rc = fts3DoAutoincrmerge(p, &zVal[10]); + }else if( nVal==5 && 0==sqlite3_strnicmp(zVal, "flush", 5) ){ + rc = sqlite3Fts3PendingTermsFlush(p); + } #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) - }else{ + else{ int v; if( nVal>9 && 0==sqlite3_strnicmp(zVal, "nodesize=", 9) ){ v = atoi(&zVal[9]);
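The new 'flush' arm of the special-insert handler exposes sqlite3Fts3PendingTermsFlush() as a command, which the revised savepoint logic above also relies on. A hedged sketch of the application-level form ("ft" is a hypothetical FTS table name; this command is only available with the change above applied):

```c
/* Sketch: force the FTS3/FTS4 pending-terms hash to disk via the new
** 'flush' special command. */
#include "sqlite3.h"

int flush_fts_pending_terms(sqlite3 *db){
  return sqlite3_exec(db, "INSERT INTO ft(ft) VALUES('flush');", 0, 0, 0);
}
```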

@@ -194272,8 +199728,8 @@ v = atoi(&zVal[11]);

if( v>=4 && v<=FTS3_MERGE_COUNT && (v&1)==0 ) p->nMergeCount = v; rc = SQLITE_OK; } -#endif } +#endif return rc; }

@@ -194681,7 +200137,7 @@ #define FTS3_MATCHINFO_DEFAULT "pcx"

/* -** Used as an fts3ExprIterate() context when loading phrase doclists to +** Used as an sqlite3Fts3ExprIterate() context when loading phrase doclists to ** Fts3Expr.aDoclist[]/nDoclist. */ typedef struct LoadDoclistCtx LoadDoclistCtx;

@@ -194725,7 +200181,7 @@ u64 hlmask; /* Mask of snippet terms to highlight */

}; /* -** This type is used as an fts3ExprIterate() context object while +** This type is used as an sqlite3Fts3ExprIterate() context object while ** accumulating the data returned by the matchinfo() function. */ typedef struct MatchInfo MatchInfo;

@@ -194884,7 +200340,7 @@ *piPos += (iVal-2);

} /* -** Helper function for fts3ExprIterate() (see below). +** Helper function for sqlite3Fts3ExprIterate() (see below). */ static int fts3ExprIterate2( Fts3Expr *pExpr, /* Expression to iterate phrases of */

@@ -194918,7 +200374,7 @@ ** the iteration is abandoned and the error code returned immediately.

** Otherwise, SQLITE_OK is returned after a callback has been made for ** all eligible phrase nodes. */ -static int fts3ExprIterate( +SQLITE_PRIVATE int sqlite3Fts3ExprIterate( Fts3Expr *pExpr, /* Expression to iterate phrases of */ int (*x)(Fts3Expr*,int,void*), /* Callback function to invoke for phrases */ void *pCtx /* Second argument to pass to callback */

@@ -194927,10 +200383,9 @@ int iPhrase = 0; /* Variable used as the phrase counter */

return fts3ExprIterate2(pExpr, &iPhrase, x, pCtx); } - /* -** This is an fts3ExprIterate() callback used while loading the doclists -** for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also +** This is an sqlite3Fts3ExprIterate() callback used while loading the +** doclists for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also ** fts3ExprLoadDoclists(). */ static int fts3ExprLoadDoclistsCb(Fts3Expr *pExpr, int iPhrase, void *ctx){

@@ -194962,9 +200417,9 @@ int *pnPhrase, /* OUT: Number of phrases in query */

int *pnToken /* OUT: Number of tokens in query */ ){ int rc; /* Return Code */ - LoadDoclistCtx sCtx = {0,0,0}; /* Context for fts3ExprIterate() */ + LoadDoclistCtx sCtx = {0,0,0}; /* Context for sqlite3Fts3ExprIterate() */ sCtx.pCsr = pCsr; - rc = fts3ExprIterate(pCsr->pExpr, fts3ExprLoadDoclistsCb, (void *)&sCtx); + rc = sqlite3Fts3ExprIterate(pCsr->pExpr,fts3ExprLoadDoclistsCb,(void*)&sCtx); if( pnPhrase ) *pnPhrase = sCtx.nPhrase; if( pnToken ) *pnToken = sCtx.nToken; return rc;

@@ -194977,7 +200432,7 @@ return SQLITE_OK;

} static int fts3ExprPhraseCount(Fts3Expr *pExpr){ int nPhrase = 0; - (void)fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase); + (void)sqlite3Fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase); return nPhrase; }

@@ -195105,8 +200560,9 @@ *pmHighlight = mHighlight;

} /* -** This function is an fts3ExprIterate() callback used by fts3BestSnippet(). -** Each invocation populates an element of the SnippetIter.aPhrase[] array. +** This function is an sqlite3Fts3ExprIterate() callback used by +** fts3BestSnippet(). Each invocation populates an element of the +** SnippetIter.aPhrase[] array. */ static int fts3SnippetFindPositions(Fts3Expr *pExpr, int iPhrase, void *ctx){ SnippetIter *p = (SnippetIter *)ctx;

@@ -195196,7 +200652,9 @@ sIter.iCol = iCol;

sIter.nSnippet = nSnippet; sIter.nPhrase = nList; sIter.iCurrent = -1; - rc = fts3ExprIterate(pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter); + rc = sqlite3Fts3ExprIterate( + pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter + ); if( rc==SQLITE_OK ){ /* Set the *pmSeen output variable. */

@@ -195557,10 +201015,10 @@ return rc;

} /* -** fts3ExprIterate() callback used to collect the "global" matchinfo stats -** for a single query. +** sqlite3Fts3ExprIterate() callback used to collect the "global" matchinfo +** stats for a single query. ** -** fts3ExprIterate() callback to load the 'global' elements of a +** sqlite3Fts3ExprIterate() callback to load the 'global' elements of a ** FTS3_MATCHINFO_HITS matchinfo array. The global stats are those elements ** of the matchinfo array that are constant for all rows returned by the ** current query.

@@ -195595,7 +201053,7 @@ );

} /* -** fts3ExprIterate() callback used to collect the "local" part of the +** sqlite3Fts3ExprIterate() callback used to collect the "local" part of the ** FTS3_MATCHINFO_HITS array. The local stats are those elements of the ** array that are different for each row returned by the query. */

@@ -195791,7 +201249,7 @@ ** contains one element for each matchable phrase in the query.

**/ aIter = sqlite3Fts3MallocZero(sizeof(LcsIterator) * pCsr->nPhrase); if( !aIter ) return SQLITE_NOMEM; - (void)fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter); + (void)sqlite3Fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter); for(i=0; i<pInfo->nPhrase; i++){ LcsIterator *pIter = &aIter[i];

@@ -195968,11 +201426,11 @@ if( pCsr->pDeferred ){

rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &pInfo->nDoc,0,0); if( rc!=SQLITE_OK ) break; } - rc = fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo); + rc = sqlite3Fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo); sqlite3Fts3EvalTestDeferred(pCsr, &rc); if( rc!=SQLITE_OK ) break; } - (void)fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo); + (void)sqlite3Fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo); break; } }

@@ -196195,7 +201653,7 @@ TermOffset *aTerm;

}; /* -** This function is an fts3ExprIterate() callback used by sqlite3Fts3Offsets(). +** This function is an sqlite3Fts3ExprIterate() callback used by sqlite3Fts3Offsets(). */ static int fts3ExprTermOffsetInit(Fts3Expr *pExpr, int iPhrase, void *ctx){ TermOffsetCtx *p = (TermOffsetCtx *)ctx;

@@ -196277,7 +201735,9 @@ ** operation may fail if the database contains corrupt records.

*/ sCtx.iCol = iCol; sCtx.iTerm = 0; - rc = fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx); + rc = sqlite3Fts3ExprIterate( + pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx + ); if( rc!=SQLITE_OK ) goto offsets_out; /* Retreive the text stored in column iCol. If an SQL NULL is stored

@@ -197210,25 +202670,51 @@ ** the library isspace() function, resulting in a 7% overall performance

** increase for the parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os). */ static const char jsonIsSpace[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; #define fast_isspace(x) (jsonIsSpace[(unsigned char)x]) +/* +** Characters that are special to JSON. Control charaters, +** '"' and '\\'. +*/ +static const char jsonIsOk[256] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 +}; + + #if !defined(SQLITE_DEBUG) && !defined(SQLITE_COVERAGE_TEST) # define VVA(X) #else

@@ -197239,6 +202725,7 @@ /* Objects */

typedef struct JsonString JsonString; typedef struct JsonNode JsonNode; typedef struct JsonParse JsonParse; +typedef struct JsonCleanup JsonCleanup; /* An instance of this object represents a JSON string ** under construction. Really, this is a generic string accumulator

@@ -197254,16 +202741,26 @@ u8 bErr; /* True if an error has been encountered */

char zSpace[100]; /* Initial static space */ }; +/* A deferred cleanup task. A list of JsonCleanup objects might be +** run when the JsonParse object is destroyed. +*/ +struct JsonCleanup { + JsonCleanup *pJCNext; /* Next in a list */ + void (*xOp)(void*); /* Routine to run */ + void *pArg; /* Argument to xOp() */ +}; + /* JSON type values */ -#define JSON_NULL 0 -#define JSON_TRUE 1 -#define JSON_FALSE 2 -#define JSON_INT 3 -#define JSON_REAL 4 -#define JSON_STRING 5 -#define JSON_ARRAY 6 -#define JSON_OBJECT 7 +#define JSON_SUBST 0 /* Special edit node. Uses u.iPrev */ +#define JSON_NULL 1 +#define JSON_TRUE 2 +#define JSON_FALSE 3 +#define JSON_INT 4 +#define JSON_REAL 5 +#define JSON_STRING 6 +#define JSON_ARRAY 7 +#define JSON_OBJECT 8 /* The "subtype" set for JSON values */ #define JSON_SUBTYPE 74 /* Ascii for "J" */

@@ -197272,59 +202769,97 @@ /*

** Names of the various JSON types: */ static const char * const jsonType[] = { + "subst", "null", "true", "false", "integer", "real", "text", "array", "object" }; /* Bit values for the JsonNode.jnFlag field */ -#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */ -#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */ -#define JNODE_REMOVE 0x04 /* Do not output */ -#define JNODE_REPLACE 0x08 /* Replace with JsonNode.u.iReplace */ -#define JNODE_PATCH 0x10 /* Patch with JsonNode.u.pPatch */ -#define JNODE_APPEND 0x20 /* More ARRAY/OBJECT entries at u.iAppend */ -#define JNODE_LABEL 0x40 /* Is a label of an object */ +#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */ +#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */ +#define JNODE_REMOVE 0x04 /* Do not output */ +#define JNODE_REPLACE 0x08 /* Target of a JSON_SUBST node */ +#define JNODE_APPEND 0x10 /* More ARRAY/OBJECT entries at u.iAppend */ +#define JNODE_LABEL 0x20 /* Is a label of an object */ +#define JNODE_JSON5 0x40 /* Node contains JSON5 enhancements */ -/* A single node of parsed JSON +/* A single node of parsed JSON. An array of these nodes describes +** a parse of JSON + edits. +** +** Use the json_parse() SQL function (available when compiled with +** -DSQLITE_DEBUG) to see a dump of complete JsonParse objects, including +** a complete listing and decoding of the array of JsonNodes. */ struct JsonNode { u8 eType; /* One of the JSON_ type values */ u8 jnFlags; /* JNODE flags */ u8 eU; /* Which union element to use */ - u32 n; /* Bytes of content, or number of sub-nodes */ + u32 n; /* Bytes of content for INT, REAL or STRING + ** Number of sub-nodes for ARRAY and OBJECT + ** Node that SUBST applies to */ union { const char *zJContent; /* 1: Content for INT, REAL, and STRING */ u32 iAppend; /* 2: More terms for ARRAY and OBJECT */ u32 iKey; /* 3: Key for ARRAY objects in json_tree() */ - u32 iReplace; /* 4: Replacement content for JNODE_REPLACE */ - JsonNode *pPatch; /* 5: Node chain of patch for JNODE_PATCH */ + u32 iPrev; /* 4: Previous SUBST node, or 0 */ } u; }; -/* A completely parsed JSON string + +/* A parsed and possibly edited JSON string. Lifecycle: +** +** 1. JSON comes in and is parsed into an array aNode[]. The original +** JSON text is stored in zJson. +** +** 2. Zero or more changes are made (via json_remove() or json_replace() +** or similar) to the aNode[] array. +** +** 3. A new, edited and mimified JSON string is generated from aNode +** and stored in zAlt. The JsonParse object always owns zAlt. +** +** Step 1 always happens. Step 2 and 3 may or may not happen, depending +** on the operation. +** +** aNode[].u.zJContent entries typically point into zJson. Hence zJson +** must remain valid for the lifespan of the parse. For edits, +** aNode[].u.zJContent might point to malloced space other than zJson. +** Entries in pClup are responsible for freeing that extra malloced space. +** +** When walking the parse tree in aNode[], edits are ignored if useMod is +** false. 
*/ struct JsonParse { u32 nNode; /* Number of slots of aNode[] used */ u32 nAlloc; /* Number of slots of aNode[] allocated */ JsonNode *aNode; /* Array of nodes containing the parse */ - const char *zJson; /* Original JSON string */ + char *zJson; /* Original JSON string (before edits) */ + char *zAlt; /* Revised and/or mimified JSON */ u32 *aUp; /* Index of parent of each node */ - u8 oom; /* Set to true if out of memory */ + JsonCleanup *pClup;/* Cleanup operations prior to freeing this object */ + u16 iDepth; /* Nesting depth */ u8 nErr; /* Number of errors seen */ - u16 iDepth; /* Nesting depth */ + u8 oom; /* Set to true if out of memory */ + u8 bJsonIsRCStr; /* True if zJson is an RCStr */ + u8 hasNonstd; /* True if input uses non-standard features like JSON5 */ + u8 useMod; /* Actually use the edits contain inside aNode */ + u8 hasMod; /* aNode contains edits from the original zJson */ + u32 nJPRef; /* Number of references to this object */ int nJson; /* Length of the zJson string in bytes */ - u32 iHold; /* Replace cache line with the lowest iHold value */ + int nAlt; /* Length of alternative JSON string zAlt, in bytes */ + u32 iErr; /* Error location in zJson[] */ + u32 iSubst; /* Last JSON_SUBST entry in aNode[] */ + u32 iHold; /* Age of this entry in the cache for LRU replacement */ }; /* ** Maximum nesting depth of JSON for this implementation. ** ** This limit is needed to avoid a stack overflow in the recursive -** descent parser. A depth of 2000 is far deeper than any sane JSON -** should go. +** descent parser. A depth of 1000 is far deeper than any sane JSON +** should go. Historical note: This limit was 2000 prior to version 3.42.0 */ -#define JSON_MAX_DEPTH 2000 +#define JSON_MAX_DEPTH 1000 /************************************************************************** ** Utility routines for dealing with JsonString objects
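
The JSON_MAX_DEPTH reduction from 2000 to 1000 matters because the parser is recursive descent: each nesting level consumes C stack, so the depth counter is the guard against stack overflow on hostile input. A toy sketch of the same guard, with names invented for illustration:

    #include <stdio.h>

    #define MAX_DEPTH 1000   /* mirrors the idea of JSON_MAX_DEPTH above */

    /* Toy recursive walk over nested '['..']' text that refuses to recurse
    ** past MAX_DEPTH, the same kind of limit the parser above enforces. */
    static int walk(const char *z, int i, int depth){
      if( depth>MAX_DEPTH ) return -1;        /* too deeply nested */
      while( z[i] ){
        if( z[i]=='[' ){
          i = walk(z, i+1, depth+1);
          if( i<0 ) return -1;
        }else if( z[i]==']' ){
          return i+1;
        }else{
          i++;
        }
      }
      return i;
    }

    int main(void){
      printf("%d\n", walk("[[1,2],[3]]", 0, 0));   /* prints 11, end of input */
      return 0;
    }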

@@ -197347,16 +202882,14 @@ p->bErr = 0;

jsonZero(p); } - /* Free all allocated memory and reset the JsonString object back to its ** initial state. */ static void jsonReset(JsonString *p){ - if( !p->bStatic ) sqlite3_free(p->zBuf); + if( !p->bStatic ) sqlite3RCStrUnref(p->zBuf); jsonZero(p); } - /* Report an out-of-memory (OOM) condition */ static void jsonOom(JsonString *p){

@@ -197373,7 +202906,7 @@ u64 nTotal = N<p->nAlloc ? p->nAlloc*2 : p->nAlloc+N+10;

char *zNew; if( p->bStatic ){ if( p->bErr ) return 1; - zNew = sqlite3_malloc64(nTotal); + zNew = sqlite3RCStrNew(nTotal); if( zNew==0 ){ jsonOom(p); return SQLITE_NOMEM;

@@ -197382,12 +202915,12 @@ memcpy(zNew, p->zBuf, (size_t)p->nUsed);

p->zBuf = zNew; p->bStatic = 0; }else{ - zNew = sqlite3_realloc64(p->zBuf, nTotal); - if( zNew==0 ){ - jsonOom(p); + p->zBuf = sqlite3RCStrResize(p->zBuf, nTotal); + if( p->zBuf==0 ){ + p->bErr = 1; + jsonZero(p); return SQLITE_NOMEM; } - p->zBuf = zNew; } p->nAlloc = nTotal; return SQLITE_OK;

@@ -197395,12 +202928,35 @@ }

/* Append N bytes from zIn onto the end of the JsonString string. */ +static SQLITE_NOINLINE void jsonAppendExpand( + JsonString *p, + const char *zIn, + u32 N +){ + assert( N>0 ); + if( jsonGrow(p,N) ) return; + memcpy(p->zBuf+p->nUsed, zIn, N); + p->nUsed += N; +} static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){ if( N==0 ) return; - if( (N+p->nUsed >= p->nAlloc) && jsonGrow(p,N)!=0 ) return; - memcpy(p->zBuf+p->nUsed, zIn, N); - p->nUsed += N; + if( N+p->nUsed >= p->nAlloc ){ + jsonAppendExpand(p,zIn,N); + }else{ + memcpy(p->zBuf+p->nUsed, zIn, N); + p->nUsed += N; + } +} +static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){ + assert( N>0 ); + if( N+p->nUsed >= p->nAlloc ){ + jsonAppendExpand(p,zIn,N); + }else{ + memcpy(p->zBuf+p->nUsed, zIn, N); + p->nUsed += N; + } } + /* Append formatted text (not to exceed N bytes) to the JsonString. */
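
jsonAppendRaw() is split here into an inlineable fast path and a SQLITE_NOINLINE jsonAppendExpand() slow path, keeping the common "buffer already large enough" case cheap while the rare grow-and-copy case stays out of line. A hedged sketch of the same shape, with generic names and a plain realloc() standing in for SQLite's allocator:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct Buf { char *z; size_t nUsed, nAlloc; } Buf;

    /* Cold path: grow the buffer, then copy. Deliberately kept out of line. */
    static void bufAppendExpand(Buf *p, const char *zIn, size_t N){
      size_t nNew = (p->nAlloc ? p->nAlloc*2 : 64) + N;
      char *z = realloc(p->z, nNew);
      if( z==0 ) return;          /* a real implementation would flag OOM */
      p->z = z;
      p->nAlloc = nNew;
      memcpy(p->z + p->nUsed, zIn, N);
      p->nUsed += N;
    }

    /* Hot path: one comparison plus a memcpy when space is available. */
    static void bufAppend(Buf *p, const char *zIn, size_t N){
      if( p->nUsed + N > p->nAlloc ){
        bufAppendExpand(p, zIn, N);
      }else{
        memcpy(p->z + p->nUsed, zIn, N);
        p->nUsed += N;
      }
    }

    int main(void){
      Buf b = {0, 0, 0};
      bufAppend(&b, "hello, ", 7);
      bufAppend(&b, "world", 5);
      printf("%.*s\n", (int)b.nUsed, b.z);    /* hello, world */
      free(b.z);
      return 0;
    }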

@@ -197415,10 +202971,35 @@ }

/* Append a single character */ +static SQLITE_NOINLINE void jsonAppendCharExpand(JsonString *p, char c){ + if( jsonGrow(p,1) ) return; + p->zBuf[p->nUsed++] = c; +} static void jsonAppendChar(JsonString *p, char c){ - if( p->nUsed>=p->nAlloc && jsonGrow(p,1)!=0 ) return; - p->zBuf[p->nUsed++] = c; + if( p->nUsed>=p->nAlloc ){ + jsonAppendCharExpand(p,c); + }else{ + p->zBuf[p->nUsed++] = c; + } +} + +/* Try to force the string to be a zero-terminated RCStr string. +** +** Return true on success. Return false if an OOM prevents this +** from happening. +*/ +static int jsonForceRCStr(JsonString *p){ + jsonAppendChar(p, 0); + if( p->bErr ) return 0; + p->nUsed--; + if( p->bStatic==0 ) return 1; + p->nAlloc = 0; + p->nUsed++; + jsonGrow(p, p->nUsed); + p->nUsed--; + return p->bStatic==0; } + /* Append a comma separator to the output buffer, if the previous ** character is not '[' or '{'.

@@ -197427,7 +203008,8 @@ static void jsonAppendSeparator(JsonString *p){

char c; if( p->nUsed==0 ) return; c = p->zBuf[p->nUsed-1]; - if( c!='[' && c!='{' ) jsonAppendChar(p, ','); + if( c=='[' || c=='{' ) return; + jsonAppendChar(p, ','); } /* Append the N-byte string in zIn to the end of the JsonString string

@@ -197441,11 +203023,16 @@ if( zIn==0 || ((N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0) ) return;

p->zBuf[p->nUsed++] = '"'; for(i=0; i<N; i++){ unsigned char c = ((unsigned const char*)zIn)[i]; - if( c=='"' || c=='\\' ){ + if( jsonIsOk[c] ){ + p->zBuf[p->nUsed++] = c; + }else if( c=='"' || c=='\\' ){ json_simple_escape: if( (p->nUsed+N+3-i > p->nAlloc) && jsonGrow(p,N+3-i)!=0 ) return; p->zBuf[p->nUsed++] = '\\'; - }else if( c<=0x1f ){ + p->zBuf[p->nUsed++] = c; + }else if( c=='\'' ){ + p->zBuf[p->nUsed++] = c; + }else{ static const char aSpecial[] = { 0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0

@@ -197456,6 +203043,7 @@ assert( aSpecial['\f']=='f' );

assert( aSpecial['\n']=='n' ); assert( aSpecial['\r']=='r' ); assert( aSpecial['\t']=='t' ); + assert( c>=0 && c<sizeof(aSpecial) ); if( aSpecial[c] ){ c = aSpecial[c]; goto json_simple_escape;

@@ -197465,16 +203053,145 @@ p->zBuf[p->nUsed++] = '\\';

p->zBuf[p->nUsed++] = 'u'; p->zBuf[p->nUsed++] = '0'; p->zBuf[p->nUsed++] = '0'; - p->zBuf[p->nUsed++] = '0' + (c>>4); - c = "0123456789abcdef"[c&0xf]; + p->zBuf[p->nUsed++] = "0123456789abcdef"[c>>4]; + p->zBuf[p->nUsed++] = "0123456789abcdef"[c&0xf]; } - p->zBuf[p->nUsed++] = c; } p->zBuf[p->nUsed++] = '"'; assert( p->nUsed<p->nAlloc ); } /* +** The zIn[0..N] string is a JSON5 string literal. Append to p a translation +** of the string literal that standard JSON and that omits all JSON5 +** features. +*/ +static void jsonAppendNormalizedString(JsonString *p, const char *zIn, u32 N){ + u32 i; + jsonAppendChar(p, '"'); + zIn++; + N -= 2; + while( N>0 ){ + for(i=0; i<N && zIn[i]!='\\' && zIn[i]!='"'; i++){} + if( i>0 ){ + jsonAppendRawNZ(p, zIn, i); + zIn += i; + N -= i; + if( N==0 ) break; + } + if( zIn[0]=='"' ){ + jsonAppendRawNZ(p, "\\\"", 2); + zIn++; + N--; + continue; + } + assert( zIn[0]=='\\' ); + switch( (u8)zIn[1] ){ + case '\'': + jsonAppendChar(p, '\''); + break; + case 'v': + jsonAppendRawNZ(p, "\\u0009", 6); + break; + case 'x': + jsonAppendRawNZ(p, "\\u00", 4); + jsonAppendRawNZ(p, &zIn[2], 2); + zIn += 2; + N -= 2; + break; + case '0': + jsonAppendRawNZ(p, "\\u0000", 6); + break; + case '\r': + if( zIn[2]=='\n' ){ + zIn++; + N--; + } + break; + case '\n': + break; + case 0xe2: + assert( N>=4 ); + assert( 0x80==(u8)zIn[2] ); + assert( 0xa8==(u8)zIn[3] || 0xa9==(u8)zIn[3] ); + zIn += 2; + N -= 2; + break; + default: + jsonAppendRawNZ(p, zIn, 2); + break; + } + zIn += 2; + N -= 2; + } + jsonAppendChar(p, '"'); +} + +/* +** The zIn[0..N] string is a JSON5 integer literal. Append to p a translation +** of the string literal that standard JSON and that omits all JSON5 +** features. +*/ +static void jsonAppendNormalizedInt(JsonString *p, const char *zIn, u32 N){ + if( zIn[0]=='+' ){ + zIn++; + N--; + }else if( zIn[0]=='-' ){ + jsonAppendChar(p, '-'); + zIn++; + N--; + } + if( zIn[0]=='0' && (zIn[1]=='x' || zIn[1]=='X') ){ + sqlite3_int64 i = 0; + int rc = sqlite3DecOrHexToI64(zIn, &i); + if( rc<=1 ){ + jsonPrintf(100,p,"%lld",i); + }else{ + assert( rc==2 ); + jsonAppendRawNZ(p, "9.0e999", 7); + } + return; + } + assert( N>0 ); + jsonAppendRawNZ(p, zIn, N); +} + +/* +** The zIn[0..N] string is a JSON5 real literal. Append to p a translation +** of the string literal that standard JSON and that omits all JSON5 +** features. +*/ +static void jsonAppendNormalizedReal(JsonString *p, const char *zIn, u32 N){ + u32 i; + if( zIn[0]=='+' ){ + zIn++; + N--; + }else if( zIn[0]=='-' ){ + jsonAppendChar(p, '-'); + zIn++; + N--; + } + if( zIn[0]=='.' ){ + jsonAppendChar(p, '0'); + } + for(i=0; i<N; i++){ + if( zIn[i]=='.' && (i+1==N || !sqlite3Isdigit(zIn[i+1])) ){ + i++; + jsonAppendRaw(p, zIn, i); + zIn += i; + N -= i; + jsonAppendChar(p, '0'); + break; + } + } + if( N>0 ){ + jsonAppendRawNZ(p, zIn, N); + } +} + + + +/* ** Append a function parameter value to the JSON string under ** construction. */
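
The three jsonAppendNormalized*() helpers above rewrite JSON5 string, integer, and real literals into strict JSON when the text is re-rendered. At the SQL level the visible effect is that json() accepts JSON5 input and emits canonical JSON. A small self-contained check, assuming a build against the SQLite 3.42-or-later amalgamation bundled by this commit; the output in the comment is what the normalization described above should produce:

    #include <stdio.h>
    #include "sqlite3.h"

    static int show(void *u, int n, char **v, char **c){
      (void)u; (void)n; (void)c;
      printf("%s\n", v[0]);       /* should print {"a":"hi","b":26,"c":0.5} */
      return 0;
    }

    int main(void){
      sqlite3 *db;
      if( sqlite3_open(":memory:", &db) ) return 1;
      /* unquoted key, single-quoted string, hex integer, leading-dot real */
      sqlite3_exec(db, "SELECT json('{a: ''hi'', b: 0x1A, c: .5}')", show, 0, 0);
      sqlite3_close(db);
      return 0;
    }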

@@ -197484,10 +203201,13 @@ sqlite3_value *pValue /* Value to append */

){ switch( sqlite3_value_type(pValue) ){ case SQLITE_NULL: { - jsonAppendRaw(p, "null", 4); + jsonAppendRawNZ(p, "null", 4); break; } - case SQLITE_INTEGER: - case SQLITE_FLOAT: { + case SQLITE_FLOAT: { + jsonPrintf(100, p, "%!0.15g", sqlite3_value_double(pValue)); + break; + } + case SQLITE_INTEGER: { const char *z = (const char*)sqlite3_value_text(pValue); u32 n = (u32)sqlite3_value_bytes(pValue);

@@ -197517,15 +203237,25 @@ }

/* Make the JSON in p the result of the SQL function. +** +** The JSON string is reset. */ static void jsonResult(JsonString *p){ if( p->bErr==0 ){ - sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed, - p->bStatic ? SQLITE_TRANSIENT : sqlite3_free, - SQLITE_UTF8); - jsonZero(p); + if( p->bStatic ){ + sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed, + SQLITE_TRANSIENT, SQLITE_UTF8); + }else if( jsonForceRCStr(p) ){ + sqlite3RCStrRef(p->zBuf); + sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed, + sqlite3RCStrUnref, + SQLITE_UTF8); + } + } + if( p->bErr==1 ){ + sqlite3_result_error_nomem(p->pCtx); } - assert( p->bStatic ); + jsonReset(p); } /**************************************************************************

@@ -197550,20 +203280,73 @@ ** Reclaim all memory allocated by a JsonParse object. But do not

** delete the JsonParse object itself. */ static void jsonParseReset(JsonParse *pParse){ - sqlite3_free(pParse->aNode); - pParse->aNode = 0; + while( pParse->pClup ){ + JsonCleanup *pTask = pParse->pClup; + pParse->pClup = pTask->pJCNext; + pTask->xOp(pTask->pArg); + sqlite3_free(pTask); + } + assert( pParse->nJPRef<=1 ); + if( pParse->aNode ){ + sqlite3_free(pParse->aNode); + pParse->aNode = 0; + } pParse->nNode = 0; pParse->nAlloc = 0; - sqlite3_free(pParse->aUp); - pParse->aUp = 0; + if( pParse->aUp ){ + sqlite3_free(pParse->aUp); + pParse->aUp = 0; + } + if( pParse->bJsonIsRCStr ){ + sqlite3RCStrUnref(pParse->zJson); + pParse->zJson = 0; + pParse->bJsonIsRCStr = 0; + } + if( pParse->zAlt ){ + sqlite3RCStrUnref(pParse->zAlt); + pParse->zAlt = 0; + } } /* ** Free a JsonParse object that was obtained from sqlite3_malloc(). +** +** Note that destroying JsonParse might call sqlite3RCStrUnref() to +** destroy the zJson value. The RCStr object might recursively invoke +** JsonParse to destroy this pParse object again. Take care to ensure +** that this recursive destructor sequence terminates harmlessly. */ static void jsonParseFree(JsonParse *pParse){ - jsonParseReset(pParse); - sqlite3_free(pParse); + if( pParse->nJPRef>1 ){ + pParse->nJPRef--; + }else{ + jsonParseReset(pParse); + sqlite3_free(pParse); + } +} + +/* +** Add a cleanup task to the JsonParse object. +** +** If an OOM occurs, the cleanup operation happens immediately +** and this function returns SQLITE_NOMEM. +*/ +static int jsonParseAddCleanup( + JsonParse *pParse, /* Add the cleanup task to this parser */ + void(*xOp)(void*), /* The cleanup task */ + void *pArg /* Argument to the cleanup */ +){ + JsonCleanup *pTask = sqlite3_malloc64( sizeof(*pTask) ); + if( pTask==0 ){ + pParse->oom = 1; + xOp(pArg); + return SQLITE_ERROR; + } + pTask->pJCNext = pParse->pClup; + pParse->pClup = pTask; + pTask->xOp = xOp; + pTask->pArg = pArg; + return SQLITE_OK; } /*
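
jsonParseAddCleanup() threads a linked list of (routine, argument) pairs onto the parse so that side allocations made while editing are released by jsonParseReset(), and on OOM the task runs immediately rather than leaking. A generic sketch of that deferred-cleanup pattern, independent of SQLite's internals:

    #include <stdlib.h>

    typedef struct Cleanup Cleanup;
    struct Cleanup {
      Cleanup *pNext;        /* next task in the list */
      void (*xOp)(void*);    /* routine to run at teardown */
      void *pArg;            /* argument passed to xOp */
    };

    typedef struct Owner { Cleanup *pClup; } Owner;

    /* Register a task; if the list node cannot be allocated, run the task
    ** now so the resource is never leaked, and report failure. */
    static int ownerAddCleanup(Owner *p, void (*xOp)(void*), void *pArg){
      Cleanup *pTask = malloc(sizeof(*pTask));
      if( pTask==0 ){ xOp(pArg); return 1; }
      pTask->pNext = p->pClup;
      pTask->xOp = xOp;
      pTask->pArg = pArg;
      p->pClup = pTask;
      return 0;
    }

    /* Run and free all registered tasks, newest first. */
    static void ownerReset(Owner *p){
      while( p->pClup ){
        Cleanup *pTask = p->pClup;
        p->pClup = pTask->pNext;
        pTask->xOp(pTask->pArg);
        free(pTask);
      }
    }

    int main(void){
      Owner o = {0};
      char *z = malloc(16);
      if( z==0 ) return 1;
      if( ownerAddCleanup(&o, free, z) ) return 1;  /* free(z) is deferred */
      ownerReset(&o);                               /* z released here */
      return 0;
    }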

@@ -197572,46 +203355,76 @@ ** append to pOut. Subsubstructure is also included. Return

** the number of JsonNode objects that are encoded. */ static void jsonRenderNode( + JsonParse *pParse, /* the complete parse of the JSON */ JsonNode *pNode, /* The node to render */ - JsonString *pOut, /* Write JSON here */ - sqlite3_value **aReplace /* Replacement values */ + JsonString *pOut /* Write JSON here */ ){ assert( pNode!=0 ); - if( pNode->jnFlags & (JNODE_REPLACE|JNODE_PATCH) ){ - if( (pNode->jnFlags & JNODE_REPLACE)!=0 && ALWAYS(aReplace!=0) ){ - assert( pNode->eU==4 ); - jsonAppendValue(pOut, aReplace[pNode->u.iReplace]); - return; + while( (pNode->jnFlags & JNODE_REPLACE)!=0 && pParse->useMod ){ + u32 idx = (u32)(pNode - pParse->aNode); + u32 i = pParse->iSubst; + while( 1 /*exit-by-break*/ ){ + assert( i<pParse->nNode ); + assert( pParse->aNode[i].eType==JSON_SUBST ); + assert( pParse->aNode[i].eU==4 ); + assert( pParse->aNode[i].u.iPrev<i ); + if( pParse->aNode[i].n==idx ){ + pNode = &pParse->aNode[i+1]; + break; + } + i = pParse->aNode[i].u.iPrev; } - assert( pNode->eU==5 ); - pNode = pNode->u.pPatch; } switch( pNode->eType ){ default: { assert( pNode->eType==JSON_NULL ); - jsonAppendRaw(pOut, "null", 4); + jsonAppendRawNZ(pOut, "null", 4); break; } case JSON_TRUE: { - jsonAppendRaw(pOut, "true", 4); + jsonAppendRawNZ(pOut, "true", 4); break; } case JSON_FALSE: { - jsonAppendRaw(pOut, "false", 5); + jsonAppendRawNZ(pOut, "false", 5); break; } case JSON_STRING: { + assert( pNode->eU==1 ); if( pNode->jnFlags & JNODE_RAW ){ - assert( pNode->eU==1 ); - jsonAppendString(pOut, pNode->u.zJContent, pNode->n); - break; + if( pNode->jnFlags & JNODE_LABEL ){ + jsonAppendChar(pOut, '"'); + jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n); + jsonAppendChar(pOut, '"'); + }else{ + jsonAppendString(pOut, pNode->u.zJContent, pNode->n); + } + }else if( pNode->jnFlags & JNODE_JSON5 ){ + jsonAppendNormalizedString(pOut, pNode->u.zJContent, pNode->n); + }else{ + assert( pNode->n>0 ); + jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n); + } + break; + } + case JSON_REAL: { + assert( pNode->eU==1 ); + if( pNode->jnFlags & JNODE_JSON5 ){ + jsonAppendNormalizedReal(pOut, pNode->u.zJContent, pNode->n); + }else{ + assert( pNode->n>0 ); + jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n); } - /* no break */ deliberate_fall_through + break; } - case JSON_REAL: case JSON_INT: { assert( pNode->eU==1 ); - jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n); + if( pNode->jnFlags & JNODE_JSON5 ){ + jsonAppendNormalizedInt(pOut, pNode->u.zJContent, pNode->n); + }else{ + assert( pNode->n>0 ); + jsonAppendRawNZ(pOut, pNode->u.zJContent, pNode->n); + } break; } case JSON_ARRAY: {

@@ -197619,15 +203432,16 @@ u32 j = 1;

jsonAppendChar(pOut, '['); for(;;){ while( j<=pNode->n ){ - if( (pNode[j].jnFlags & JNODE_REMOVE)==0 ){ + if( (pNode[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ){ jsonAppendSeparator(pOut); - jsonRenderNode(&pNode[j], pOut, aReplace); + jsonRenderNode(pParse, &pNode[j], pOut); } j += jsonNodeSize(&pNode[j]); } if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; + if( pParse->useMod==0 ) break; assert( pNode->eU==2 ); - pNode = &pNode[pNode->u.iAppend]; + pNode = &pParse->aNode[pNode->u.iAppend]; j = 1; } jsonAppendChar(pOut, ']');

@@ -197638,17 +203452,18 @@ u32 j = 1;

jsonAppendChar(pOut, '{'); for(;;){ while( j<=pNode->n ){ - if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 ){ + if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ){ jsonAppendSeparator(pOut); - jsonRenderNode(&pNode[j], pOut, aReplace); + jsonRenderNode(pParse, &pNode[j], pOut); jsonAppendChar(pOut, ':'); - jsonRenderNode(&pNode[j+1], pOut, aReplace); + jsonRenderNode(pParse, &pNode[j+1], pOut); } j += 1 + jsonNodeSize(&pNode[j+1]); } if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; + if( pParse->useMod==0 ) break; assert( pNode->eU==2 ); - pNode = &pNode[pNode->u.iAppend]; + pNode = &pParse->aNode[pNode->u.iAppend]; j = 1; } jsonAppendChar(pOut, '}');

@@ -197658,18 +203473,30 @@ }

} /* -** Return a JsonNode and all its descendents as a JSON string. +** Return a JsonNode and all its descendants as a JSON string. */ static void jsonReturnJson( + JsonParse *pParse, /* The complete JSON */ JsonNode *pNode, /* Node to return */ sqlite3_context *pCtx, /* Return value for this function */ - sqlite3_value **aReplace /* Array of replacement values */ + int bGenerateAlt, /* Also store the rendered text in zAlt */ + int omitSubtype /* Do not call sqlite3_result_subtype() */ ){ JsonString s; - jsonInit(&s, pCtx); - jsonRenderNode(pNode, &s, aReplace); - jsonResult(&s); - sqlite3_result_subtype(pCtx, JSON_SUBTYPE); + if( pParse->oom ){ + sqlite3_result_error_nomem(pCtx); + return; + } + if( pParse->nErr==0 ){ + jsonInit(&s, pCtx); + jsonRenderNode(pParse, pNode, &s); + if( bGenerateAlt && pParse->zAlt==0 && jsonForceRCStr(&s) ){ + pParse->zAlt = sqlite3RCStrRef(s.zBuf); + pParse->nAlt = s.nUsed; + } + jsonResult(&s); + if( !omitSubtype ) sqlite3_result_subtype(pCtx, JSON_SUBTYPE); + } } /*

@@ -197707,9 +203534,10 @@ /*

** Make the JsonNode the return value of the function. */ static void jsonReturn( + JsonParse *pParse, /* Complete JSON parse tree */ JsonNode *pNode, /* Node to return */ sqlite3_context *pCtx, /* Return value for this function */ - sqlite3_value **aReplace /* Array of replacement values */ + int omitSubtype /* Do not call sqlite3_result_subtype() */ ){ switch( pNode->eType ){ default: {

@@ -197727,59 +203555,40 @@ break;

} case JSON_INT: { sqlite3_int64 i = 0; + int rc; + int bNeg = 0; const char *z; + assert( pNode->eU==1 ); z = pNode->u.zJContent; - if( z[0]=='-' ){ z++; } - while( z[0]>='0' && z[0]<='9' ){ - unsigned v = *(z++) - '0'; - if( i>=LARGEST_INT64/10 ){ - if( i>LARGEST_INT64/10 ) goto int_as_real; - if( z[0]>='0' && z[0]<='9' ) goto int_as_real; - if( v==9 ) goto int_as_real; - if( v==8 ){ - if( pNode->u.zJContent[0]=='-' ){ - sqlite3_result_int64(pCtx, SMALLEST_INT64); - goto int_done; - }else{ - goto int_as_real; - } - } - } - i = i*10 + v; + if( z[0]=='-' ){ z++; bNeg = 1; } + else if( z[0]=='+' ){ z++; } + rc = sqlite3DecOrHexToI64(z, &i); + if( rc<=1 ){ + sqlite3_result_int64(pCtx, bNeg ? -i : i); + }else if( rc==3 && bNeg ){ + sqlite3_result_int64(pCtx, SMALLEST_INT64); + }else{ + goto to_double; } - if( pNode->u.zJContent[0]=='-' ){ i = -i; } - sqlite3_result_int64(pCtx, i); - int_done: break; - int_as_real: ; /* no break */ deliberate_fall_through } case JSON_REAL: { double r; -#ifdef SQLITE_AMALGAMATION const char *z; assert( pNode->eU==1 ); + to_double: z = pNode->u.zJContent; sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8); -#else - assert( pNode->eU==1 ); - r = strtod(pNode->u.zJContent, 0); -#endif sqlite3_result_double(pCtx, r); break; } case JSON_STRING: { -#if 0 /* Never happens because JNODE_RAW is only set by json_set(), - ** json_insert() and json_replace() and those routines do not - ** call jsonReturn() */ if( pNode->jnFlags & JNODE_RAW ){ assert( pNode->eU==1 ); sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n, SQLITE_TRANSIENT); - }else -#endif - assert( (pNode->jnFlags & JNODE_RAW)==0 ); - if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){ + }else if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){ /* JSON formatted without any backslash-escapes */ assert( pNode->eU==1 ); sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2,

@@ -197791,18 +203600,17 @@ u32 n = pNode->n;

const char *z; char *zOut; u32 j; + u32 nOut = n; assert( pNode->eU==1 ); z = pNode->u.zJContent; - zOut = sqlite3_malloc( n+1 ); + zOut = sqlite3_malloc( nOut+1 ); if( zOut==0 ){ sqlite3_result_error_nomem(pCtx); break; } for(i=1, j=0; i<n-1; i++){ char c = z[i]; - if( c!='\\' ){ - zOut[j++] = c; - }else{ + if( c=='\\' ){ c = z[++i]; if( c=='u' ){ u32 v = jsonHexToInt4(z+i+1);

@@ -197834,22 +203642,40 @@ zOut[j++] = 0x80 | ((v>>6)&0x3f);

zOut[j++] = 0x80 | (v&0x3f); } } + continue; + }else if( c=='b' ){ + c = '\b'; + }else if( c=='f' ){ + c = '\f'; + }else if( c=='n' ){ + c = '\n'; + }else if( c=='r' ){ + c = '\r'; + }else if( c=='t' ){ + c = '\t'; + }else if( c=='v' ){ + c = '\v'; + }else if( c=='\'' || c=='"' || c=='/' || c=='\\' ){ + /* pass through unchanged */ + }else if( c=='0' ){ + c = 0; + }else if( c=='x' ){ + c = (jsonHexToInt(z[i+1])<<4) | jsonHexToInt(z[i+2]); + i += 2; + }else if( c=='\r' && z[i+1]=='\n' ){ + i++; + continue; + }else if( 0xe2==(u8)c ){ + assert( 0x80==(u8)z[i+1] ); + assert( 0xa8==(u8)z[i+2] || 0xa9==(u8)z[i+2] ); + i += 2; + continue; }else{ - if( c=='b' ){ - c = '\b'; - }else if( c=='f' ){ - c = '\f'; - }else if( c=='n' ){ - c = '\n'; - }else if( c=='r' ){ - c = '\r'; - }else if( c=='t' ){ - c = '\t'; - } - zOut[j++] = c; + continue; } - } - } + } /* end if( c=='\\' ) */ + zOut[j++] = c; + } /* end for() */ zOut[j] = 0; sqlite3_result_text(pCtx, zOut, j, sqlite3_free); }

@@ -197857,7 +203683,7 @@ break;

} case JSON_ARRAY: case JSON_OBJECT: { - jsonReturnJson(pNode, pCtx, aReplace); + jsonReturnJson(pParse, pNode, pCtx, 0, omitSubtype); break; } }

@@ -197879,6 +203705,12 @@ # define JSON_NOINLINE

#endif +/* +** Add a single node to pParse->aNode after first expanding the +** size of the aNode array. Return the index of the new node. +** +** If an OOM error occurs, set pParse->oom and return -1. +*/ static JSON_NOINLINE int jsonParseAddNodeExpand( JsonParse *pParse, /* Append the node to this object */ u32 eType, /* Node type */

@@ -197895,7 +203727,7 @@ if( pNew==0 ){

pParse->oom = 1; return -1; } - pParse->nAlloc = nNew; + pParse->nAlloc = sqlite3_msize(pNew)/sizeof(JsonNode); pParse->aNode = pNew; assert( pParse->nNode<pParse->nAlloc ); return jsonParseAddNode(pParse, eType, n, zContent);

@@ -197913,12 +203745,15 @@ u32 n, /* Content size or sub-node count */

const char *zContent /* Content */ ){ JsonNode *p; - if( pParse->aNode==0 || pParse->nNode>=pParse->nAlloc ){ + assert( pParse->aNode!=0 || pParse->nNode>=pParse->nAlloc ); + if( pParse->nNode>=pParse->nAlloc ){ return jsonParseAddNodeExpand(pParse, eType, n, zContent); } + assert( pParse->aNode!=0 ); p = &pParse->aNode[pParse->nNode]; - p->eType = (u8)eType; - p->jnFlags = 0; + assert( p!=0 ); + p->eType = (u8)(eType & 0xff); + p->jnFlags = (u8)(eType >> 8); VVA( p->eU = zContent ? 1 : 0 ); p->n = n; p->u.zJContent = zContent;

@@ -197926,21 +203761,223 @@ return pParse->nNode++;

} /* +** Add an array of new nodes to the current pParse->aNode array. +** Return the index of the first node added. +** +** If an OOM error occurs, set pParse->oom. +*/ +static void jsonParseAddNodeArray( + JsonParse *pParse, /* Append the node to this object */ + JsonNode *aNode, /* Array of nodes to add */ + u32 nNode /* Number of elements in aNew */ +){ + assert( aNode!=0 ); + assert( nNode>=1 ); + if( pParse->nNode + nNode > pParse->nAlloc ){ + u32 nNew = pParse->nNode + nNode; + JsonNode *aNew = sqlite3_realloc64(pParse->aNode, nNew*sizeof(JsonNode)); + if( aNew==0 ){ + pParse->oom = 1; + return; + } + pParse->nAlloc = sqlite3_msize(aNew)/sizeof(JsonNode); + pParse->aNode = aNew; + } + memcpy(&pParse->aNode[pParse->nNode], aNode, nNode*sizeof(JsonNode)); + pParse->nNode += nNode; +} + +/* +** Add a new JSON_SUBST node. The node immediately following +** this new node will be the substitute content for iNode. +*/ +static int jsonParseAddSubstNode( + JsonParse *pParse, /* Add the JSON_SUBST here */ + u32 iNode /* References this node */ +){ + int idx = jsonParseAddNode(pParse, JSON_SUBST, iNode, 0); + if( pParse->oom ) return -1; + pParse->aNode[iNode].jnFlags |= JNODE_REPLACE; + pParse->aNode[idx].eU = 4; + pParse->aNode[idx].u.iPrev = pParse->iSubst; + pParse->iSubst = idx; + pParse->hasMod = 1; + pParse->useMod = 1; + return idx; +} + +/* +** Return true if z[] begins with 2 (or more) hexadecimal digits +*/ +static int jsonIs2Hex(const char *z){ + return sqlite3Isxdigit(z[0]) && sqlite3Isxdigit(z[1]); +} + +/* ** Return true if z[] begins with 4 (or more) hexadecimal digits */ static int jsonIs4Hex(const char *z){ - int i; - for(i=0; i<4; i++) if( !sqlite3Isxdigit(z[i]) ) return 0; - return 1; + return jsonIs2Hex(z) && jsonIs2Hex(&z[2]); } /* +** Return the number of bytes of JSON5 whitespace at the beginning of +** the input string z[]. +** +** JSON5 whitespace consists of any of the following characters: +** +** Unicode UTF-8 Name +** U+0009 09 horizontal tab +** U+000a 0a line feed +** U+000b 0b vertical tab +** U+000c 0c form feed +** U+000d 0d carriage return +** U+0020 20 space +** U+00a0 c2 a0 non-breaking space +** U+1680 e1 9a 80 ogham space mark +** U+2000 e2 80 80 en quad +** U+2001 e2 80 81 em quad +** U+2002 e2 80 82 en space +** U+2003 e2 80 83 em space +** U+2004 e2 80 84 three-per-em space +** U+2005 e2 80 85 four-per-em space +** U+2006 e2 80 86 six-per-em space +** U+2007 e2 80 87 figure space +** U+2008 e2 80 88 punctuation space +** U+2009 e2 80 89 thin space +** U+200a e2 80 8a hair space +** U+2028 e2 80 a8 line separator +** U+2029 e2 80 a9 paragraph separator +** U+202f e2 80 af narrow no-break space (NNBSP) +** U+205f e2 81 9f medium mathematical space (MMSP) +** U+3000 e3 80 80 ideographical space +** U+FEFF ef bb bf byte order mark +** +** In addition, comments between '/', '*' and '*', '/' and +** from '/', '/' to end-of-line are also considered to be whitespace. 
+*/ +static int json5Whitespace(const char *zIn){ + int n = 0; + const u8 *z = (u8*)zIn; + while( 1 /*exit by "goto whitespace_done"*/ ){ + switch( z[n] ){ + case 0x09: + case 0x0a: + case 0x0b: + case 0x0c: + case 0x0d: + case 0x20: { + n++; + break; + } + case '/': { + if( z[n+1]=='*' && z[n+2]!=0 ){ + int j; + for(j=n+3; z[j]!='/' || z[j-1]!='*'; j++){ + if( z[j]==0 ) goto whitespace_done; + } + n = j+1; + break; + }else if( z[n+1]=='/' ){ + int j; + char c; + for(j=n+2; (c = z[j])!=0; j++){ + if( c=='\n' || c=='\r' ) break; + if( 0xe2==(u8)c && 0x80==(u8)z[j+1] + && (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2]) + ){ + j += 2; + break; + } + } + n = j; + if( z[n] ) n++; + break; + } + goto whitespace_done; + } + case 0xc2: { + if( z[n+1]==0xa0 ){ + n += 2; + break; + } + goto whitespace_done; + } + case 0xe1: { + if( z[n+1]==0x9a && z[n+2]==0x80 ){ + n += 3; + break; + } + goto whitespace_done; + } + case 0xe2: { + if( z[n+1]==0x80 ){ + u8 c = z[n+2]; + if( c<0x80 ) goto whitespace_done; + if( c<=0x8a || c==0xa8 || c==0xa9 || c==0xaf ){ + n += 3; + break; + } + }else if( z[n+1]==0x81 && z[n+2]==0x9f ){ + n += 3; + break; + } + goto whitespace_done; + } + case 0xe3: { + if( z[n+1]==0x80 && z[n+2]==0x80 ){ + n += 3; + break; + } + goto whitespace_done; + } + case 0xef: { + if( z[n+1]==0xbb && z[n+2]==0xbf ){ + n += 3; + break; + } + goto whitespace_done; + } + default: { + goto whitespace_done; + } + } + } + whitespace_done: + return n; +} + +/* +** Extra floating-point literals to allow in JSON. +*/ +static const struct NanInfName { + char c1; + char c2; + char n; + char eType; + char nRepl; + char *zMatch; + char *zRepl; +} aNanInfName[] = { + { 'i', 'I', 3, JSON_REAL, 7, "inf", "9.0e999" }, + { 'i', 'I', 8, JSON_REAL, 7, "infinity", "9.0e999" }, + { 'n', 'N', 3, JSON_NULL, 4, "NaN", "null" }, + { 'q', 'Q', 4, JSON_NULL, 4, "QNaN", "null" }, + { 's', 'S', 4, JSON_NULL, 4, "SNaN", "null" }, +}; + +/* ** Parse a single JSON value which begins at pParse->zJson[i]. Return the ** index of the first character past the end of the value parsed. ** -** Return negative for a syntax error. Special cases: return -2 if the -** first non-whitespace character is '}' and return -3 if the first -** non-whitespace character is ']'. +** Special return values: +** +** 0 End of input +** -1 Syntax error +** -2 '}' seen +** -3 ']' seen +** -4 ',' seen +** -5 ':' seen */ static int jsonParseValue(JsonParse *pParse, u32 i){ char c;
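
json5Whitespace() returns the number of bytes of JSON5 whitespace and comments at the start of its input, so the caller simply advances the cursor by the return value and re-dispatches. A self-contained sketch of how such a scanner slots into a parse loop; skipWs() below is a simplified stand-in (ASCII blanks and line comments only), not the real routine:

    #include <stdio.h>

    /* Stand-in for a scanner like json5Whitespace(): returns how many bytes
    ** of blanks and //-comments start at z. */
    static int skipWs(const char *z){
      int n = 0;
      for(;;){
        if( z[n]==' ' || z[n]=='\t' || z[n]=='\n' || z[n]=='\r' ){
          n++;
        }else if( z[n]=='/' && z[n+1]=='/' ){
          n += 2;
          while( z[n] && z[n]!='\n' ) n++;
        }else{
          break;
        }
      }
      return n;
    }

    int main(void){
      const char *z = "  // a comment\n  [1, 2]";
      int i = skipWs(z);                 /* advance past whitespace + comment */
      printf("payload starts at byte %d: %s\n", i, z+i);
      return 0;
    }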

@@ -197949,175 +203986,457 @@ int iThis;

int x; JsonNode *pNode; const char *z = pParse->zJson; - while( fast_isspace(z[i]) ){ i++; } - if( (c = z[i])=='{' ){ +json_parse_restart: + switch( (u8)z[i] ){ + case '{': { /* Parse object */ iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0); if( iThis<0 ) return -1; + if( ++pParse->iDepth > JSON_MAX_DEPTH ){ + pParse->iErr = i; + return -1; + } for(j=i+1;;j++){ - while( fast_isspace(z[j]) ){ j++; } - if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1; + u32 nNode = pParse->nNode; x = jsonParseValue(pParse, j); - if( x<0 ){ - pParse->iDepth--; - if( x==(-2) && pParse->nNode==(u32)iThis+1 ) return j+1; - return -1; + if( x<=0 ){ + if( x==(-2) ){ + j = pParse->iErr; + if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1; + break; + } + j += json5Whitespace(&z[j]); + if( sqlite3JsonId1(z[j]) + || (z[j]=='\\' && z[j+1]=='u' && jsonIs4Hex(&z[j+2])) + ){ + int k = j+1; + while( (sqlite3JsonId2(z[k]) && json5Whitespace(&z[k])==0) + || (z[k]=='\\' && z[k+1]=='u' && jsonIs4Hex(&z[k+2])) + ){ + k++; + } + jsonParseAddNode(pParse, JSON_STRING | (JNODE_RAW<<8), k-j, &z[j]); + pParse->hasNonstd = 1; + x = k; + }else{ + if( x!=-1 ) pParse->iErr = j; + return -1; + } } if( pParse->oom ) return -1; - pNode = &pParse->aNode[pParse->nNode-1]; - if( pNode->eType!=JSON_STRING ) return -1; + pNode = &pParse->aNode[nNode]; + if( pNode->eType!=JSON_STRING ){ + pParse->iErr = j; + return -1; + } pNode->jnFlags |= JNODE_LABEL; j = x; - while( fast_isspace(z[j]) ){ j++; } - if( z[j]!=':' ) return -1; - j++; + if( z[j]==':' ){ + j++; + }else{ + if( fast_isspace(z[j]) ){ + do{ j++; }while( fast_isspace(z[j]) ); + if( z[j]==':' ){ + j++; + goto parse_object_value; + } + } + x = jsonParseValue(pParse, j); + if( x!=(-5) ){ + if( x!=(-1) ) pParse->iErr = j; + return -1; + } + j = pParse->iErr+1; + } + parse_object_value: x = jsonParseValue(pParse, j); - pParse->iDepth--; - if( x<0 ) return -1; + if( x<=0 ){ + if( x!=(-1) ) pParse->iErr = j; + return -1; + } j = x; - while( fast_isspace(z[j]) ){ j++; } - c = z[j]; - if( c==',' ) continue; - if( c!='}' ) return -1; - break; + if( z[j]==',' ){ + continue; + }else if( z[j]=='}' ){ + break; + }else{ + if( fast_isspace(z[j]) ){ + do{ j++; }while( fast_isspace(z[j]) ); + if( z[j]==',' ){ + continue; + }else if( z[j]=='}' ){ + break; + } + } + x = jsonParseValue(pParse, j); + if( x==(-4) ){ + j = pParse->iErr; + continue; + } + if( x==(-2) ){ + j = pParse->iErr; + break; + } + } + pParse->iErr = j; + return -1; } pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1; + pParse->iDepth--; return j+1; - }else if( c=='[' ){ + } + case '[': { /* Parse array */ iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0); if( iThis<0 ) return -1; + if( ++pParse->iDepth > JSON_MAX_DEPTH ){ + pParse->iErr = i; + return -1; + } memset(&pParse->aNode[iThis].u, 0, sizeof(pParse->aNode[iThis].u)); for(j=i+1;;j++){ - while( fast_isspace(z[j]) ){ j++; } - if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1; x = jsonParseValue(pParse, j); - pParse->iDepth--; - if( x<0 ){ - if( x==(-3) && pParse->nNode==(u32)iThis+1 ) return j+1; + if( x<=0 ){ + if( x==(-3) ){ + j = pParse->iErr; + if( pParse->nNode!=(u32)iThis+1 ) pParse->hasNonstd = 1; + break; + } + if( x!=(-1) ) pParse->iErr = j; return -1; } j = x; - while( fast_isspace(z[j]) ){ j++; } - c = z[j]; - if( c==',' ) continue; - if( c!=']' ) return -1; - break; + if( z[j]==',' ){ + continue; + }else if( z[j]==']' ){ + break; + }else{ + if( fast_isspace(z[j]) ){ + do{ j++; }while( fast_isspace(z[j]) ); + if( z[j]==',' ){ + continue; + }else if( 
z[j]==']' ){ + break; + } + } + x = jsonParseValue(pParse, j); + if( x==(-4) ){ + j = pParse->iErr; + continue; + } + if( x==(-3) ){ + j = pParse->iErr; + break; + } + } + pParse->iErr = j; + return -1; } pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1; + pParse->iDepth--; return j+1; - }else if( c=='"' ){ + } + case '\'': { + u8 jnFlags; + char cDelim; + pParse->hasNonstd = 1; + jnFlags = JNODE_JSON5; + goto parse_string; + case '"': /* Parse string */ - u8 jnFlags = 0; - j = i+1; - for(;;){ + jnFlags = 0; + parse_string: + cDelim = z[i]; + for(j=i+1; 1; j++){ + if( jsonIsOk[(unsigned char)z[j]] ) continue; c = z[j]; - if( (c & ~0x1f)==0 ){ - /* Control characters are not allowed in strings */ - return -1; - } - if( c=='\\' ){ + if( c==cDelim ){ + break; + }else if( c=='\\' ){ c = z[++j]; if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f' || c=='n' || c=='r' || c=='t' - || (c=='u' && jsonIs4Hex(z+j+1)) ){ - jnFlags = JNODE_ESCAPE; + || (c=='u' && jsonIs4Hex(&z[j+1])) ){ + jnFlags |= JNODE_ESCAPE; + }else if( c=='\'' || c=='0' || c=='v' || c=='\n' + || (0xe2==(u8)c && 0x80==(u8)z[j+1] + && (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2])) + || (c=='x' && jsonIs2Hex(&z[j+1])) ){ + jnFlags |= (JNODE_ESCAPE|JNODE_JSON5); + pParse->hasNonstd = 1; + }else if( c=='\r' ){ + if( z[j+1]=='\n' ) j++; + jnFlags |= (JNODE_ESCAPE|JNODE_JSON5); + pParse->hasNonstd = 1; }else{ + pParse->iErr = j; return -1; } - }else if( c=='"' ){ - break; + }else if( c<=0x1f ){ + /* Control characters are not allowed in strings */ + pParse->iErr = j; + return -1; } - j++; } - jsonParseAddNode(pParse, JSON_STRING, j+1-i, &z[i]); - if( !pParse->oom ) pParse->aNode[pParse->nNode-1].jnFlags = jnFlags; + jsonParseAddNode(pParse, JSON_STRING | (jnFlags<<8), j+1-i, &z[i]); return j+1; - }else if( c=='n' - && strncmp(z+i,"null",4)==0 - && !sqlite3Isalnum(z[i+4]) ){ - jsonParseAddNode(pParse, JSON_NULL, 0, 0); - return i+4; - }else if( c=='t' - && strncmp(z+i,"true",4)==0 - && !sqlite3Isalnum(z[i+4]) ){ - jsonParseAddNode(pParse, JSON_TRUE, 0, 0); - return i+4; - }else if( c=='f' - && strncmp(z+i,"false",5)==0 - && !sqlite3Isalnum(z[i+5]) ){ - jsonParseAddNode(pParse, JSON_FALSE, 0, 0); - return i+5; - }else if( c=='-' || (c>='0' && c<='9') ){ + } + case 't': { + if( strncmp(z+i,"true",4)==0 && !sqlite3Isalnum(z[i+4]) ){ + jsonParseAddNode(pParse, JSON_TRUE, 0, 0); + return i+4; + } + pParse->iErr = i; + return -1; + } + case 'f': { + if( strncmp(z+i,"false",5)==0 && !sqlite3Isalnum(z[i+5]) ){ + jsonParseAddNode(pParse, JSON_FALSE, 0, 0); + return i+5; + } + pParse->iErr = i; + return -1; + } + case '+': { + u8 seenDP, seenE, jnFlags; + pParse->hasNonstd = 1; + jnFlags = JNODE_JSON5; + goto parse_number; + case '.': + if( sqlite3Isdigit(z[i+1]) ){ + pParse->hasNonstd = 1; + jnFlags = JNODE_JSON5; + seenE = 0; + seenDP = JSON_REAL; + goto parse_number_2; + } + pParse->iErr = i; + return -1; + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': /* Parse number */ - u8 seenDP = 0; - u8 seenE = 0; + jnFlags = 0; + parse_number: + seenDP = JSON_INT; + seenE = 0; assert( '-' < '0' ); + assert( '+' < '0' ); + assert( '.' < '0' ); + c = z[i]; + if( c<='0' ){ - j = c=='-' ? 
i+1 : i; - if( z[j]=='0' && z[j+1]>='0' && z[j+1]<='9' ) return -1; + if( c=='0' ){ + if( (z[i+1]=='x' || z[i+1]=='X') && sqlite3Isxdigit(z[i+2]) ){ + assert( seenDP==JSON_INT ); + pParse->hasNonstd = 1; + jnFlags |= JNODE_JSON5; + for(j=i+3; sqlite3Isxdigit(z[j]); j++){} + goto parse_number_finish; + }else if( sqlite3Isdigit(z[i+1]) ){ + pParse->iErr = i+1; + return -1; + } + }else{ + if( !sqlite3Isdigit(z[i+1]) ){ + /* JSON5 allows for "+Infinity" and "-Infinity" using exactly + ** that case. SQLite also allows these in any case and it allows + ** "+inf" and "-inf". */ + if( (z[i+1]=='I' || z[i+1]=='i') + && sqlite3StrNICmp(&z[i+1], "inf",3)==0 + ){ + pParse->hasNonstd = 1; + if( z[i]=='-' ){ + jsonParseAddNode(pParse, JSON_REAL, 8, "-9.0e999"); + }else{ + jsonParseAddNode(pParse, JSON_REAL, 7, "9.0e999"); + } + return i + (sqlite3StrNICmp(&z[i+4],"inity",5)==0 ? 9 : 4); + } + if( z[i+1]=='.' ){ + pParse->hasNonstd = 1; + jnFlags |= JNODE_JSON5; + goto parse_number_2; + } + pParse->iErr = i; + return -1; + } + if( z[i+1]=='0' ){ + if( sqlite3Isdigit(z[i+2]) ){ + pParse->iErr = i+1; + return -1; + }else if( (z[i+2]=='x' || z[i+2]=='X') && sqlite3Isxdigit(z[i+3]) ){ + pParse->hasNonstd = 1; + jnFlags |= JNODE_JSON5; + for(j=i+4; sqlite3Isxdigit(z[j]); j++){} + goto parse_number_finish; + } + } + } } - j = i+1; - for(;; j++){ + parse_number_2: + for(j=i+1;; j++){ c = z[j]; - if( c>='0' && c<='9' ) continue; + if( sqlite3Isdigit(c) ) continue; if( c=='.' ){ - if( z[j-1]=='-' ) return -1; - if( seenDP ) return -1; - seenDP = 1; + if( seenDP==JSON_REAL ){ + pParse->iErr = j; + return -1; + } + seenDP = JSON_REAL; continue; } if( c=='e' || c=='E' ){ - if( z[j-1]<'0' ) return -1; - if( seenE ) return -1; - seenDP = seenE = 1; + if( z[j-1]<'0' ){ + if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){ + pParse->hasNonstd = 1; + jnFlags |= JNODE_JSON5; + }else{ + pParse->iErr = j; + return -1; + } + } + if( seenE ){ + pParse->iErr = j; + return -1; + } + seenDP = JSON_REAL; + seenE = 1; c = z[j+1]; if( c=='+' || c=='-' ){ j++; c = z[j+1]; } - if( c<'0' || c>'9' ) return -1; + if( c<'0' || c>'9' ){ + pParse->iErr = j; + return -1; + } continue; } break; } - if( z[j-1]<'0' ) return -1; - jsonParseAddNode(pParse, seenDP ? JSON_REAL : JSON_INT, - j - i, &z[i]); + if( z[j-1]<'0' ){ + if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){ + pParse->hasNonstd = 1; + jnFlags |= JNODE_JSON5; + }else{ + pParse->iErr = j; + return -1; + } + } + parse_number_finish: + jsonParseAddNode(pParse, seenDP | (jnFlags<<8), j - i, &z[i]); return j; - }else if( c=='}' ){ + } + case '}': { + pParse->iErr = i; return -2; /* End of {...} */ - }else if( c==']' ){ + } + case ']': { + pParse->iErr = i; return -3; /* End of [...] 
*/ - }else if( c==0 ){ + } + case ',': { + pParse->iErr = i; + return -4; /* List separator */ + } + case ':': { + pParse->iErr = i; + return -5; /* Object label/value separator */ + } + case 0: { return 0; /* End of file */ - }else{ + } + case 0x09: + case 0x0a: + case 0x0d: + case 0x20: { + do{ + i++; + }while( fast_isspace(z[i]) ); + goto json_parse_restart; + } + case 0x0b: + case 0x0c: + case '/': + case 0xc2: + case 0xe1: + case 0xe2: + case 0xe3: + case 0xef: { + j = json5Whitespace(&z[i]); + if( j>0 ){ + i += j; + pParse->hasNonstd = 1; + goto json_parse_restart; + } + pParse->iErr = i; + return -1; + } + case 'n': { + if( strncmp(z+i,"null",4)==0 && !sqlite3Isalnum(z[i+4]) ){ + jsonParseAddNode(pParse, JSON_NULL, 0, 0); + return i+4; + } + /* fall-through into the default case that checks for NaN */ + } + default: { + u32 k; + int nn; + c = z[i]; + for(k=0; k<sizeof(aNanInfName)/sizeof(aNanInfName[0]); k++){ + if( c!=aNanInfName[k].c1 && c!=aNanInfName[k].c2 ) continue; + nn = aNanInfName[k].n; + if( sqlite3StrNICmp(&z[i], aNanInfName[k].zMatch, nn)!=0 ){ + continue; + } + if( sqlite3Isalnum(z[i+nn]) ) continue; + jsonParseAddNode(pParse, aNanInfName[k].eType, + aNanInfName[k].nRepl, aNanInfName[k].zRepl); + pParse->hasNonstd = 1; + return i + nn; + } + pParse->iErr = i; return -1; /* Syntax error */ } + } /* End switch(z[i]) */ } /* ** Parse a complete JSON string. Return 0 on success or non-zero if there -** are any errors. If an error occurs, free all memory associated with -** pParse. +** are any errors. If an error occurs, free all memory held by pParse, +** but not pParse itself. ** -** pParse is uninitialized when this routine is called. +** pParse must be initialized to an empty parse object prior to calling +** this routine. */ static int jsonParse( JsonParse *pParse, /* Initialize and fill this JsonParse object */ - sqlite3_context *pCtx, /* Report errors here */ - const char *zJson /* Input JSON text to be parsed */ + sqlite3_context *pCtx /* Report errors here */ ){ int i; - memset(pParse, 0, sizeof(*pParse)); - if( zJson==0 ) return 1; - pParse->zJson = zJson; + const char *zJson = pParse->zJson; i = jsonParseValue(pParse, 0); if( pParse->oom ) i = -1; if( i>0 ){ assert( pParse->iDepth==0 ); while( fast_isspace(zJson[i]) ) i++; - if( zJson[i] ) i = -1; + if( zJson[i] ){ + i += json5Whitespace(&zJson[i]); + if( zJson[i] ){ + jsonParseReset(pParse); + return 1; + } + pParse->hasNonstd = 1; + } } if( i<=0 ){ if( pCtx!=0 ){
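
jsonParseValue() now reports what it stopped on through small negative sentinels (-1 syntax error, -2 '}', -3 ']', -4 ',', -5 ':'), which is how the object and array branches recover from JSON5 constructs such as trailing commas without a separate tokenizer. A toy example of driving a parser written in that style; all names and the grammar are invented for illustration:

    #include <stdio.h>

    enum { P_ERROR = -1, P_END = -2, P_COMMA = -3 };

    /* Toy value parser in the same style: returns the index just past a
    ** single-digit value, or a negative sentinel for ']' / ',' / errors. */
    static int parseValue(const char *z, int i){
      char c = z[i];
      if( c>='0' && c<='9' ) return i+1;
      if( c==']' ) return P_END;
      if( c==',' ) return P_COMMA;
      return P_ERROR;
    }

    /* Caller: counts elements of "[1,2,3]"-style input, tolerating a
    ** trailing comma the way a JSON5-aware parser would. */
    static int countElems(const char *z){
      int i = 1, n = 0;          /* assumes z[0]=='[' */
      for(;;){
        int x = parseValue(z, i);
        if( x>0 ){ n++; i = x; continue; }
        if( x==P_END ) return n;
        if( x==P_COMMA ){ i++; continue; }
        return -1;
      }
    }

    int main(void){
      printf("%d\n", countElems("[1,2,3,]"));   /* prints 3 */
      return 0;
    }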

@@ -198132,6 +204451,7 @@ return 1;

} return 0; } + /* Mark node i of pParse as being a child of iParent. Call recursively ** to fill in all the descendants of node i.

@@ -198182,26 +204502,49 @@ #define JSON_CACHE_ID (-429938) /* First cache entry */

#define JSON_CACHE_SZ 4 /* Max number of cache entries */ /* -** Obtain a complete parse of the JSON found in the first argument -** of the argv array. Use the sqlite3_get_auxdata() cache for this -** parse if it is available. If the cache is not available or if it -** is no longer valid, parse the JSON again and return the new parse, -** and also register the new parse so that it will be available for +** Obtain a complete parse of the JSON found in the pJson argument +** +** Use the sqlite3_get_auxdata() cache to find a preexisting parse +** if it is available. If the cache is not available or if it +** is no longer valid, parse the JSON again and return the new parse. +** Also register the new parse so that it will be available for ** future sqlite3_get_auxdata() calls. +** +** If an error occurs and pErrCtx!=0 then report the error on pErrCtx +** and return NULL. +** +** The returned pointer (if it is not NULL) is owned by the cache in +** most cases, not the caller. The caller does NOT need to invoke +** jsonParseFree(), in most cases. +** +** Except, if an error occurs and pErrCtx==0 then return the JsonParse +** object with JsonParse.nErr non-zero and the caller will own the JsonParse +** object. In that case, it will be the responsibility of the caller to +** invoke jsonParseFree(). To summarize: +** +** pErrCtx!=0 || p->nErr==0 ==> Return value p is owned by the +** cache. Call does not need to +** free it. +** +** pErrCtx==0 && p->nErr!=0 ==> Return value is owned by the caller +** and so the caller must free it. */ static JsonParse *jsonParseCached( - sqlite3_context *pCtx, - sqlite3_value **argv, - sqlite3_context *pErrCtx + sqlite3_context *pCtx, /* Context to use for cache search */ + sqlite3_value *pJson, /* Function param containing JSON text */ + sqlite3_context *pErrCtx, /* Write parse errors here if not NULL */ + int bUnedited /* No prior edits allowed */ ){ - const char *zJson = (const char*)sqlite3_value_text(argv[0]); - int nJson = sqlite3_value_bytes(argv[0]); + char *zJson = (char*)sqlite3_value_text(pJson); + int nJson = sqlite3_value_bytes(pJson); JsonParse *p; JsonParse *pMatch = 0; int iKey; int iMinKey = 0; u32 iMinHold = 0xffffffff; u32 iMaxHold = 0; + int bJsonRCStr; + if( zJson==0 ) return 0; for(iKey=0; iKey<JSON_CACHE_SZ; iKey++){ p = (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iKey);

@@ -198211,9 +204554,21 @@ break;

} if( pMatch==0 && p->nJson==nJson - && memcmp(p->zJson,zJson,nJson)==0 + && (p->hasMod==0 || bUnedited==0) + && (p->zJson==zJson || memcmp(p->zJson,zJson,nJson)==0) ){ p->nErr = 0; + p->useMod = 0; + pMatch = p; + }else + if( pMatch==0 + && p->zAlt!=0 + && bUnedited==0 + && p->nAlt==nJson + && memcmp(p->zAlt, zJson, nJson)==0 + ){ + p->nErr = 0; + p->useMod = 1; pMatch = p; }else if( p->iHold<iMinHold ){ iMinHold = p->iHold;

@@ -198224,24 +204579,44 @@ iMaxHold = p->iHold;

} } if( pMatch ){ + /* The input JSON text was found in the cache. Use the preexisting + ** parse of this JSON */ pMatch->nErr = 0; pMatch->iHold = iMaxHold+1; + assert( pMatch->nJPRef>0 ); /* pMatch is owned by the cache */ return pMatch; } - p = sqlite3_malloc64( sizeof(*p) + nJson + 1 ); + + /* The input JSON was not found anywhere in the cache. We will need + ** to parse it ourselves and generate a new JsonParse object. + */ + bJsonRCStr = sqlite3ValueIsOfClass(pJson,sqlite3RCStrUnref); + p = sqlite3_malloc64( sizeof(*p) + (bJsonRCStr ? 0 : nJson+1) ); if( p==0 ){ sqlite3_result_error_nomem(pCtx); return 0; } memset(p, 0, sizeof(*p)); - p->zJson = (char*)&p[1]; - memcpy((char*)p->zJson, zJson, nJson+1); - if( jsonParse(p, pErrCtx, p->zJson) ){ - sqlite3_free(p); + if( bJsonRCStr ){ + p->zJson = sqlite3RCStrRef(zJson); + p->bJsonIsRCStr = 1; + }else{ + p->zJson = (char*)&p[1]; + memcpy(p->zJson, zJson, nJson+1); + } + p->nJPRef = 1; + if( jsonParse(p, pErrCtx) ){ + if( pErrCtx==0 ){ + p->nErr = 1; + assert( p->nJPRef==1 ); /* Caller will own the new JsonParse object p */ + return p; + } + jsonParseFree(p); return 0; } p->nJson = nJson; p->iHold = iMaxHold+1; + /* Transfer ownership of the new JsonParse to the cache */ sqlite3_set_auxdata(pCtx, JSON_CACHE_ID+iMinKey, p, (void(*)(void*))jsonParseFree); return (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iMinKey);
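
jsonParseCached() hangs the parse off the function argument with sqlite3_get_auxdata()/sqlite3_set_auxdata(), so several JSON calls over the same text within one statement share a single parse. The same public API can cache any expensive per-argument state in an application-defined function; a hedged sketch in which the "expensive" step is just strlen() and cached_len is an invented function name:

    #include <string.h>
    #include "sqlite3.h"

    /* Cached state derived from argument 0. */
    typedef struct LenCache { int nByte; } LenCache;

    static void lenFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      LenCache *p = (LenCache*)sqlite3_get_auxdata(ctx, 0);
      int nByte;
      (void)argc;
      if( p ){
        nByte = p->nByte;                       /* cache hit */
      }else{
        const unsigned char *z = sqlite3_value_text(argv[0]);
        if( z==0 ) return;
        nByte = (int)strlen((const char*)z);    /* the "expensive" work */
        p = sqlite3_malloc(sizeof(*p));
        if( p ){
          p->nByte = nByte;
          /* SQLite owns the cache from here on and may free it at any time,
          ** so the result below is taken from the local copy. */
          sqlite3_set_auxdata(ctx, 0, p, sqlite3_free);
        }
      }
      sqlite3_result_int(ctx, nByte);
    }

    /* Registration (after opening the database):
    **   sqlite3_create_function(db, "cached_len", 1,
    **       SQLITE_UTF8|SQLITE_DETERMINISTIC, 0, lenFunc, 0, 0);
    */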

@@ -198251,7 +204626,7 @@ /*

** Compare the OBJECT label at pNode against zKey,nKey. Return true on ** a match. */ -static int jsonLabelCompare(JsonNode *pNode, const char *zKey, u32 nKey){ +static int jsonLabelCompare(const JsonNode *pNode, const char *zKey, u32 nKey){ assert( pNode->eU==1 ); if( pNode->jnFlags & JNODE_RAW ){ if( pNode->n!=nKey ) return 0;

@@ -198261,6 +204636,15 @@ if( pNode->n!=nKey+2 ) return 0;

return strncmp(pNode->u.zJContent+1, zKey, nKey)==0; } } +static int jsonSameLabel(const JsonNode *p1, const JsonNode *p2){ + if( p1->jnFlags & JNODE_RAW ){ + return jsonLabelCompare(p2, p1->u.zJContent, p1->n); + }else if( p2->jnFlags & JNODE_RAW ){ + return jsonLabelCompare(p1, p2->u.zJContent, p2->n); + }else{ + return p1->n==p2->n && strncmp(p1->u.zJContent,p2->u.zJContent,p1->n)==0; + } +} /* forward declaration */ static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**);

@@ -198283,9 +204667,31 @@ const char **pzErr /* Make *pzErr point to any syntax error in zPath */

){ u32 i, j, nKey; const char *zKey; - JsonNode *pRoot = &pParse->aNode[iRoot]; + JsonNode *pRoot; + if( pParse->oom ) return 0; + pRoot = &pParse->aNode[iRoot]; + if( pRoot->jnFlags & (JNODE_REPLACE|JNODE_REMOVE) && pParse->useMod ){ + while( (pRoot->jnFlags & JNODE_REPLACE)!=0 ){ + u32 idx = (u32)(pRoot - pParse->aNode); + i = pParse->iSubst; + while( 1 /*exit-by-break*/ ){ + assert( i<pParse->nNode ); + assert( pParse->aNode[i].eType==JSON_SUBST ); + assert( pParse->aNode[i].eU==4 ); + assert( pParse->aNode[i].u.iPrev<i ); + if( pParse->aNode[i].n==idx ){ + pRoot = &pParse->aNode[i+1]; + iRoot = i+1; + break; + } + i = pParse->aNode[i].u.iPrev; + } + } + if( pRoot->jnFlags & JNODE_REMOVE ){ + return 0; + } + } if( zPath[0]==0 ) return pRoot; - if( pRoot->jnFlags & JNODE_REPLACE ) return 0; if( zPath[0]=='.' ){ if( pRoot->eType!=JSON_OBJECT ) return 0; zPath++;

@@ -198319,14 +204725,16 @@ j++;

j += jsonNodeSize(&pRoot[j]); } if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break; + if( pParse->useMod==0 ) break; assert( pRoot->eU==2 ); - iRoot += pRoot->u.iAppend; + iRoot = pRoot->u.iAppend; pRoot = &pParse->aNode[iRoot]; j = 1; } if( pApnd ){ u32 iStart, iLabel; JsonNode *pNode; + assert( pParse->useMod ); iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0); iLabel = jsonParseAddNode(pParse, JSON_STRING, nKey, zKey); zPath += i;

@@ -198335,7 +204743,7 @@ if( pParse->oom ) return 0;

if( pNode ){ pRoot = &pParse->aNode[iRoot]; assert( pRoot->eU==0 ); - pRoot->u.iAppend = iStart - iRoot; + pRoot->u.iAppend = iStart; pRoot->jnFlags |= JNODE_APPEND; VVA( pRoot->eU = 2 ); pParse->aNode[iLabel].jnFlags |= JNODE_RAW;

@@ -198356,12 +204764,13 @@ int iBase = iRoot;

if( pRoot->eType!=JSON_ARRAY ) return 0; for(;;){ while( j<=pBase->n ){ - if( (pBase[j].jnFlags & JNODE_REMOVE)==0 ) i++; + if( (pBase[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ) i++; j += jsonNodeSize(&pBase[j]); } if( (pBase->jnFlags & JNODE_APPEND)==0 ) break; + if( pParse->useMod==0 ) break; assert( pBase->eU==2 ); - iBase += pBase->u.iAppend; + iBase = pBase->u.iAppend; pBase = &pParse->aNode[iBase]; j = 1; }

@@ -198389,13 +204798,17 @@ if( pRoot->eType!=JSON_ARRAY ) return 0;

zPath += j + 1; j = 1; for(;;){ - while( j<=pRoot->n && (i>0 || (pRoot[j].jnFlags & JNODE_REMOVE)!=0) ){ - if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 ) i--; + while( j<=pRoot->n + && (i>0 || ((pRoot[j].jnFlags & JNODE_REMOVE)!=0 && pParse->useMod)) + ){ + if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 || pParse->useMod==0 ) i--; j += jsonNodeSize(&pRoot[j]); } + if( i==0 && j<=pRoot->n ) break; if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break; + if( pParse->useMod==0 ) break; assert( pRoot->eU==2 ); - iRoot += pRoot->u.iAppend; + iRoot = pRoot->u.iAppend; pRoot = &pParse->aNode[iRoot]; j = 1; }

@@ -198405,13 +204818,14 @@ }

if( i==0 && pApnd ){ u32 iStart; JsonNode *pNode; + assert( pParse->useMod ); iStart = jsonParseAddNode(pParse, JSON_ARRAY, 1, 0); pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr); if( pParse->oom ) return 0; if( pNode ){ pRoot = &pParse->aNode[iRoot]; assert( pRoot->eU==0 ); - pRoot->u.iAppend = iStart - iRoot; + pRoot->u.iAppend = iStart; pRoot->jnFlags |= JNODE_APPEND; VVA( pRoot->eU = 2 ); }

@@ -198538,47 +204952,90 @@ /****************************************************************************

** SQL functions used for testing and debugging ****************************************************************************/ +#if SQLITE_DEBUG +/* +** Print N node entries. +*/ +static void jsonDebugPrintNodeEntries( + JsonNode *aNode, /* First node entry to print */ + int N /* Number of node entries to print */ +){ + int i; + for(i=0; i<N; i++){ + const char *zType; + if( aNode[i].jnFlags & JNODE_LABEL ){ + zType = "label"; + }else{ + zType = jsonType[aNode[i].eType]; + } + printf("node %4u: %-7s n=%-5d", i, zType, aNode[i].n); + if( (aNode[i].jnFlags & ~JNODE_LABEL)!=0 ){ + u8 f = aNode[i].jnFlags; + if( f & JNODE_RAW ) printf(" RAW"); + if( f & JNODE_ESCAPE ) printf(" ESCAPE"); + if( f & JNODE_REMOVE ) printf(" REMOVE"); + if( f & JNODE_REPLACE ) printf(" REPLACE"); + if( f & JNODE_APPEND ) printf(" APPEND"); + if( f & JNODE_JSON5 ) printf(" JSON5"); + } + switch( aNode[i].eU ){ + case 1: printf(" zJContent=[%.*s]\n", + aNode[i].n, aNode[i].u.zJContent); break; + case 2: printf(" iAppend=%u\n", aNode[i].u.iAppend); break; + case 3: printf(" iKey=%u\n", aNode[i].u.iKey); break; + case 4: printf(" iPrev=%u\n", aNode[i].u.iPrev); break; + default: printf("\n"); + } + } +} +#endif /* SQLITE_DEBUG */ + + +#if 0 /* 1 for debugging. 0 normally. Requires -DSQLITE_DEBUG too */ +static void jsonDebugPrintParse(JsonParse *p){ + jsonDebugPrintNodeEntries(p->aNode, p->nNode); +} +static void jsonDebugPrintNode(JsonNode *pNode){ + jsonDebugPrintNodeEntries(pNode, jsonNodeSize(pNode)); +} +#else + /* The usual case */ +# define jsonDebugPrintNode(X) +# define jsonDebugPrintParse(X) +#endif + #ifdef SQLITE_DEBUG /* -** The json_parse(JSON) function returns a string which describes -** a parse of the JSON provided. Or it returns NULL if JSON is not -** well-formed. +** SQL function: json_parse(JSON) +** +** Parse JSON using jsonParseCached(). Then print a dump of that +** parse on standard output. Return the mimified JSON result, just +** like the json() function. 
*/ static void jsonParseFunc( sqlite3_context *ctx, int argc, sqlite3_value **argv ){ - JsonString s; /* Output string - not real JSON */ - JsonParse x; /* The parse */ - u32 i; + JsonParse *p; /* The parse */ assert( argc==1 ); - if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return; - jsonParseFindParents(&x); - jsonInit(&s, ctx); - for(i=0; i<x.nNode; i++){ - const char *zType; - if( x.aNode[i].jnFlags & JNODE_LABEL ){ - assert( x.aNode[i].eType==JSON_STRING ); - zType = "label"; - }else{ - zType = jsonType[x.aNode[i].eType]; - } - jsonPrintf(100, &s,"node %3u: %7s n=%-4d up=%-4d", - i, zType, x.aNode[i].n, x.aUp[i]); - assert( x.aNode[i].eU==0 || x.aNode[i].eU==1 ); - if( x.aNode[i].u.zJContent!=0 ){ - assert( x.aNode[i].eU==1 ); - jsonAppendRaw(&s, " ", 1); - jsonAppendRaw(&s, x.aNode[i].u.zJContent, x.aNode[i].n); - }else{ - assert( x.aNode[i].eU==0 ); - } - jsonAppendRaw(&s, "\n", 1); - } - jsonParseReset(&x); - jsonResult(&s); + p = jsonParseCached(ctx, argv[0], ctx, 0); + if( p==0 ) return; + printf("nNode = %u\n", p->nNode); + printf("nAlloc = %u\n", p->nAlloc); + printf("nJson = %d\n", p->nJson); + printf("nAlt = %d\n", p->nAlt); + printf("nErr = %u\n", p->nErr); + printf("oom = %u\n", p->oom); + printf("hasNonstd = %u\n", p->hasNonstd); + printf("useMod = %u\n", p->useMod); + printf("hasMod = %u\n", p->hasMod); + printf("nJPRef = %u\n", p->nJPRef); + printf("iSubst = %u\n", p->iSubst); + printf("iHold = %u\n", p->iHold); + jsonDebugPrintNodeEntries(p->aNode, p->nNode); + jsonReturnJson(p, p->aNode, ctx, 1, 0); } /*

@@ -198662,7 +205119,7 @@ sqlite3_int64 n = 0;

u32 i; JsonNode *pNode; - p = jsonParseCached(ctx, argv, ctx); + p = jsonParseCached(ctx, argv[0], ctx, 0); if( p==0 ) return; assert( p->nNode ); if( argc==2 ){

@@ -198675,9 +205132,16 @@ if( pNode==0 ){

return; } if( pNode->eType==JSON_ARRAY ){ - assert( (pNode->jnFlags & JNODE_APPEND)==0 ); - for(i=1; i<=pNode->n; n++){ - i += jsonNodeSize(&pNode[i]); + while( 1 /*exit-by-break*/ ){ + i = 1; + while( i<=pNode->n ){ + if( (pNode[i].jnFlags & JNODE_REMOVE)==0 ) n++; + i += jsonNodeSize(&pNode[i]); + } + if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; + if( p->useMod==0 ) break; + assert( pNode->eU==2 ); + pNode = &p->aNode[pNode->u.iAppend]; } } sqlite3_result_int64(ctx, n);
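A quick way to see the effect of the reworked counting loop above is from the SQL level: elements flagged with JNODE_REMOVE are skipped and, when pending modifications are in use, the JNODE_APPEND chain is followed, so json_array_length() reports the logical length of the (possibly edited) array. A minimal sketch driven through the C API; the :memory: database and the JSON literals are invented for the illustration:

  #include <sqlite3.h>
  #include <stdio.h>

  int main(void){
    sqlite3 *db;
    sqlite3_stmt *pStmt;
    sqlite3_open(":memory:", &db);
    sqlite3_prepare_v2(db,
        "SELECT json_array_length('[1,2,3,4]'),"              /* whole document */
        "       json_array_length('{\"a\":[7,8,9]}', '$.a')", /* with a PATH */
        -1, &pStmt, 0);
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%d %d\n", sqlite3_column_int(pStmt, 0),
                        sqlite3_column_int(pStmt, 1));        /* expected: 4 3 */
    }
    sqlite3_finalize(pStmt);
    sqlite3_close(db);
    return 0;
  }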

@@ -198724,14 +205188,14 @@ int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx));

JsonString jx; if( argc<2 ) return; - p = jsonParseCached(ctx, argv, ctx); + p = jsonParseCached(ctx, argv[0], ctx, 0); if( p==0 ) return; if( argc==2 ){ /* With a single PATH argument */ zPath = (const char*)sqlite3_value_text(argv[1]); if( zPath==0 ) return; if( flags & JSON_ABPATH ){ - if( zPath[0]!='$' ){ + if( zPath[0]!='$' || (zPath[1]!='.' && zPath[1]!='[' && zPath[1]!=0) ){ /* The -> and ->> operators accept abbreviated PATH arguments. This ** is mostly for compatibility with PostgreSQL, but also for ** convenience.

@@ -198742,11 +205206,11 @@ ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience

*/ jsonInit(&jx, ctx); if( sqlite3Isdigit(zPath[0]) ){ - jsonAppendRaw(&jx, "$[", 2); + jsonAppendRawNZ(&jx, "$[", 2); jsonAppendRaw(&jx, zPath, (int)strlen(zPath)); - jsonAppendRaw(&jx, "]", 2); + jsonAppendRawNZ(&jx, "]", 2); }else{ - jsonAppendRaw(&jx, "$.", 1 + (zPath[0]!='[')); + jsonAppendRawNZ(&jx, "$.", 1 + (zPath[0]!='[')); jsonAppendRaw(&jx, zPath, (int)strlen(zPath)); jsonAppendChar(&jx, 0); }
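The comment in the hunk above spells out the abbreviated PATH forms that the -> and ->> operators accept ('label' becomes $.label, a bare NUMBER becomes $[NUMBER], and '[NUMBER]' stays an array index). A hedged illustration from the C API; the sample JSON and the :memory: database are invented for the example:

  #include <sqlite3.h>
  #include <stdio.h>

  static int show(void *arg, int nCol, char **azVal, char **azCol){
    int i;
    (void)arg;
    for(i=0; i<nCol; i++) printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
    return 0;
  }

  int main(void){
    sqlite3 *db;
    sqlite3_open(":memory:", &db);
    sqlite3_exec(db,
      "SELECT '{\"a\":{\"b\":9}}' ->  'a'     AS a_json,"   /* 'a' is treated as $.a */
      "       '{\"a\":{\"b\":9}}' ->> '$.a.b' AS b_value,"  /* full paths still work */
      "       '[10,11,12]'        ->> 1       AS second;",  /* 1 is treated as $[1]  */
      show, 0, 0);
    sqlite3_close(db);
    return 0;
  }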

@@ -198757,15 +205221,14 @@ pNode = jsonLookup(p, zPath, 0, ctx);

} if( pNode ){ if( flags & JSON_JSON ){ - jsonReturnJson(pNode, ctx, 0); + jsonReturnJson(p, pNode, ctx, 0, 0); }else{ - jsonReturn(pNode, ctx, 0); - sqlite3_result_subtype(ctx, 0); + jsonReturn(p, pNode, ctx, 1); } } }else{ pNode = jsonLookup(p, zPath, 0, ctx); - if( p->nErr==0 && pNode ) jsonReturn(pNode, ctx, 0); + if( p->nErr==0 && pNode ) jsonReturn(p, pNode, ctx, 0); } }else{ /* Two or more PATH arguments results in a JSON array with each

@@ -198779,9 +205242,9 @@ pNode = jsonLookup(p, zPath, 0, ctx);

if( p->nErr ) break; jsonAppendSeparator(&jx); if( pNode ){ - jsonRenderNode(pNode, &jx, 0); + jsonRenderNode(p, pNode, &jx); }else{ - jsonAppendRaw(&jx, "null", 4); + jsonAppendRawNZ(&jx, "null", 4); } } if( i==argc ){

@@ -198822,51 +205285,42 @@ assert( pPatch[i].jnFlags & JNODE_LABEL );

assert( pPatch[i].eU==1 ); nKey = pPatch[i].n; zKey = pPatch[i].u.zJContent; - assert( (pPatch[i].jnFlags & JNODE_RAW)==0 ); for(j=1; j<pTarget->n; j += jsonNodeSize(&pTarget[j+1])+1 ){ assert( pTarget[j].eType==JSON_STRING ); assert( pTarget[j].jnFlags & JNODE_LABEL ); - assert( (pPatch[i].jnFlags & JNODE_RAW)==0 ); - if( pTarget[j].n==nKey && strncmp(pTarget[j].u.zJContent,zKey,nKey)==0 ){ - if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_PATCH) ) break; + if( jsonSameLabel(&pPatch[i], &pTarget[j]) ){ + if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_REPLACE) ) break; if( pPatch[i+1].eType==JSON_NULL ){ pTarget[j+1].jnFlags |= JNODE_REMOVE; }else{ JsonNode *pNew = jsonMergePatch(pParse, iTarget+j+1, &pPatch[i+1]); if( pNew==0 ) return 0; + if( pNew!=&pParse->aNode[iTarget+j+1] ){ + jsonParseAddSubstNode(pParse, iTarget+j+1); + jsonParseAddNodeArray(pParse, pNew, jsonNodeSize(pNew)); + } pTarget = &pParse->aNode[iTarget]; - if( pNew!=&pTarget[j+1] ){ - assert( pTarget[j+1].eU==0 - || pTarget[j+1].eU==1 - || pTarget[j+1].eU==2 ); - testcase( pTarget[j+1].eU==1 ); - testcase( pTarget[j+1].eU==2 ); - VVA( pTarget[j+1].eU = 5 ); - pTarget[j+1].u.pPatch = pNew; - pTarget[j+1].jnFlags |= JNODE_PATCH; - } } break; } } if( j>=pTarget->n && pPatch[i+1].eType!=JSON_NULL ){ - int iStart, iPatch; - iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0); + int iStart; + JsonNode *pApnd; + u32 nApnd; + iStart = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0); jsonParseAddNode(pParse, JSON_STRING, nKey, zKey); - iPatch = jsonParseAddNode(pParse, JSON_TRUE, 0, 0); + pApnd = &pPatch[i+1]; + if( pApnd->eType==JSON_OBJECT ) jsonRemoveAllNulls(pApnd); + nApnd = jsonNodeSize(pApnd); + jsonParseAddNodeArray(pParse, pApnd, jsonNodeSize(pApnd)); if( pParse->oom ) return 0; - jsonRemoveAllNulls(pPatch); - pTarget = &pParse->aNode[iTarget]; - assert( pParse->aNode[iRoot].eU==0 || pParse->aNode[iRoot].eU==2 ); - testcase( pParse->aNode[iRoot].eU==2 ); + pParse->aNode[iStart].n = 1+nApnd; pParse->aNode[iRoot].jnFlags |= JNODE_APPEND; + pParse->aNode[iRoot].u.iAppend = iStart; VVA( pParse->aNode[iRoot].eU = 2 ); - pParse->aNode[iRoot].u.iAppend = iStart - iRoot; iRoot = iStart; - assert( pParse->aNode[iPatch].eU==0 ); - VVA( pParse->aNode[iPatch].eU = 5 ); - pParse->aNode[iPatch].jnFlags |= JNODE_PATCH; - pParse->aNode[iPatch].u.pPatch = &pPatch[i+1]; + pTarget = &pParse->aNode[iTarget]; } } return pTarget;

@@ -198882,25 +205336,28 @@ sqlite3_context *ctx,

int argc, sqlite3_value **argv ){ - JsonParse x; /* The JSON that is being patched */ - JsonParse y; /* The patch */ + JsonParse *pX; /* The JSON that is being patched */ + JsonParse *pY; /* The patch */ JsonNode *pResult; /* The result of the merge */ UNUSED_PARAMETER(argc); - if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return; - if( jsonParse(&y, ctx, (const char*)sqlite3_value_text(argv[1])) ){ - jsonParseReset(&x); - return; - } - pResult = jsonMergePatch(&x, 0, y.aNode); - assert( pResult!=0 || x.oom ); - if( pResult ){ - jsonReturnJson(pResult, ctx, 0); + pX = jsonParseCached(ctx, argv[0], ctx, 1); + if( pX==0 ) return; + assert( pX->hasMod==0 ); + pX->hasMod = 1; + pY = jsonParseCached(ctx, argv[1], ctx, 1); + if( pY==0 ) return; + pX->useMod = 1; + pY->useMod = 1; + pResult = jsonMergePatch(pX, 0, pY->aNode); + assert( pResult!=0 || pX->oom ); + if( pResult && pX->oom==0 ){ + jsonDebugPrintParse(pX); + jsonDebugPrintNode(pResult); + jsonReturnJson(pX, pResult, ctx, 0, 0); }else{ sqlite3_result_error_nomem(ctx); } - jsonParseReset(&x); - jsonParseReset(&y); }
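json_patch() follows the RFC 7386 MergePatch rules that jsonMergePatch() applies to the node trees above: a patch key mapped to null deletes that key, nested objects are merged recursively, and anything else simply replaces the target value. A small sketch of the observable behavior (the inputs are invented; the output shown is what is expected, up to key ordering):

  #include <sqlite3.h>
  #include <stdio.h>

  static int show(void *arg, int nCol, char **azVal, char **azCol){
    (void)arg; (void)nCol; (void)azCol;
    printf("%s\n", azVal[0] ? azVal[0] : "NULL");
    return 0;
  }

  int main(void){
    sqlite3 *db;
    sqlite3_open(":memory:", &db);
    sqlite3_exec(db,
      "SELECT json_patch('{\"a\":1,\"b\":{\"c\":2,\"d\":3}}',"
      "                  '{\"b\":{\"c\":null,\"e\":4},\"f\":5}');",
      show, 0, 0);
    /* Expected, up to key order: {"a":1,"b":{"d":3,"e":4},"f":5}
    ** - "c" is removed because the patch maps it to null
    ** - "e" and "f" are added; "a" and "d" are left untouched */
    sqlite3_close(db);
    return 0;
  }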

@@ -198956,26 +205413,120 @@ sqlite3_context *ctx,

int argc, sqlite3_value **argv ){ - JsonParse x; /* The parse */ + JsonParse *pParse; /* The parse */ JsonNode *pNode; const char *zPath; u32 i; if( argc<1 ) return; - if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return; - assert( x.nNode ); + pParse = jsonParseCached(ctx, argv[0], ctx, argc>1); + if( pParse==0 ) return; for(i=1; i<(u32)argc; i++){ zPath = (const char*)sqlite3_value_text(argv[i]); if( zPath==0 ) goto remove_done; - pNode = jsonLookup(&x, zPath, 0, ctx); - if( x.nErr ) goto remove_done; - if( pNode ) pNode->jnFlags |= JNODE_REMOVE; + pNode = jsonLookup(pParse, zPath, 0, ctx); + if( pParse->nErr ) goto remove_done; + if( pNode ){ + pNode->jnFlags |= JNODE_REMOVE; + pParse->hasMod = 1; + pParse->useMod = 1; + } } - if( (x.aNode[0].jnFlags & JNODE_REMOVE)==0 ){ - jsonReturnJson(x.aNode, ctx, 0); + if( (pParse->aNode[0].jnFlags & JNODE_REMOVE)==0 ){ + jsonReturnJson(pParse, pParse->aNode, ctx, 1, 0); } remove_done: - jsonParseReset(&x); + jsonDebugPrintParse(p); +} + +/* +** Substitute the value at iNode with the pValue parameter. +*/ +static void jsonReplaceNode( + sqlite3_context *pCtx, + JsonParse *p, + int iNode, + sqlite3_value *pValue +){ + int idx = jsonParseAddSubstNode(p, iNode); + if( idx<=0 ){ + assert( p->oom ); + return; + } + switch( sqlite3_value_type(pValue) ){ + case SQLITE_NULL: { + jsonParseAddNode(p, JSON_NULL, 0, 0); + break; + } + case SQLITE_FLOAT: { + char *z = sqlite3_mprintf("%!0.15g", sqlite3_value_double(pValue)); + int n; + if( z==0 ){ + p->oom = 1; + break; + } + n = sqlite3Strlen30(z); + jsonParseAddNode(p, JSON_REAL, n, z); + jsonParseAddCleanup(p, sqlite3_free, z); + break; + } + case SQLITE_INTEGER: { + char *z = sqlite3_mprintf("%lld", sqlite3_value_int64(pValue)); + int n; + if( z==0 ){ + p->oom = 1; + break; + } + n = sqlite3Strlen30(z); + jsonParseAddNode(p, JSON_INT, n, z); + jsonParseAddCleanup(p, sqlite3_free, z); + + break; + } + case SQLITE_TEXT: { + const char *z = (const char*)sqlite3_value_text(pValue); + u32 n = (u32)sqlite3_value_bytes(pValue); + if( z==0 ){ + p->oom = 1; + break; + } + if( sqlite3_value_subtype(pValue)!=JSON_SUBTYPE ){ + char *zCopy = sqlite3_malloc64( n+1 ); + int k; + if( zCopy ){ + memcpy(zCopy, z, n); + zCopy[n] = 0; + jsonParseAddCleanup(p, sqlite3_free, zCopy); + }else{ + p->oom = 1; + sqlite3_result_error_nomem(pCtx); + } + k = jsonParseAddNode(p, JSON_STRING, n, zCopy); + assert( k>0 || p->oom ); + if( p->oom==0 ) p->aNode[k].jnFlags |= JNODE_RAW; + }else{ + JsonParse *pPatch = jsonParseCached(pCtx, pValue, pCtx, 1); + if( pPatch==0 ){ + p->oom = 1; + break; + } + jsonParseAddNodeArray(p, pPatch->aNode, pPatch->nNode); + /* The nodes copied out of pPatch and into p likely contain + ** u.zJContent pointers into pPatch->zJson. So preserve the + ** content of pPatch until p is destroyed. */ + assert( pPatch->nJPRef>=1 ); + pPatch->nJPRef++; + jsonParseAddCleanup(p, (void(*)(void*))jsonParseFree, pPatch); + } + break; + } + default: { + jsonParseAddNode(p, JSON_NULL, 0, 0); + sqlite3_result_error(pCtx, "JSON cannot hold BLOB values", -1); + p->nErr++; + break; + } + } } /*

@@ -198989,7 +205540,7 @@ sqlite3_context *ctx,

int argc, sqlite3_value **argv ){ - JsonParse x; /* The parse */ + JsonParse *pParse; /* The parse */ JsonNode *pNode; const char *zPath; u32 i;

@@ -198999,28 +205550,22 @@ if( (argc&1)==0 ) {

jsonWrongNumArgs(ctx, "replace"); return; } - if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return; - assert( x.nNode ); + pParse = jsonParseCached(ctx, argv[0], ctx, argc>1); + if( pParse==0 ) return; + pParse->nJPRef++; for(i=1; i<(u32)argc; i+=2){ zPath = (const char*)sqlite3_value_text(argv[i]); - pNode = jsonLookup(&x, zPath, 0, ctx); - if( x.nErr ) goto replace_err; + pParse->useMod = 1; + pNode = jsonLookup(pParse, zPath, 0, ctx); + if( pParse->nErr ) goto replace_err; if( pNode ){ - assert( pNode->eU==0 || pNode->eU==1 || pNode->eU==4 ); - testcase( pNode->eU!=0 && pNode->eU!=1 ); - pNode->jnFlags |= (u8)JNODE_REPLACE; - VVA( pNode->eU = 4 ); - pNode->u.iReplace = i + 1; + jsonReplaceNode(ctx, pParse, (u32)(pNode - pParse->aNode), argv[i+1]); } } - if( x.aNode[0].jnFlags & JNODE_REPLACE ){ - assert( x.aNode[0].eU==4 ); - sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]); - }else{ - jsonReturnJson(x.aNode, ctx, argv); - } + jsonReturnJson(pParse, pParse->aNode, ctx, 1, 0); replace_err: - jsonParseReset(&x); + jsonDebugPrintParse(pParse); + jsonParseFree(pParse); }

@@ -199041,7 +205586,7 @@ sqlite3_context *ctx,

int argc, sqlite3_value **argv ){ - JsonParse x; /* The parse */ + JsonParse *pParse; /* The parse */ JsonNode *pNode; const char *zPath; u32 i;

@@ -199053,33 +205598,27 @@ if( (argc&1)==0 ) {

jsonWrongNumArgs(ctx, bIsSet ? "set" : "insert"); return; } - if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return; - assert( x.nNode ); + pParse = jsonParseCached(ctx, argv[0], ctx, argc>1); + if( pParse==0 ) return; + pParse->nJPRef++; for(i=1; i<(u32)argc; i+=2){ zPath = (const char*)sqlite3_value_text(argv[i]); bApnd = 0; - pNode = jsonLookup(&x, zPath, &bApnd, ctx); - if( x.oom ){ + pParse->useMod = 1; + pNode = jsonLookup(pParse, zPath, &bApnd, ctx); + if( pParse->oom ){ sqlite3_result_error_nomem(ctx); goto jsonSetDone; - }else if( x.nErr ){ + }else if( pParse->nErr ){ goto jsonSetDone; }else if( pNode && (bApnd || bIsSet) ){ - testcase( pNode->eU!=0 && pNode->eU!=1 ); - assert( pNode->eU!=3 && pNode->eU!=5 ); - VVA( pNode->eU = 4 ); - pNode->jnFlags |= (u8)JNODE_REPLACE; - pNode->u.iReplace = i + 1; + jsonReplaceNode(ctx, pParse, (u32)(pNode - pParse->aNode), argv[i+1]); } } - if( x.aNode[0].jnFlags & JNODE_REPLACE ){ - assert( x.aNode[0].eU==4 ); - sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]); - }else{ - jsonReturnJson(x.aNode, ctx, argv); - } + jsonDebugPrintParse(pParse); + jsonReturnJson(pParse, pParse->aNode, ctx, 1, 0); jsonSetDone: - jsonParseReset(&x); + jsonParseFree(pParse); } /*

@@ -199098,7 +205637,7 @@ JsonParse *p; /* The parse */

const char *zPath; JsonNode *pNode; - p = jsonParseCached(ctx, argv, ctx); + p = jsonParseCached(ctx, argv[0], ctx, 0); if( p==0 ) return; if( argc==2 ){ zPath = (const char*)sqlite3_value_text(argv[1]);

@@ -199114,8 +205653,8 @@

/* ** json_valid(JSON) ** -** Return 1 if JSON is a well-formed JSON string according to RFC-7159. -** Return 0 otherwise. +** Return 1 if JSON is a well-formed canonical JSON string according +** to RFC-7159. Return 0 otherwise. */ static void jsonValidFunc( sqlite3_context *ctx,

@@ -199124,8 +205663,75 @@ sqlite3_value **argv

){ JsonParse *p; /* The parse */ UNUSED_PARAMETER(argc); - p = jsonParseCached(ctx, argv, 0); - sqlite3_result_int(ctx, p!=0); + if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ +#ifdef SQLITE_LEGACY_JSON_VALID + /* Incorrect legacy behavior was to return FALSE for a NULL input */ + sqlite3_result_int(ctx, 0); +#endif + return; + } + p = jsonParseCached(ctx, argv[0], 0, 0); + if( p==0 || p->oom ){ + sqlite3_result_error_nomem(ctx); + sqlite3_free(p); + }else{ + sqlite3_result_int(ctx, p->nErr==0 && (p->hasNonstd==0 || p->useMod)); + if( p->nErr ) jsonParseFree(p); + } +} + +/* +** json_error_position(JSON) +** +** If the argument is not an interpretable JSON string, then return the 1-based +** character position at which the parser first recognized that the input +** was in error. The left-most character is 1. If the string is valid +** JSON, then return 0. +** +** Note that json_valid() is only true for strictly conforming canonical JSON. +** But this routine returns zero if the input contains extension. Thus: +** +** (1) If the input X is strictly conforming canonical JSON: +** +** json_valid(X) returns true +** json_error_position(X) returns 0 +** +** (2) If the input X is JSON but it includes extension (such as JSON5) that +** are not part of RFC-8259: +** +** json_valid(X) returns false +** json_error_position(X) return 0 +** +** (3) If the input X cannot be interpreted as JSON even taking extensions +** into account: +** +** json_valid(X) return false +** json_error_position(X) returns 1 or more +*/ +static void jsonErrorFunc( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv +){ + JsonParse *p; /* The parse */ + UNUSED_PARAMETER(argc); + if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; + p = jsonParseCached(ctx, argv[0], 0, 0); + if( p==0 || p->oom ){ + sqlite3_result_error_nomem(ctx); + sqlite3_free(p); + }else if( p->nErr==0 ){ + sqlite3_result_int(ctx, 0); + }else{ + int n = 1; + u32 i; + const char *z = (const char*)sqlite3_value_text(argv[0]); + for(i=0; i<p->iErr && ALWAYS(z[i]); i++){ + if( (z[i]&0xc0)!=0x80 ) n++; + } + sqlite3_result_int(ctx, n); + jsonParseFree(p); + } }
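The three cases listed in the json_error_position() comment are easy to verify interactively. A sketch through the C API; the inputs are invented, and the exact position reported in case (3) depends on where the parser gives up:

  #include <sqlite3.h>
  #include <stdio.h>

  static int show(void *arg, int nCol, char **azVal, char **azCol){
    int i;
    (void)arg;
    for(i=0; i<nCol; i++) printf("%s=%s  ", azCol[i], azVal[i] ? azVal[i] : "NULL");
    printf("\n");
    return 0;
  }

  int main(void){
    sqlite3 *db;
    sqlite3_open(":memory:", &db);
    /* (1) canonical JSON: valid=1, pos=0 */
    sqlite3_exec(db, "SELECT json_valid('{\"x\":1}') AS valid,"
                     "       json_error_position('{\"x\":1}') AS pos;", show, 0, 0);
    /* (2) JSON5 extension (unquoted key): valid=0, pos=0 */
    sqlite3_exec(db, "SELECT json_valid('{x:1}') AS valid,"
                     "       json_error_position('{x:1}') AS pos;", show, 0, 0);
    /* (3) not interpretable as JSON at all: valid=0, pos>0 */
    sqlite3_exec(db, "SELECT json_valid('{\"x\":') AS valid,"
                     "       json_error_position('{\"x\":') AS pos;", show, 0, 0);
    sqlite3_close(db);
    return 0;
  }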

@@ -199167,7 +205773,8 @@ if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);

assert( pStr->bStatic ); }else if( isFinal ){ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, - pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free); + pStr->bStatic ? SQLITE_TRANSIENT : + sqlite3RCStrUnref); pStr->bStatic = 1; }else{ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);

@@ -199208,7 +205815,7 @@ UNUSED_PARAMETER(argv);

pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0); #ifdef NEVER /* pStr is always non-NULL since jsonArrayStep() or jsonObjectStep() will - ** always have been called to initalize it */ + ** always have been called to initialize it */ if( NEVER(!pStr) ) return; #endif z = pStr->zBuf;

@@ -199275,7 +205882,8 @@ if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);

assert( pStr->bStatic ); }else if( isFinal ){ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, - pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free); + pStr->bStatic ? SQLITE_TRANSIENT : + sqlite3RCStrUnref); pStr->bStatic = 1; }else{ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT);

@@ -199386,7 +205994,6 @@

/* Reset a JsonEachCursor back to its original state. Free any memory ** held. */ static void jsonEachCursorReset(JsonEachCursor *p){ - sqlite3_free(p->zJson); sqlite3_free(p->zRoot); jsonParseReset(&p->sParse); p->iRowid = 0;

@@ -199469,14 +206076,16 @@ assert( pNode->jnFlags & JNODE_LABEL );

assert( pNode->eU==1 ); z = pNode->u.zJContent; nn = pNode->n; - assert( nn>=2 ); - assert( z[0]=='"' ); - assert( z[nn-1]=='"' ); - if( nn>2 && sqlite3Isalpha(z[1]) ){ - for(jj=2; jj<nn-1 && sqlite3Isalnum(z[jj]); jj++){} - if( jj==nn-1 ){ - z++; - nn -= 2; + if( (pNode->jnFlags & JNODE_RAW)==0 ){ + assert( nn>=2 ); + assert( z[0]=='"' || z[0]=='\'' ); + assert( z[nn-1]=='"' || z[0]=='\'' ); + if( nn>2 && sqlite3Isalpha(z[1]) ){ + for(jj=2; jj<nn-1 && sqlite3Isalnum(z[jj]); jj++){} + if( jj==nn-1 ){ + z++; + nn -= 2; + } } } jsonPrintf(nn+2, pStr, ".%.*s", nn, z);

@@ -199522,7 +206131,7 @@ switch( i ){

case JEACH_KEY: { if( p->i==0 ) break; if( p->eType==JSON_OBJECT ){ - jsonReturn(pThis, ctx, 0); + jsonReturn(&p->sParse, pThis, ctx, 0); }else if( p->eType==JSON_ARRAY ){ u32 iKey; if( p->bRecursive ){

@@ -199538,7 +206147,7 @@ break;

} case JEACH_VALUE: { if( pThis->jnFlags & JNODE_LABEL ) pThis++; - jsonReturn(pThis, ctx, 0); + jsonReturn(&p->sParse, pThis, ctx, 0); break; } case JEACH_TYPE: {

@@ -199549,7 +206158,7 @@ }

case JEACH_ATOM: { if( pThis->jnFlags & JNODE_LABEL ) pThis++; if( pThis->eType>=JSON_ARRAY ) break; - jsonReturn(pThis, ctx, 0); + jsonReturn(&p->sParse, pThis, ctx, 0); break; } case JEACH_ID: {

@@ -199653,6 +206262,13 @@ aIdx[iCol] = i;

idxMask |= iMask; } } + if( pIdxInfo->nOrderBy>0 + && pIdxInfo->aOrderBy[0].iColumn<0 + && pIdxInfo->aOrderBy[0].desc==0 + ){ + pIdxInfo->orderByConsumed = 1; + } + if( (unusableMask & ~idxMask)!=0 ){ /* If there are any unusable constraints on JSON or ROOT, then reject ** this entire plan */

@@ -199697,11 +206313,19 @@ jsonEachCursorReset(p);

if( idxNum==0 ) return SQLITE_OK; z = (const char*)sqlite3_value_text(argv[0]); if( z==0 ) return SQLITE_OK; - n = sqlite3_value_bytes(argv[0]); - p->zJson = sqlite3_malloc64( n+1 ); - if( p->zJson==0 ) return SQLITE_NOMEM; - memcpy(p->zJson, z, (size_t)n+1); - if( jsonParse(&p->sParse, 0, p->zJson) ){ + memset(&p->sParse, 0, sizeof(p->sParse)); + p->sParse.nJPRef = 1; + if( sqlite3ValueIsOfClass(argv[0], sqlite3RCStrUnref) ){ + p->sParse.zJson = sqlite3RCStrRef((char*)z); + }else{ + n = sqlite3_value_bytes(argv[0]); + p->sParse.zJson = sqlite3RCStrNew( n+1 ); + if( p->sParse.zJson==0 ) return SQLITE_NOMEM; + memcpy(p->sParse.zJson, z, (size_t)n+1); + } + p->sParse.bJsonIsRCStr = 1; + p->zJson = p->sParse.zJson; + if( jsonParse(&p->sParse, 0) ){ int rc = SQLITE_NOMEM; if( p->sParse.oom==0 ){ sqlite3_free(cur->pVtab->zErrMsg);

@@ -199786,7 +206410,8 @@ 0, /* xRename */

0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; /* The methods of the json_tree virtual table. */

@@ -199814,7 +206439,8 @@ 0, /* xRename */

0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; #endif /* SQLITE_OMIT_VIRTUALTABLE */ #endif /* !defined(SQLITE_OMIT_JSON) */

@@ -199825,33 +206451,43 @@ */

SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){ #ifndef SQLITE_OMIT_JSON static FuncDef aJsonFunc[] = { - JFUNCTION(json, 1, 0, jsonRemoveFunc), - JFUNCTION(json_array, -1, 0, jsonArrayFunc), - JFUNCTION(json_array_length, 1, 0, jsonArrayLengthFunc), - JFUNCTION(json_array_length, 2, 0, jsonArrayLengthFunc), - JFUNCTION(json_extract, -1, 0, jsonExtractFunc), - JFUNCTION(->, 2, JSON_JSON, jsonExtractFunc), - JFUNCTION(->>, 2, JSON_SQL, jsonExtractFunc), - JFUNCTION(json_insert, -1, 0, jsonSetFunc), - JFUNCTION(json_object, -1, 0, jsonObjectFunc), - JFUNCTION(json_patch, 2, 0, jsonPatchFunc), - JFUNCTION(json_quote, 1, 0, jsonQuoteFunc), - JFUNCTION(json_remove, -1, 0, jsonRemoveFunc), - JFUNCTION(json_replace, -1, 0, jsonReplaceFunc), - JFUNCTION(json_set, -1, JSON_ISSET, jsonSetFunc), - JFUNCTION(json_type, 1, 0, jsonTypeFunc), - JFUNCTION(json_type, 2, 0, jsonTypeFunc), - JFUNCTION(json_valid, 1, 0, jsonValidFunc), -#if SQLITE_DEBUG - JFUNCTION(json_parse, 1, 0, jsonParseFunc), - JFUNCTION(json_test1, 1, 0, jsonTest1Func), + /* calls sqlite3_result_subtype() */ + /* | */ + /* Uses cache ______ | __ calls sqlite3_value_subtype() */ + /* | | | */ + /* Num args _________ | | | ___ Flags */ + /* | | | | | */ + /* | | | | | */ + JFUNCTION(json, 1, 1, 1, 0, 0, jsonRemoveFunc), + JFUNCTION(json_array, -1, 0, 1, 1, 0, jsonArrayFunc), + JFUNCTION(json_array_length, 1, 1, 0, 0, 0, jsonArrayLengthFunc), + JFUNCTION(json_array_length, 2, 1, 0, 0, 0, jsonArrayLengthFunc), + JFUNCTION(json_error_position,1, 1, 0, 0, 0, jsonErrorFunc), + JFUNCTION(json_extract, -1, 1, 1, 0, 0, jsonExtractFunc), + JFUNCTION(->, 2, 1, 1, 0, JSON_JSON, jsonExtractFunc), + JFUNCTION(->>, 2, 1, 0, 0, JSON_SQL, jsonExtractFunc), + JFUNCTION(json_insert, -1, 1, 1, 1, 0, jsonSetFunc), + JFUNCTION(json_object, -1, 0, 1, 1, 0, jsonObjectFunc), + JFUNCTION(json_patch, 2, 1, 1, 0, 0, jsonPatchFunc), + JFUNCTION(json_quote, 1, 0, 1, 1, 0, jsonQuoteFunc), + JFUNCTION(json_remove, -1, 1, 1, 0, 0, jsonRemoveFunc), + JFUNCTION(json_replace, -1, 1, 1, 1, 0, jsonReplaceFunc), + JFUNCTION(json_set, -1, 1, 1, 1, JSON_ISSET, jsonSetFunc), + JFUNCTION(json_type, 1, 1, 0, 0, 0, jsonTypeFunc), + JFUNCTION(json_type, 2, 1, 0, 0, 0, jsonTypeFunc), + JFUNCTION(json_valid, 1, 1, 0, 0, 0, jsonValidFunc), +#ifdef SQLITE_DEBUG + JFUNCTION(json_parse, 1, 1, 1, 0, 0, jsonParseFunc), + JFUNCTION(json_test1, 1, 1, 0, 1, 0, jsonTest1Func), #endif WAGGREGATE(json_group_array, 1, 0, 0, jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse, - SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS), + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8| + SQLITE_DETERMINISTIC), WAGGREGATE(json_group_object, 2, 0, 0, jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse, - SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS) + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8| + SQLITE_DETERMINISTIC) }; sqlite3InsertBuiltinFuncs(aJsonFunc, ArraySize(aJsonFunc)); #endif

@@ -199978,6 +206614,11 @@ # define NEVER(X) (X)

#endif #endif /* !defined(SQLITE_AMALGAMATION) */ +/* Macro to check for 4-byte alignment. Only used inside of assert() */ +#ifdef SQLITE_DEBUG +# define FOUR_BYTE_ALIGNED(X) ((((char*)(X) - (char*)0) & 3)==0) +#endif + /* #include <string.h> */ /* #include <stdio.h> */ /* #include <assert.h> */

@@ -200043,6 +206684,7 @@ #endif

int iDepth; /* Current depth of the r-tree structure */ char *zDb; /* Name of database containing r-tree table */ char *zName; /* Name of r-tree table */ + char *zNodeName; /* Name of the %_node table */ u32 nBusy; /* Current number of users of this structure */ i64 nRowEst; /* Estimated number of rows in this table */ u32 nCursor; /* Number of open cursors */

@@ -200055,7 +206697,6 @@ ** RtreeNode.pNext. RtreeNode.iNode stores the depth of the sub-tree

** headed by the node (leaf nodes have RtreeNode.iNode==0). */ RtreeNode *pDeleted; - int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */ /* Blob I/O on xxx_node */ sqlite3_blob *pNodeBlob;

@@ -200352,17 +206993,23 @@ ** using C-preprocessor macros. If that is unsuccessful, or if

** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined ** at run-time. */ -#ifndef SQLITE_BYTEORDER -#if defined(i386) || defined(__i386__) || defined(_M_IX86) || \ - defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ - defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ - defined(__arm__) -# define SQLITE_BYTEORDER 1234 -#elif defined(sparc) || defined(__ppc__) -# define SQLITE_BYTEORDER 4321 -#else -# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */ -#endif +#ifndef SQLITE_BYTEORDER /* Replicate changes at tag-20230904a */ +# if defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__ +# define SQLITE_BYTEORDER 4321 +# elif defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__ +# define SQLITE_BYTEORDER 1234 +# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__==1 +# define SQLITE_BYTEORDER 4321 +# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \ + defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ + defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ + defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64) +# define SQLITE_BYTEORDER 1234 +# elif defined(sparc) || defined(__ARMEB__) || defined(__AARCH64EB__) +# define SQLITE_BYTEORDER 4321 +# else +# define SQLITE_BYTEORDER 0 +# endif #endif
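When none of the compile-time tests above apply, SQLITE_BYTEORDER stays 0 ("unknown at compile-time") and the byte order has to be probed at run time. The usual probe is a one-word store/load such as the sketch below; this is an illustration of the technique, not the exact macro SQLite uses internally:

  #include <stdio.h>

  /* Return 1 on a little-endian host, 0 on a big-endian host. */
  static int isLittleEndian(void){
    const unsigned int one = 1;
    return *(const unsigned char*)&one == 1;   /* LSB is stored first on LE */
  }

  int main(void){
    printf("byte order: %s\n", isLittleEndian() ? "1234 (little-endian)"
                                                : "4321 (big-endian)");
    return 0;
  }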

@@ -200383,7 +207030,7 @@ static int readInt16(u8 *p){

return (p[0]<<8) + p[1]; } static void readCoord(u8 *p, RtreeCoord *pCoord){ - assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */ + assert( FOUR_BYTE_ALIGNED(p) ); #if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 pCoord->u = _byteswap_ulong(*(u32*)p); #elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000

@@ -200437,7 +207084,7 @@ p[1] = (i>> 0)&0xFF;

} static int writeCoord(u8 *p, RtreeCoord *pCoord){ u32 i; - assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */ + assert( FOUR_BYTE_ALIGNED(p) ); assert( sizeof(RtreeCoord)==4 ); assert( sizeof(u32)==4 ); #if SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000

@@ -200608,11 +207255,9 @@ if( rc==SQLITE_NOMEM ) return SQLITE_NOMEM;

} } if( pRtree->pNodeBlob==0 ){ - char *zTab = sqlite3_mprintf("%s_node", pRtree->zName); - if( zTab==0 ) return SQLITE_NOMEM; - rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, zTab, "data", iNode, 0, + rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, pRtree->zNodeName, + "data", iNode, 0, &pRtree->pNodeBlob); - sqlite3_free(zTab); } if( rc ){ nodeBlobReset(pRtree);

@@ -201165,7 +207810,7 @@

assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE || p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE || p->op==RTREE_FALSE ); - assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */ + assert( FOUR_BYTE_ALIGNED(pCellData) ); switch( p->op ){ case RTREE_TRUE: return; /* Always satisfied */ case RTREE_FALSE: break; /* Never satisfied */

@@ -201218,7 +207863,7 @@ assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE

|| p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE || p->op==RTREE_FALSE ); pCellData += 8 + p->iCoord*4; - assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */ + assert( FOUR_BYTE_ALIGNED(pCellData) ); RTREE_DECODE_COORD(eInt, pCellData, xN); switch( p->op ){ case RTREE_TRUE: return; /* Always satisfied */

@@ -201788,7 +208433,20 @@ }

p->pInfo->nCoord = pRtree->nDim2; p->pInfo->anQueue = pCsr->anQueue; p->pInfo->mxLevel = pRtree->iDepth + 1; - }else if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ + }else if( eType==SQLITE_INTEGER ){ + sqlite3_int64 iVal = sqlite3_value_int64(argv[ii]); +#ifdef SQLITE_RTREE_INT_ONLY + p->u.rValue = iVal; +#else + p->u.rValue = (double)iVal; + if( iVal>=((sqlite3_int64)1)<<48 + || iVal<=-(((sqlite3_int64)1)<<48) + ){ + if( p->op==RTREE_LT ) p->op = RTREE_LE; + if( p->op==RTREE_GT ) p->op = RTREE_GE; + } +#endif + }else if( eType==SQLITE_FLOAT ){ #ifdef SQLITE_RTREE_INT_ONLY p->u.rValue = sqlite3_value_int64(argv[ii]); #else

@@ -201919,11 +208577,12 @@ && ((p->iColumn>0 && p->iColumn<=pRtree->nDim2)

|| p->op==SQLITE_INDEX_CONSTRAINT_MATCH) ){ u8 op; + u8 doOmit = 1; switch( p->op ){ - case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; break; - case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; break; + case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; doOmit = 0; break; + case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; doOmit = 0; break; case SQLITE_INDEX_CONSTRAINT_LE: op = RTREE_LE; break; - case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; break; + case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; doOmit = 0; break; case SQLITE_INDEX_CONSTRAINT_GE: op = RTREE_GE; break; case SQLITE_INDEX_CONSTRAINT_MATCH: op = RTREE_MATCH; break; default: op = 0; break;

@@ -201932,15 +208591,19 @@ if( op ){

zIdxStr[iIdx++] = op; zIdxStr[iIdx++] = (char)(p->iColumn - 1 + '0'); pIdxInfo->aConstraintUsage[ii].argvIndex = (iIdx/2); - pIdxInfo->aConstraintUsage[ii].omit = 1; + pIdxInfo->aConstraintUsage[ii].omit = doOmit; } } } pIdxInfo->idxNum = 2; pIdxInfo->needToFreeIdxStr = 1; - if( iIdx>0 && 0==(pIdxInfo->idxStr = sqlite3_mprintf("%s", zIdxStr)) ){ - return SQLITE_NOMEM; + if( iIdx>0 ){ + pIdxInfo->idxStr = sqlite3_malloc( iIdx+1 ); + if( pIdxInfo->idxStr==0 ){ + return SQLITE_NOMEM; + } + memcpy(pIdxInfo->idxStr, zIdxStr, iIdx+1); } nRow = pRtree->nRowEst >> (iIdx/2);

@@ -202019,31 +208682,22 @@ ** by p1. False otherwise.

*/ static int cellContains(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){ int ii; - int isInt = (pRtree->eCoordType==RTREE_COORD_INT32); - for(ii=0; ii<pRtree->nDim2; ii+=2){ - RtreeCoord *a1 = &p1->aCoord[ii]; - RtreeCoord *a2 = &p2->aCoord[ii]; - if( (!isInt && (a2[0].f<a1[0].f || a2[1].f>a1[1].f)) - || ( isInt && (a2[0].i<a1[0].i || a2[1].i>a1[1].i)) - ){ - return 0; + if( pRtree->eCoordType==RTREE_COORD_INT32 ){ + for(ii=0; ii<pRtree->nDim2; ii+=2){ + RtreeCoord *a1 = &p1->aCoord[ii]; + RtreeCoord *a2 = &p2->aCoord[ii]; + if( a2[0].i<a1[0].i || a2[1].i>a1[1].i ) return 0; + } + }else{ + for(ii=0; ii<pRtree->nDim2; ii+=2){ + RtreeCoord *a1 = &p1->aCoord[ii]; + RtreeCoord *a2 = &p2->aCoord[ii]; + if( a2[0].f<a1[0].f || a2[1].f>a1[1].f ) return 0; } } return 1; } -/* -** Return the amount cell p would grow by if it were unioned with pCell. -*/ -static RtreeDValue cellGrowth(Rtree *pRtree, RtreeCell *p, RtreeCell *pCell){ - RtreeDValue area; - RtreeCell cell; - memcpy(&cell, p, sizeof(RtreeCell)); - area = cellArea(pRtree, &cell); - cellUnion(pRtree, &cell, pCell); - return (cellArea(pRtree, &cell)-area); -} - static RtreeDValue cellOverlap( Rtree *pRtree, RtreeCell *p,

@@ -202090,38 +208744,52 @@

for(ii=0; rc==SQLITE_OK && ii<(pRtree->iDepth-iHeight); ii++){ int iCell; sqlite3_int64 iBest = 0; - + int bFound = 0; RtreeDValue fMinGrowth = RTREE_ZERO; RtreeDValue fMinArea = RTREE_ZERO; - int nCell = NCELL(pNode); - RtreeCell cell; RtreeNode *pChild = 0; - RtreeCell *aCell = 0; - - /* Select the child node which will be enlarged the least if pCell - ** is inserted into it. Resolve ties by choosing the entry with - ** the smallest area. + /* First check to see if there is are any cells in pNode that completely + ** contains pCell. If two or more cells in pNode completely contain pCell + ** then pick the smallest. */ for(iCell=0; iCell<nCell; iCell++){ - int bBest = 0; - RtreeDValue growth; - RtreeDValue area; + RtreeCell cell; nodeGetCell(pRtree, pNode, iCell, &cell); - growth = cellGrowth(pRtree, &cell, pCell); - area = cellArea(pRtree, &cell); - if( iCell==0||growth<fMinGrowth||(growth==fMinGrowth && area<fMinArea) ){ - bBest = 1; + if( cellContains(pRtree, &cell, pCell) ){ + RtreeDValue area = cellArea(pRtree, &cell); + if( bFound==0 || area<fMinArea ){ + iBest = cell.iRowid; + fMinArea = area; + bFound = 1; + } } - if( bBest ){ - fMinGrowth = growth; - fMinArea = area; - iBest = cell.iRowid; + } + if( !bFound ){ + /* No cells of pNode will completely contain pCell. So pick the + ** cell of pNode that grows by the least amount when pCell is added. + ** Break ties by selecting the smaller cell. + */ + for(iCell=0; iCell<nCell; iCell++){ + RtreeCell cell; + RtreeDValue growth; + RtreeDValue area; + nodeGetCell(pRtree, pNode, iCell, &cell); + area = cellArea(pRtree, &cell); + cellUnion(pRtree, &cell, pCell); + growth = cellArea(pRtree, &cell)-area; + if( iCell==0 + || growth<fMinGrowth + || (growth==fMinGrowth && area<fMinArea) + ){ + fMinGrowth = growth; + fMinArea = area; + iBest = cell.iRowid; + } } } - sqlite3_free(aCell); rc = nodeAcquire(pRtree, iBest, pNode, &pChild); nodeRelease(pRtree, pNode); pNode = pChild;
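The rewritten subtree selection above works in two passes: first prefer the smallest cell that already contains the new entry outright, and only then fall back to the classic least-enlargement rule, breaking ties by smaller area. A standalone sketch of that policy over plain 2-D boxes; the Box type and helper names are invented for the illustration and are not the rtree's own structures:

  #include <stdio.h>

  typedef struct Box { double x0, x1, y0, y1; } Box;

  static double area(const Box *b){ return (b->x1 - b->x0)*(b->y1 - b->y0); }
  static int contains(const Box *outer, const Box *inner){
    return inner->x0>=outer->x0 && inner->x1<=outer->x1
        && inner->y0>=outer->y0 && inner->y1<=outer->y1;
  }
  static double growth(const Box *b, const Box *p){
    Box u = *b;                       /* bounding box of b and p */
    if( p->x0<u.x0 ) u.x0 = p->x0;
    if( p->x1>u.x1 ) u.x1 = p->x1;
    if( p->y0<u.y0 ) u.y0 = p->y0;
    if( p->y1>u.y1 ) u.y1 = p->y1;
    return area(&u) - area(b);
  }

  /* Return the index of the child box that should receive p. */
  static int chooseChild(const Box *aChild, int nChild, const Box *p){
    int i, iBest = 0, bFound = 0;
    double minArea = 0, minGrowth = 0;
    for(i=0; i<nChild; i++){          /* pass 1: smallest containing box */
      if( contains(&aChild[i], p) ){
        double a = area(&aChild[i]);
        if( !bFound || a<minArea ){ iBest = i; minArea = a; bFound = 1; }
      }
    }
    if( bFound ) return iBest;
    for(i=0; i<nChild; i++){          /* pass 2: least enlargement, then area */
      double a = area(&aChild[i]);
      double g = growth(&aChild[i], p);
      if( i==0 || g<minGrowth || (g==minGrowth && a<minArea) ){
        iBest = i; minGrowth = g; minArea = a;
      }
    }
    return iBest;
  }

  int main(void){
    Box aChild[2] = { {0,10,0,10}, {20,25,20,25} };
    Box p = { 2,3, 2,3 };
    printf("chose child %d\n", chooseChild(aChild, 2, &p));  /* 0: it contains p */
    return 0;
  }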

@@ -202194,77 +208862,6 @@

static int rtreeInsertCell(Rtree *, RtreeNode *, RtreeCell *, int); -/* -** Arguments aIdx, aDistance and aSpare all point to arrays of size -** nIdx. The aIdx array contains the set of integers from 0 to -** (nIdx-1) in no particular order. This function sorts the values -** in aIdx according to the indexed values in aDistance. For -** example, assuming the inputs: -** -** aIdx = { 0, 1, 2, 3 } -** aDistance = { 5.0, 2.0, 7.0, 6.0 } -** -** this function sets the aIdx array to contain: -** -** aIdx = { 0, 1, 2, 3 } -** -** The aSpare array is used as temporary working space by the -** sorting algorithm. -*/ -static void SortByDistance( - int *aIdx, - int nIdx, - RtreeDValue *aDistance, - int *aSpare -){ - if( nIdx>1 ){ - int iLeft = 0; - int iRight = 0; - - int nLeft = nIdx/2; - int nRight = nIdx-nLeft; - int *aLeft = aIdx; - int *aRight = &aIdx[nLeft]; - - SortByDistance(aLeft, nLeft, aDistance, aSpare); - SortByDistance(aRight, nRight, aDistance, aSpare); - - memcpy(aSpare, aLeft, sizeof(int)*nLeft); - aLeft = aSpare; - - while( iLeft<nLeft || iRight<nRight ){ - if( iLeft==nLeft ){ - aIdx[iLeft+iRight] = aRight[iRight]; - iRight++; - }else if( iRight==nRight ){ - aIdx[iLeft+iRight] = aLeft[iLeft]; - iLeft++; - }else{ - RtreeDValue fLeft = aDistance[aLeft[iLeft]]; - RtreeDValue fRight = aDistance[aRight[iRight]]; - if( fLeft<fRight ){ - aIdx[iLeft+iRight] = aLeft[iLeft]; - iLeft++; - }else{ - aIdx[iLeft+iRight] = aRight[iRight]; - iRight++; - } - } - } - -#if 0 - /* Check that the sort worked */ - { - int jj; - for(jj=1; jj<nIdx; jj++){ - RtreeDValue left = aDistance[aIdx[jj-1]]; - RtreeDValue right = aDistance[aIdx[jj]]; - assert( left<=right ); - } - } -#endif - } -} /* ** Arguments aIdx, aCell and aSpare all point to arrays of size

@@ -202749,107 +209346,6 @@

return rc; } -static int Reinsert( - Rtree *pRtree, - RtreeNode *pNode, - RtreeCell *pCell, - int iHeight -){ - int *aOrder; - int *aSpare; - RtreeCell *aCell; - RtreeDValue *aDistance; - int nCell; - RtreeDValue aCenterCoord[RTREE_MAX_DIMENSIONS]; - int iDim; - int ii; - int rc = SQLITE_OK; - int n; - - memset(aCenterCoord, 0, sizeof(RtreeDValue)*RTREE_MAX_DIMENSIONS); - - nCell = NCELL(pNode)+1; - n = (nCell+1)&(~1); - - /* Allocate the buffers used by this operation. The allocation is - ** relinquished before this function returns. - */ - aCell = (RtreeCell *)sqlite3_malloc64(n * ( - sizeof(RtreeCell) + /* aCell array */ - sizeof(int) + /* aOrder array */ - sizeof(int) + /* aSpare array */ - sizeof(RtreeDValue) /* aDistance array */ - )); - if( !aCell ){ - return SQLITE_NOMEM; - } - aOrder = (int *)&aCell[n]; - aSpare = (int *)&aOrder[n]; - aDistance = (RtreeDValue *)&aSpare[n]; - - for(ii=0; ii<nCell; ii++){ - if( ii==(nCell-1) ){ - memcpy(&aCell[ii], pCell, sizeof(RtreeCell)); - }else{ - nodeGetCell(pRtree, pNode, ii, &aCell[ii]); - } - aOrder[ii] = ii; - for(iDim=0; iDim<pRtree->nDim; iDim++){ - aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2]); - aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2+1]); - } - } - for(iDim=0; iDim<pRtree->nDim; iDim++){ - aCenterCoord[iDim] = (aCenterCoord[iDim]/(nCell*(RtreeDValue)2)); - } - - for(ii=0; ii<nCell; ii++){ - aDistance[ii] = RTREE_ZERO; - for(iDim=0; iDim<pRtree->nDim; iDim++){ - RtreeDValue coord = (DCOORD(aCell[ii].aCoord[iDim*2+1]) - - DCOORD(aCell[ii].aCoord[iDim*2])); - aDistance[ii] += (coord-aCenterCoord[iDim])*(coord-aCenterCoord[iDim]); - } - } - - SortByDistance(aOrder, nCell, aDistance, aSpare); - nodeZero(pRtree, pNode); - - for(ii=0; rc==SQLITE_OK && ii<(nCell-(RTREE_MINCELLS(pRtree)+1)); ii++){ - RtreeCell *p = &aCell[aOrder[ii]]; - nodeInsertCell(pRtree, pNode, p); - if( p->iRowid==pCell->iRowid ){ - if( iHeight==0 ){ - rc = rowidWrite(pRtree, p->iRowid, pNode->iNode); - }else{ - rc = parentWrite(pRtree, p->iRowid, pNode->iNode); - } - } - } - if( rc==SQLITE_OK ){ - rc = fixBoundingBox(pRtree, pNode); - } - for(; rc==SQLITE_OK && ii<nCell; ii++){ - /* Find a node to store this cell in. pNode->iNode currently contains - ** the height of the sub-tree headed by the cell. - */ - RtreeNode *pInsert; - RtreeCell *p = &aCell[aOrder[ii]]; - rc = ChooseLeaf(pRtree, p, iHeight, &pInsert); - if( rc==SQLITE_OK ){ - int rc2; - rc = rtreeInsertCell(pRtree, pInsert, p, iHeight); - rc2 = nodeRelease(pRtree, pInsert); - if( rc==SQLITE_OK ){ - rc = rc2; - } - } - } - - sqlite3_free(aCell); - return rc; -} - /* ** Insert cell pCell into node pNode. Node pNode is the head of a ** subtree iHeight high (leaf nodes have iHeight==0).

@@ -202870,12 +209366,7 @@ pChild->pParent = pNode;

} } if( nodeInsertCell(pRtree, pNode, pCell) ){ - if( iHeight<=pRtree->iReinsertHeight || pNode->iNode==1){ - rc = SplitNode(pRtree, pNode, pCell, iHeight); - }else{ - pRtree->iReinsertHeight = iHeight; - rc = Reinsert(pRtree, pNode, pCell, iHeight); - } + rc = SplitNode(pRtree, pNode, pCell, iHeight); }else{ rc = AdjustTree(pRtree, pNode, pCell); if( ALWAYS(rc==SQLITE_OK) ){

@@ -203218,7 +209709,6 @@ rc = ChooseLeaf(pRtree, &cell, 0, &pLeaf);

} if( rc==SQLITE_OK ){ int rc2; - pRtree->iReinsertHeight = -1; rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0); rc2 = nodeRelease(pRtree, pLeaf); if( rc==SQLITE_OK ){

@@ -203359,8 +209849,11 @@ }

return 0; } +/* Forward declaration */ +static int rtreeIntegrity(sqlite3_vtab*, const char*, const char*, int, char**); + static sqlite3_module rtreeModule = { - 3, /* iVersion */ + 4, /* iVersion */ rtreeCreate, /* xCreate - create a table */ rtreeConnect, /* xConnect - connect to an existing table */ rtreeBestIndex, /* xBestIndex - Determine search strategy */

@@ -203383,7 +209876,8 @@ rtreeRename, /* xRename - rename the table */

rtreeSavepoint, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - rtreeShadowName /* xShadowName */ + rtreeShadowName, /* xShadowName */ + rtreeIntegrity /* xIntegrity */ }; static int rtreeSqlInit(

@@ -203639,22 +210133,27 @@ return SQLITE_ERROR;

} sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); + sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); + /* Allocate the sqlite3_vtab structure */ nDb = (int)strlen(argv[1]); nName = (int)strlen(argv[2]); - pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName+2); + pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName*2+8); if( !pRtree ){ return SQLITE_NOMEM; } - memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2); + memset(pRtree, 0, sizeof(Rtree)+nDb+nName*2+8); pRtree->nBusy = 1; pRtree->base.pModule = &rtreeModule; pRtree->zDb = (char *)&pRtree[1]; pRtree->zName = &pRtree->zDb[nDb+1]; + pRtree->zNodeName = &pRtree->zName[nName+1]; pRtree->eCoordType = (u8)eCoordType; memcpy(pRtree->zDb, argv[1], nDb); memcpy(pRtree->zName, argv[2], nName); + memcpy(pRtree->zNodeName, argv[2], nName); + memcpy(&pRtree->zNodeName[nName], "_node", 6); /* Create/Connect to the underlying relational database schema. If

@@ -204151,7 +210650,6 @@ char **pzReport /* OUT: sqlite3_malloc'd report text */

){ RtreeCheck check; /* Common context for various routines */ sqlite3_stmt *pStmt = 0; /* Used to find column count of rtree table */ - int bEnd = 0; /* True if transaction should be closed */ int nAux = 0; /* Number of extra columns. */ /* Initialize the context object */

@@ -204159,14 +210657,6 @@ memset(&check, 0, sizeof(check));

check.db = db; check.zDb = zDb; check.zTab = zTab; - - /* If there is not already an open transaction, open one now. This is - ** to ensure that the queries run as part of this integrity-check operate - ** on a consistent snapshot. */ - if( sqlite3_get_autocommit(db) ){ - check.rc = sqlite3_exec(db, "BEGIN", 0, 0, 0); - bEnd = 1; - } /* Find the number of auxiliary columns */ if( check.rc==SQLITE_OK ){

@@ -204208,16 +210698,35 @@ sqlite3_finalize(check.pGetNode);

sqlite3_finalize(check.aCheckMapping[0]); sqlite3_finalize(check.aCheckMapping[1]); - /* If one was opened, close the transaction */ - if( bEnd ){ - int rc = sqlite3_exec(db, "END", 0, 0, 0); - if( check.rc==SQLITE_OK ) check.rc = rc; - } *pzReport = check.zReport; return check.rc; } /* +** Implementation of the xIntegrity method for Rtree. +*/ +static int rtreeIntegrity( + sqlite3_vtab *pVtab, /* The virtual table to check */ + const char *zSchema, /* Schema in which the virtual table lives */ + const char *zName, /* Name of the virtual table */ + int isQuick, /* True for a quick_check */ + char **pzErr /* Write results here */ +){ + Rtree *pRtree = (Rtree*)pVtab; + int rc; + assert( pzErr!=0 && *pzErr==0 ); + UNUSED_PARAMETER(zSchema); + UNUSED_PARAMETER(zName); + UNUSED_PARAMETER(isQuick); + rc = rtreeCheckTable(pRtree->db, pRtree->zDb, pRtree->zName, pzErr); + if( rc==SQLITE_OK && *pzErr ){ + *pzErr = sqlite3_mprintf("In RTree %s.%s:\n%z", + pRtree->zDb, pRtree->zName, *pzErr); + } + return rc; +} + +/* ** Usage: ** ** rtreecheck(<rtree-table>);
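With the module's iVersion raised to 4 and the new xIntegrity callback wired to rtreeCheckTable(), PRAGMA integrity_check can now report r-tree corruption alongside ordinary b-tree problems, while the explicit rtreecheck() SQL function keeps working as before. A sketch of both entry points from C; the database and table names are placeholders:

  #include <sqlite3.h>
  #include <stdio.h>

  static int show(void *arg, int nCol, char **azVal, char **azCol){
    (void)arg; (void)nCol; (void)azCol;
    printf("%s\n", azVal[0] ? azVal[0] : "NULL");
    return 0;
  }

  int main(void){
    sqlite3 *db;
    if( sqlite3_open("spatial.db", &db)!=SQLITE_OK ) return 1;
    /* Check one r-tree table explicitly */
    sqlite3_exec(db, "SELECT rtreecheck('demo_index');", show, 0, 0);
    /* Whole-database check; r-tree tables participate via xIntegrity */
    sqlite3_exec(db, "PRAGMA integrity_check;", show, 0, 0);
    sqlite3_close(db);
    return 0;
  }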

@@ -204590,7 +211099,7 @@ GeoPoly *p = 0;

int nByte; testcase( pCtx==0 ); if( sqlite3_value_type(pVal)==SQLITE_BLOB - && (nByte = sqlite3_value_bytes(pVal))>=(4+6*sizeof(GeoCoord)) + && (nByte = sqlite3_value_bytes(pVal))>=(int)(4+6*sizeof(GeoCoord)) ){ const unsigned char *a = sqlite3_value_blob(pVal); int nVertex;

@@ -204648,6 +211157,7 @@ int argc,

sqlite3_value **argv ){ GeoPoly *p = geopolyFuncParam(context, argv[0], 0); + (void)argc; if( p ){ sqlite3_result_blob(context, p->hdr, 4+8*p->nVertex, SQLITE_TRANSIENT);

@@ -204667,6 +211177,7 @@ int argc,

sqlite3_value **argv ){ GeoPoly *p = geopolyFuncParam(context, argv[0], 0); + (void)argc; if( p ){ sqlite3 *db = sqlite3_context_db_handle(context); sqlite3_str *x = sqlite3_str_new(db);

@@ -204748,6 +211259,7 @@ double E = sqlite3_value_double(argv[5]);

double F = sqlite3_value_double(argv[6]); GeoCoord x1, y1, x0, y0; int ii; + (void)argc; if( p ){ for(ii=0; ii<p->nVertex; ii++){ x0 = GeoX(p,ii);

@@ -204798,6 +211310,7 @@ int argc,

sqlite3_value **argv ){ GeoPoly *p = geopolyFuncParam(context, argv[0], 0); + (void)argc; if( p ){ sqlite3_result_double(context, geopolyArea(p)); sqlite3_free(p);

@@ -204823,6 +211336,7 @@ int argc,

sqlite3_value **argv ){ GeoPoly *p = geopolyFuncParam(context, argv[0], 0); + (void)argc; if( p ){ if( geopolyArea(p)<0.0 ){ int ii, jj;

@@ -204877,6 +211391,7 @@ double r = sqlite3_value_double(argv[2]);

int n = sqlite3_value_int(argv[3]); int i; GeoPoly *p; + (void)argc; if( n<3 || r<=0.0 ) return; if( n>1000 ) n = 1000;

@@ -204986,6 +211501,7 @@ int argc,

sqlite3_value **argv ){ GeoPoly *p = geopolyBBox(context, argv[0], 0, 0); + (void)argc; if( p ){ sqlite3_result_blob(context, p->hdr, 4+8*p->nVertex, SQLITE_TRANSIENT);

@@ -205013,6 +211529,7 @@ sqlite3_value **argv

){ RtreeCoord a[4]; int rc = SQLITE_OK; + (void)argc; (void)geopolyBBox(context, argv[0], a, &rc); if( rc==SQLITE_OK ){ GeoBBox *pBBox;

@@ -205101,6 +211618,8 @@ double y0 = sqlite3_value_double(argv[2]);

int v = 0; int cnt = 0; int ii; + (void)argc; + if( p1==0 ) return; for(ii=0; ii<p1->nVertex-1; ii++){ v = pointBeneathLine(x0,y0,GeoX(p1,ii), GeoY(p1,ii),

@@ -205140,6 +211659,7 @@ sqlite3_value **argv

){ GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0); GeoPoly *p2 = geopolyFuncParam(context, argv[1], 0); + (void)argc; if( p1 && p2 ){ int x = geopolyOverlap(p1, p2); if( x<0 ){

@@ -205470,6 +211990,7 @@ sqlite3_value **argv

){ GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0); GeoPoly *p2 = geopolyFuncParam(context, argv[1], 0); + (void)argc; if( p1 && p2 ){ int x = geopolyOverlap(p1, p2); if( x<0 ){

@@ -205490,8 +212011,12 @@ sqlite3_context *context,

int argc, sqlite3_value **argv ){ + (void)context; + (void)argc; #ifdef GEOPOLY_ENABLE_DEBUG geo_debug = sqlite3_value_int(argv[0]); +#else + (void)argv; #endif }

@@ -205519,26 +212044,31 @@ sqlite3_int64 nName; /* Length of string argv[2] */

sqlite3_str *pSql; char *zSql; int ii; + (void)pAux; sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); + sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); /* Allocate the sqlite3_vtab structure */ nDb = strlen(argv[1]); nName = strlen(argv[2]); - pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName+2); + pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName*2+8); if( !pRtree ){ return SQLITE_NOMEM; } - memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2); + memset(pRtree, 0, sizeof(Rtree)+nDb+nName*2+8); pRtree->nBusy = 1; pRtree->base.pModule = &rtreeModule; pRtree->zDb = (char *)&pRtree[1]; pRtree->zName = &pRtree->zDb[nDb+1]; + pRtree->zNodeName = &pRtree->zName[nName+1]; pRtree->eCoordType = RTREE_COORD_REAL32; pRtree->nDim = 2; pRtree->nDim2 = 4; memcpy(pRtree->zDb, argv[1], nDb); memcpy(pRtree->zName, argv[2], nName); + memcpy(pRtree->zNodeName, argv[2], nName); + memcpy(&pRtree->zNodeName[nName], "_node", 6); /* Create/Connect to the underlying relational database schema. If

@@ -205635,6 +212165,7 @@ RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;

RtreeNode *pRoot = 0; int rc = SQLITE_OK; int iCell = 0; + (void)idxStr; rtreeReference(pRtree);

@@ -205761,6 +212292,7 @@ int ii;

int iRowidTerm = -1; int iFuncTerm = -1; int idxNum = 0; + (void)tab; for(ii=0; ii<pIdxInfo->nConstraint; ii++){ struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii];

@@ -205950,7 +212482,6 @@ rc = ChooseLeaf(pRtree, &cell, 0, &pLeaf);

} if( rc==SQLITE_OK ){ int rc2; - pRtree->iReinsertHeight = -1; rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0); rc2 = nodeRelease(pRtree, pLeaf); if( rc==SQLITE_OK ){

@@ -206007,6 +212538,8 @@ const char *zName,

void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), void **ppArg ){ + (void)pVtab; + (void)nArg; if( sqlite3_stricmp(zName, "geopoly_overlap")==0 ){ *pxFunc = geopolyOverlapFunc; *ppArg = 0;

@@ -206045,7 +212578,8 @@ rtreeRename, /* xRename - rename the table */

rtreeSavepoint, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - rtreeShadowName /* xShadowName */ + rtreeShadowName, /* xShadowName */ + rtreeIntegrity /* xIntegrity */ }; static int sqlite3_geopoly_init(sqlite3 *db){

@@ -206076,7 +212610,7 @@ const char *zName;

} aAgg[] = { { geopolyBBoxStep, geopolyBBoxFinal, "geopoly_group_bbox" }, }; - int i; + unsigned int i; for(i=0; i<sizeof(aFunc)/sizeof(aFunc[0]) && rc==SQLITE_OK; i++){ int enc; if( aFunc[i].bPure ){

@@ -207297,7 +213831,7 @@ **

** The order of the columns in the data_% table does not matter. ** ** Instead of a regular table, the RBU database may also contain virtual -** tables or view named using the data_<target> naming scheme. +** tables or views named using the data_<target> naming scheme. ** ** Instead of the plain data_<target> naming scheme, RBU database tables ** may also be named data<integer>_<target>, where <integer> is any sequence

@@ -207310,7 +213844,7 @@ ** content" FTS4 tables are updated before their underlying content tables.

** ** If the target database table is a virtual table or a table that has no ** PRIMARY KEY declaration, the data_% table must also contain a column -** named "rbu_rowid". This column is mapped to the tables implicit primary +** named "rbu_rowid". This column is mapped to the table's implicit primary ** key column - "rowid". Virtual tables for which the "rowid" column does ** not function like a primary key value cannot be updated using RBU. For ** example, if the target db contains either of the following:
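As a concrete, hypothetical illustration of the rules above: for a target table declared as CREATE TABLE t1(a, b) with no explicit PRIMARY KEY, the RBU database carries a data_t1 table whose rbu_rowid column addresses the implicit rowid, next to the usual rbu_control column (integer 0 for insert, 1 for delete, and a per-column character mask for updates, per the RBU documentation):

  #include <sqlite3.h>

  int main(void){
    sqlite3 *db;
    int i;
    static const char *azRbuSetup[] = {
      "CREATE TABLE data_t1(a, b, rbu_rowid, rbu_control)",
      "INSERT INTO data_t1 VALUES('one', 1, 1, 0)",    /* insert: new row with rowid 1 */
      "INSERT INTO data_t1 VALUES(NULL, NULL, 2, 1)",  /* delete: the row with rowid 2 */
      "INSERT INTO data_t1 VALUES(NULL, 5, 3, '.x')",  /* update: only column b of rowid 3 */
      0
    };
    sqlite3_open("changes.rbu", &db);                  /* the RBU database itself */
    for(i=0; azRbuSetup[i]; i++) sqlite3_exec(db, azRbuSetup[i], 0, 0, 0);
    sqlite3_close(db);
    /* The update would then be applied with sqlite3rbu_open("target.db",
    ** "changes.rbu", 0) and repeated sqlite3rbu_step() calls. */
    return 0;
  }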

@@ -210776,11 +217310,11 @@ ** READ0 and CHECKPOINT locks taken as part of the checkpoint are

** no-ops. These locks will not be released until the connection ** is closed. ** - ** * Attempting to xSync() the database file causes an SQLITE_INTERNAL + ** * Attempting to xSync() the database file causes an SQLITE_NOTICE ** error. ** ** As a result, unless an error (i.e. OOM or SQLITE_BUSY) occurs, the - ** checkpoint below fails with SQLITE_INTERNAL, and leaves the aFrame[] + ** checkpoint below fails with SQLITE_NOTICE, and leaves the aFrame[] ** array populated with a set of (frame -> page) mappings. Because the ** WRITER, CHECKPOINT and READ0 locks are still held, it is safe to copy ** data from the wal file into the database file according to the

@@ -210790,7 +217324,7 @@ if( p->rc==SQLITE_OK ){

int rc2; p->eStage = RBU_STAGE_CAPTURE; rc2 = sqlite3_exec(p->dbMain, "PRAGMA main.wal_checkpoint=restart", 0, 0,0); - if( rc2!=SQLITE_INTERNAL ) p->rc = rc2; + if( rc2!=SQLITE_NOTICE ) p->rc = rc2; } if( p->rc==SQLITE_OK && p->nFrame>0 ){

@@ -210836,7 +217370,7 @@ u32 iFrame;

if( pRbu->mLock!=mReq ){ pRbu->rc = SQLITE_BUSY; - return SQLITE_INTERNAL; + return SQLITE_NOTICE_RBU; } pRbu->pgsz = iAmt;

@@ -210886,6 +217420,11 @@ iOff = (i64)(pFrame->iDbPage-1) * p->pgsz;

p->rc = pDb->pMethods->xWrite(pDb, p->aBuf, p->pgsz, iOff); } +/* +** This value is copied from the definition of ZIPVFS_CTRL_FILE_POINTER +** in zipvfs.h. +*/ +#define RBU_ZIPVFS_CTRL_FILE_POINTER 230439 /* ** Take an EXCLUSIVE lock on the database file. Return SQLITE_OK if

@@ -210894,9 +217433,20 @@ */

static int rbuLockDatabase(sqlite3 *db){ int rc = SQLITE_OK; sqlite3_file *fd = 0; - sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &fd); + + sqlite3_file_control(db, "main", RBU_ZIPVFS_CTRL_FILE_POINTER, &fd); + if( fd ){ + sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &fd); + rc = fd->pMethods->xLock(fd, SQLITE_LOCK_SHARED); + if( rc==SQLITE_OK ){ + rc = fd->pMethods->xUnlock(fd, SQLITE_LOCK_NONE); + } + sqlite3_file_control(db, "main", RBU_ZIPVFS_CTRL_FILE_POINTER, &fd); + }else{ + sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &fd); + } - if( fd->pMethods ){ + if( rc==SQLITE_OK && fd->pMethods ){ rc = fd->pMethods->xLock(fd, SQLITE_LOCK_SHARED); if( rc==SQLITE_OK ){ rc = fd->pMethods->xLock(fd, SQLITE_LOCK_EXCLUSIVE);

@@ -211575,7 +218125,8 @@ */

static void rbuDeleteOalFile(sqlite3rbu *p){ char *zOal = rbuMPrintf(p, "%s-oal", p->zTarget); if( zOal ){ - sqlite3_vfs *pVfs = sqlite3_vfs_find(0); + sqlite3_vfs *pVfs = 0; + sqlite3_file_control(p->dbMain, "main", SQLITE_FCNTL_VFS_POINTER, &pVfs); assert( pVfs && p->rc==SQLITE_OK && p->zErrmsg==0 ); pVfs->xDelete(pVfs, zOal, 0); sqlite3_free(zOal);
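The change above stops assuming the default VFS and instead asks the connection which VFS its main database actually uses, via the SQLITE_FCNTL_VFS_POINTER file control. The same technique is available to applications; a minimal sketch, assuming db is an already-open connection:

  #include <sqlite3.h>
  #include <stdio.h>

  /* Print the name of the VFS used by the "main" database of db. */
  static void printVfsName(sqlite3 *db){
    sqlite3_vfs *pVfs = 0;
    if( sqlite3_file_control(db, "main", SQLITE_FCNTL_VFS_POINTER, &pVfs)==SQLITE_OK
     && pVfs!=0
    ){
      printf("VFS in use: %s\n", pVfs->zName);
    }
  }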

@@ -212223,7 +218774,7 @@ ** xShmLock() WRITER, CHECKPOINTER and READ0 locks on the target

** database file are recorded. xShmLock() calls to unlock the same ** locks are no-ops (so that once obtained, these locks are never ** relinquished). Finally, calls to xSync() on the target database -** file fail with SQLITE_INTERNAL errors. +** file fail with SQLITE_NOTICE errors. */ static void rbuUnlockShm(rbu_file *p){

@@ -212332,9 +218883,12 @@ p->apShm = 0;

sqlite3_free(p->zDel); if( p->openFlags & SQLITE_OPEN_MAIN_DB ){ + const sqlite3_io_methods *pMeth = p->pReal->pMethods; rbuMainlistRemove(p); rbuUnlockShm(p); - p->pReal->pMethods->xShmUnmap(p->pReal, 0); + if( pMeth->iVersion>1 && pMeth->xShmUnmap ){ + pMeth->xShmUnmap(p->pReal, 0); + } } else if( (p->openFlags & SQLITE_OPEN_DELETEONCLOSE) && p->pRbu ){ rbuUpdateTempSize(p, 0);

@@ -212502,7 +219056,7 @@ static int rbuVfsSync(sqlite3_file *pFile, int flags){

rbu_file *p = (rbu_file *)pFile; if( p->pRbu && p->pRbu->eStage==RBU_STAGE_CAPTURE ){ if( p->openFlags & SQLITE_OPEN_MAIN_DB ){ - return SQLITE_INTERNAL; + return SQLITE_NOTICE_RBU; } return SQLITE_OK; }

@@ -212793,6 +219347,25 @@ rbuVfsShmBarrier, /* xShmBarrier */

rbuVfsShmUnmap, /* xShmUnmap */ 0, 0 /* xFetch, xUnfetch */ }; + static sqlite3_io_methods rbuvfs_io_methods1 = { + 1, /* iVersion */ + rbuVfsClose, /* xClose */ + rbuVfsRead, /* xRead */ + rbuVfsWrite, /* xWrite */ + rbuVfsTruncate, /* xTruncate */ + rbuVfsSync, /* xSync */ + rbuVfsFileSize, /* xFileSize */ + rbuVfsLock, /* xLock */ + rbuVfsUnlock, /* xUnlock */ + rbuVfsCheckReservedLock, /* xCheckReservedLock */ + rbuVfsFileControl, /* xFileControl */ + rbuVfsSectorSize, /* xSectorSize */ + rbuVfsDeviceCharacteristics, /* xDeviceCharacteristics */ + 0, 0, 0, 0, 0, 0 + }; + + + rbu_vfs *pRbuVfs = (rbu_vfs*)pVfs; sqlite3_vfs *pRealVfs = pRbuVfs->pRealVfs; rbu_file *pFd = (rbu_file *)pFile;

@@ -212847,10 +219420,15 @@ if( rc==SQLITE_OK ){

rc = pRealVfs->xOpen(pRealVfs, zOpen, pFd->pReal, oflags, pOutFlags); } if( pFd->pReal->pMethods ){ + const sqlite3_io_methods *pMeth = pFd->pReal->pMethods; /* The xOpen() operation has succeeded. Set the sqlite3_file.pMethods ** pointer and, if the file is a main database file, link it into the ** mutex protected linked list of all such files. */ - pFile->pMethods = &rbuvfs_io_methods; + if( pMeth->iVersion<2 || pMeth->xShmLock==0 ){ + pFile->pMethods = &rbuvfs_io_methods1; + }else{ + pFile->pMethods = &rbuvfs_io_methods; + } if( flags & SQLITE_OPEN_MAIN_DB ){ rbuMainlistAdd(pFd); }

@@ -213283,6 +219861,7 @@ ){

StatTable *pTab = 0; int rc = SQLITE_OK; int iDb; + (void)pAux; if( argc>=4 ){ Token nm;

@@ -213336,6 +219915,7 @@ int i;

int iSchema = -1; int iName = -1; int iAgg = -1; + (void)tab; /* Look for a valid schema=? constraint. If found, change the idxNum to ** 1 and request the value of that constraint be sent to xFilter. And

@@ -213861,6 +220441,8 @@ char *zSql; /* String value of pSql */

int iArg = 0; /* Count of argv[] parameters used so far */ int rc = SQLITE_OK; /* Result of this operation */ const char *zName = 0; /* Only provide analysis of this table */ + (void)argc; + (void)idxStr; statResetCsr(pCsr); sqlite3_finalize(pCsr->pStmt);

@@ -213944,16 +220526,16 @@ sqlite3_result_text(ctx, pCsr->zPagetype, -1, SQLITE_STATIC);

} break; case 4: /* ncell */ - sqlite3_result_int(ctx, pCsr->nCell); + sqlite3_result_int64(ctx, pCsr->nCell); break; case 5: /* payload */ - sqlite3_result_int(ctx, pCsr->nPayload); + sqlite3_result_int64(ctx, pCsr->nPayload); break; case 6: /* unused */ - sqlite3_result_int(ctx, pCsr->nUnused); + sqlite3_result_int64(ctx, pCsr->nUnused); break; case 7: /* mx_payload */ - sqlite3_result_int(ctx, pCsr->nMxPayload); + sqlite3_result_int64(ctx, pCsr->nMxPayload); break; case 8: /* pgoffset */ if( !pCsr->isAgg ){

@@ -213961,7 +220543,7 @@ sqlite3_result_int64(ctx, pCsr->iOffset);

} break; case 9: /* pgsize */ - sqlite3_result_int(ctx, pCsr->szPage); + sqlite3_result_int64(ctx, pCsr->szPage); break; case 10: { /* schema */ sqlite3 *db = sqlite3_context_db_handle(ctx);

@@ -214011,7 +220593,8 @@ 0, /* xRename */

0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; return sqlite3_create_module(db, "dbstat", &dbstat_module, 0); }

@@ -214095,8 +220678,13 @@ char **pzErr

){ DbpageTable *pTab = 0; int rc = SQLITE_OK; + (void)pAux; + (void)argc; + (void)argv; + (void)pzErr; sqlite3_vtab_config(db, SQLITE_VTAB_DIRECTONLY); + sqlite3_vtab_config(db, SQLITE_VTAB_USES_ALL_SCHEMAS); rc = sqlite3_declare_vtab(db, "CREATE TABLE x(pgno INTEGER PRIMARY KEY, data BLOB, schema HIDDEN)"); if( rc==SQLITE_OK ){

@@ -214133,6 +220721,7 @@ */

static int dbpageBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ int i; int iPlan = 0; + (void)tab; /* If there is a schema= constraint, it must be honored. Report a ** ridiculously large estimated cost if the schema= constraint is

@@ -214179,7 +220768,6 @@ && pIdxInfo->aOrderBy[0].desc==0

){ pIdxInfo->orderByConsumed = 1; } - sqlite3VtabUsesAllSchemas(pIdxInfo); return SQLITE_OK; }

@@ -214248,6 +220836,8 @@ int rc;

sqlite3 *db = pTab->db; Btree *pBt; + (void)idxStr; + /* Default setting is no rows of result */ pCsr->pgno = 1; pCsr->mxPgno = 0;

@@ -214262,7 +220852,7 @@ }else{

pCsr->iDb = 0; } pBt = db->aDb[pCsr->iDb].pBt; - if( pBt==0 ) return SQLITE_OK; + if( NEVER(pBt==0) ) return SQLITE_OK; pCsr->pPager = sqlite3BtreePager(pBt); pCsr->szPage = sqlite3BtreeGetPageSize(pBt); pCsr->mxPgno = sqlite3BtreeLastPage(pBt);

@@ -214343,6 +220933,7 @@ Btree *pBt;

Pager *pPager; int szPage; + (void)pRowid; if( pTab->db->flags & SQLITE_Defensive ){ zErr = "read-only"; goto update_fail;

@@ -214352,18 +220943,20 @@ zErr = "cannot delete";

goto update_fail; } pgno = sqlite3_value_int(argv[0]); - if( (Pgno)sqlite3_value_int(argv[1])!=pgno ){ + if( sqlite3_value_type(argv[0])==SQLITE_NULL + || (Pgno)sqlite3_value_int(argv[1])!=pgno + ){ zErr = "cannot insert"; goto update_fail; } zSchema = (const char*)sqlite3_value_text(argv[4]); - iDb = zSchema ? sqlite3FindDbName(pTab->db, zSchema) : -1; - if( iDb<0 ){ + iDb = ALWAYS(zSchema) ? sqlite3FindDbName(pTab->db, zSchema) : -1; + if( NEVER(iDb<0) ){ zErr = "no such schema"; goto update_fail; } pBt = pTab->db->aDb[iDb].pBt; - if( pgno<1 || pBt==0 || pgno>sqlite3BtreeLastPage(pBt) ){ + if( NEVER(pgno<1) || NEVER(pBt==0) || NEVER(pgno>sqlite3BtreeLastPage(pBt)) ){ zErr = "bad page number"; goto update_fail; }

@@ -214402,12 +220995,11 @@ static int dbpageBegin(sqlite3_vtab *pVtab){

DbpageTable *pTab = (DbpageTable *)pVtab; sqlite3 *db = pTab->db; int i; - int rc = SQLITE_OK; - for(i=0; rc==SQLITE_OK && i<db->nDb; i++){ + for(i=0; i<db->nDb; i++){ Btree *pBt = db->aDb[i].pBt; - if( pBt ) rc = sqlite3BtreeBeginTrans(pBt, 1, 0); + if( pBt ) (void)sqlite3BtreeBeginTrans(pBt, 1, 0); } - return rc; + return SQLITE_OK; }

@@ -214439,7 +221031,8 @@ 0, /* xRename */

0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; return sqlite3_create_module(db, "sqlite_dbpage", &dbpage_module, 0); }

@@ -214476,6 +221069,8 @@ # define SESSIONS_STRM_CHUNK_SIZE 1024

# endif #endif +#define SESSIONS_ROWID "_rowid_" + static int sessions_strm_chunk_size = SESSIONS_STRM_CHUNK_SIZE; typedef struct SessionHook SessionHook;

@@ -214497,6 +221092,7 @@ int bEnableSize; /* True if changeset_size() enabled */

int bEnable; /* True if currently recording */ int bIndirect; /* True if all changes are indirect */ int bAutoAttach; /* True to auto-attach tables */ + int bImplicitPK; /* True to handle tables with implicit PK */ int rc; /* Non-zero if an error has occurred */ void *pFilterCtx; /* First argument to pass to xTableFilter */ int (*xTableFilter)(void *pCtx, const char *zTab);

@@ -214567,17 +221163,32 @@ **

** The data associated with each hash-table entry is a structure containing ** a subset of the initial values that the modified row contained at the ** start of the session. Or no initial values if the row was inserted. +** +** pDfltStmt: +** This is only used by the sqlite3changegroup_xxx() APIs, not by +** regular sqlite3_session objects. It is a SELECT statement that +** selects the default value for each table column. For example, +** if the table is +** +** CREATE TABLE xx(a DEFAULT 1, b, c DEFAULT 'abc') +** +** then this variable is the compiled version of: +** +** SELECT 1, NULL, 'abc' */ struct SessionTable { SessionTable *pNext; char *zName; /* Local name of table */ int nCol; /* Number of columns in table zName */ int bStat1; /* True if this is sqlite_stat1 */ + int bRowid; /* True if this table uses rowid for PK */ const char **azCol; /* Column names */ + const char **azDflt; /* Default value expressions */ u8 *abPK; /* Array of primary key flags */ int nEntry; /* Total number of entries in hash table */ int nChange; /* Size of apChange[] array */ SessionChange **apChange; /* Hash table buckets */ + sqlite3_stmt *pDfltStmt; }; /*

@@ -214746,6 +221357,7 @@ */

struct SessionChange { u8 op; /* One of UPDATE, DELETE, INSERT */ u8 bIndirect; /* True if this change is "indirect" */ + u16 nRecordField; /* Number of fields in aRecord[] */ int nMaxSize; /* Max size of eventual changeset record */ int nRecord; /* Number of bytes in buffer aRecord[] */ u8 *aRecord; /* Buffer containing old.* record */

@@ -214771,7 +221383,7 @@ /*

** Read a varint value from aBuf[] into *piVal. Return the number of ** bytes read. */ -static int sessionVarintGet(u8 *aBuf, int *piVal){ +static int sessionVarintGet(const u8 *aBuf, int *piVal){ return getVarint32(aBuf, *piVal); }

@@ -214965,6 +221577,7 @@ ** and the output variables are set as described above.

*/ static int sessionPreupdateHash( sqlite3_session *pSession, /* Session object that owns pTab */ + i64 iRowid, SessionTable *pTab, /* Session table handle */ int bNew, /* True to hash the new.* PK */ int *piHash, /* OUT: Hash value */

@@ -214973,48 +221586,53 @@ ){

unsigned int h = 0; /* Hash value to return */ int i; /* Used to iterate through columns */ - assert( *pbNullPK==0 ); - assert( pTab->nCol==pSession->hook.xCount(pSession->hook.pCtx) ); - for(i=0; i<pTab->nCol; i++){ - if( pTab->abPK[i] ){ - int rc; - int eType; - sqlite3_value *pVal; - - if( bNew ){ - rc = pSession->hook.xNew(pSession->hook.pCtx, i, &pVal); - }else{ - rc = pSession->hook.xOld(pSession->hook.pCtx, i, &pVal); - } - if( rc!=SQLITE_OK ) return rc; + if( pTab->bRowid ){ + assert( pTab->nCol-1==pSession->hook.xCount(pSession->hook.pCtx) ); + h = sessionHashAppendI64(h, iRowid); + }else{ + assert( *pbNullPK==0 ); + assert( pTab->nCol==pSession->hook.xCount(pSession->hook.pCtx) ); + for(i=0; i<pTab->nCol; i++){ + if( pTab->abPK[i] ){ + int rc; + int eType; + sqlite3_value *pVal; - eType = sqlite3_value_type(pVal); - h = sessionHashAppendType(h, eType); - if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ - i64 iVal; - if( eType==SQLITE_INTEGER ){ - iVal = sqlite3_value_int64(pVal); + if( bNew ){ + rc = pSession->hook.xNew(pSession->hook.pCtx, i, &pVal); }else{ - double rVal = sqlite3_value_double(pVal); - assert( sizeof(iVal)==8 && sizeof(rVal)==8 ); - memcpy(&iVal, &rVal, 8); + rc = pSession->hook.xOld(pSession->hook.pCtx, i, &pVal); } - h = sessionHashAppendI64(h, iVal); - }else if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){ - const u8 *z; - int n; - if( eType==SQLITE_TEXT ){ - z = (const u8 *)sqlite3_value_text(pVal); + if( rc!=SQLITE_OK ) return rc; + + eType = sqlite3_value_type(pVal); + h = sessionHashAppendType(h, eType); + if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ + i64 iVal; + if( eType==SQLITE_INTEGER ){ + iVal = sqlite3_value_int64(pVal); + }else{ + double rVal = sqlite3_value_double(pVal); + assert( sizeof(iVal)==8 && sizeof(rVal)==8 ); + memcpy(&iVal, &rVal, 8); + } + h = sessionHashAppendI64(h, iVal); + }else if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){ + const u8 *z; + int n; + if( eType==SQLITE_TEXT ){ + z = (const u8 *)sqlite3_value_text(pVal); + }else{ + z = (const u8 *)sqlite3_value_blob(pVal); + } + n = sqlite3_value_bytes(pVal); + if( !z && (eType!=SQLITE_BLOB || n>0) ) return SQLITE_NOMEM; + h = sessionHashAppendBlob(h, n, z); }else{ - z = (const u8 *)sqlite3_value_blob(pVal); + assert( eType==SQLITE_NULL ); + assert( pTab->bStat1==0 || i!=1 ); + *pbNullPK = 1; } - n = sqlite3_value_bytes(pVal); - if( !z && (eType!=SQLITE_BLOB || n>0) ) return SQLITE_NOMEM; - h = sessionHashAppendBlob(h, n, z); - }else{ - assert( eType==SQLITE_NULL ); - assert( pTab->bStat1==0 || i!=1 ); - *pbNullPK = 1; } } }

@@ -215028,9 +221646,11 @@ ** The buffer that the argument points to contains a serialized SQL value.

** Return the number of bytes of space occupied by the value (including ** the type byte). */ -static int sessionSerialLen(u8 *a){ - int e = *a; +static int sessionSerialLen(const u8 *a){ + int e; int n; + assert( a!=0 ); + e = *a; if( e==0 || e==0xFF ) return 1; if( e==SQLITE_NULL ) return 1; if( e==SQLITE_INTEGER || e==SQLITE_FLOAT ) return 9;

@@ -215297,12 +221917,18 @@ ** false.

*/ static int sessionPreupdateEqual( sqlite3_session *pSession, /* Session object that owns SessionTable */ + i64 iRowid, /* Rowid value if pTab->bRowid */ SessionTable *pTab, /* Table associated with change */ SessionChange *pChange, /* Change to compare to */ int op /* Current pre-update operation */ ){ int iCol; /* Used to iterate through columns */ u8 *a = pChange->aRecord; /* Cursor used to scan change record */ + + if( pTab->bRowid ){ + if( a[0]!=SQLITE_INTEGER ) return 0; + return sessionGetI64(&a[1])==iRowid; + } assert( op==SQLITE_INSERT || op==SQLITE_UPDATE || op==SQLITE_DELETE ); for(iCol=0; iCol<pTab->nCol; iCol++){

@@ -215326,6 +221952,7 @@ /* assert( db->pPreUpdate->pUnpacked ); */

rc = pSession->hook.xOld(pSession->hook.pCtx, iCol, &pVal); } assert( rc==SQLITE_OK ); + (void)rc; /* Suppress warning about unused variable */ if( sqlite3_value_type(pVal)!=eType ) return 0; /* A SessionChange object never has a NULL value in a PK column */

@@ -215428,13 +222055,14 @@ ** column is part of the primary key.

** ** For example, if the table is declared as: ** -** CREATE TABLE tbl1(w, x, y, z, PRIMARY KEY(w, z)); +** CREATE TABLE tbl1(w, x DEFAULT 'abc', y, z, PRIMARY KEY(w, z)); ** -** Then the four output variables are populated as follows: +** Then the five output variables are populated as follows: ** ** *pnCol = 4 ** *pzTab = "tbl1" ** *pazCol = {"w", "x", "y", "z"} +** *pazDflt = {NULL, 'abc', NULL, NULL} ** *pabPK = {1, 0, 0, 1} ** ** All returned buffers are part of the same single allocation, which must

@@ -215448,7 +222076,9 @@ const char *zThis, /* Table name */

int *pnCol, /* OUT: number of columns */ const char **pzTab, /* OUT: Copy of zThis */ const char ***pazCol, /* OUT: Array of column names for table */ - u8 **pabPK /* OUT: Array of booleans - true for PK col */ + const char ***pazDflt, /* OUT: Array of default value expressions */ + u8 **pabPK, /* OUT: Array of booleans - true for PK col */ + int *pbRowid /* OUT: True if only PK is a rowid */ ){ char *zPragma; sqlite3_stmt *pStmt;

@@ -215459,10 +222089,18 @@ int nThis;

int i; u8 *pAlloc = 0; char **azCol = 0; + char **azDflt = 0; u8 *abPK = 0; + int bRowid = 0; /* Set to true to use rowid as PK */ assert( pazCol && pabPK ); + *pazCol = 0; + *pabPK = 0; + *pnCol = 0; + if( pzTab ) *pzTab = 0; + if( pazDflt ) *pazDflt = 0; + nThis = sqlite3Strlen30(zThis); if( nThis==12 && 0==sqlite3_stricmp("sqlite_stat1", zThis) ){ rc = sqlite3_table_column_metadata(db, zDb, zThis, 0, 0, 0, 0, 0, 0);

@@ -215476,50 +222114,47 @@ );

}else if( rc==SQLITE_ERROR ){ zPragma = sqlite3_mprintf(""); }else{ - *pazCol = 0; - *pabPK = 0; - *pnCol = 0; - if( pzTab ) *pzTab = 0; return rc; } }else{ zPragma = sqlite3_mprintf("PRAGMA '%q'.table_info('%q')", zDb, zThis); } if( !zPragma ){ - *pazCol = 0; - *pabPK = 0; - *pnCol = 0; - if( pzTab ) *pzTab = 0; return SQLITE_NOMEM; } rc = sqlite3_prepare_v2(db, zPragma, -1, &pStmt, 0); sqlite3_free(zPragma); if( rc!=SQLITE_OK ){ - *pazCol = 0; - *pabPK = 0; - *pnCol = 0; - if( pzTab ) *pzTab = 0; return rc; } nByte = nThis + 1; + bRowid = (pbRowid!=0); while( SQLITE_ROW==sqlite3_step(pStmt) ){ - nByte += sqlite3_column_bytes(pStmt, 1); + nByte += sqlite3_column_bytes(pStmt, 1); /* name */ + nByte += sqlite3_column_bytes(pStmt, 4); /* dflt_value */ nDbCol++; + if( sqlite3_column_int(pStmt, 5) ) bRowid = 0; /* pk */ } + if( nDbCol==0 ) bRowid = 0; + nDbCol += bRowid; + nByte += strlen(SESSIONS_ROWID); rc = sqlite3_reset(pStmt); if( rc==SQLITE_OK ){ - nByte += nDbCol * (sizeof(const char *) + sizeof(u8) + 1); + nByte += nDbCol * (sizeof(const char *)*2 + sizeof(u8) + 1 + 1); pAlloc = sessionMalloc64(pSession, nByte); if( pAlloc==0 ){ rc = SQLITE_NOMEM; + }else{ + memset(pAlloc, 0, nByte); } } if( rc==SQLITE_OK ){ azCol = (char **)pAlloc; - pAlloc = (u8 *)&azCol[nDbCol]; + azDflt = (char**)&azCol[nDbCol]; + pAlloc = (u8 *)&azDflt[nDbCol]; abPK = (u8 *)pAlloc; pAlloc = &abPK[nDbCol]; if( pzTab ){

@@ -215529,43 +222164,57 @@ pAlloc += nThis+1;

} i = 0; + if( bRowid ){ + size_t nName = strlen(SESSIONS_ROWID); + memcpy(pAlloc, SESSIONS_ROWID, nName+1); + azCol[i] = (char*)pAlloc; + pAlloc += nName+1; + abPK[i] = 1; + i++; + } while( SQLITE_ROW==sqlite3_step(pStmt) ){ int nName = sqlite3_column_bytes(pStmt, 1); + int nDflt = sqlite3_column_bytes(pStmt, 4); const unsigned char *zName = sqlite3_column_text(pStmt, 1); + const unsigned char *zDflt = sqlite3_column_text(pStmt, 4); + if( zName==0 ) break; memcpy(pAlloc, zName, nName+1); azCol[i] = (char *)pAlloc; pAlloc += nName+1; + if( zDflt ){ + memcpy(pAlloc, zDflt, nDflt+1); + azDflt[i] = (char *)pAlloc; + pAlloc += nDflt+1; + }else{ + azDflt[i] = 0; + } abPK[i] = sqlite3_column_int(pStmt, 5); i++; } rc = sqlite3_reset(pStmt); - } /* If successful, populate the output variables. Otherwise, zero them and ** free any allocation made. An error code will be returned in this case. */ if( rc==SQLITE_OK ){ - *pazCol = (const char **)azCol; + *pazCol = (const char**)azCol; + if( pazDflt ) *pazDflt = (const char**)azDflt; *pabPK = abPK; *pnCol = nDbCol; }else{ - *pazCol = 0; - *pabPK = 0; - *pnCol = 0; - if( pzTab ) *pzTab = 0; sessionFree(pSession, azCol); } + if( pbRowid ) *pbRowid = bRowid; sqlite3_finalize(pStmt); return rc; } /* -** This function is only called from within a pre-update handler for a -** write to table pTab, part of session pSession. If this is the first -** write to this table, initalize the SessionTable.nCol, azCol[] and -** abPK[] arrays accordingly. +** This function is called to initialize the SessionTable.nCol, azCol[] +** abPK[] and azDflt[] members of SessionTable object pTab. If these +** fields are already initilialized, this function is a no-op. ** ** If an error occurs, an error code is stored in sqlite3_session.rc and ** non-zero returned. Or, if no error occurs but the table has no primary

@@ -215573,14 +222222,22 @@ ** key, sqlite3_session.rc is left set to SQLITE_OK and non-zero returned to

** indicate that updates on this table should be ignored. SessionTable.abPK ** is set to NULL in this case. */ -static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){ +static int sessionInitTable( + sqlite3_session *pSession, /* Optional session handle */ + SessionTable *pTab, /* Table object to initialize */ + sqlite3 *db, /* Database handle to read schema from */ + const char *zDb /* Name of db - "main", "temp" etc. */ +){ + int rc = SQLITE_OK; + if( pTab->nCol==0 ){ u8 *abPK; assert( pTab->azCol==0 || pTab->abPK==0 ); - pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb, - pTab->zName, &pTab->nCol, 0, &pTab->azCol, &abPK + rc = sessionTableInfo(pSession, db, zDb, + pTab->zName, &pTab->nCol, 0, &pTab->azCol, &pTab->azDflt, &abPK, + ((pSession==0 || pSession->bImplicitPK) ? &pTab->bRowid : 0) ); - if( pSession->rc==SQLITE_OK ){ + if( rc==SQLITE_OK ){ int i; for(i=0; i<pTab->nCol; i++){ if( abPK[i] ){

@@ -215592,14 +222249,321 @@ if( 0==sqlite3_stricmp("sqlite_stat1", pTab->zName) ){

pTab->bStat1 = 1; } - if( pSession->bEnableSize ){ + if( pSession && pSession->bEnableSize ){ pSession->nMaxChangesetSize += ( 1 + sessionVarintLen(pTab->nCol) + pTab->nCol + strlen(pTab->zName)+1 ); } } } - return (pSession->rc || pTab->abPK==0); + + if( pSession ){ + pSession->rc = rc; + return (rc || pTab->abPK==0); + } + return rc; +} + +/* +** Re-initialize table object pTab. +*/ +static int sessionReinitTable(sqlite3_session *pSession, SessionTable *pTab){ + int nCol = 0; + const char **azCol = 0; + const char **azDflt = 0; + u8 *abPK = 0; + int bRowid = 0; + + assert( pSession->rc==SQLITE_OK ); + + pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb, + pTab->zName, &nCol, 0, &azCol, &azDflt, &abPK, + (pSession->bImplicitPK ? &bRowid : 0) + ); + if( pSession->rc==SQLITE_OK ){ + if( pTab->nCol>nCol || pTab->bRowid!=bRowid ){ + pSession->rc = SQLITE_SCHEMA; + }else{ + int ii; + int nOldCol = pTab->nCol; + for(ii=0; ii<nCol; ii++){ + if( ii<pTab->nCol ){ + if( pTab->abPK[ii]!=abPK[ii] ){ + pSession->rc = SQLITE_SCHEMA; + } + }else if( abPK[ii] ){ + pSession->rc = SQLITE_SCHEMA; + } + } + + if( pSession->rc==SQLITE_OK ){ + const char **a = pTab->azCol; + pTab->azCol = azCol; + pTab->nCol = nCol; + pTab->azDflt = azDflt; + pTab->abPK = abPK; + azCol = a; + } + if( pSession->bEnableSize ){ + pSession->nMaxChangesetSize += (nCol - nOldCol); + pSession->nMaxChangesetSize += sessionVarintLen(nCol); + pSession->nMaxChangesetSize -= sessionVarintLen(nOldCol); + } + } + } + + sqlite3_free((char*)azCol); + return pSession->rc; +} + +/* +** Session-change object (*pp) contains an old.* record with fewer than +** nCol fields. This function updates it with the default values for +** the missing fields. +*/ +static void sessionUpdateOneChange( + sqlite3_session *pSession, /* For memory accounting */ + int *pRc, /* IN/OUT: Error code */ + SessionChange **pp, /* IN/OUT: Change object to update */ + int nCol, /* Number of columns now in table */ + sqlite3_stmt *pDflt /* SELECT <default-values...> */ +){ + SessionChange *pOld = *pp; + + while( pOld->nRecordField<nCol ){ + SessionChange *pNew = 0; + int nByte = 0; + int nIncr = 0; + int iField = pOld->nRecordField; + int eType = sqlite3_column_type(pDflt, iField); + switch( eType ){ + case SQLITE_NULL: + nIncr = 1; + break; + case SQLITE_INTEGER: + case SQLITE_FLOAT: + nIncr = 9; + break; + default: { + int n = sqlite3_column_bytes(pDflt, iField); + nIncr = 1 + sessionVarintLen(n) + n; + assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB ); + break; + } + } + + nByte = nIncr + (sizeof(SessionChange) + pOld->nRecord); + pNew = sessionMalloc64(pSession, nByte); + if( pNew==0 ){ + *pRc = SQLITE_NOMEM; + return; + }else{ + memcpy(pNew, pOld, sizeof(SessionChange)); + pNew->aRecord = (u8*)&pNew[1]; + memcpy(pNew->aRecord, pOld->aRecord, pOld->nRecord); + pNew->aRecord[pNew->nRecord++] = (u8)eType; + switch( eType ){ + case SQLITE_INTEGER: { + i64 iVal = sqlite3_column_int64(pDflt, iField); + sessionPutI64(&pNew->aRecord[pNew->nRecord], iVal); + pNew->nRecord += 8; + break; + } + + case SQLITE_FLOAT: { + double rVal = sqlite3_column_double(pDflt, iField); + i64 iVal = 0; + memcpy(&iVal, &rVal, sizeof(rVal)); + sessionPutI64(&pNew->aRecord[pNew->nRecord], iVal); + pNew->nRecord += 8; + break; + } + + case SQLITE_TEXT: { + int n = sqlite3_column_bytes(pDflt, iField); + const char *z = (const char*)sqlite3_column_text(pDflt, iField); + pNew->nRecord += sessionVarintPut(&pNew->aRecord[pNew->nRecord], n); + memcpy(&pNew->aRecord[pNew->nRecord], z, 
n); + pNew->nRecord += n; + break; + } + + case SQLITE_BLOB: { + int n = sqlite3_column_bytes(pDflt, iField); + const u8 *z = (const u8*)sqlite3_column_blob(pDflt, iField); + pNew->nRecord += sessionVarintPut(&pNew->aRecord[pNew->nRecord], n); + memcpy(&pNew->aRecord[pNew->nRecord], z, n); + pNew->nRecord += n; + break; + } + + default: + assert( eType==SQLITE_NULL ); + break; + } + + sessionFree(pSession, pOld); + *pp = pOld = pNew; + pNew->nRecordField++; + pNew->nMaxSize += nIncr; + if( pSession ){ + pSession->nMaxChangesetSize += nIncr; + } + } + } +} + +/* +** Ensure that there is room in the buffer to append nByte bytes of data. +** If not, use sqlite3_realloc() to grow the buffer so that there is. +** +** If successful, return zero. Otherwise, if an OOM condition is encountered, +** set *pRc to SQLITE_NOMEM and return non-zero. +*/ +static int sessionBufferGrow(SessionBuffer *p, i64 nByte, int *pRc){ +#define SESSION_MAX_BUFFER_SZ (0x7FFFFF00 - 1) + i64 nReq = p->nBuf + nByte; + if( *pRc==SQLITE_OK && nReq>p->nAlloc ){ + u8 *aNew; + i64 nNew = p->nAlloc ? p->nAlloc : 128; + + do { + nNew = nNew*2; + }while( nNew<nReq ); + + /* The value of SESSION_MAX_BUFFER_SZ is copied from the implementation + ** of sqlite3_realloc64(). Allocations greater than this size in bytes + ** always fail. It is used here to ensure that this routine can always + ** allocate up to this limit - instead of up to the largest power of + ** two smaller than the limit. */ + if( nNew>SESSION_MAX_BUFFER_SZ ){ + nNew = SESSION_MAX_BUFFER_SZ; + if( nNew<nReq ){ + *pRc = SQLITE_NOMEM; + return 1; + } + } + + aNew = (u8 *)sqlite3_realloc64(p->aBuf, nNew); + if( 0==aNew ){ + *pRc = SQLITE_NOMEM; + }else{ + p->aBuf = aNew; + p->nAlloc = nNew; + } + } + return (*pRc!=SQLITE_OK); +} + + +/* +** This function is a no-op if *pRc is other than SQLITE_OK when it is +** called. Otherwise, append a string to the buffer. All bytes in the string +** up to (but not including) the nul-terminator are written to the buffer. +** +** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before +** returning. +*/ +static void sessionAppendStr( + SessionBuffer *p, + const char *zStr, + int *pRc +){ + int nStr = sqlite3Strlen30(zStr); + if( 0==sessionBufferGrow(p, nStr+1, pRc) ){ + memcpy(&p->aBuf[p->nBuf], zStr, nStr); + p->nBuf += nStr; + p->aBuf[p->nBuf] = 0x00; + } +} + +/* +** Format a string using printf() style formatting and then append it to the +** buffer using sessionAppendString(). +*/ +static void sessionAppendPrintf( + SessionBuffer *p, /* Buffer to append to */ + int *pRc, + const char *zFmt, + ... +){ + if( *pRc==SQLITE_OK ){ + char *zApp = 0; + va_list ap; + va_start(ap, zFmt); + zApp = sqlite3_vmprintf(zFmt, ap); + if( zApp==0 ){ + *pRc = SQLITE_NOMEM; + }else{ + sessionAppendStr(p, zApp, pRc); + } + va_end(ap); + sqlite3_free(zApp); + } +} + +/* +** Prepare a statement against database handle db that SELECTs a single +** row containing the default values for each column in table pTab. 
For +** example, if pTab is declared as: +** +** CREATE TABLE pTab(a PRIMARY KEY, b DEFAULT 123, c DEFAULT 'abcd'); +** +** Then this function prepares and returns the SQL statement: +** +** SELECT NULL, 123, 'abcd'; +*/ +static int sessionPrepareDfltStmt( + sqlite3 *db, /* Database handle */ + SessionTable *pTab, /* Table to prepare statement for */ + sqlite3_stmt **ppStmt /* OUT: Statement handle */ +){ + SessionBuffer sql = {0,0,0}; + int rc = SQLITE_OK; + const char *zSep = " "; + int ii = 0; + + *ppStmt = 0; + sessionAppendPrintf(&sql, &rc, "SELECT"); + for(ii=0; ii<pTab->nCol; ii++){ + const char *zDflt = pTab->azDflt[ii] ? pTab->azDflt[ii] : "NULL"; + sessionAppendPrintf(&sql, &rc, "%s%s", zSep, zDflt); + zSep = ", "; + } + if( rc==SQLITE_OK ){ + rc = sqlite3_prepare_v2(db, (const char*)sql.aBuf, -1, ppStmt, 0); + } + sqlite3_free(sql.aBuf); + + return rc; +} + +/* +** Table pTab has one or more existing change-records with old.* records +** with fewer than pTab->nCol columns. This function updates all such +** change-records with the default values for the missing columns. +*/ +static int sessionUpdateChanges(sqlite3_session *pSession, SessionTable *pTab){ + sqlite3_stmt *pStmt = 0; + int rc = pSession->rc; + + rc = sessionPrepareDfltStmt(pSession->db, pTab, &pStmt); + if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + int ii = 0; + SessionChange **pp = 0; + for(ii=0; ii<pTab->nChange; ii++){ + for(pp=&pTab->apChange[ii]; *pp; pp=&((*pp)->pNext)){ + if( (*pp)->nRecordField!=pTab->nCol ){ + sessionUpdateOneChange(pSession, &rc, pp, pTab->nCol, pStmt); + } + } + } + } + + pSession->rc = rc; + rc = sqlite3_finalize(pStmt); + if( pSession->rc==SQLITE_OK ) pSession->rc = rc; + return pSession->rc; } /*
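
The sessionReinitTable()/sessionUpdateChanges()/sessionPrepareDfltStmt() machinery added above lets a session keep recording after columns are added to an attached table: older change records are padded with the declared DEFAULT values, which are compiled into a single "SELECT <dflt1>, <dflt2>, ..." statement. A hedged sketch of the scenario this enables, using only the public session API (the table name and columns are made up, and error handling is omitted):

sqlite3_session *pSession = 0;
int nChangeset = 0; void *pChangeset = 0;

sqlite3_exec(db, "CREATE TABLE t1(a INTEGER PRIMARY KEY, b)", 0, 0, 0);
sqlite3session_create(db, "main", &pSession);
sqlite3session_attach(pSession, "t1");

sqlite3_exec(db, "INSERT INTO t1 VALUES(1, 'x')", 0, 0, 0);
sqlite3_exec(db, "ALTER TABLE t1 ADD COLUMN c DEFAULT 'abc'", 0, 0, 0);
sqlite3_exec(db, "INSERT INTO t1 VALUES(2, 'y', 'z')", 0, 0, 0);

/* Records captured before the ALTER TABLE are back-filled with 'abc'. */
sqlite3session_changeset(pSession, &nChangeset, &pChangeset);
sqlite3_free(pChangeset);
sqlite3session_delete(pSession);
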

@@ -215650,6 +222614,7 @@ SessionChange *pC /* Update pC->nMaxSize */

){ i64 nNew = 2; if( pC->op==SQLITE_INSERT ){ + if( pTab->bRowid ) nNew += 9; if( op!=SQLITE_DELETE ){ int ii; for(ii=0; ii<pTab->nCol; ii++){

@@ -215666,12 +222631,16 @@ }

}else{ int ii; u8 *pCsr = pC->aRecord; - for(ii=0; ii<pTab->nCol; ii++){ + if( pTab->bRowid ){ + nNew += 9 + 1; + pCsr += 9; + } + for(ii=pTab->bRowid; ii<pTab->nCol; ii++){ int bChanged = 1; int nOld = 0; int eType; sqlite3_value *p = 0; - pSession->hook.xNew(pSession->hook.pCtx, ii, &p); + pSession->hook.xNew(pSession->hook.pCtx, ii-pTab->bRowid, &p); if( p==0 ){ return SQLITE_NOMEM; }

@@ -215750,22 +222719,29 @@ ** to the changed-rows hash table associated with table pTab.

*/ static void sessionPreupdateOneChange( int op, /* One of SQLITE_UPDATE, INSERT, DELETE */ + i64 iRowid, sqlite3_session *pSession, /* Session object pTab is attached to */ SessionTable *pTab /* Table that change applies to */ ){ int iHash; int bNull = 0; int rc = SQLITE_OK; + int nExpect = 0; SessionStat1Ctx stat1 = {{0,0,0,0,0},0}; if( pSession->rc ) return; /* Load table details if required */ - if( sessionInitTable(pSession, pTab) ) return; + if( sessionInitTable(pSession, pTab, pSession->db, pSession->zDb) ) return; /* Check the number of columns in this xPreUpdate call matches the ** number of columns in the table. */ - if( pTab->nCol!=pSession->hook.xCount(pSession->hook.pCtx) ){ + nExpect = pSession->hook.xCount(pSession->hook.pCtx); + if( (pTab->nCol-pTab->bRowid)<nExpect ){ + if( sessionReinitTable(pSession, pTab) ) return; + if( sessionUpdateChanges(pSession, pTab) ) return; + } + if( (pTab->nCol-pTab->bRowid)!=nExpect ){ pSession->rc = SQLITE_SCHEMA; return; }

@@ -215798,14 +222774,16 @@

/* Calculate the hash-key for this change. If the primary key of the row ** includes a NULL value, exit early. Such changes are ignored by the ** session module. */ - rc = sessionPreupdateHash(pSession, pTab, op==SQLITE_INSERT, &iHash, &bNull); + rc = sessionPreupdateHash( + pSession, iRowid, pTab, op==SQLITE_INSERT, &iHash, &bNull + ); if( rc!=SQLITE_OK ) goto error_out; if( bNull==0 ){ /* Search the hash table for an existing record for this row. */ SessionChange *pC; for(pC=pTab->apChange[iHash]; pC; pC=pC->pNext){ - if( sessionPreupdateEqual(pSession, pTab, pC, op) ) break; + if( sessionPreupdateEqual(pSession, iRowid, pTab, pC, op) ) break; } if( pC==0 ){

@@ -215820,7 +222798,7 @@ pTab->nEntry++;

/* Figure out how large an allocation is required */ nByte = sizeof(SessionChange); - for(i=0; i<pTab->nCol; i++){ + for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ sqlite3_value *p = 0; if( op!=SQLITE_INSERT ){ TESTONLY(int trc = ) pSession->hook.xOld(pSession->hook.pCtx, i, &p);

@@ -215835,9 +222813,12 @@ ** be converted to utf-8 and an OOM error occurs while doing so. */

rc = sessionSerializeValue(0, p, &nByte); if( rc!=SQLITE_OK ) goto error_out; } + if( pTab->bRowid ){ + nByte += 9; /* Size of rowid field - an integer */ + } /* Allocate the change object */ - pC = (SessionChange *)sessionMalloc64(pSession, nByte); + pC = (SessionChange*)sessionMalloc64(pSession, nByte); if( !pC ){ rc = SQLITE_NOMEM; goto error_out;

@@ -215851,7 +222832,12 @@ ** preupdate_new() or SerializeValue() calls below may fail as all

** required values and encodings have already been cached in memory. ** It is not possible for an OOM to occur in this block. */ nByte = 0; - for(i=0; i<pTab->nCol; i++){ + if( pTab->bRowid ){ + pC->aRecord[0] = SQLITE_INTEGER; + sessionPutI64(&pC->aRecord[1], iRowid); + nByte = 9; + } + for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ sqlite3_value *p = 0; if( op!=SQLITE_INSERT ){ pSession->hook.xOld(pSession->hook.pCtx, i, &p);

@@ -215865,6 +222851,7 @@ /* Add the change to the hash-table */

if( pSession->bIndirect || pSession->hook.xDepth(pSession->hook.pCtx) ){ pC->bIndirect = 1; } + pC->nRecordField = pTab->nCol; pC->nRecord = nByte; pC->op = op; pC->pNext = pTab->apChange[iHash];

@@ -215950,6 +222937,8 @@ sqlite3_session *pSession;

int nDb = sqlite3Strlen30(zDb); assert( sqlite3_mutex_held(db->mutex) ); + (void)iKey1; + (void)iKey2; for(pSession=(sqlite3_session *)pCtx; pSession; pSession=pSession->pNext){ SessionTable *pTab;

@@ -215964,9 +222953,10 @@

pSession->rc = sessionFindTable(pSession, zName, &pTab); if( pTab ){ assert( pSession->rc==SQLITE_OK ); - sessionPreupdateOneChange(op, pSession, pTab); + assert( op==SQLITE_UPDATE || iKey1==iKey2 ); + sessionPreupdateOneChange(op, iKey1, pSession, pTab); if( op==SQLITE_UPDATE ){ - sessionPreupdateOneChange(SQLITE_INSERT, pSession, pTab); + sessionPreupdateOneChange(SQLITE_INSERT, iKey2, pSession, pTab); } } }

@@ -216005,6 +222995,7 @@

typedef struct SessionDiffCtx SessionDiffCtx; struct SessionDiffCtx { sqlite3_stmt *pStmt; + int bRowid; int nOldOff; };

@@ -216013,19 +223004,20 @@ ** The diff hook implementations.

*/ static int sessionDiffOld(void *pCtx, int iVal, sqlite3_value **ppVal){ SessionDiffCtx *p = (SessionDiffCtx*)pCtx; - *ppVal = sqlite3_column_value(p->pStmt, iVal+p->nOldOff); + *ppVal = sqlite3_column_value(p->pStmt, iVal+p->nOldOff+p->bRowid); return SQLITE_OK; } static int sessionDiffNew(void *pCtx, int iVal, sqlite3_value **ppVal){ SessionDiffCtx *p = (SessionDiffCtx*)pCtx; - *ppVal = sqlite3_column_value(p->pStmt, iVal); + *ppVal = sqlite3_column_value(p->pStmt, iVal+p->bRowid); return SQLITE_OK; } static int sessionDiffCount(void *pCtx){ SessionDiffCtx *p = (SessionDiffCtx*)pCtx; - return p->nOldOff ? p->nOldOff : sqlite3_column_count(p->pStmt); + return (p->nOldOff ? p->nOldOff : sqlite3_column_count(p->pStmt)) - p->bRowid; } static int sessionDiffDepth(void *pCtx){ + (void)pCtx; return 0; }

@@ -216099,17 +223091,18 @@ return zRet;

} static char *sessionSelectFindNew( - int nCol, const char *zDb1, /* Pick rows in this db only */ const char *zDb2, /* But not in this one */ + int bRowid, const char *zTbl, /* Table name */ const char *zExpr ){ + const char *zSel = (bRowid ? SESSIONS_ROWID ", *" : "*"); char *zRet = sqlite3_mprintf( - "SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS (" + "SELECT %s FROM \"%w\".\"%w\" WHERE NOT EXISTS (" " SELECT 1 FROM \"%w\".\"%w\" WHERE %s" ")", - zDb1, zTbl, zDb2, zTbl, zExpr + zSel, zDb1, zTbl, zDb2, zTbl, zExpr ); return zRet; }

@@ -216123,7 +223116,9 @@ const char *zDb2,

char *zExpr ){ int rc = SQLITE_OK; - char *zStmt = sessionSelectFindNew(pTab->nCol, zDb1, zDb2, pTab->zName,zExpr); + char *zStmt = sessionSelectFindNew( + zDb1, zDb2, pTab->bRowid, pTab->zName, zExpr + ); if( zStmt==0 ){ rc = SQLITE_NOMEM;

@@ -216134,8 +223129,10 @@ if( rc==SQLITE_OK ){

SessionDiffCtx *pDiffCtx = (SessionDiffCtx*)pSession->hook.pCtx; pDiffCtx->pStmt = pStmt; pDiffCtx->nOldOff = 0; + pDiffCtx->bRowid = pTab->bRowid; while( SQLITE_ROW==sqlite3_step(pStmt) ){ - sessionPreupdateOneChange(op, pSession, pTab); + i64 iRowid = (pTab->bRowid ? sqlite3_column_int64(pStmt, 0) : 0); + sessionPreupdateOneChange(op, iRowid, pSession, pTab); } rc = sqlite3_finalize(pStmt); }

@@ -216145,6 +223142,27 @@

return rc; } +/* +** Return a comma-separated list of the fully-qualified (with both database +** and table name) column names from table pTab. e.g. +** +** "main"."t1"."a", "main"."t1"."b", "main"."t1"."c" +*/ +static char *sessionAllCols( + const char *zDb, + SessionTable *pTab +){ + int ii; + char *zRet = 0; + for(ii=0; ii<pTab->nCol; ii++){ + zRet = sqlite3_mprintf("%z%s\"%w\".\"%w\".\"%w\"", + zRet, (zRet ? ", " : ""), zDb, pTab->zName, pTab->azCol[ii] + ); + if( !zRet ) break; + } + return zRet; +} + static int sessionDiffFindModified( sqlite3_session *pSession, SessionTable *pTab,

@@ -216159,11 +223177,13 @@ );

if( zExpr2==0 ){ rc = SQLITE_NOMEM; }else{ + char *z1 = sessionAllCols(pSession->zDb, pTab); + char *z2 = sessionAllCols(zFrom, pTab); char *zStmt = sqlite3_mprintf( - "SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)", - pSession->zDb, pTab->zName, zFrom, pTab->zName, zExpr, zExpr2 + "SELECT %s,%s FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)", + z1, z2, pSession->zDb, pTab->zName, zFrom, pTab->zName, zExpr, zExpr2 ); - if( zStmt==0 ){ + if( zStmt==0 || z1==0 || z2==0 ){ rc = SQLITE_NOMEM; }else{ sqlite3_stmt *pStmt;

@@ -216174,12 +223194,15 @@ SessionDiffCtx *pDiffCtx = (SessionDiffCtx*)pSession->hook.pCtx;

pDiffCtx->pStmt = pStmt; pDiffCtx->nOldOff = pTab->nCol; while( SQLITE_ROW==sqlite3_step(pStmt) ){ - sessionPreupdateOneChange(SQLITE_UPDATE, pSession, pTab); + i64 iRowid = (pTab->bRowid ? sqlite3_column_int64(pStmt, 0) : 0); + sessionPreupdateOneChange(SQLITE_UPDATE, iRowid, pSession, pTab); } rc = sqlite3_finalize(pStmt); } - sqlite3_free(zStmt); } + sqlite3_free(zStmt); + sqlite3_free(z1); + sqlite3_free(z2); } return rc;
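
These diff hooks feed sqlite3session_diff(), which now selects fully-qualified column names (and, optionally, the implicit rowid) when comparing the attached table against a second database. A minimal usage sketch; the "aux" alias and table name "t1" are assumptions, and both schemas must contain a compatible t1:

char *zErr = 0;
sqlite3_session *pSession = 0;
sqlite3session_create(db, "main", &pSession);
sqlite3session_attach(pSession, "t1");
if( sqlite3session_diff(pSession, "aux", "t1", &zErr)!=SQLITE_OK ){
  /* zErr may describe a schema mismatch between main.t1 and aux.t1 */
  sqlite3_free(zErr);
}
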

@@ -216208,7 +223231,7 @@

/* Locate and if necessary initialize the target table object */ rc = sessionFindTable(pSession, zTbl, &pTo); if( pTo==0 ) goto diff_out; - if( sessionInitTable(pSession, pTo) ){ + if( sessionInitTable(pSession, pTo, pSession->db, pSession->zDb) ){ rc = pSession->rc; goto diff_out; }

@@ -216218,9 +223241,12 @@ if( rc==SQLITE_OK ){

int bHasPk = 0; int bMismatch = 0; int nCol; /* Columns in zFrom.zTbl */ + int bRowid = 0; u8 *abPK; const char **azCol = 0; - rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, &abPK); + rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, 0, &abPK, + pSession->bImplicitPK ? &bRowid : 0 + ); if( rc==SQLITE_OK ){ if( pTo->nCol!=nCol ){ bMismatch = 1;

@@ -216333,6 +223359,7 @@ pNextChange = p->pNext;

sessionFree(pSession, p); } } + sqlite3_finalize(pTab->pDfltStmt); sessionFree(pSession, (char*)pTab->azCol); /* cast works around VC++ bug */ sessionFree(pSession, pTab->apChange); sessionFree(pSession, pTab);

@@ -216367,7 +223394,7 @@ sessionDeleteTable(pSession, pSession->pTable);

/* Assert that all allocations have been freed and then free the ** session object itself. */ - assert( pSession->nMalloc==0 ); + // assert( pSession->nMalloc==0 ); sqlite3_free(pSession); }

@@ -216439,48 +223466,6 @@ return rc;

} /* -** Ensure that there is room in the buffer to append nByte bytes of data. -** If not, use sqlite3_realloc() to grow the buffer so that there is. -** -** If successful, return zero. Otherwise, if an OOM condition is encountered, -** set *pRc to SQLITE_NOMEM and return non-zero. -*/ -static int sessionBufferGrow(SessionBuffer *p, i64 nByte, int *pRc){ -#define SESSION_MAX_BUFFER_SZ (0x7FFFFF00 - 1) - i64 nReq = p->nBuf + nByte; - if( *pRc==SQLITE_OK && nReq>p->nAlloc ){ - u8 *aNew; - i64 nNew = p->nAlloc ? p->nAlloc : 128; - - do { - nNew = nNew*2; - }while( nNew<nReq ); - - /* The value of SESSION_MAX_BUFFER_SZ is copied from the implementation - ** of sqlite3_realloc64(). Allocations greater than this size in bytes - ** always fail. It is used here to ensure that this routine can always - ** allocate up to this limit - instead of up to the largest power of - ** two smaller than the limit. */ - if( nNew>SESSION_MAX_BUFFER_SZ ){ - nNew = SESSION_MAX_BUFFER_SZ; - if( nNew<nReq ){ - *pRc = SQLITE_NOMEM; - return 1; - } - } - - aNew = (u8 *)sqlite3_realloc64(p->aBuf, nNew); - if( 0==aNew ){ - *pRc = SQLITE_NOMEM; - }else{ - p->aBuf = aNew; - p->nAlloc = nNew; - } - } - return (*pRc!=SQLITE_OK); -} - -/* ** Append the value passed as the second argument to the buffer passed ** as the first. **

@@ -216550,26 +223535,6 @@ }

/* ** This function is a no-op if *pRc is other than SQLITE_OK when it is -** called. Otherwise, append a string to the buffer. All bytes in the string -** up to (but not including) the nul-terminator are written to the buffer. -** -** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before -** returning. -*/ -static void sessionAppendStr( - SessionBuffer *p, - const char *zStr, - int *pRc -){ - int nStr = sqlite3Strlen30(zStr); - if( 0==sessionBufferGrow(p, nStr, pRc) ){ - memcpy(&p->aBuf[p->nBuf], zStr, nStr); - p->nBuf += nStr; - } -} - -/* -** This function is a no-op if *pRc is other than SQLITE_OK when it is ** called. Otherwise, append the string representation of integer iVal ** to the buffer. No nul-terminator is written. **

@@ -216600,7 +223565,7 @@ SessionBuffer *p, /* Buffer to a append to */

const char *zStr, /* String to quote, escape and append */ int *pRc /* IN/OUT: Error code */ ){ - int nStr = sqlite3Strlen30(zStr)*2 + 2 + 1; + int nStr = sqlite3Strlen30(zStr)*2 + 2 + 2; if( 0==sessionBufferGrow(p, nStr, pRc) ){ char *zOut = (char *)&p->aBuf[p->nBuf]; const char *zIn = zStr;

@@ -216611,6 +223576,7 @@ *zOut++ = *(zIn++);

} *zOut++ = '"'; p->nBuf = (int)((u8 *)zOut - p->aBuf); + p->aBuf[p->nBuf] = 0x00; } }

@@ -216746,7 +223712,7 @@

/* If at least one field has been modified, this is not a no-op. */ if( bChanged ) bNoop = 0; - /* Add a field to the old.* record. This is omitted if this modules is + /* Add a field to the old.* record. This is omitted if this module is ** currently generating a patchset. */ if( bPatchset==0 ){ if( bChanged || abPK[i] ){

@@ -216835,12 +223801,20 @@ /*

** Formulate and prepare a SELECT statement to retrieve a row from table ** zTab in database zDb based on its primary key. i.e. ** -** SELECT * FROM zDb.zTab WHERE pk1 = ? AND pk2 = ? AND ... +** SELECT *, <noop-test> FROM zDb.zTab WHERE (pk1, pk2,...) IS (?1, ?2,...) +** +** where <noop-test> is: +** +** 1 AND (?A OR ?1 IS <column>) AND ... +** +** for each non-pk <column>. */ static int sessionSelectStmt( sqlite3 *db, /* Database handle */ + int bIgnoreNoop, const char *zDb, /* Database name */ const char *zTab, /* Table name */ + int bRowid, int nCol, /* Number of columns in table */ const char **azCol, /* Names of table columns */ u8 *abPK, /* PRIMARY KEY array */

@@ -216848,8 +223822,50 @@ sqlite3_stmt **ppStmt /* OUT: Prepared SELECT statement */

){ int rc = SQLITE_OK; char *zSql = 0; + const char *zSep = ""; + const char *zCols = bRowid ? SESSIONS_ROWID ", *" : "*"; int nSql = -1; + int i; + + SessionBuffer nooptest = {0, 0, 0}; + SessionBuffer pkfield = {0, 0, 0}; + SessionBuffer pkvar = {0, 0, 0}; + sessionAppendStr(&nooptest, ", 1", &rc); + + if( 0==sqlite3_stricmp("sqlite_stat1", zTab) ){ + sessionAppendStr(&nooptest, " AND (?6 OR ?3 IS stat)", &rc); + sessionAppendStr(&pkfield, "tbl, idx", &rc); + sessionAppendStr(&pkvar, + "?1, (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)", &rc + ); + zCols = "tbl, ?2, stat"; + }else{ + for(i=0; i<nCol; i++){ + if( abPK[i] ){ + sessionAppendStr(&pkfield, zSep, &rc); + sessionAppendStr(&pkvar, zSep, &rc); + zSep = ", "; + sessionAppendIdent(&pkfield, azCol[i], &rc); + sessionAppendPrintf(&pkvar, &rc, "?%d", i+1); + }else{ + sessionAppendPrintf(&nooptest, &rc, + " AND (?%d OR ?%d IS %w.%w)", i+1+nCol, i+1, zTab, azCol[i] + ); + } + } + } + + if( rc==SQLITE_OK ){ + zSql = sqlite3_mprintf( + "SELECT %s%s FROM %Q.%Q WHERE (%s) IS (%s)", + zCols, (bIgnoreNoop ? (char*)nooptest.aBuf : ""), + zDb, zTab, (char*)pkfield.aBuf, (char*)pkvar.aBuf + ); + if( zSql==0 ) rc = SQLITE_NOMEM; + } + +#if 0 if( 0==sqlite3_stricmp("sqlite_stat1", zTab) ){ zSql = sqlite3_mprintf( "SELECT tbl, ?2, stat FROM %Q.sqlite_stat1 WHERE tbl IS ?1 AND "

@@ -216857,7 +223873,6 @@ "idx IS (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)", zDb

); if( zSql==0 ) rc = SQLITE_NOMEM; }else{ - int i; const char *zSep = ""; SessionBuffer buf = {0, 0, 0};

@@ -216878,11 +223893,15 @@ }

zSql = (char*)buf.aBuf; nSql = buf.nBuf; } +#endif if( rc==SQLITE_OK ){ rc = sqlite3_prepare_v2(db, zSql, nSql, ppStmt, 0); } sqlite3_free(zSql); + sqlite3_free(nooptest.aBuf); + sqlite3_free(pkfield.aBuf); + sqlite3_free(pkvar.aBuf); return rc; }

@@ -217022,18 +224041,16 @@

for(pTab=pSession->pTable; rc==SQLITE_OK && pTab; pTab=pTab->pNext){ if( pTab->nEntry ){ const char *zName = pTab->zName; - int nCol = 0; /* Number of columns in table */ - u8 *abPK = 0; /* Primary key array */ - const char **azCol = 0; /* Table columns */ int i; /* Used to iterate through hash buckets */ sqlite3_stmt *pSel = 0; /* SELECT statement to query table pTab */ int nRewind = buf.nBuf; /* Initial size of write buffer */ int nNoop; /* Size of buffer after writing tbl header */ + int nOldCol = pTab->nCol; /* Check the table schema is still Ok. */ - rc = sessionTableInfo(0, db, pSession->zDb, zName, &nCol, 0,&azCol,&abPK); - if( !rc && (pTab->nCol!=nCol || memcmp(abPK, pTab->abPK, nCol)) ){ - rc = SQLITE_SCHEMA; + rc = sessionReinitTable(pSession, pTab); + if( rc==SQLITE_OK && pTab->nCol!=nOldCol ){ + rc = sessionUpdateChanges(pSession, pTab); } /* Write a table header */

@@ -217041,8 +224058,9 @@ sessionAppendTableHdr(&buf, bPatchset, pTab, &rc);

/* Build and compile a statement to execute: */ if( rc==SQLITE_OK ){ - rc = sessionSelectStmt( - db, pSession->zDb, zName, nCol, azCol, abPK, &pSel); + rc = sessionSelectStmt(db, 0, pSession->zDb, + zName, pTab->bRowid, pTab->nCol, pTab->azCol, pTab->abPK, &pSel + ); } nNoop = buf.nBuf;

@@ -217050,22 +224068,22 @@ for(i=0; i<pTab->nChange && rc==SQLITE_OK; i++){

SessionChange *p; /* Used to iterate through changes */ for(p=pTab->apChange[i]; rc==SQLITE_OK && p; p=p->pNext){ - rc = sessionSelectBind(pSel, nCol, abPK, p); + rc = sessionSelectBind(pSel, pTab->nCol, pTab->abPK, p); if( rc!=SQLITE_OK ) continue; if( sqlite3_step(pSel)==SQLITE_ROW ){ if( p->op==SQLITE_INSERT ){ int iCol; sessionAppendByte(&buf, SQLITE_INSERT, &rc); sessionAppendByte(&buf, p->bIndirect, &rc); - for(iCol=0; iCol<nCol; iCol++){ + for(iCol=0; iCol<pTab->nCol; iCol++){ sessionAppendCol(&buf, pSel, iCol, &rc); } }else{ - assert( abPK!=0 ); /* Because sessionSelectStmt() returned ok */ - rc = sessionAppendUpdate(&buf, bPatchset, pSel, p, abPK); + assert( pTab->abPK!=0 ); + rc = sessionAppendUpdate(&buf, bPatchset, pSel, p, pTab->abPK); } }else if( p->op!=SQLITE_INSERT ){ - rc = sessionAppendDelete(&buf, bPatchset, p, nCol, abPK); + rc = sessionAppendDelete(&buf, bPatchset, p, pTab->nCol,pTab->abPK); } if( rc==SQLITE_OK ){ rc = sqlite3_reset(pSel);

@@ -217090,7 +224108,6 @@ sqlite3_finalize(pSel);

if( buf.nBuf==nNoop ){ buf.nBuf = nRewind; } - sqlite3_free((char*)azCol); /* cast works around VC++ bug */ } }

@@ -217125,7 +224142,7 @@ ){

int rc; if( pnChangeset==0 || ppChangeset==0 ) return SQLITE_MISUSE; - rc = sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset,ppChangeset); + rc = sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset, ppChangeset); assert( rc || pnChangeset==0 || pSession->bEnableSize==0 || *pnChangeset<=pSession->nMaxChangesetSize );

@@ -217240,6 +224257,19 @@ pSession->bEnableSize = (iArg!=0);

} } *(int*)pArg = pSession->bEnableSize; + break; + } + + case SQLITE_SESSION_OBJCONFIG_ROWID: { + int iArg = *(int*)pArg; + if( iArg>=0 ){ + if( pSession->pTable ){ + rc = SQLITE_MISUSE; + }else{ + pSession->bImplicitPK = (iArg!=0); + } + } + *(int*)pArg = pSession->bImplicitPK; break; }
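
SQLITE_SESSION_OBJCONFIG_ROWID is a new verb for sqlite3session_object_config(): it opts the session into recording changes on tables that have no explicit PRIMARY KEY, keying them on the implicit rowid instead. As the code above enforces, it must be set before any table is attached, otherwise SQLITE_MISUSE is returned. A minimal sketch:

int iVal = 1;   /* 1 = enable, 0 = disable, negative = query only */
sqlite3_session *pSession = 0;
sqlite3session_create(db, "main", &pSession);
sqlite3session_object_config(pSession, SQLITE_SESSION_OBJCONFIG_ROWID, &iVal);
sqlite3session_attach(pSession, 0);   /* NULL attaches all tables */
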

@@ -217501,15 +224531,19 @@ }

} } if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ - sqlite3_int64 v = sessionGetI64(aVal); - if( eType==SQLITE_INTEGER ){ - sqlite3VdbeMemSetInt64(apOut[i], v); + if( (pIn->nData-pIn->iNext)<8 ){ + rc = SQLITE_CORRUPT_BKPT; }else{ - double d; - memcpy(&d, &v, 8); - sqlite3VdbeMemSetDouble(apOut[i], d); + sqlite3_int64 v = sessionGetI64(aVal); + if( eType==SQLITE_INTEGER ){ + sqlite3VdbeMemSetInt64(apOut[i], v); + }else{ + double d; + memcpy(&d, &v, 8); + sqlite3VdbeMemSetDouble(apOut[i], d); + } + pIn->iNext += 8; } - pIn->iNext += 8; } } }

@@ -217778,6 +224812,22 @@ }else if( p->bInvert ){

if( p->op==SQLITE_INSERT ) p->op = SQLITE_DELETE; else if( p->op==SQLITE_DELETE ) p->op = SQLITE_INSERT; } + + /* If this is an UPDATE that is part of a changeset, then check that + ** there are no fields in the old.* record that are not (a) PK fields, + ** or (b) also present in the new.* record. + ** + ** Such records are technically corrupt, but the rebaser was at one + ** point generating them. Under most circumstances this is benign, but + ** can cause spurious SQLITE_RANGE errors when applying the changeset. */ + if( p->bPatchset==0 && p->op==SQLITE_UPDATE){ + for(i=0; i<p->nCol; i++){ + if( p->abPK[i]==0 && p->apValue[i+p->nCol]==0 ){ + sqlite3ValueFree(p->apValue[i]); + p->apValue[i] = 0; + } + } + } } return SQLITE_ROW;

@@ -218215,6 +225265,8 @@ SessionBuffer constraints; /* Deferred constraints are stored here */

SessionBuffer rebase; /* Rebase information (if any) here */ u8 bRebaseStarted; /* If table header is already in rebase */ u8 bRebase; /* True to collect rebase information */ + u8 bIgnoreNoop; /* True to ignore no-op conflicts */ + int bRowid; }; /* Number of prepared UPDATE statements to cache. */

@@ -218465,8 +225517,10 @@ sqlite3 *db, /* Database handle */

const char *zTab, /* Table name */ SessionApplyCtx *p /* Session changeset-apply context */ ){ - return sessionSelectStmt( - db, "main", zTab, p->nCol, p->azCol, p->abPK, &p->pSelect); + /* TODO */ + return sessionSelectStmt(db, p->bIgnoreNoop, + "main", zTab, p->bRowid, p->nCol, p->azCol, p->abPK, &p->pSelect + ); } /*

@@ -218624,22 +225678,34 @@ ** new.* record to the SELECT statement. Or, if it points to a DELETE or

** UPDATE, bind values from the old.* record. */ static int sessionSeekToRow( - sqlite3 *db, /* Database handle */ sqlite3_changeset_iter *pIter, /* Changeset iterator */ - u8 *abPK, /* Primary key flags array */ - sqlite3_stmt *pSelect /* SELECT statement from sessionSelectRow() */ + SessionApplyCtx *p ){ + sqlite3_stmt *pSelect = p->pSelect; int rc; /* Return code */ int nCol; /* Number of columns in table */ int op; /* Changset operation (SQLITE_UPDATE etc.) */ const char *zDummy; /* Unused */ + sqlite3_clear_bindings(pSelect); sqlite3changeset_op(pIter, &zDummy, &nCol, &op, 0); rc = sessionBindRow(pIter, op==SQLITE_INSERT ? sqlite3changeset_new : sqlite3changeset_old, - nCol, abPK, pSelect + nCol, p->abPK, pSelect ); + if( op!=SQLITE_DELETE && p->bIgnoreNoop ){ + int ii; + for(ii=0; rc==SQLITE_OK && ii<nCol; ii++){ + if( p->abPK[ii]==0 ){ + sqlite3_value *pVal = 0; + sqlite3changeset_new(pIter, ii, &pVal); + sqlite3_bind_int(pSelect, ii+1+nCol, (pVal==0)); + if( pVal ) rc = sessionBindValue(pSelect, ii+1, pVal); + } + } + } + if( rc==SQLITE_OK ){ rc = sqlite3_step(pSelect); if( rc!=SQLITE_ROW ) rc = sqlite3_reset(pSelect);

@@ -218754,16 +225820,22 @@ assert( SQLITE_CHANGESET_DATA+1==SQLITE_CHANGESET_NOTFOUND );

/* Bind the new.* PRIMARY KEY values to the SELECT statement. */ if( pbReplace ){ - rc = sessionSeekToRow(p->db, pIter, p->abPK, p->pSelect); + rc = sessionSeekToRow(pIter, p); }else{ rc = SQLITE_OK; } if( rc==SQLITE_ROW ){ /* There exists another row with the new.* primary key. */ - pIter->pConflict = p->pSelect; - res = xConflict(pCtx, eType, pIter); - pIter->pConflict = 0; + if( p->bIgnoreNoop + && sqlite3_column_int(p->pSelect, sqlite3_column_count(p->pSelect)-1) + ){ + res = SQLITE_CHANGESET_OMIT; + }else{ + pIter->pConflict = p->pSelect; + res = xConflict(pCtx, eType, pIter); + pIter->pConflict = 0; + } rc = sqlite3_reset(p->pSelect); }else if( rc==SQLITE_OK ){ if( p->bDeferConstraints && eType==SQLITE_CHANGESET_CONFLICT ){

@@ -218871,7 +225943,7 @@ if( rc!=SQLITE_OK ) return rc;

sqlite3_step(p->pDelete); rc = sqlite3_reset(p->pDelete); - if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 ){ + if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 && p->bIgnoreNoop==0 ){ rc = sessionConflictHandler( SQLITE_CHANGESET_DATA, p, pIter, xConflict, pCtx, pbRetry );

@@ -218928,7 +226000,7 @@ if( p->bStat1 ){

/* Check if there is a conflicting row. For sqlite_stat1, this needs ** to be done using a SELECT, as there is no PRIMARY KEY in the ** database schema to throw an exception if a duplicate is inserted. */ - rc = sessionSeekToRow(p->db, pIter, p->abPK, p->pSelect); + rc = sessionSeekToRow(pIter, p); if( rc==SQLITE_ROW ){ rc = SQLITE_CONSTRAINT; sqlite3_reset(p->pSelect);

@@ -219105,6 +226177,7 @@ pIter->in.bNoDiscard = 1;

memset(&sApply, 0, sizeof(sApply)); sApply.bRebase = (ppRebase && pnRebase); sApply.bInvertConstraints = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); + sApply.bIgnoreNoop = !!(flags & SQLITE_CHANGESETAPPLY_IGNORENOOP); sqlite3_mutex_enter(sqlite3_db_mutex(db)); if( (flags & SQLITE_CHANGESETAPPLY_NOSAVEPOINT)==0 ){ rc = sqlite3_exec(db, "SAVEPOINT changeset_apply", 0, 0, 0);

@@ -219142,6 +226215,7 @@ sApply.abPK = 0;

sApply.bStat1 = 0; sApply.bDeferConstraints = 1; sApply.bRebaseStarted = 0; + sApply.bRowid = 0; memset(&sApply.constraints, 0, sizeof(SessionBuffer)); /* If an xFilter() callback was specified, invoke it now. If the

@@ -219161,8 +226235,8 @@ int nMinCol = 0;

int i; sqlite3changeset_pk(pIter, &abPK, 0); - rc = sessionTableInfo(0, - db, "main", zNew, &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK + rc = sessionTableInfo(0, db, "main", zNew, + &sApply.nCol, &zTab, &sApply.azCol, 0, &sApply.abPK, &sApply.bRowid ); if( rc!=SQLITE_OK ) break; for(i=0; i<sApply.nCol; i++){

@@ -219294,10 +226368,23 @@ ){

sqlite3_changeset_iter *pIter; /* Iterator to skip through changeset */ int bInv = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); int rc = sessionChangesetStart(&pIter, 0, 0, nChangeset, pChangeset, bInv, 1); + u64 savedFlag = db->flags & SQLITE_FkNoAction; + + if( flags & SQLITE_CHANGESETAPPLY_FKNOACTION ){ + db->flags |= ((u64)SQLITE_FkNoAction); + db->aDb[0].pSchema->schema_cookie -= 32; + } + if( rc==SQLITE_OK ){ rc = sessionChangesetApply( db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags ); + } + + if( (flags & SQLITE_CHANGESETAPPLY_FKNOACTION) && savedFlag==0 ){ + assert( db->flags & SQLITE_FkNoAction ); + db->flags &= ~((u64)SQLITE_FkNoAction); + db->aDb[0].pSchema->schema_cookie -= 32; } return rc; }
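
Two apply-time flags are wired up in these hunks: SQLITE_CHANGESETAPPLY_IGNORENOOP suppresses conflict callbacks for changes that are no-ops against the current row contents, and SQLITE_CHANGESETAPPLY_FKNOACTION treats foreign keys as if declared NO ACTION for the duration of the apply. A hedged sketch of passing both to sqlite3changeset_apply_v2(); xConflict is a caller-supplied handler and pChangeset/nChangeset an existing changeset buffer:

int rc = sqlite3changeset_apply_v2(
    db, nChangeset, pChangeset,
    0,               /* xFilter: apply changes for every table */
    xConflict, 0,    /* conflict handler and its context pointer */
    0, 0,            /* no rebase blob requested */
    SQLITE_CHANGESETAPPLY_IGNORENOOP | SQLITE_CHANGESETAPPLY_FKNOACTION
);
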

@@ -219386,6 +226473,9 @@ struct sqlite3_changegroup {

int rc; /* Error code */ int bPatch; /* True to accumulate patchsets */ SessionTable *pList; /* List of tables in current patch */ + + sqlite3 *db; /* Configured by changegroup_schema() */ + char *zDb; /* Configured by changegroup_schema() */ }; /*

@@ -219406,6 +226496,7 @@ SessionChange **ppNew /* OUT: Merged change */

){ SessionChange *pNew = 0; int rc = SQLITE_OK; + assert( aRec!=0 ); if( !pExist ){ pNew = (SessionChange *)sqlite3_malloc64(sizeof(SessionChange) + nRec);

@@ -219572,6 +226663,114 @@ return rc;

} /* +** Check if a changeset entry with nCol columns and the PK array passed +** as the final argument to this function is compatible with SessionTable +** pTab. If so, return 1. Otherwise, if they are incompatible in some way, +** return 0. +*/ +static int sessionChangesetCheckCompat( + SessionTable *pTab, + int nCol, + u8 *abPK +){ + if( pTab->azCol && nCol<pTab->nCol ){ + int ii; + for(ii=0; ii<pTab->nCol; ii++){ + u8 bPK = (ii < nCol) ? abPK[ii] : 0; + if( pTab->abPK[ii]!=bPK ) return 0; + } + return 1; + } + return (pTab->nCol==nCol && 0==memcmp(abPK, pTab->abPK, nCol)); +} + +static int sessionChangesetExtendRecord( + sqlite3_changegroup *pGrp, + SessionTable *pTab, + int nCol, + int op, + const u8 *aRec, + int nRec, + SessionBuffer *pOut +){ + int rc = SQLITE_OK; + int ii = 0; + + assert( pTab->azCol ); + assert( nCol<pTab->nCol ); + + pOut->nBuf = 0; + if( op==SQLITE_INSERT || (op==SQLITE_DELETE && pGrp->bPatch==0) ){ + /* Append the missing default column values to the record. */ + sessionAppendBlob(pOut, aRec, nRec, &rc); + if( rc==SQLITE_OK && pTab->pDfltStmt==0 ){ + rc = sessionPrepareDfltStmt(pGrp->db, pTab, &pTab->pDfltStmt); + } + for(ii=nCol; rc==SQLITE_OK && ii<pTab->nCol; ii++){ + int eType = sqlite3_column_type(pTab->pDfltStmt, ii); + sessionAppendByte(pOut, eType, &rc); + switch( eType ){ + case SQLITE_FLOAT: + case SQLITE_INTEGER: { + i64 iVal; + if( eType==SQLITE_INTEGER ){ + iVal = sqlite3_column_int64(pTab->pDfltStmt, ii); + }else{ + double rVal = sqlite3_column_int64(pTab->pDfltStmt, ii); + memcpy(&iVal, &rVal, sizeof(i64)); + } + if( SQLITE_OK==sessionBufferGrow(pOut, 8, &rc) ){ + sessionPutI64(&pOut->aBuf[pOut->nBuf], iVal); + } + break; + } + + case SQLITE_BLOB: + case SQLITE_TEXT: { + int n = sqlite3_column_bytes(pTab->pDfltStmt, ii); + sessionAppendVarint(pOut, n, &rc); + if( eType==SQLITE_TEXT ){ + const u8 *z = (const u8*)sqlite3_column_text(pTab->pDfltStmt, ii); + sessionAppendBlob(pOut, z, n, &rc); + }else{ + const u8 *z = (const u8*)sqlite3_column_blob(pTab->pDfltStmt, ii); + sessionAppendBlob(pOut, z, n, &rc); + } + break; + } + + default: + assert( eType==SQLITE_NULL ); + break; + } + } + }else if( op==SQLITE_UPDATE ){ + /* Append missing "undefined" entries to the old.* record. And, if this + ** is an UPDATE, to the new.* record as well. */ + int iOff = 0; + if( pGrp->bPatch==0 ){ + for(ii=0; ii<nCol; ii++){ + iOff += sessionSerialLen(&aRec[iOff]); + } + sessionAppendBlob(pOut, aRec, iOff, &rc); + for(ii=0; ii<(pTab->nCol-nCol); ii++){ + sessionAppendByte(pOut, 0x00, &rc); + } + } + + sessionAppendBlob(pOut, &aRec[iOff], nRec-iOff, &rc); + for(ii=0; ii<(pTab->nCol-nCol); ii++){ + sessionAppendByte(pOut, 0x00, &rc); + } + }else{ + assert( op==SQLITE_DELETE && pGrp->bPatch ); + sessionAppendBlob(pOut, aRec, nRec, &rc); + } + + return rc; +} + +/* ** Add all changes in the changeset traversed by the iterator passed as ** the first argument to the changegroup hash tables. */

@@ -219584,6 +226783,7 @@ u8 *aRec;

int nRec; int rc = SQLITE_OK; SessionTable *pTab = 0; + SessionBuffer rec = {0, 0, 0}; while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec, 0) ){ const char *zNew;

@@ -219595,6 +226795,9 @@ SessionChange *pChange;

SessionChange *pExist = 0; SessionChange **pp; + /* Ensure that only changesets, or only patchsets, but not a mixture + ** of both, are being combined. It is an error to try to combine a + ** changeset and a patchset. */ if( pGrp->pList==0 ){ pGrp->bPatch = pIter->bPatchset; }else if( pIter->bPatchset!=pGrp->bPatch ){

@@ -219627,16 +226830,36 @@ memcpy(pTab->abPK, abPK, nCol);

pTab->zName = (char*)&pTab->abPK[nCol]; memcpy(pTab->zName, zNew, nNew+1); + if( pGrp->db ){ + pTab->nCol = 0; + rc = sessionInitTable(0, pTab, pGrp->db, pGrp->zDb); + if( rc ){ + assert( pTab->azCol==0 ); + sqlite3_free(pTab); + break; + } + } + /* The new object must be linked on to the end of the list, not ** simply added to the start of it. This is to ensure that the ** tables within the output of sqlite3changegroup_output() are in ** the right order. */ for(ppTab=&pGrp->pList; *ppTab; ppTab=&(*ppTab)->pNext); *ppTab = pTab; - }else if( pTab->nCol!=nCol || memcmp(pTab->abPK, abPK, nCol) ){ + } + + if( !sessionChangesetCheckCompat(pTab, nCol, abPK) ){ rc = SQLITE_SCHEMA; break; } + } + + if( nCol<pTab->nCol ){ + assert( pGrp->db ); + rc = sessionChangesetExtendRecord(pGrp, pTab, nCol, op, aRec, nRec, &rec); + if( rc ) break; + aRec = rec.aBuf; + nRec = rec.nBuf; } if( sessionGrowHash(0, pIter->bPatchset, pTab) ){

@@ -219676,6 +226899,7 @@ pTab->nEntry++;

} } + sqlite3_free(rec.aBuf); if( rc==SQLITE_OK ) rc = pIter->rc; return rc; }

@@ -219763,6 +226987,31 @@ return rc;

} /* +** Provide a database schema to the changegroup object. +*/ +SQLITE_API int sqlite3changegroup_schema( + sqlite3_changegroup *pGrp, + sqlite3 *db, + const char *zDb +){ + int rc = SQLITE_OK; + + if( pGrp->pList || pGrp->db ){ + /* Cannot add a schema after one or more calls to sqlite3changegroup_add(), + ** or after sqlite3changegroup_schema() has already been called. */ + rc = SQLITE_MISUSE; + }else{ + pGrp->zDb = sqlite3_mprintf("%s", zDb); + if( pGrp->zDb==0 ){ + rc = SQLITE_NOMEM; + }else{ + pGrp->db = db; + } + } + return rc; +} + +/* ** Add the changeset currently stored in buffer pData, size nData bytes, ** to changeset-group p. */
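
sqlite3changegroup_schema() is a new public entry point: handing the changegroup a database schema lets it combine changesets recorded before and after an ALTER TABLE ... ADD COLUMN, i.e. with different column counts for the same table. It must be called before the first sqlite3changegroup_add(). A sketch, assuming pOld/nOld and pNew/nNew are two such changesets:

sqlite3_changegroup *pGrp = 0;
int nOut = 0; void *pOut = 0;

sqlite3changegroup_new(&pGrp);
sqlite3changegroup_schema(pGrp, db, "main");   /* before any _add() call */
sqlite3changegroup_add(pGrp, nOld, pOld);      /* older, narrower schema */
sqlite3changegroup_add(pGrp, nNew, pNew);      /* current schema */
sqlite3changegroup_output(pGrp, &nOut, &pOut);
/* ... use pOut/nOut ... */
sqlite3_free(pOut);
sqlite3changegroup_delete(pGrp);
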

@@ -219825,6 +227074,7 @@ ** Delete a changegroup object.

*/ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup *pGrp){ if( pGrp ){ + sqlite3_free(pGrp->zDb); sessionDeleteTable(0, pGrp->pList); sqlite3_free(pGrp); }

@@ -219974,7 +227224,7 @@ if( pIter->abPK[i] || a2[0]==0 ){

if( !pIter->abPK[i] && a1[0] ) bData = 1; memcpy(pOut, a1, n1); pOut += n1; - }else if( a2[0]!=0xFF ){ + }else if( a2[0]!=0xFF && a1[0] ){ bData = 1; memcpy(pOut, a2, n2); pOut += n2;

@@ -220532,7 +227782,7 @@ ** xPhraseNextColumn()

** See xPhraseFirstColumn above. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 2 */ void *(*xUserData)(Fts5Context*);

@@ -220761,8 +228011,8 @@ ** On the other hand, it may require more CPU cycles to run MATCH queries,

** as separate queries of the FTS index are required for each synonym. ** ** When using methods (2) or (3), it is important that the tokenizer only -** provide synonyms when tokenizing document text (method (2)) or query -** text (method (3)), not both. Doing so will not cause any errors, but is +** provide synonyms when tokenizing document text (method (3)) or query +** text (method (2)), not both. Doing so will not cause any errors, but is ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer;
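
The hunk above corrects which tokenization pass (document text vs. query text) should emit synonyms for methods (2) and (3). For reference, a synonym is emitted from inside a custom tokenizer's xTokenize() by passing FTS5_TOKEN_COLOCATED for the second and subsequent tokens at the same position; a hedged fragment, where rc, pCtx, xToken, iStart and iEnd come from the surrounding tokenizer code:

/* Emit "1st" and the colocated synonym "first" at the same position. */
rc = xToken(pCtx, 0, "1st", 3, iStart, iEnd);
if( rc==SQLITE_OK ){
  rc = xToken(pCtx, FTS5_TOKEN_COLOCATED, "first", 5, iStart, iEnd);
}
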

@@ -220810,7 +228060,7 @@ /* Create a new tokenizer */

int (*xCreateTokenizer)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_tokenizer *pTokenizer, void (*xDestroy)(void*) );

@@ -220819,7 +228069,7 @@ /* Find an existing tokenizer */

int (*xFindTokenizer)( fts5_api *pApi, const char *zName, - void **ppContext, + void **ppUserData, fts5_tokenizer *pTokenizer );

@@ -220827,7 +228077,7 @@ /* Create a new auxiliary function */

int (*xCreateFunction)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_extension_function xFunction, void (*xDestroy)(void*) );

@@ -220998,6 +228248,10 @@ ** nAutomerge:

** The minimum number of segments that an auto-merge operation should ** attempt to merge together. A value of 1 sets the object to use the ** compile time default. Zero disables auto-merge altogether. +** +** bContentlessDelete: +** True if the contentless_delete option was present in the CREATE +** VIRTUAL TABLE statement. ** ** zContent: **
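
bContentlessDelete backs the new contentless_delete=1 option, which makes DELETE work on contentless (content='') fts5 tables. A hedged SQL sketch driven through sqlite3_exec(), with made-up table and column names:

sqlite3_exec(db,
  "CREATE VIRTUAL TABLE ft USING fts5(body, content='', contentless_delete=1);"
  "INSERT INTO ft(rowid, body) VALUES(1, 'hello world');"
  "DELETE FROM ft WHERE rowid=1;",
  0, 0, 0);
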

@@ -221033,6 +228287,7 @@ u8 *abUnindexed; /* True for unindexed columns */

int nPrefix; /* Number of prefix indexes */ int *aPrefix; /* Sizes in bytes of nPrefix prefix indexes */ int eContent; /* An FTS5_CONTENT value */ + int bContentlessDelete; /* "contentless_delete=" option (dflt==0) */ char *zContent; /* content table */ char *zContentRowid; /* "content_rowid=" option value */ int bColumnsize; /* "columnsize=" option value (dflt==1) */

@@ -221044,6 +228299,7 @@ int bLock; /* True when table is preparing statement */

int ePattern; /* FTS_PATTERN_XXX constant */ /* Values loaded from the %_config table */ + int iVersion; /* fts5 file format 'version' */ int iCookie; /* Incremented when %_config is modified */ int pgsz; /* Approximate page size used in %_data */ int nAutomerge; /* 'automerge' setting */

@@ -221052,6 +228308,8 @@ int nUsermerge; /* 'usermerge' setting */

int nHashSize; /* Bytes of memory for in-memory hash */ char *zRank; /* Name of rank function */ char *zRankArgs; /* Arguments to rank function */ + int bSecureDelete; /* 'secure-delete' */ + int nDeleteMerge; /* 'deletemerge' */ /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */ char **pzErrmsg;

@@ -221061,8 +228319,11 @@ int bPrefixIndex; /* True to use prefix-indexes */

#endif }; -/* Current expected value of %_config table 'version' field */ -#define FTS5_CURRENT_VERSION 4 +/* Current expected value of %_config table 'version' field. And +** the expected version if the 'secure-delete' option has ever been +** set on the table. */ +#define FTS5_CURRENT_VERSION 4 +#define FTS5_CURRENT_VERSION_SECUREDELETE 5 #define FTS5_CONTENT_NORMAL 0 #define FTS5_CONTENT_NONE 1

@@ -221228,6 +228489,7 @@ ** defined here only to make it easier to avoid clashes with the flags

** above. */ #define FTS5INDEX_QUERY_SKIPEMPTY 0x0010 #define FTS5INDEX_QUERY_NOOUTPUT 0x0020 +#define FTS5INDEX_QUERY_SKIPHASH 0x0040 /* ** Create/destroy an Fts5Index object.

@@ -221370,6 +228632,9 @@ static int sqlite3Fts5IndexReset(Fts5Index *p);

static int sqlite3Fts5IndexLoadConfig(Fts5Index *p); +static int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin); +static int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid); + /* ** End of interface to code in fts5_index.c. **************************************************************************/

@@ -221382,7 +228647,7 @@ static int sqlite3Fts5GetVarintLen(u32 iVal);

static u8 sqlite3Fts5GetVarint(const unsigned char*, u64*); static int sqlite3Fts5PutVarint(unsigned char *p, u64 v); -#define fts5GetVarint32(a,b) sqlite3Fts5GetVarint32(a,(u32*)&b) +#define fts5GetVarint32(a,b) sqlite3Fts5GetVarint32(a,(u32*)&(b)) #define fts5GetVarint sqlite3Fts5GetVarint #define fts5FastGetVarint32(a, iOff, nVal) { \

@@ -221454,6 +228719,11 @@ ** Empty (but do not delete) a hash table.

*/ static void sqlite3Fts5HashClear(Fts5Hash*); +/* +** Return true if the hash is empty, false otherwise. +*/ +static int sqlite3Fts5HashIsEmpty(Fts5Hash*); + static int sqlite3Fts5HashQuery( Fts5Hash*, /* Hash table to query */ int nPre,

@@ -221473,6 +228743,7 @@ const char **pzTerm, /* OUT: term (nul-terminated) */

const u8 **ppDoclist, /* OUT: pointer to doclist */ int *pnDoclist /* OUT: size of doclist in bytes */ ); + /*

@@ -221718,7 +228989,8 @@ #define FTS5_PLUS 14

#define FTS5_STAR 15 /* This file is automatically generated by Lemon from input grammar -** source file "fts5parse.y". */ +** source file "fts5parse.y". +*/ /* ** 2000-05-29 **

@@ -223308,15 +230580,19 @@ ** Start of highlight() implementation.

*/ typedef struct HighlightContext HighlightContext; struct HighlightContext { - CInstIter iter; /* Coalesced Instance Iterator */ - int iPos; /* Current token offset in zIn[] */ + /* Constant parameters to fts5HighlightCb() */ int iRangeStart; /* First token to include */ int iRangeEnd; /* If non-zero, last token to include */ const char *zOpen; /* Opening highlight */ const char *zClose; /* Closing highlight */ const char *zIn; /* Input text */ int nIn; /* Size of input text in bytes */ - int iOff; /* Current offset within zIn[] */ + + /* Variables modified by fts5HighlightCb() */ + CInstIter iter; /* Coalesced Instance Iterator */ + int iPos; /* Current token offset in zIn[] */ + int iOff; /* Have copied up to this offset in zIn[] */ + int bOpen; /* True if highlight is open */ char *zOut; /* Output value */ };

@@ -223349,8 +230625,8 @@ void *pContext, /* Pointer to HighlightContext object */

int tflags, /* Mask of FTS5_TOKEN_* flags */ const char *pToken, /* Buffer containing token */ int nToken, /* Size of token in bytes */ - int iStartOff, /* Start offset of token */ - int iEndOff /* End offset of token */ + int iStartOff, /* Start byte offset of token */ + int iEndOff /* End byte offset of token */ ){ HighlightContext *p = (HighlightContext*)pContext; int rc = SQLITE_OK;

@@ -223361,35 +230637,52 @@

if( tflags & FTS5_TOKEN_COLOCATED ) return SQLITE_OK; iPos = p->iPos++; - if( p->iRangeEnd>0 ){ + if( p->iRangeEnd>=0 ){ if( iPos<p->iRangeStart || iPos>p->iRangeEnd ) return SQLITE_OK; if( p->iRangeStart && iPos==p->iRangeStart ) p->iOff = iStartOff; } - if( iPos==p->iter.iStart ){ + /* If the parenthesis is open, and this token is not part of the current + ** phrase, and the starting byte offset of this token is past the point + ** that has currently been copied into the output buffer, close the + ** parenthesis. */ + if( p->bOpen + && (iPos<=p->iter.iStart || p->iter.iStart<0) + && iStartOff>p->iOff + ){ + fts5HighlightAppend(&rc, p, p->zClose, -1); + p->bOpen = 0; + } + + /* If this is the start of a new phrase, and the highlight is not open: + ** + ** * copy text from the input up to the start of the phrase, and + ** * open the highlight. + */ + if( iPos==p->iter.iStart && p->bOpen==0 ){ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iStartOff - p->iOff); fts5HighlightAppend(&rc, p, p->zOpen, -1); p->iOff = iStartOff; + p->bOpen = 1; } if( iPos==p->iter.iEnd ){ - if( p->iRangeEnd && p->iter.iStart<p->iRangeStart ){ + if( p->bOpen==0 ){ + assert( p->iRangeEnd>=0 ); fts5HighlightAppend(&rc, p, p->zOpen, -1); + p->bOpen = 1; } fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); - fts5HighlightAppend(&rc, p, p->zClose, -1); p->iOff = iEndOff; + if( rc==SQLITE_OK ){ rc = fts5CInstIterNext(&p->iter); } } - if( p->iRangeEnd>0 && iPos==p->iRangeEnd ){ + if( iPos==p->iRangeEnd ){ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); p->iOff = iEndOff; - if( iPos>=p->iter.iStart && iPos<p->iter.iEnd ){ - fts5HighlightAppend(&rc, p, p->zClose, -1); - } } return rc;
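
For context, the callback reworked above backs the highlight() and snippet() auxiliary functions. A small usage sketch, assuming an FTS5 table named ft whose first column (index 0) holds the searchable text; names are illustrative:

#include <stdio.h>
#include <sqlite3.h>

/* Print each matching row with matched terms wrapped in <b>..</b> tags. */
static int print_highlights(sqlite3 *db, const char *zMatch){
  const char *zSql =
    "SELECT highlight(ft, 0, '<b>', '</b>') FROM ft WHERE ft MATCH ?1";
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
  if( rc==SQLITE_OK ){
    sqlite3_bind_text(pStmt, 1, zMatch, -1, SQLITE_TRANSIENT);
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
    }
    rc = sqlite3_finalize(pStmt);
  }
  return rc;
}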

@@ -223419,6 +230712,7 @@ iCol = sqlite3_value_int(apVal[0]);

memset(&ctx, 0, sizeof(HighlightContext)); ctx.zOpen = (const char*)sqlite3_value_text(apVal[1]); ctx.zClose = (const char*)sqlite3_value_text(apVal[2]); + ctx.iRangeEnd = -1; rc = pApi->xColumnText(pFts, iCol, &ctx.zIn, &ctx.nIn); if( ctx.zIn ){

@@ -223428,6 +230722,9 @@ }

if( rc==SQLITE_OK ){ rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); + } + if( ctx.bOpen ){ + fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); } fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff);

@@ -223604,6 +230901,7 @@ memset(&ctx, 0, sizeof(HighlightContext));

iCol = sqlite3_value_int(apVal[0]); ctx.zOpen = fts5ValueToText(apVal[1]); ctx.zClose = fts5ValueToText(apVal[2]); + ctx.iRangeEnd = -1; zEllips = fts5ValueToText(apVal[3]); nToken = sqlite3_value_int(apVal[4]);

@@ -223705,6 +231003,9 @@ }

if( rc==SQLITE_OK ){ rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); + } + if( ctx.bOpen ){ + fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); } if( ctx.iRangeEnd>=(nColSize-1) ){ fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff);

@@ -224344,6 +231645,8 @@ #define FTS5_DEFAULT_USERMERGE 4

#define FTS5_DEFAULT_CRISISMERGE 16 #define FTS5_DEFAULT_HASHSIZE (1024*1024) +#define FTS5_DEFAULT_DELETE_AUTOMERGE 10 /* default 10% */ + /* Maximum allowed page size */ #define FTS5_MAX_PAGE_SIZE (64*1024)

@@ -224674,6 +231977,16 @@ }

return rc; } + if( sqlite3_strnicmp("contentless_delete", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed contentless_delete=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bContentlessDelete = (zArg[0]=='1'); + } + return rc; + } + if( sqlite3_strnicmp("content_rowid", zCmd, nCmd)==0 ){ if( pConfig->zContentRowid ){ *pzErr = sqlite3_mprintf("multiple content_rowid=... directives");

@@ -224872,6 +232185,7 @@ *pzErr = sqlite3_mprintf("reserved fts5 table name: %s", pRet->zName);

rc = SQLITE_ERROR; } + assert( (pRet->abUnindexed && pRet->azCol) || rc!=SQLITE_OK ); for(i=3; rc==SQLITE_OK && i<nArg; i++){ const char *zOrig = azArg[i]; const char *z;

@@ -224917,6 +232231,28 @@ sqlite3_free(zOne);

sqlite3_free(zTwo); } + /* We only allow contentless_delete=1 if the table is indeed contentless. */ + if( rc==SQLITE_OK + && pRet->bContentlessDelete + && pRet->eContent!=FTS5_CONTENT_NONE + ){ + *pzErr = sqlite3_mprintf( + "contentless_delete=1 requires a contentless table" + ); + rc = SQLITE_ERROR; + } + + /* We only allow contentless_delete=1 if columnsize=0 is not present. + ** + ** This restriction may be removed at some point. + */ + if( rc==SQLITE_OK && pRet->bContentlessDelete && pRet->bColumnsize==0 ){ + *pzErr = sqlite3_mprintf( + "contentless_delete=1 is incompatible with columnsize=0" + ); + rc = SQLITE_ERROR; + } + /* If a tokenizer= option was successfully parsed, the tokenizer has ** already been allocated. Otherwise, allocate an instance of the default ** tokenizer (unicode61) now. */
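
A sketch of how the new contentless_delete=1 option is exercised within the constraints enforced above (the table must be contentless, i.e. content='', and columnsize=0 is not allowed). Table and column names are illustrative:

#include <sqlite3.h>

/* Create a contentless FTS5 table that supports DELETE, insert a row with
** an explicit rowid, then delete it again. With contentless_delete=1 the
** delete is recorded (as a tombstone) instead of being rejected. */
static int contentless_delete_demo(sqlite3 *db){
  return sqlite3_exec(db,
    "CREATE VIRTUAL TABLE ft USING fts5(body, content='', contentless_delete=1);"
    "INSERT INTO ft(rowid, body) VALUES(1, 'some text to index');"
    "DELETE FROM ft WHERE rowid=1;",
    0, 0, 0);
}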

@@ -225211,6 +232547,18 @@ pConfig->nCrisisMerge = nCrisisMerge;

} } + else if( 0==sqlite3_stricmp(zKey, "deletemerge") ){ + int nVal = -1; + if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ + nVal = sqlite3_value_int(pVal); + }else{ + *pbBadkey = 1; + } + if( nVal<0 ) nVal = FTS5_DEFAULT_DELETE_AUTOMERGE; + if( nVal>100 ) nVal = 0; + pConfig->nDeleteMerge = nVal; + } + else if( 0==sqlite3_stricmp(zKey, "rank") ){ const char *zIn = (const char*)sqlite3_value_text(pVal); char *zRank;
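
The 'deletemerge' setting parsed above can be changed at runtime through the usual FTS5 configuration INSERT. A sketch (the table name ft is illustrative; per the code above, a negative value restores the default of 10 and a value over 100 disables the feature):

#include <sqlite3.h>

/* Ask FTS5 to merge a level once 25% or more of its entries are tombstones. */
static int set_deletemerge(sqlite3 *db){
  return sqlite3_exec(db,
    "INSERT INTO ft(ft, rank) VALUES('deletemerge', 25);", 0, 0, 0);
}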

@@ -225225,6 +232573,18 @@ }else if( rc==SQLITE_ERROR ){

rc = SQLITE_OK; *pbBadkey = 1; } + } + + else if( 0==sqlite3_stricmp(zKey, "secure-delete") ){ + int bVal = -1; + if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ + bVal = sqlite3_value_int(pVal); + } + if( bVal<0 ){ + *pbBadkey = 1; + }else{ + pConfig->bSecureDelete = (bVal ? 1 : 0); + } }else{ *pbBadkey = 1; }
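
Likewise for the new 'secure-delete' flag handled above (non-integer or negative values are rejected as a bad key). A sketch, again with an illustrative table name; once the option has been set on a table, the version check further down accepts the FTS5_CURRENT_VERSION_SECUREDELETE value of 5:

#include <sqlite3.h>

/* Enable secure-delete mode for table ft so that deleted entries are
** physically removed from the index rather than merely marked. */
static int enable_secure_delete(sqlite3 *db){
  return sqlite3_exec(db,
    "INSERT INTO ft(ft, rank) VALUES('secure-delete', 1);", 0, 0, 0);
}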

@@ -225247,6 +232607,7 @@ pConfig->nAutomerge = FTS5_DEFAULT_AUTOMERGE;

pConfig->nUsermerge = FTS5_DEFAULT_USERMERGE; pConfig->nCrisisMerge = FTS5_DEFAULT_CRISISMERGE; pConfig->nHashSize = FTS5_DEFAULT_HASHSIZE; + pConfig->nDeleteMerge = FTS5_DEFAULT_DELETE_AUTOMERGE; zSql = sqlite3Fts5Mprintf(&rc, zSelect, pConfig->zDb, pConfig->zName);

@@ -225269,15 +232630,20 @@ }

rc = sqlite3_finalize(p); } - if( rc==SQLITE_OK && iVersion!=FTS5_CURRENT_VERSION ){ + if( rc==SQLITE_OK + && iVersion!=FTS5_CURRENT_VERSION + && iVersion!=FTS5_CURRENT_VERSION_SECUREDELETE + ){ rc = SQLITE_ERROR; if( pConfig->pzErrmsg ){ assert( 0==*pConfig->pzErrmsg ); - *pConfig->pzErrmsg = sqlite3_mprintf( - "invalid fts5 file format (found %d, expected %d) - run 'rebuild'", - iVersion, FTS5_CURRENT_VERSION + *pConfig->pzErrmsg = sqlite3_mprintf("invalid fts5 file format " + "(found %d, expected %d or %d) - run 'rebuild'", + iVersion, FTS5_CURRENT_VERSION, FTS5_CURRENT_VERSION_SECUREDELETE ); } + }else{ + pConfig->iVersion = iVersion; } if( rc==SQLITE_OK ){

@@ -225304,6 +232670,10 @@

/* #include "fts5Int.h" */ /* #include "fts5parse.h" */ + +#ifndef SQLITE_FTS5_MAX_EXPR_DEPTH +# define SQLITE_FTS5_MAX_EXPR_DEPTH 256 +#endif /* ** All token types in the generated fts5parse.h file are greater than 0.

@@ -225345,11 +232715,17 @@ ** FTS5_OR (nChild, apChild valid)

** FTS5_NOT (nChild, apChild valid) ** FTS5_STRING (pNear valid) ** FTS5_TERM (pNear valid) +** +** iHeight: +** Distance from this node to furthest leaf. This is always 0 for nodes +** of type FTS5_STRING and FTS5_TERM. For all other nodes it is one +** greater than the largest child value. */ struct Fts5ExprNode { int eType; /* Node type */ int bEof; /* True at EOF */ int bNomatch; /* True if entry is not a match */ + int iHeight; /* Distance to tree leaf nodes */ /* Next method for this node. */ int (*xNext)(Fts5Expr*, Fts5ExprNode*, int, i64);

@@ -225419,6 +232795,31 @@ Fts5ExprNode *pExpr; /* Result of a successful parse */

int bPhraseToAnd; /* Convert "a+b" to "a AND b" */ }; +/* +** Check that the Fts5ExprNode.iHeight variables are set correctly in +** the expression tree passed as the only argument. +*/ +#ifndef NDEBUG +static void assert_expr_depth_ok(int rc, Fts5ExprNode *p){ + if( rc==SQLITE_OK ){ + if( p->eType==FTS5_TERM || p->eType==FTS5_STRING || p->eType==0 ){ + assert( p->iHeight==0 ); + }else{ + int ii; + int iMaxChild = 0; + for(ii=0; ii<p->nChild; ii++){ + Fts5ExprNode *pChild = p->apChild[ii]; + iMaxChild = MAX(iMaxChild, pChild->iHeight); + assert_expr_depth_ok(SQLITE_OK, pChild); + } + assert( p->iHeight==iMaxChild+1 ); + } + } +} +#else +# define assert_expr_depth_ok(rc, p) +#endif + static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...){ va_list ap; va_start(ap, zFmt);

@@ -225533,6 +232934,8 @@ sqlite3Fts5Parser(pEngine, t, token, &sParse);

}while( sParse.rc==SQLITE_OK && t!=FTS5_EOF ); sqlite3Fts5ParserFree(pEngine, fts5ParseFree); + assert_expr_depth_ok(sParse.rc, sParse.pExpr); + /* If the LHS of the MATCH expression was a user column, apply the ** implicit column-filter. */ if( iCol<pConfig->nCol && sParse.pExpr && sParse.rc==SQLITE_OK ){

@@ -225578,6 +232981,19 @@ return sParse.rc;

} /* +** Assuming that buffer z is at least nByte bytes in size and contains a +** valid utf-8 string, return the number of characters in the string. +*/ +static int fts5ExprCountChar(const char *z, int nByte){ + int nRet = 0; + int ii; + for(ii=0; ii<nByte; ii++){ + if( (z[ii] & 0xC0)!=0x80 ) nRet++; + } + return nRet; +} + +/* ** This function is only called when using the special 'trigram' tokenizer. ** Argument zText contains the text of a LIKE or GLOB pattern matched ** against column iCol. This function creates and compiles an FTS5 MATCH
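
The helper above counts UTF-8 characters by counting the bytes that are not continuation bytes (continuation bytes have the form 10xxxxxx). A standalone sketch of the same idea:

/* Count UTF-8 characters in the first nByte bytes of z by skipping
** continuation bytes, i.e. bytes whose top two bits are 10. */
static int count_utf8_chars(const char *z, int nByte){
  int nChar = 0;
  int i;
  for(i=0; i<nByte; i++){
    if( ((unsigned char)z[i] & 0xC0)!=0x80 ) nChar++;
  }
  return nChar;
}

/* Example: "h\xC3\xA9llo" ("héllo") is 6 bytes but 5 characters, so
** count_utf8_chars("h\xC3\xA9llo", 6) returns 5. */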

@@ -225614,7 +233030,8 @@ while( i<=nText ){

if( i==nText || zText[i]==aSpec[0] || zText[i]==aSpec[1] || zText[i]==aSpec[2] ){ - if( i-iFirst>=3 ){ + + if( fts5ExprCountChar(&zText[iFirst], i-iFirst)>=3 ){ int jj; zExpr[iOut++] = '"'; for(jj=iFirst; jj<i; jj++){

@@ -225681,7 +233098,7 @@ static int sqlite3Fts5ExprAnd(Fts5Expr **pp1, Fts5Expr *p2){

Fts5Parse sParse; memset(&sParse, 0, sizeof(sParse)); - if( *pp1 ){ + if( *pp1 && p2 ){ Fts5Expr *p1 = *pp1; int nPhrase = p1->nPhrase + p2->nPhrase;

@@ -225706,7 +233123,7 @@ }

} sqlite3_free(p2->apExprPhrase); sqlite3_free(p2); - }else{ + }else if( p2 ){ *pp1 = p2; }

@@ -227480,6 +234897,7 @@ }

} static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){ + int ii = p->nChild; if( p->eType!=FTS5_NOT && pSub->eType==p->eType ){ int nByte = sizeof(Fts5ExprNode*) * pSub->nChild; memcpy(&p->apChild[p->nChild], pSub->apChild, nByte);

@@ -227487,6 +234905,9 @@ p->nChild += pSub->nChild;

sqlite3_free(pSub); }else{ p->apChild[p->nChild++] = pSub; + } + for( ; ii<p->nChild; ii++){ + p->iHeight = MAX(p->iHeight, p->apChild[ii]->iHeight + 1); } }

@@ -227518,6 +234939,7 @@ pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte);

if( pRet ){ pRet->eType = FTS5_AND; pRet->nChild = nTerm; + pRet->iHeight = 1; fts5ExprAssignXNext(pRet); pParse->nPhrase--; for(ii=0; ii<nTerm; ii++){

@@ -227623,6 +235045,14 @@ }

}else{ fts5ExprAddChildren(pRet, pLeft); fts5ExprAddChildren(pRet, pRight); + if( pRet->iHeight>SQLITE_FTS5_MAX_EXPR_DEPTH ){ + sqlite3Fts5ParseError(pParse, + "fts5 expression tree is too large (maximum depth %d)", + SQLITE_FTS5_MAX_EXPR_DEPTH + ); + sqlite3_free(pRet); + pRet = 0; + } } } }

@@ -227701,7 +235131,7 @@

return pRet; } -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){ sqlite3_int64 nByte = 0; Fts5ExprTerm *p;

@@ -227807,6 +235237,8 @@ if( zRet ) zRet = fts5PrintfAppend(zRet, "}");

if( zRet==0 ) return 0; } + }else if( pExpr->eType==0 ){ + zRet = sqlite3_mprintf("{}"); }else{ char const *zOp = 0; int i;

@@ -228068,14 +235500,14 @@ if( nArg==2 ) bRemoveDiacritics = sqlite3_value_int(apVal[1]);

sqlite3_result_int(pCtx, sqlite3Fts5UnicodeFold(iCode, bRemoveDiacritics)); } } -#endif /* ifdef SQLITE_TEST */ +#endif /* if SQLITE_TEST || SQLITE_FTS5_DEBUG */ /* ** This is called during initialization to register the fts5_expr() scalar ** UDF with the SQLite handle passed as the only argument. */ static int sqlite3Fts5ExprInit(Fts5Global *pGlobal, sqlite3 *db){ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) struct Fts5ExprFunc { const char *z; void (*x)(sqlite3_context*,int,sqlite3_value**);

@@ -228792,10 +236224,8 @@ return pRet;

} /* -** Extract all tokens from hash table iHash and link them into a list -** in sorted order. The hash table is cleared before returning. It is -** the responsibility of the caller to free the elements of the returned -** list. +** Link all tokens from hash table iHash into a list in sorted order. The +** tokens are not removed from the hash table. */ static int fts5HashEntrySort( Fts5Hash *pHash,

@@ -228835,7 +236265,6 @@ for(i=0; i<nMergeSlot; i++){

pList = fts5HashEntryMerge(pList, ap[i]); } - pHash->nEntry = 0; sqlite3_free(ap); *ppSorted = pList; return SQLITE_OK;

@@ -228889,6 +236318,28 @@ ){

return fts5HashEntrySort(p, pTerm, nTerm, &p->pScan); } +#ifdef SQLITE_DEBUG +static int fts5HashCount(Fts5Hash *pHash){ + int nEntry = 0; + int ii; + for(ii=0; ii<pHash->nSlot; ii++){ + Fts5HashEntry *p = 0; + for(p=pHash->aSlot[ii]; p; p=p->pHashNext){ + nEntry++; + } + } + return nEntry; +} +#endif + +/* +** Return true if the hash table is empty, false otherwise. +*/ +static int sqlite3Fts5HashIsEmpty(Fts5Hash *pHash){ + assert( pHash->nEntry==fts5HashCount(pHash) ); + return pHash->nEntry==0; +} + static void sqlite3Fts5HashScanNext(Fts5Hash *p){ assert( !sqlite3Fts5HashScanEof(p) ); p->pScan = p->pScan->pScanNext;

@@ -228975,6 +236426,26 @@ #if FTS5_MAX_PREFIX_INDEXES > 31

# error "FTS5_MAX_PREFIX_INDEXES is too large" #endif +#define FTS5_MAX_LEVEL 64 + +/* +** There are two versions of the format used for the structure record: +** +** 1. the legacy format, that may be read by all fts5 versions, and +** +** 2. the V2 format, which is used by contentless_delete=1 databases. +** +** Both begin with a 4-byte "configuration cookie" value. Then, a legacy +** format structure record contains a varint - the number of levels in +** the structure. Whereas a V2 structure record contains the constant +** 4 bytes [0xff 0x00 0x00 0x01]. This is unambiguous as the value of a +** varint has to be at least 16256 to begin with "0xFF". And the default +** maximum number of levels is 64. +** +** See below for more on structure record formats. +*/ +#define FTS5_STRUCTURE_V2 "\xFF\x00\x00\x01" + /* ** Details: **

@@ -228982,7 +236453,7 @@ ** The %_data table managed by this module,

** ** CREATE TABLE %_data(id INTEGER PRIMARY KEY, block BLOB); ** -** , contains the following 5 types of records. See the comments surrounding +** , contains the following 6 types of records. See the comments surrounding ** the FTS5_*_ROWID macros below for a description of how %_data rowids are ** assigned to each fo them. **

@@ -228991,12 +236462,12 @@ **

** The set of segments that make up an index - the index structure - are ** recorded in a single record within the %_data table. The record consists ** of a single 32-bit configuration cookie value followed by a list of -** SQLite varints. If the FTS table features more than one index (because -** there are one or more prefix indexes), it is guaranteed that all share -** the same cookie value. +** SQLite varints. +** +** If the structure record is a V2 record, the configuration cookie is +** followed by the following 4 bytes: [0xFF 0x00 0x00 0x01]. ** -** Immediately following the configuration cookie, the record begins with -** three varints: +** Next, the record continues with three varints: ** ** + number of levels, ** + total number of segments on all levels,

@@ -229010,6 +236481,12 @@ ** + for each segment from oldest to newest:

** + segment id (always > 0) ** + first leaf page number (often 1, always greater than 0) ** + final leaf page number +** +** Then, for V2 structures only: +** +** + lower origin counter value, +** + upper origin counter value, +** + the number of tombstone hash pages. ** ** 2. The Averages Record: **

@@ -229126,6 +236603,38 @@ **

** * A list of delta-encoded varints - the first rowid on each subsequent ** child page. ** +** 6. Tombstone Hash Page +** +** These records are only ever present in contentless_delete=1 tables. +** There are zero or more of these associated with each segment. They +** are used to store the tombstone rowids for rows contained in the +** associated segments. +** +** The set of nHashPg tombstone hash pages associated with a single +** segment together form a single hash table containing tombstone rowids. +** To find the page of the hash on which a key might be stored: +** +** iPg = (rowid % nHashPg) +** +** Then, within page iPg, which has nSlot slots: +** +** iSlot = (rowid / nHashPg) % nSlot +** +** Each tombstone hash page begins with an 8 byte header: +** +** 1-byte: Key-size (the size in bytes of each slot). Either 4 or 8. +** 1-byte: rowid-0-tombstone flag. This flag is only valid on the +** first tombstone hash page for each segment (iPg=0). If set, +** the hash table contains rowid 0. If clear, it does not. +** Rowid 0 is handled specially. +** 2-bytes: unused. +** 4-bytes: Big-endian integer containing number of entries on page. +** +** Following this are nSlot 4 or 8 byte slots (depending on the key-size +** in the first byte of the page header). The number of slots may be +** determined based on the size of the page record and the key-size: +** +** nSlot = (nByte - 8) / key-size */ /*
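
A sketch of the page/slot arithmetic described above, assuming the caller already knows how many tombstone hash pages are attached to the segment (nHashPg) and the slot count of the target page (nSlot, i.e. (nByte-8)/key-size). Rowid 0 is not covered here, since it is handled by the dedicated flag byte in the page header:

#include <stdint.h>

/* Compute the tombstone hash page and initial slot for a rowid. Collisions
** are resolved by linear probing, so this is only the starting slot. */
static void tombstone_slot(
  uint64_t iRowid, int nHashPg, int nSlot, int *piPg, int *piSlot
){
  *piPg = (int)(iRowid % (uint64_t)nHashPg);
  *piSlot = (int)((iRowid / (uint64_t)nHashPg) % (uint64_t)nSlot);
}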

@@ -229159,6 +236668,7 @@ )

#define FTS5_SEGMENT_ROWID(segid, pgno) fts5_dri(segid, 0, 0, pgno) #define FTS5_DLIDX_ROWID(segid, height, pgno) fts5_dri(segid, 1, height, pgno) +#define FTS5_TOMBSTONE_ROWID(segid,ipg) fts5_dri(segid+(1<<16), 0, 0, ipg) #ifdef SQLITE_DEBUG static int sqlite3Fts5Corrupt() { return SQLITE_CORRUPT_VTAB; }

@@ -229194,6 +236704,12 @@ };

/* ** One object per %_data table. +** +** nContentlessDelete: +** The number of contentless delete operations since the most recent +** call to fts5IndexFlush() or fts5IndexDiscardData(). This is tracked +** so that extra auto-merge work can be done by fts5IndexFlush() to +** account for the delete operations. */ struct Fts5Index { Fts5Config *pConfig; /* Virtual table configuration */

@@ -229208,6 +236724,8 @@ Fts5Hash *pHash; /* Hash table for in-memory data */

int nPendingData; /* Current bytes of pending data */ i64 iWriteRowid; /* Rowid for current doc being written */ int bDelete; /* Current write is a delete */ + int nContentlessDelete; /* Number of contentless delete ops */ + int nPendingRow; /* Number of INSERT in hash table */ /* Error state. */ int rc; /* Current error code */

@@ -229221,6 +236739,8 @@ sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=?" */

sqlite3_stmt *pIdxSelect; int nRead; /* Total number of blocks read */ + sqlite3_stmt *pDeleteFromIdx; + sqlite3_stmt *pDataVersion; i64 iStructVersion; /* data_version when pStruct read */ Fts5Structure *pStruct; /* Current db structure (or NULL) */

@@ -229240,11 +236760,23 @@ /*

** The contents of the "structure" record for each index are represented ** using an Fts5Structure record in memory. Which uses instances of the ** other Fts5StructureXXX types as components. +** +** nOriginCntr: +** This value is set to non-zero for structure records created for +** contentlessdelete=1 tables only. In that case it represents the +** origin value to apply to the next top-level segment created. */ struct Fts5StructureSegment { int iSegid; /* Segment id */ int pgnoFirst; /* First leaf page number in segment */ int pgnoLast; /* Last leaf page number in segment */ + + /* contentlessdelete=1 tables only: */ + u64 iOrigin1; + u64 iOrigin2; + int nPgTombstone; /* Number of tombstone hash table pages */ + u64 nEntryTombstone; /* Number of tombstone entries that "count" */ + u64 nEntry; /* Number of rows in this segment */ }; struct Fts5StructureLevel { int nMerge; /* Number of segments in incr-merge */

@@ -229254,6 +236786,7 @@ };

struct Fts5Structure { int nRef; /* Object reference count */ u64 nWriteCounter; /* Total leaves written to level 0 */ + u64 nOriginCntr; /* Origin value for next top-level segment */ int nSegment; /* Total segments in this structure */ int nLevel; /* Number of levels in this index */ Fts5StructureLevel aLevel[1]; /* Array of nLevel level objects */

@@ -229313,9 +236846,6 @@ **

** iLeafOffset: ** Byte offset within the current leaf that is the first byte of the ** position list data (one byte passed the position-list size field). -** rowid field of the current entry. Usually this is the size field of the -** position list data. The exception is if the rowid for the current entry -** is the last thing on the leaf page. ** ** pLeaf: ** Buffer containing current leaf page data. Set to NULL at EOF.

@@ -229345,6 +236875,13 @@ ** start of the "position-list-size" field within the page.

** ** iTermIdx: ** Index of current term on iTermLeafPgno. +** +** apTombstone/nTombstone: +** These are used for contentless_delete=1 tables only. When the cursor +** is first allocated, the apTombstone[] array is allocated so that it +** is large enough for all tombstones hash pages associated with the +** segment. The pages themselves are loaded lazily from the database as +** they are required. */ struct Fts5SegIter { Fts5StructureSegment *pSeg; /* Segment to iterate through */

@@ -229353,6 +236890,8 @@ int iLeafPgno; /* Current leaf page number */

Fts5Data *pLeaf; /* Current leaf data */ Fts5Data *pNextLeaf; /* Leaf page (iLeafPgno+1) */ i64 iLeafOffset; /* Byte offset within current leaf */ + Fts5Data **apTombstone; /* Array of tombstone pages */ + int nTombstone; /* Next method */ void (*xNext)(Fts5Index*, Fts5SegIter*, int*);

@@ -229483,6 +237022,60 @@ return ((u16)aIn[0] << 8) + aIn[1];

} /* +** The only argument points to a buffer at least 8 bytes in size. This +** function interprets the first 8 bytes of the buffer as a 64-bit big-endian +** unsigned integer and returns the result. +*/ +static u64 fts5GetU64(u8 *a){ + return ((u64)a[0] << 56) + + ((u64)a[1] << 48) + + ((u64)a[2] << 40) + + ((u64)a[3] << 32) + + ((u64)a[4] << 24) + + ((u64)a[5] << 16) + + ((u64)a[6] << 8) + + ((u64)a[7] << 0); +} + +/* +** The only argument points to a buffer at least 4 bytes in size. This +** function interprets the first 4 bytes of the buffer as a 32-bit big-endian +** unsigned integer and returns the result. +*/ +static u32 fts5GetU32(const u8 *a){ + return ((u32)a[0] << 24) + + ((u32)a[1] << 16) + + ((u32)a[2] << 8) + + ((u32)a[3] << 0); +} + +/* +** Write iVal, formated as a 64-bit big-endian unsigned integer, to the +** buffer indicated by the first argument. +*/ +static void fts5PutU64(u8 *a, u64 iVal){ + a[0] = ((iVal >> 56) & 0xFF); + a[1] = ((iVal >> 48) & 0xFF); + a[2] = ((iVal >> 40) & 0xFF); + a[3] = ((iVal >> 32) & 0xFF); + a[4] = ((iVal >> 24) & 0xFF); + a[5] = ((iVal >> 16) & 0xFF); + a[6] = ((iVal >> 8) & 0xFF); + a[7] = ((iVal >> 0) & 0xFF); +} + +/* +** Write iVal, formated as a 32-bit big-endian unsigned integer, to the +** buffer indicated by the first argument. +*/ +static void fts5PutU32(u8 *a, u32 iVal){ + a[0] = ((iVal >> 24) & 0xFF); + a[1] = ((iVal >> 16) & 0xFF); + a[2] = ((iVal >> 8) & 0xFF); + a[3] = ((iVal >> 0) & 0xFF); +} + +/* ** Allocate and return a buffer at least nByte bytes in size. ** ** If an OOM error is encountered, return NULL and set the error code in
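
A self-contained round trip over the big-endian encoding used by the new fts5PutU64()/fts5GetU64() helpers (the function names below are local to this sketch):

#include <stdint.h>

/* Encode a 64-bit value as 8 big-endian bytes, then decode it again. */
static void put_u64_be(uint8_t *a, uint64_t v){
  int i;
  for(i=0; i<8; i++) a[i] = (uint8_t)(v >> (56 - 8*i));
}
static uint64_t get_u64_be(const uint8_t *a){
  uint64_t v = 0;
  int i;
  for(i=0; i<8; i++) v = (v<<8) | a[i];
  return v;
}

/* Example: put_u64_be(buf, 0x1122334455667788ULL) followed by
** get_u64_be(buf) yields 0x1122334455667788 again. */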

@@ -229709,10 +237302,17 @@

/* ** Remove all records associated with segment iSegid. */ -static void fts5DataRemoveSegment(Fts5Index *p, int iSegid){ +static void fts5DataRemoveSegment(Fts5Index *p, Fts5StructureSegment *pSeg){ + int iSegid = pSeg->iSegid; i64 iFirst = FTS5_SEGMENT_ROWID(iSegid, 0); i64 iLast = FTS5_SEGMENT_ROWID(iSegid+1, 0)-1; fts5DataDelete(p, iFirst, iLast); + + if( pSeg->nPgTombstone ){ + i64 iTomb1 = FTS5_TOMBSTONE_ROWID(iSegid, 0); + i64 iTomb2 = FTS5_TOMBSTONE_ROWID(iSegid, pSeg->nPgTombstone-1); + fts5DataDelete(p, iTomb1, iTomb2); + } if( p->pIdxDeleter==0 ){ Fts5Config *pConfig = p->pConfig; fts5IndexPrepareStmt(p, &p->pIdxDeleter, sqlite3_mprintf(

@@ -229823,10 +237423,18 @@ int nLevel = 0;

int nSegment = 0; sqlite3_int64 nByte; /* Bytes of space to allocate at pRet */ Fts5Structure *pRet = 0; /* Structure object to return */ + int bStructureV2 = 0; /* True for FTS5_STRUCTURE_V2 */ + u64 nOriginCntr = 0; /* Largest origin value seen so far */ /* Grab the cookie value */ if( piCookie ) *piCookie = sqlite3Fts5Get32(pData); i = 4; + + /* Check if this is a V2 structure record. Set bStructureV2 if it is. */ + if( 0==memcmp(&pData[i], FTS5_STRUCTURE_V2, 4) ){ + i += 4; + bStructureV2 = 1; + } /* Read the total number of levels and segments from the start of the ** structure record. */

@@ -229874,9 +237482,18 @@ if( i>=nData ){

rc = FTS5_CORRUPT; break; } + assert( pSeg!=0 ); i += fts5GetVarint32(&pData[i], pSeg->iSegid); i += fts5GetVarint32(&pData[i], pSeg->pgnoFirst); i += fts5GetVarint32(&pData[i], pSeg->pgnoLast); + if( bStructureV2 ){ + i += fts5GetVarint(&pData[i], &pSeg->iOrigin1); + i += fts5GetVarint(&pData[i], &pSeg->iOrigin2); + i += fts5GetVarint32(&pData[i], pSeg->nPgTombstone); + i += fts5GetVarint(&pData[i], &pSeg->nEntryTombstone); + i += fts5GetVarint(&pData[i], &pSeg->nEntry); + nOriginCntr = MAX(nOriginCntr, pSeg->iOrigin2); + } if( pSeg->pgnoLast<pSeg->pgnoFirst ){ rc = FTS5_CORRUPT; break;

@@ -229887,6 +237504,9 @@ if( iLvl==nLevel-1 && pLvl->nMerge ) rc = FTS5_CORRUPT;

} } if( nSegment!=0 && rc==SQLITE_OK ) rc = FTS5_CORRUPT; + if( bStructureV2 ){ + pRet->nOriginCntr = nOriginCntr+1; + } if( rc!=SQLITE_OK ){ fts5StructureRelease(pRet);

@@ -229904,6 +237524,7 @@ ** (*ppStruct).

*/ static void fts5StructureAddLevel(int *pRc, Fts5Structure **ppStruct){ fts5StructureMakeWritable(pRc, ppStruct); + assert( (ppStruct!=0 && (*ppStruct)!=0) || (*pRc)!=SQLITE_OK ); if( *pRc==SQLITE_OK ){ Fts5Structure *pStruct = *ppStruct; int nLevel = pStruct->nLevel;

@@ -230098,6 +237719,7 @@ if( p->rc==SQLITE_OK ){

Fts5Buffer buf; /* Buffer to serialize record into */ int iLvl; /* Used to iterate through levels */ int iCookie; /* Cookie value to store */ + int nHdr = (pStruct->nOriginCntr>0 ? (4+4+9+9+9) : (4+9+9)); assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) ); memset(&buf, 0, sizeof(Fts5Buffer));

@@ -230106,9 +237728,12 @@ /* Append the current configuration cookie */

iCookie = p->pConfig->iCookie; if( iCookie<0 ) iCookie = 0; - if( 0==sqlite3Fts5BufferSize(&p->rc, &buf, 4+9+9+9) ){ + if( 0==sqlite3Fts5BufferSize(&p->rc, &buf, nHdr) ){ sqlite3Fts5Put32(buf.p, iCookie); buf.n = 4; + if( pStruct->nOriginCntr>0 ){ + fts5BufferSafeAppendBlob(&buf, FTS5_STRUCTURE_V2, 4); + } fts5BufferSafeAppendVarint(&buf, pStruct->nLevel); fts5BufferSafeAppendVarint(&buf, pStruct->nSegment); fts5BufferSafeAppendVarint(&buf, (i64)pStruct->nWriteCounter);

@@ -230122,9 +237747,17 @@ fts5BufferAppendVarint(&p->rc, &buf, pLvl->nSeg);

assert( pLvl->nMerge<=pLvl->nSeg ); for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){ - fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].iSegid); - fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoFirst); - fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoLast); + Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg]; + fts5BufferAppendVarint(&p->rc, &buf, pSeg->iSegid); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->pgnoFirst); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->pgnoLast); + if( pStruct->nOriginCntr>0 ){ + fts5BufferAppendVarint(&p->rc, &buf, pSeg->iOrigin1); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->iOrigin2); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->nPgTombstone); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->nEntryTombstone); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->nEntry); + } } }

@@ -230362,42 +237995,25 @@ if( iOff<=pLvl->iFirstOff ){

pLvl->bEof = 1; }else{ u8 *a = pLvl->pData->p; - i64 iVal; - int iLimit; - int ii; - int nZero = 0; - /* Currently iOff points to the first byte of a varint. This block - ** decrements iOff until it points to the first byte of the previous - ** varint. Taking care not to read any memory locations that occur - ** before the buffer in memory. */ - iLimit = (iOff>9 ? iOff-9 : 0); - for(iOff--; iOff>iLimit; iOff--){ - if( (a[iOff-1] & 0x80)==0 ) break; - } + pLvl->iOff = 0; + fts5DlidxLvlNext(pLvl); + while( 1 ){ + int nZero = 0; + int ii = pLvl->iOff; + u64 delta = 0; - fts5GetVarint(&a[iOff], (u64*)&iVal); - pLvl->iRowid -= iVal; - pLvl->iLeafPgno--; - - /* Skip backwards past any 0x00 varints. */ - for(ii=iOff-1; ii>=pLvl->iFirstOff && a[ii]==0x00; ii--){ - nZero++; - } - if( ii>=pLvl->iFirstOff && (a[ii] & 0x80) ){ - /* The byte immediately before the last 0x00 byte has the 0x80 bit - ** set. So the last 0x00 is only a varint 0 if there are 8 more 0x80 - ** bytes before a[ii]. */ - int bZero = 0; /* True if last 0x00 counts */ - if( (ii-8)>=pLvl->iFirstOff ){ - int j; - for(j=1; j<=8 && (a[ii-j] & 0x80); j++); - bZero = (j>8); + while( a[ii]==0 ){ + nZero++; + ii++; } - if( bZero==0 ) nZero--; + ii += sqlite3Fts5GetVarint(&a[ii], &delta); + + if( ii>=iOff ) break; + pLvl->iLeafPgno += nZero+1; + pLvl->iRowid += delta; + pLvl->iOff = ii; } - pLvl->iLeafPgno -= nZero; - pLvl->iOff = iOff - nZero; } return pLvl->bEof;

@@ -230593,7 +238209,7 @@ u8 *a = pIter->pLeaf->p; /* Buffer to read data from */

i64 iOff = pIter->iLeafOffset; ASSERT_SZLEAF_OK(pIter->pLeaf); - if( iOff>=pIter->pLeaf->szLeaf ){ + while( iOff>=pIter->pLeaf->szLeaf ){ fts5SegIterNextPage(p, pIter); if( pIter->pLeaf==0 ){ if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT;

@@ -230665,6 +238281,23 @@ }

} /* +** Allocate a tombstone hash page array (pIter->apTombstone) for the +** iterator passed as the second argument. If an OOM error occurs, leave +** an error in the Fts5Index object. +*/ +static void fts5SegIterAllocTombstone(Fts5Index *p, Fts5SegIter *pIter){ + const int nTomb = pIter->pSeg->nPgTombstone; + if( nTomb>0 ){ + Fts5Data **apTomb = 0; + apTomb = (Fts5Data**)sqlite3Fts5MallocZero(&p->rc, sizeof(Fts5Data)*nTomb); + if( apTomb ){ + pIter->apTombstone = apTomb; + pIter->nTombstone = nTomb; + } + } +} + +/* ** Initialize the iterator object pIter to iterate through the entries in ** segment pSeg. The iterator is left pointing to the first entry when ** this function returns.

@@ -230692,10 +238325,12 @@ memset(pIter, 0, sizeof(*pIter));

fts5SegIterSetNext(p, pIter); pIter->pSeg = pSeg; pIter->iLeafPgno = pSeg->pgnoFirst-1; - fts5SegIterNextPage(p, pIter); + do { + fts5SegIterNextPage(p, pIter); + }while( p->rc==SQLITE_OK && pIter->pLeaf && pIter->pLeaf->nn==4 ); } - if( p->rc==SQLITE_OK ){ + if( p->rc==SQLITE_OK && pIter->pLeaf ){ pIter->iLeafOffset = 4; assert( pIter->pLeaf!=0 ); assert_nc( pIter->pLeaf->nn>4 );

@@ -230703,6 +238338,7 @@ assert_nc( fts5LeafFirstTermOff(pIter->pLeaf)==4 );

pIter->iPgidxOff = pIter->pLeaf->szLeaf+1; fts5SegIterLoadTerm(p, pIter, 0); fts5SegIterLoadNPos(p, pIter); + fts5SegIterAllocTombstone(p, pIter); } }

@@ -230889,7 +238525,7 @@ ASSERT_SZLEAF_OK(pIter->pLeaf);

iOff = pIter->iLeafOffset; /* Next entry is on the next page */ - if( pIter->pSeg && iOff>=pIter->pLeaf->szLeaf ){ + while( pIter->pSeg && iOff>=pIter->pLeaf->szLeaf ){ fts5SegIterNextPage(p, pIter); if( p->rc || pIter->pLeaf==0 ) return; pIter->iRowid = 0;

@@ -231082,7 +238718,7 @@ Fts5DlidxIter *pDlidx = pIter->pDlidx;

Fts5Data *pLast = 0; int pgnoLast = 0; - if( pDlidx ){ + if( pDlidx && p->pConfig->iVersion==FTS5_CURRENT_VERSION ){ int iSegid = pIter->pSeg->iSegid; pgnoLast = fts5DlidxIterPgno(pDlidx); pLast = fts5LeafRead(p, FTS5_SEGMENT_ROWID(iSegid, pgnoLast));

@@ -231404,6 +239040,7 @@ }

} fts5SegIterSetNext(p, pIter); + fts5SegIterAllocTombstone(p, pIter); /* Either: **

@@ -231454,6 +239091,14 @@ if( pLeaf ){

pLeaf->p = (u8*)pList; } } + + /* The call to sqlite3Fts5HashScanInit() causes the hash table to + ** fill the size field of all existing position lists. This means they + ** can no longer be appended to. Since the only scenario in which they + ** can be appended to is if the previous operation on this table was + ** a DELETE, by clearing the Fts5Index.bDelete flag we can avoid this + ** possibility altogether. */ + p->bDelete = 0; }else{ p->rc = sqlite3Fts5HashQuery(p->pHash, sizeof(Fts5Data), (const char*)pTerm, nTerm, (void**)&pLeaf, &nList

@@ -231485,12 +239130,27 @@ fts5SegIterSetNext(p, pIter);

} /* +** Array ap[] contains n elements. Release each of these elements using +** fts5DataRelease(). Then free the array itself using sqlite3_free(). +*/ +static void fts5IndexFreeArray(Fts5Data **ap, int n){ + if( ap ){ + int ii; + for(ii=0; ii<n; ii++){ + fts5DataRelease(ap[ii]); + } + sqlite3_free(ap); + } +} + +/* ** Zero the iterator passed as the only argument. */ static void fts5SegIterClear(Fts5SegIter *pIter){ fts5BufferFree(&pIter->term); fts5DataRelease(pIter->pLeaf); fts5DataRelease(pIter->pNextLeaf); + fts5IndexFreeArray(pIter->apTombstone, pIter->nTombstone); fts5DlidxIterFree(pIter->pDlidx); sqlite3_free(pIter->aRowidOffset); memset(pIter, 0, sizeof(Fts5SegIter));

@@ -231624,7 +239284,6 @@ assert_nc( i2>i1 );

assert_nc( i2!=0 ); pRes->bTermEq = 1; if( p1->iRowid==p2->iRowid ){ - p1->bDel = p2->bDel; return i2; } res = ((p1->iRowid > p2->iRowid)==pIter->bRev) ? -1 : +1;

@@ -231643,7 +239302,8 @@ }

/* ** Move the seg-iter so that it points to the first rowid on page iLeafPgno. -** It is an error if leaf iLeafPgno does not exist or contains no rowids. +** It is an error if leaf iLeafPgno does not exist. Unless the db is +** a 'secure-delete' db, if it contains no rowids then this is also an error. */ static void fts5SegIterGotoPage( Fts5Index *p, /* FTS5 backend object */

@@ -231658,21 +239318,23 @@ }else{

fts5DataRelease(pIter->pNextLeaf); pIter->pNextLeaf = 0; pIter->iLeafPgno = iLeafPgno-1; - fts5SegIterNextPage(p, pIter); - assert( p->rc!=SQLITE_OK || pIter->iLeafPgno==iLeafPgno ); - if( p->rc==SQLITE_OK && ALWAYS(pIter->pLeaf!=0) ){ + while( p->rc==SQLITE_OK ){ int iOff; - u8 *a = pIter->pLeaf->p; - int n = pIter->pLeaf->szLeaf; - + fts5SegIterNextPage(p, pIter); + if( pIter->pLeaf==0 ) break; iOff = fts5LeafFirstRowidOff(pIter->pLeaf); - if( iOff<4 || iOff>=n ){ - p->rc = FTS5_CORRUPT; - }else{ - iOff += fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid); - pIter->iLeafOffset = iOff; - fts5SegIterLoadNPos(p, pIter); + if( iOff>0 ){ + u8 *a = pIter->pLeaf->p; + int n = pIter->pLeaf->szLeaf; + if( iOff<4 || iOff>=n ){ + p->rc = FTS5_CORRUPT; + }else{ + iOff += fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid); + pIter->iLeafOffset = iOff; + fts5SegIterLoadNPos(p, pIter); + } + break; } } }

@@ -231826,6 +239488,84 @@ pIter->iSwitchRowid = pSeg->iRowid;

} /* +** The argument to this macro must be an Fts5Data structure containing a +** tombstone hash page. This macro returns the key-size of the hash-page. +*/ +#define TOMBSTONE_KEYSIZE(pPg) (pPg->p[0]==4 ? 4 : 8) + +#define TOMBSTONE_NSLOT(pPg) \ + ((pPg->nn > 16) ? ((pPg->nn-8) / TOMBSTONE_KEYSIZE(pPg)) : 1) + +/* +** Query a single tombstone hash table for rowid iRowid. Return true if +** it is found or false otherwise. The tombstone hash table is one of +** nHashTable tables. +*/ +static int fts5IndexTombstoneQuery( + Fts5Data *pHash, /* Hash table page to query */ + int nHashTable, /* Number of pages attached to segment */ + u64 iRowid /* Rowid to query hash for */ +){ + const int szKey = TOMBSTONE_KEYSIZE(pHash); + const int nSlot = TOMBSTONE_NSLOT(pHash); + int iSlot = (iRowid / nHashTable) % nSlot; + int nCollide = nSlot; + + if( iRowid==0 ){ + return pHash->p[1]; + }else if( szKey==4 ){ + u32 *aSlot = (u32*)&pHash->p[8]; + while( aSlot[iSlot] ){ + if( fts5GetU32((u8*)&aSlot[iSlot])==iRowid ) return 1; + if( nCollide--==0 ) break; + iSlot = (iSlot+1)%nSlot; + } + }else{ + u64 *aSlot = (u64*)&pHash->p[8]; + while( aSlot[iSlot] ){ + if( fts5GetU64((u8*)&aSlot[iSlot])==iRowid ) return 1; + if( nCollide--==0 ) break; + iSlot = (iSlot+1)%nSlot; + } + } + + return 0; +} + +/* +** Return true if the iterator passed as the only argument points +** to an segment entry for which there is a tombstone. Return false +** if there is no tombstone or if the iterator is already at EOF. +*/ +static int fts5MultiIterIsDeleted(Fts5Iter *pIter){ + int iFirst = pIter->aFirst[1].iFirst; + Fts5SegIter *pSeg = &pIter->aSeg[iFirst]; + + if( pSeg->pLeaf && pSeg->nTombstone ){ + /* Figure out which page the rowid might be present on. */ + int iPg = ((u64)pSeg->iRowid) % pSeg->nTombstone; + assert( iPg>=0 ); + + /* If tombstone hash page iPg has not yet been loaded from the + ** database, load it now. */ + if( pSeg->apTombstone[iPg]==0 ){ + pSeg->apTombstone[iPg] = fts5DataRead(pIter->pIndex, + FTS5_TOMBSTONE_ROWID(pSeg->pSeg->iSegid, iPg) + ); + if( pSeg->apTombstone[iPg]==0 ) return 0; + } + + return fts5IndexTombstoneQuery( + pSeg->apTombstone[iPg], + pSeg->nTombstone, + pSeg->iRowid + ); + } + + return 0; +} + +/* ** Move the iterator to the next entry. ** ** If an error occurs, an error code is left in Fts5Index.rc. It is not

@@ -231862,7 +239602,9 @@ }

fts5AssertMultiIterSetup(p, pIter); assert( pSeg==&pIter->aSeg[pIter->aFirst[1].iFirst] && pSeg->pLeaf ); - if( pIter->bSkipEmpty==0 || pSeg->nPos ){ + if( (pIter->bSkipEmpty==0 || pSeg->nPos) + && 0==fts5MultiIterIsDeleted(pIter) + ){ pIter->xSetOutputs(pIter, pSeg); return; }

@@ -231894,7 +239636,9 @@ *pbNewTerm = 1;

} fts5AssertMultiIterSetup(p, pIter); - }while( fts5MultiIterIsEmpty(p, pIter) ); + }while( (fts5MultiIterIsEmpty(p, pIter) || fts5MultiIterIsDeleted(pIter)) + && (p->rc==SQLITE_OK) + ); } }

@@ -231907,7 +239651,7 @@ Fts5Index *p, /* FTS5 backend to iterate within */

int nSeg ){ Fts5Iter *pNew; - int nSlot; /* Power of two >= nSeg */ + i64 nSlot; /* Power of two >= nSeg */ for(nSlot=2; nSlot<nSeg; nSlot=nSlot*2); pNew = fts5IdxMalloc(p,

@@ -232387,7 +240131,7 @@ if( p->rc==SQLITE_OK ){

if( iLevel<0 ){ assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) ); nSeg = pStruct->nSegment; - nSeg += (p->pHash ? 1 : 0); + nSeg += (p->pHash && 0==(flags & FTS5INDEX_QUERY_SKIPHASH)); }else{ nSeg = MIN(pStruct->aLevel[iLevel].nSeg, nSegment); }

@@ -232408,7 +240152,7 @@ /* Initialize each of the component segment iterators. */

if( p->rc==SQLITE_OK ){ if( iLevel<0 ){ Fts5StructureLevel *pEnd = &pStruct->aLevel[pStruct->nLevel]; - if( p->pHash ){ + if( p->pHash && 0==(flags & FTS5INDEX_QUERY_SKIPHASH) ){ /* Add a segment iterator for the current contents of the hash table. */ Fts5SegIter *pIter = &pNew->aSeg[iIter++]; fts5SegIterHashInit(p, pTerm, nTerm, flags, pIter);

@@ -232449,7 +240193,9 @@ }

fts5MultiIterSetEof(pNew); fts5AssertMultiIterSetup(p, pNew); - if( pNew->bSkipEmpty && fts5MultiIterIsEmpty(p, pNew) ){ + if( (pNew->bSkipEmpty && fts5MultiIterIsEmpty(p, pNew)) + || fts5MultiIterIsDeleted(pNew) + ){ fts5MultiIterNext(p, pNew, 0, 0); }else if( pNew->base.bEof==0 ){ Fts5SegIter *pSeg = &pNew->aSeg[pNew->aFirst[1].iFirst];

@@ -232627,7 +240373,9 @@ assert( p->pHash || p->nPendingData==0 );

if( p->pHash ){ sqlite3Fts5HashClear(p->pHash); p->nPendingData = 0; + p->nPendingRow = 0; } + p->nContentlessDelete = 0; } /*

@@ -233028,7 +240776,7 @@ Fts5PageWriter *pPage = &pWriter->writer;

const u8 *a = aData; int n = nData; - assert( p->pConfig->pgsz>0 ); + assert( p->pConfig->pgsz>0 || p->rc!=SQLITE_OK ); while( p->rc==SQLITE_OK && (pPage->buf.n + pPage->pgidx.n + n)>=p->pConfig->pgsz ){

@@ -233163,7 +240911,7 @@ fts5BufferGrow(&p->rc, &buf, pData->nn);

fts5BufferAppendBlob(&p->rc, &buf, sizeof(aHdr), aHdr); fts5BufferAppendVarint(&p->rc, &buf, pSeg->term.n); fts5BufferAppendBlob(&p->rc, &buf, pSeg->term.n, pSeg->term.p); - fts5BufferAppendBlob(&p->rc, &buf, pData->szLeaf-iOff,&pData->p[iOff]); + fts5BufferAppendBlob(&p->rc, &buf,pData->szLeaf-iOff,&pData->p[iOff]); if( p->rc==SQLITE_OK ){ /* Set the szLeaf field */ fts5PutU16(&buf.p[2], (u16)buf.n);

@@ -233264,6 +241012,12 @@ pStruct->nSegment++;

/* Read input from all segments in the input level */ nInput = pLvl->nSeg; + + /* Set the range of origins that will go into the output segment. */ + if( pStruct->nOriginCntr>0 ){ + pSeg->iOrigin1 = pLvl->aSeg[0].iOrigin1; + pSeg->iOrigin2 = pLvl->aSeg[pLvl->nSeg-1].iOrigin2; + } } bOldest = (pLvlOut->nSeg==1 && pStruct->nLevel==iLvl+2);

@@ -233323,8 +241077,11 @@ if( fts5MultiIterEof(p, pIter) ){

int i; /* Remove the redundant segments from the %_data table */ + assert( pSeg->nEntry==0 ); for(i=0; i<nInput; i++){ - fts5DataRemoveSegment(p, pLvl->aSeg[i].iSegid); + Fts5StructureSegment *pOld = &pLvl->aSeg[i]; + pSeg->nEntry += (pOld->nEntry - pOld->nEntryTombstone); + fts5DataRemoveSegment(p, pOld); } /* Remove the redundant segments from the input level */

@@ -233351,6 +241108,43 @@ if( pnRem ) *pnRem -= writer.nLeafWritten;

} /* +** If this is not a contentless_delete=1 table, or if the 'deletemerge' +** configuration option is set to 0, then this function always returns -1. +** Otherwise, it searches the structure object passed as the second argument +** for a level suitable for merging due to having a large number of +** tombstones in the tombstone hash. If one is found, its index is returned. +** Otherwise, if there is no suitable level, -1. +*/ +static int fts5IndexFindDeleteMerge(Fts5Index *p, Fts5Structure *pStruct){ + Fts5Config *pConfig = p->pConfig; + int iRet = -1; + if( pConfig->bContentlessDelete && pConfig->nDeleteMerge>0 ){ + int ii; + int nBest = 0; + + for(ii=0; ii<pStruct->nLevel; ii++){ + Fts5StructureLevel *pLvl = &pStruct->aLevel[ii]; + i64 nEntry = 0; + i64 nTomb = 0; + int iSeg; + for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){ + nEntry += pLvl->aSeg[iSeg].nEntry; + nTomb += pLvl->aSeg[iSeg].nEntryTombstone; + } + assert_nc( nEntry>0 || pLvl->nSeg==0 ); + if( nEntry>0 ){ + int nPercent = (nTomb * 100) / nEntry; + if( nPercent>=pConfig->nDeleteMerge && nPercent>nBest ){ + iRet = ii; + nBest = nPercent; + } + } + } + } + return iRet; +} + +/* ** Do up to nPg pages of automerge work on the index. ** ** Return true if any changes were actually made, or false otherwise.
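
To make the selection rule above concrete: with the default nDeleteMerge of 10 (FTS5_DEFAULT_DELETE_AUTOMERGE), a level whose segments together hold nEntry=5000 entries of which nTomb=750 are tombstones scores nPercent = (750*100)/5000 = 15 and is therefore a candidate, while a level with 400 tombstones out of 5000 scores 8 and is skipped. Among qualifying levels, the one with the highest percentage is returned.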

@@ -233369,14 +241163,15 @@ int iLvl; /* To iterate through levels */

int iBestLvl = 0; /* Level offering the most input segments */ int nBest = 0; /* Number of input segments on best level */ - /* Set iBestLvl to the level to read input segments from. */ + /* Set iBestLvl to the level to read input segments from. Or to -1 if + ** there is no level suitable to merge segments from. */ assert( pStruct->nLevel>0 ); for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){ Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl]; if( pLvl->nMerge ){ if( pLvl->nMerge>nBest ){ iBestLvl = iLvl; - nBest = pLvl->nMerge; + nBest = nMin; } break; }

@@ -233385,22 +241180,18 @@ nBest = pLvl->nSeg;

iBestLvl = iLvl; } } - - /* If nBest is still 0, then the index must be empty. */ -#ifdef SQLITE_DEBUG - for(iLvl=0; nBest==0 && iLvl<pStruct->nLevel; iLvl++){ - assert( pStruct->aLevel[iLvl].nSeg==0 ); + if( nBest<nMin ){ + iBestLvl = fts5IndexFindDeleteMerge(p, pStruct); } -#endif - if( nBest<nMin && pStruct->aLevel[iBestLvl].nMerge==0 ){ - break; - } + if( iBestLvl<0 ) break; bRet = 1; fts5IndexMergeLevel(p, &pStruct, iBestLvl, &nRem); if( p->rc==SQLITE_OK && pStruct->aLevel[iBestLvl].nMerge==0 ){ fts5StructurePromote(p, iBestLvl+1, pStruct); } + + if( nMin==1 ) nMin = 2; } *ppStruct = pStruct; return bRet;

@@ -233441,16 +241232,16 @@ Fts5Structure **ppStruct /* IN/OUT: Current structure of index */

){ const int nCrisis = p->pConfig->nCrisisMerge; Fts5Structure *pStruct = *ppStruct; - int iLvl = 0; - - assert( p->rc!=SQLITE_OK || pStruct->nLevel>0 ); - while( p->rc==SQLITE_OK && pStruct->aLevel[iLvl].nSeg>=nCrisis ){ - fts5IndexMergeLevel(p, &pStruct, iLvl, 0); - assert( p->rc!=SQLITE_OK || pStruct->nLevel>(iLvl+1) ); - fts5StructurePromote(p, iLvl+1, pStruct); - iLvl++; + if( pStruct && pStruct->nLevel>0 ){ + int iLvl = 0; + while( p->rc==SQLITE_OK && pStruct->aLevel[iLvl].nSeg>=nCrisis ){ + fts5IndexMergeLevel(p, &pStruct, iLvl, 0); + assert( p->rc!=SQLITE_OK || pStruct->nLevel>(iLvl+1) ); + fts5StructurePromote(p, iLvl+1, pStruct); + iLvl++; + } + *ppStruct = pStruct; } - *ppStruct = pStruct; } static int fts5IndexReturn(Fts5Index *p){

@@ -233485,6 +241276,469 @@ return ret;

} /* +** Execute the SQL statement: +** +** DELETE FROM %_idx WHERE (segid, (pgno/2)) = ($iSegid, $iPgno); +** +** This is used when a secure-delete operation removes the last term +** from a segment leaf page. In that case the %_idx entry is removed +** too. This is done to ensure that if all instances of a token are +** removed from an fts5 database in secure-delete mode, no trace of +** the token itself remains in the database. +*/ +static void fts5SecureDeleteIdxEntry( + Fts5Index *p, /* FTS5 backend object */ + int iSegid, /* Id of segment to delete entry for */ + int iPgno /* Page number within segment */ +){ + if( iPgno!=1 ){ + assert( p->pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE ); + if( p->pDeleteFromIdx==0 ){ + fts5IndexPrepareStmt(p, &p->pDeleteFromIdx, sqlite3_mprintf( + "DELETE FROM '%q'.'%q_idx' WHERE (segid, (pgno/2)) = (?1, ?2)", + p->pConfig->zDb, p->pConfig->zName + )); + } + if( p->rc==SQLITE_OK ){ + sqlite3_bind_int(p->pDeleteFromIdx, 1, iSegid); + sqlite3_bind_int(p->pDeleteFromIdx, 2, iPgno); + sqlite3_step(p->pDeleteFromIdx); + p->rc = sqlite3_reset(p->pDeleteFromIdx); + } + } +} + +/* +** This is called when a secure-delete operation removes a position-list +** that overflows onto segment page iPgno of segment pSeg. This function +** rewrites node iPgno, and possibly one or more of its right-hand peers, +** to remove this portion of the position list. +** +** Output variable (*pbLastInDoclist) is set to true if the position-list +** removed is followed by a new term or the end-of-segment, or false if +** it is followed by another rowid/position list. +*/ +static void fts5SecureDeleteOverflow( + Fts5Index *p, + Fts5StructureSegment *pSeg, + int iPgno, + int *pbLastInDoclist +){ + const int bDetailNone = (p->pConfig->eDetail==FTS5_DETAIL_NONE); + int pgno; + Fts5Data *pLeaf = 0; + assert( iPgno!=1 ); + + *pbLastInDoclist = 1; + for(pgno=iPgno; p->rc==SQLITE_OK && pgno<=pSeg->pgnoLast; pgno++){ + i64 iRowid = FTS5_SEGMENT_ROWID(pSeg->iSegid, pgno); + int iNext = 0; + u8 *aPg = 0; + + pLeaf = fts5DataRead(p, iRowid); + if( pLeaf==0 ) break; + aPg = pLeaf->p; + + iNext = fts5GetU16(&aPg[0]); + if( iNext!=0 ){ + *pbLastInDoclist = 0; + } + if( iNext==0 && pLeaf->szLeaf!=pLeaf->nn ){ + fts5GetVarint32(&aPg[pLeaf->szLeaf], iNext); + } + + if( iNext==0 ){ + /* The page contains no terms or rowids. Replace it with an empty + ** page and move on to the right-hand peer. */ + const u8 aEmpty[] = {0x00, 0x00, 0x00, 0x04}; + assert_nc( bDetailNone==0 || pLeaf->nn==4 ); + if( bDetailNone==0 ) fts5DataWrite(p, iRowid, aEmpty, sizeof(aEmpty)); + fts5DataRelease(pLeaf); + pLeaf = 0; + }else if( bDetailNone ){ + break; + }else if( iNext>=pLeaf->szLeaf || pLeaf->nn<pLeaf->szLeaf || iNext<4 ){ + p->rc = FTS5_CORRUPT; + break; + }else{ + int nShift = iNext - 4; + int nPg; + + int nIdx = 0; + u8 *aIdx = 0; + + /* Unless the current page footer is 0 bytes in size (in which case + ** the new page footer will be as well), allocate and populate a + ** buffer containing the new page footer. Set stack variables aIdx + ** and nIdx accordingly. 
*/ + if( pLeaf->nn>pLeaf->szLeaf ){ + int iFirst = 0; + int i1 = pLeaf->szLeaf; + int i2 = 0; + + i1 += fts5GetVarint32(&aPg[i1], iFirst); + if( iFirst<iNext ){ + p->rc = FTS5_CORRUPT; + break; + } + aIdx = sqlite3Fts5MallocZero(&p->rc, (pLeaf->nn-pLeaf->szLeaf)+2); + if( aIdx==0 ) break; + i2 = sqlite3Fts5PutVarint(aIdx, iFirst-nShift); + if( i1<pLeaf->nn ){ + memcpy(&aIdx[i2], &aPg[i1], pLeaf->nn-i1); + i2 += (pLeaf->nn-i1); + } + nIdx = i2; + } + + /* Modify the contents of buffer aPg[]. Set nPg to the new size + ** in bytes. The new page is always smaller than the old. */ + nPg = pLeaf->szLeaf - nShift; + memmove(&aPg[4], &aPg[4+nShift], nPg-4); + fts5PutU16(&aPg[2], nPg); + if( fts5GetU16(&aPg[0]) ) fts5PutU16(&aPg[0], 4); + if( nIdx>0 ){ + memcpy(&aPg[nPg], aIdx, nIdx); + nPg += nIdx; + } + sqlite3_free(aIdx); + + /* Write the new page to disk and exit the loop */ + assert( nPg>4 || fts5GetU16(aPg)==0 ); + fts5DataWrite(p, iRowid, aPg, nPg); + break; + } + } + fts5DataRelease(pLeaf); +} + +/* +** Completely remove the entry that pSeg currently points to from +** the database. +*/ +static void fts5DoSecureDelete( + Fts5Index *p, + Fts5SegIter *pSeg +){ + const int bDetailNone = (p->pConfig->eDetail==FTS5_DETAIL_NONE); + int iSegid = pSeg->pSeg->iSegid; + u8 *aPg = pSeg->pLeaf->p; + int nPg = pSeg->pLeaf->nn; + int iPgIdx = pSeg->pLeaf->szLeaf; + + u64 iDelta = 0; + int iNextOff = 0; + int iOff = 0; + int nIdx = 0; + u8 *aIdx = 0; + int bLastInDoclist = 0; + int iIdx = 0; + int iStart = 0; + int iDelKeyOff = 0; /* Offset of deleted key, if any */ + + nIdx = nPg-iPgIdx; + aIdx = sqlite3Fts5MallocZero(&p->rc, nIdx+16); + if( p->rc ) return; + memcpy(aIdx, &aPg[iPgIdx], nIdx); + + /* At this point segment iterator pSeg points to the entry + ** this function should remove from the b-tree segment. + ** + ** In detail=full or detail=column mode, pSeg->iLeafOffset is the + ** offset of the first byte in the position-list for the entry to + ** remove. Immediately before this comes two varints that will also + ** need to be removed: + ** + ** + the rowid or delta rowid value for the entry, and + ** + the size of the position list in bytes. + ** + ** Or, in detail=none mode, there is a single varint prior to + ** pSeg->iLeafOffset - the rowid or delta rowid value. + ** + ** This block sets the following variables: + ** + ** iStart: + ** The offset of the first byte of the rowid or delta-rowid + ** value for the doclist entry being removed. + ** + ** iDelta: + ** The value of the rowid or delta-rowid value for the doclist + ** entry being removed. + ** + ** iNextOff: + ** The offset of the next entry following the position list + ** for the one being removed. If the position list for this + ** entry overflows onto the next leaf page, this value will be + ** greater than pLeaf->szLeaf. 
+ */ + { + int iSOP; /* Start-Of-Position-list */ + if( pSeg->iLeafPgno==pSeg->iTermLeafPgno ){ + iStart = pSeg->iTermLeafOffset; + }else{ + iStart = fts5GetU16(&aPg[0]); + } + + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + assert_nc( iSOP<=pSeg->iLeafOffset ); + + if( bDetailNone ){ + while( iSOP<pSeg->iLeafOffset ){ + if( aPg[iSOP]==0x00 ) iSOP++; + if( aPg[iSOP]==0x00 ) iSOP++; + iStart = iSOP; + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + } + + iNextOff = iSOP; + if( iNextOff<pSeg->iEndofDoclist && aPg[iNextOff]==0x00 ) iNextOff++; + if( iNextOff<pSeg->iEndofDoclist && aPg[iNextOff]==0x00 ) iNextOff++; + + }else{ + int nPos = 0; + iSOP += fts5GetVarint32(&aPg[iSOP], nPos); + while( iSOP<pSeg->iLeafOffset ){ + iStart = iSOP + (nPos/2); + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + iSOP += fts5GetVarint32(&aPg[iSOP], nPos); + } + assert_nc( iSOP==pSeg->iLeafOffset ); + iNextOff = pSeg->iLeafOffset + pSeg->nPos; + } + } + + iOff = iStart; + + /* If the position-list for the entry being removed flows over past + ** the end of this page, delete the portion of the position-list on the + ** next page and beyond. + ** + ** Set variable bLastInDoclist to true if this entry happens + ** to be the last rowid in the doclist for its term. */ + if( iNextOff>=iPgIdx ){ + int pgno = pSeg->iLeafPgno+1; + fts5SecureDeleteOverflow(p, pSeg->pSeg, pgno, &bLastInDoclist); + iNextOff = iPgIdx; + } + + if( pSeg->bDel==0 ){ + if( iNextOff!=iPgIdx ){ + /* Loop through the page-footer. If iNextOff (offset of the + ** entry following the one we are removing) is equal to the + ** offset of a key on this page, then the entry is the last + ** in its doclist. */ + int iKeyOff = 0; + for(iIdx=0; iIdx<nIdx; /* no-op */){ + u32 iVal = 0; + iIdx += fts5GetVarint32(&aIdx[iIdx], iVal); + iKeyOff += iVal; + if( iKeyOff==iNextOff ){ + bLastInDoclist = 1; + } + } + } + + /* If this is (a) the first rowid on a page and (b) is not followed by + ** another position list on the same page, set the "first-rowid" field + ** of the header to 0. */ + if( fts5GetU16(&aPg[0])==iStart && (bLastInDoclist || iNextOff==iPgIdx) ){ + fts5PutU16(&aPg[0], 0); + } + } + + if( pSeg->bDel ){ + iOff += sqlite3Fts5PutVarint(&aPg[iOff], iDelta); + aPg[iOff++] = 0x01; + }else if( bLastInDoclist==0 ){ + if( iNextOff!=iPgIdx ){ + u64 iNextDelta = 0; + iNextOff += fts5GetVarint(&aPg[iNextOff], &iNextDelta); + iOff += sqlite3Fts5PutVarint(&aPg[iOff], iDelta + iNextDelta); + } + }else if( + pSeg->iLeafPgno==pSeg->iTermLeafPgno + && iStart==pSeg->iTermLeafOffset + ){ + /* The entry being removed was the only position list in its + ** doclist. Therefore the term needs to be removed as well. */ + int iKey = 0; + int iKeyOff = 0; + + /* Set iKeyOff to the offset of the term that will be removed - the + ** last offset in the footer that is not greater than iStart. */ + for(iIdx=0; iIdx<nIdx; iKey++){ + u32 iVal = 0; + iIdx += fts5GetVarint32(&aIdx[iIdx], iVal); + if( (iKeyOff+iVal)>(u32)iStart ) break; + iKeyOff += iVal; + } + assert_nc( iKey>=1 ); + + /* Set iDelKeyOff to the value of the footer entry to remove from + ** the page. */ + iDelKeyOff = iOff = iKeyOff; + + if( iNextOff!=iPgIdx ){ + /* This is the only position-list associated with the term, and there + ** is another term following it on this page. So the subsequent term + ** needs to be moved to replace the term associated with the entry + ** being removed. 
*/ + int nPrefix = 0; + int nSuffix = 0; + int nPrefix2 = 0; + int nSuffix2 = 0; + + iDelKeyOff = iNextOff; + iNextOff += fts5GetVarint32(&aPg[iNextOff], nPrefix2); + iNextOff += fts5GetVarint32(&aPg[iNextOff], nSuffix2); + + if( iKey!=1 ){ + iKeyOff += fts5GetVarint32(&aPg[iKeyOff], nPrefix); + } + iKeyOff += fts5GetVarint32(&aPg[iKeyOff], nSuffix); + + nPrefix = MIN(nPrefix, nPrefix2); + nSuffix = (nPrefix2 + nSuffix2) - nPrefix; + + if( (iKeyOff+nSuffix)>iPgIdx || (iNextOff+nSuffix2)>iPgIdx ){ + p->rc = FTS5_CORRUPT; + }else{ + if( iKey!=1 ){ + iOff += sqlite3Fts5PutVarint(&aPg[iOff], nPrefix); + } + iOff += sqlite3Fts5PutVarint(&aPg[iOff], nSuffix); + if( nPrefix2>pSeg->term.n ){ + p->rc = FTS5_CORRUPT; + }else if( nPrefix2>nPrefix ){ + memcpy(&aPg[iOff], &pSeg->term.p[nPrefix], nPrefix2-nPrefix); + iOff += (nPrefix2-nPrefix); + } + memmove(&aPg[iOff], &aPg[iNextOff], nSuffix2); + iOff += nSuffix2; + iNextOff += nSuffix2; + } + } + }else if( iStart==4 ){ + int iPgno; + + assert_nc( pSeg->iLeafPgno>pSeg->iTermLeafPgno ); + /* The entry being removed may be the only position list in + ** its doclist. */ + for(iPgno=pSeg->iLeafPgno-1; iPgno>pSeg->iTermLeafPgno; iPgno-- ){ + Fts5Data *pPg = fts5DataRead(p, FTS5_SEGMENT_ROWID(iSegid, iPgno)); + int bEmpty = (pPg && pPg->nn==4); + fts5DataRelease(pPg); + if( bEmpty==0 ) break; + } + + if( iPgno==pSeg->iTermLeafPgno ){ + i64 iId = FTS5_SEGMENT_ROWID(iSegid, pSeg->iTermLeafPgno); + Fts5Data *pTerm = fts5DataRead(p, iId); + if( pTerm && pTerm->szLeaf==pSeg->iTermLeafOffset ){ + u8 *aTermIdx = &pTerm->p[pTerm->szLeaf]; + int nTermIdx = pTerm->nn - pTerm->szLeaf; + int iTermIdx = 0; + int iTermOff = 0; + + while( 1 ){ + u32 iVal = 0; + int nByte = fts5GetVarint32(&aTermIdx[iTermIdx], iVal); + iTermOff += iVal; + if( (iTermIdx+nByte)>=nTermIdx ) break; + iTermIdx += nByte; + } + nTermIdx = iTermIdx; + + memmove(&pTerm->p[iTermOff], &pTerm->p[pTerm->szLeaf], nTermIdx); + fts5PutU16(&pTerm->p[2], iTermOff); + + fts5DataWrite(p, iId, pTerm->p, iTermOff+nTermIdx); + if( nTermIdx==0 ){ + fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iTermLeafPgno); + } + } + fts5DataRelease(pTerm); + } + } + + /* Assuming no error has occurred, this block does final edits to the + ** leaf page before writing it back to disk. Input variables are: + ** + ** nPg: Total initial size of leaf page. + ** iPgIdx: Initial offset of page footer. + ** + ** iOff: Offset to move data to + ** iNextOff: Offset to move data from + */ + if( p->rc==SQLITE_OK ){ + const int nMove = nPg - iNextOff; /* Number of bytes to move */ + int nShift = iNextOff - iOff; /* Distance to move them */ + + int iPrevKeyOut = 0; + int iKeyIn = 0; + + memmove(&aPg[iOff], &aPg[iNextOff], nMove); + iPgIdx -= nShift; + nPg = iPgIdx; + fts5PutU16(&aPg[2], iPgIdx); + + for(iIdx=0; iIdx<nIdx; /* no-op */){ + u32 iVal = 0; + iIdx += fts5GetVarint32(&aIdx[iIdx], iVal); + iKeyIn += iVal; + if( iKeyIn!=iDelKeyOff ){ + int iKeyOut = (iKeyIn - (iKeyIn>iOff ? nShift : 0)); + nPg += sqlite3Fts5PutVarint(&aPg[nPg], iKeyOut - iPrevKeyOut); + iPrevKeyOut = iKeyOut; + } + } + + if( iPgIdx==nPg && nIdx>0 && pSeg->iLeafPgno!=1 ){ + fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iLeafPgno); + } + + assert_nc( nPg>4 || fts5GetU16(aPg)==0 ); + fts5DataWrite(p, FTS5_SEGMENT_ROWID(iSegid,pSeg->iLeafPgno), aPg, nPg); + } + sqlite3_free(aIdx); +} + +/* +** This is called as part of flushing a delete to disk in 'secure-delete' +** mode. 
It edits the segments within the database described by argument +** pStruct to remove the entries for term zTerm, rowid iRowid. +*/ +static void fts5FlushSecureDelete( + Fts5Index *p, + Fts5Structure *pStruct, + const char *zTerm, + i64 iRowid +){ + const int f = FTS5INDEX_QUERY_SKIPHASH; + int nTerm = (int)strlen(zTerm); + Fts5Iter *pIter = 0; /* Used to find term instance */ + + fts5MultiIterNew(p, pStruct, f, 0, (const u8*)zTerm, nTerm, -1, 0, &pIter); + if( fts5MultiIterEof(p, pIter)==0 ){ + i64 iThis = fts5MultiIterRowid(pIter); + if( iThis<iRowid ){ + fts5MultiIterNextFrom(p, pIter, iRowid); + } + + if( p->rc==SQLITE_OK + && fts5MultiIterEof(p, pIter)==0 + && iRowid==fts5MultiIterRowid(pIter) + ){ + Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst]; + fts5DoSecureDelete(p, pSeg); + } + } + + fts5MultiIterFree(pIter); +} + + +/* ** Flush the contents of in-memory hash table iHash to a new level-0 ** segment on disk. Also update the corresponding structure record. **
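The secure-delete path above (fts5SecureDeleteOverflow, fts5DoSecureDelete, fts5FlushSecureDelete) only runs when the table's secure-delete option is on. A minimal sketch of switching it on from application code, using only the public SQLite C API; the table name t1 and its column are placeholders, and the 'secure-delete' option string is the documented FTS5 option name, assumed to be supported by the linked SQLite build:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Sketch: enable FTS5 secure-delete so that a DELETE physically erases
    ** the corresponding index entries from the leaf pages (via the
    ** secure-delete routines above) instead of leaving delete markers. */
    int main(void){
      sqlite3 *db = 0;
      char *zErr = 0;
      int rc = sqlite3_open(":memory:", &db);
      if( rc!=SQLITE_OK ) return 1;
      rc = sqlite3_exec(db,
        "CREATE VIRTUAL TABLE t1 USING fts5(body);"
        "INSERT INTO t1(rowid, body) VALUES(1, 'one two three');"
        "INSERT INTO t1(t1, rank) VALUES('secure-delete', 1);"  /* turn it on */
        "DELETE FROM t1 WHERE rowid=1;",                        /* erased, not marked */
        0, 0, &zErr);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "fts5 error: %s\n", zErr);
        sqlite3_free(zErr);
      }
      sqlite3_close(db);
      return rc==SQLITE_OK ? 0 : 1;
    }

Once the option is set, the bSecureDelete branches added to fts5FlushOneHash() below route each pending delete through fts5FlushSecureDelete() rather than appending a delete marker to the new segment.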

@@ -233500,143 +241754,198 @@

/* Obtain a reference to the index structure and allocate a new segment-id ** for the new level-0 segment. */ pStruct = fts5StructureRead(p); - iSegid = fts5AllocateSegid(p, pStruct); fts5StructureInvalidate(p); - if( iSegid ){ - const int pgsz = p->pConfig->pgsz; - int eDetail = p->pConfig->eDetail; - Fts5StructureSegment *pSeg; /* New segment within pStruct */ - Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */ - Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */ + if( sqlite3Fts5HashIsEmpty(pHash)==0 ){ + iSegid = fts5AllocateSegid(p, pStruct); + if( iSegid ){ + const int pgsz = p->pConfig->pgsz; + int eDetail = p->pConfig->eDetail; + int bSecureDelete = p->pConfig->bSecureDelete; + Fts5StructureSegment *pSeg; /* New segment within pStruct */ + Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */ + Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */ - Fts5SegWriter writer; - fts5WriteInit(p, &writer, iSegid); + Fts5SegWriter writer; + fts5WriteInit(p, &writer, iSegid); - pBuf = &writer.writer.buf; - pPgidx = &writer.writer.pgidx; + pBuf = &writer.writer.buf; + pPgidx = &writer.writer.pgidx; - /* fts5WriteInit() should have initialized the buffers to (most likely) - ** the maximum space required. */ - assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) ); - assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) ); + /* fts5WriteInit() should have initialized the buffers to (most likely) + ** the maximum space required. */ + assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) ); + assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) ); + + /* Begin scanning through hash table entries. This loop runs once for each + ** term/doclist currently stored within the hash table. */ + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0); + } + while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){ + const char *zTerm; /* Buffer containing term */ + int nTerm; /* Size of zTerm in bytes */ + const u8 *pDoclist; /* Pointer to doclist for this term */ + int nDoclist; /* Size of doclist in bytes */ + + /* Get the term and doclist for this entry. */ + sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist); + nTerm = (int)strlen(zTerm); + if( bSecureDelete==0 ){ + fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm); + if( p->rc!=SQLITE_OK ) break; + assert( writer.bFirstRowidInPage==0 ); + } - /* Begin scanning through hash table entries. This loop runs once for each - ** term/doclist currently stored within the hash table. */ - if( p->rc==SQLITE_OK ){ - p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0); - } - while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){ - const char *zTerm; /* Buffer containing term */ - const u8 *pDoclist; /* Pointer to doclist for this term */ - int nDoclist; /* Size of doclist in bytes */ + if( !bSecureDelete && pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){ + /* The entire doclist will fit on the current leaf. */ + fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist); + }else{ + int bTermWritten = !bSecureDelete; + i64 iRowid = 0; + i64 iPrev = 0; + int iOff = 0; - /* Write the term for this entry to disk. */ - sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist); - fts5WriteAppendTerm(p, &writer, (int)strlen(zTerm), (const u8*)zTerm); - if( p->rc!=SQLITE_OK ) break; + /* The entire doclist will not fit on this leaf. The following + ** loop iterates through the poslists that make up the current + ** doclist. 
*/ + while( p->rc==SQLITE_OK && iOff<nDoclist ){ + u64 iDelta = 0; + iOff += fts5GetVarint(&pDoclist[iOff], &iDelta); + iRowid += iDelta; - assert( writer.bFirstRowidInPage==0 ); - if( pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){ - /* The entire doclist will fit on the current leaf. */ - fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist); - }else{ - i64 iRowid = 0; - u64 iDelta = 0; - int iOff = 0; + /* If in secure delete mode, and if this entry in the poslist is + ** in fact a delete, then edit the existing segments directly + ** using fts5FlushSecureDelete(). */ + if( bSecureDelete ){ + if( eDetail==FTS5_DETAIL_NONE ){ + if( iOff<nDoclist && pDoclist[iOff]==0x00 ){ + fts5FlushSecureDelete(p, pStruct, zTerm, iRowid); + iOff++; + if( iOff<nDoclist && pDoclist[iOff]==0x00 ){ + iOff++; + nDoclist = 0; + }else{ + continue; + } + } + }else if( (pDoclist[iOff] & 0x01) ){ + fts5FlushSecureDelete(p, pStruct, zTerm, iRowid); + if( p->rc!=SQLITE_OK || pDoclist[iOff]==0x01 ){ + iOff++; + continue; + } + } + } - /* The entire doclist will not fit on this leaf. The following - ** loop iterates through the poslists that make up the current - ** doclist. */ - while( p->rc==SQLITE_OK && iOff<nDoclist ){ - iOff += fts5GetVarint(&pDoclist[iOff], &iDelta); - iRowid += iDelta; + if( p->rc==SQLITE_OK && bTermWritten==0 ){ + fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm); + bTermWritten = 1; + assert( p->rc!=SQLITE_OK || writer.bFirstRowidInPage==0 ); + } - if( writer.bFirstRowidInPage ){ - fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */ - pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid); - writer.bFirstRowidInPage = 0; - fts5WriteDlidxAppend(p, &writer, iRowid); + if( writer.bFirstRowidInPage ){ + fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */ + pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid); + writer.bFirstRowidInPage = 0; + fts5WriteDlidxAppend(p, &writer, iRowid); + }else{ + u64 iRowidDelta = (u64)iRowid - (u64)iPrev; + pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowidDelta); + } if( p->rc!=SQLITE_OK ) break; - }else{ - pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iDelta); - } - assert( pBuf->n<=pBuf->nSpace ); + assert( pBuf->n<=pBuf->nSpace ); + iPrev = iRowid; - if( eDetail==FTS5_DETAIL_NONE ){ - if( iOff<nDoclist && pDoclist[iOff]==0 ){ - pBuf->p[pBuf->n++] = 0; - iOff++; + if( eDetail==FTS5_DETAIL_NONE ){ if( iOff<nDoclist && pDoclist[iOff]==0 ){ pBuf->p[pBuf->n++] = 0; iOff++; + if( iOff<nDoclist && pDoclist[iOff]==0 ){ + pBuf->p[pBuf->n++] = 0; + iOff++; + } } - } - if( (pBuf->n + pPgidx->n)>=pgsz ){ - fts5WriteFlushLeaf(p, &writer); - } - }else{ - int bDummy; - int nPos; - int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDummy); - nCopy += nPos; - if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){ - /* The entire poslist will fit on the current leaf. So copy - ** it in one go. */ - fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy); + if( (pBuf->n + pPgidx->n)>=pgsz ){ + fts5WriteFlushLeaf(p, &writer); + } }else{ - /* The entire poslist will not fit on this leaf. So it needs - ** to be broken into sections. The only qualification being - ** that each varint must be stored contiguously. 
*/ - const u8 *pPoslist = &pDoclist[iOff]; - int iPos = 0; - while( p->rc==SQLITE_OK ){ - int nSpace = pgsz - pBuf->n - pPgidx->n; - int n = 0; - if( (nCopy - iPos)<=nSpace ){ - n = nCopy - iPos; - }else{ - n = fts5PoslistPrefix(&pPoslist[iPos], nSpace); - } - assert( n>0 ); - fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n); - iPos += n; - if( (pBuf->n + pPgidx->n)>=pgsz ){ - fts5WriteFlushLeaf(p, &writer); + int bDel = 0; + int nPos = 0; + int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDel); + if( bDel && bSecureDelete ){ + fts5BufferAppendVarint(&p->rc, pBuf, nPos*2); + iOff += nCopy; + nCopy = nPos; + }else{ + nCopy += nPos; + } + if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){ + /* The entire poslist will fit on the current leaf. So copy + ** it in one go. */ + fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy); + }else{ + /* The entire poslist will not fit on this leaf. So it needs + ** to be broken into sections. The only qualification being + ** that each varint must be stored contiguously. */ + const u8 *pPoslist = &pDoclist[iOff]; + int iPos = 0; + while( p->rc==SQLITE_OK ){ + int nSpace = pgsz - pBuf->n - pPgidx->n; + int n = 0; + if( (nCopy - iPos)<=nSpace ){ + n = nCopy - iPos; + }else{ + n = fts5PoslistPrefix(&pPoslist[iPos], nSpace); + } + assert( n>0 ); + fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n); + iPos += n; + if( (pBuf->n + pPgidx->n)>=pgsz ){ + fts5WriteFlushLeaf(p, &writer); + } + if( iPos>=nCopy ) break; } - if( iPos>=nCopy ) break; } + iOff += nCopy; } - iOff += nCopy; } } + + /* TODO2: Doclist terminator written here. */ + /* pBuf->p[pBuf->n++] = '\0'; */ + assert( pBuf->n<=pBuf->nSpace ); + if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash); } + fts5WriteFinish(p, &writer, &pgnoLast); - /* TODO2: Doclist terminator written here. */ - /* pBuf->p[pBuf->n++] = '\0'; */ - assert( pBuf->n<=pBuf->nSpace ); - if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash); - } - sqlite3Fts5HashClear(pHash); - fts5WriteFinish(p, &writer, &pgnoLast); - - /* Update the Fts5Structure. It is written back to the database by the - ** fts5StructureRelease() call below. */ - if( pStruct->nLevel==0 ){ - fts5StructureAddLevel(&p->rc, &pStruct); - } - fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0); - if( p->rc==SQLITE_OK ){ - pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ]; - pSeg->iSegid = iSegid; - pSeg->pgnoFirst = 1; - pSeg->pgnoLast = pgnoLast; - pStruct->nSegment++; + assert( p->rc!=SQLITE_OK || bSecureDelete || pgnoLast>0 ); + if( pgnoLast>0 ){ + /* Update the Fts5Structure. It is written back to the database by the + ** fts5StructureRelease() call below. */ + if( pStruct->nLevel==0 ){ + fts5StructureAddLevel(&p->rc, &pStruct); + } + fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0); + if( p->rc==SQLITE_OK ){ + pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ]; + pSeg->iSegid = iSegid; + pSeg->pgnoFirst = 1; + pSeg->pgnoLast = pgnoLast; + if( pStruct->nOriginCntr>0 ){ + pSeg->iOrigin1 = pStruct->nOriginCntr; + pSeg->iOrigin2 = pStruct->nOriginCntr; + pSeg->nEntry = p->nPendingRow; + pStruct->nOriginCntr++; + } + pStruct->nSegment++; + } + fts5StructurePromote(p, 0, pStruct); + } } - fts5StructurePromote(p, 0, pStruct); } - fts5IndexAutomerge(p, &pStruct, pgnoLast); + fts5IndexAutomerge(p, &pStruct, pgnoLast + p->nContentlessDelete); fts5IndexCrisismerge(p, &pStruct); fts5StructureWrite(p, pStruct); fts5StructureRelease(pStruct);

@@ -233647,10 +241956,15 @@ ** Flush any data stored in the in-memory hash tables to the database.

*/
static void fts5IndexFlush(Fts5Index *p){
  /* Unless it is empty, flush the hash table to disk */
-  if( p->nPendingData ){
+  if( p->nPendingData || p->nContentlessDelete ){
    assert( p->pHash );
-    p->nPendingData = 0;
    fts5FlushOneHash(p);
+    if( p->rc==SQLITE_OK ){
+      sqlite3Fts5HashClear(p->pHash);
+      p->nPendingData = 0;
+      p->nPendingRow = 0;
+      p->nContentlessDelete = 0;
+    }
  }
}
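fts5IndexFlush() can now also be driven on demand through the 'flush' command that this same diff adds to fts5SpecialInsert() further down (and which the new fts5SavepointMethod() issues internally). A short sketch, again with a placeholder table name t1:

    #include <sqlite3.h>

    /* Sketch: force the pending in-memory hash table of the FTS5 table "t1"
    ** to be flushed to a new level-0 segment without waiting for COMMIT.
    ** Returns an SQLite error code. */
    static int fts5_force_flush(sqlite3 *db){
      return sqlite3_exec(db, "INSERT INTO t1(t1) VALUES('flush');", 0, 0, 0);
    }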

@@ -233666,17 +241980,22 @@

  /* Figure out if this structure requires optimization. A structure does
  ** not require optimization if either:
  **
-  **  + it consists of fewer than two segments, or
-  **  + all segments are on the same level, or
-  **  + all segments except one are currently inputs to a merge operation.
+  **  1. it consists of fewer than two segments, or
+  **  2. all segments are on the same level, or
+  **  3. all segments except one are currently inputs to a merge operation.
  **
-  ** In the first case, return NULL. In the second, increment the ref-count
-  ** on *pStruct and return a copy of the pointer to it.
+  ** In the first case, if there are no tombstone hash pages, return NULL. In
+  ** the second, increment the ref-count on *pStruct and return a copy of the
+  ** pointer to it.
  */
-  if( nSeg<2 ) return 0;
+  if( nSeg==0 ) return 0;
  for(i=0; i<pStruct->nLevel; i++){
    int nThis = pStruct->aLevel[i].nSeg;
-    if( nThis==nSeg || (nThis==nSeg-1 && pStruct->aLevel[i].nMerge==nThis) ){
+    int nMerge = pStruct->aLevel[i].nMerge;
+    if( nThis>0 && (nThis==nSeg || (nThis==nSeg-1 && nMerge==nThis)) ){
+      if( nSeg==1 && nThis==1 && pStruct->aLevel[i].aSeg[0].nPgTombstone==0 ){
+        return 0;
+      }
      fts5StructureRef(pStruct);
      return pStruct;
    }

@@ -233689,10 +242008,11 @@

  if( pNew ){
    Fts5StructureLevel *pLvl;
    nByte = nSeg * sizeof(Fts5StructureSegment);
-    pNew->nLevel = pStruct->nLevel+1;
+    pNew->nLevel = MIN(pStruct->nLevel+1, FTS5_MAX_LEVEL);
    pNew->nRef = 1;
    pNew->nWriteCounter = pStruct->nWriteCounter;
-    pLvl = &pNew->aLevel[pStruct->nLevel];
+    pNew->nOriginCntr = pStruct->nOriginCntr;
+    pLvl = &pNew->aLevel[pNew->nLevel-1];
    pLvl->aSeg = (Fts5StructureSegment*)sqlite3Fts5MallocZero(&p->rc, nByte);
    if( pLvl->aSeg ){
      int iLvl, iSeg;

@@ -233722,7 +242042,9 @@ Fts5Structure *pNew = 0;

  assert( p->rc==SQLITE_OK );
  fts5IndexFlush(p);
+  assert( p->rc!=SQLITE_OK || p->nContentlessDelete==0 );
  pStruct = fts5StructureRead(p);
+  assert( p->rc!=SQLITE_OK || pStruct!=0 );
  fts5StructureInvalidate(p);

  if( pStruct ){

@@ -233751,7 +242073,10 @@ ** This is called to implement the special "VALUES('merge', $nMerge)"

** INSERT command.
*/
static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){
-  Fts5Structure *pStruct = fts5StructureRead(p);
+  Fts5Structure *pStruct = 0;
+
+  fts5IndexFlush(p);
+  pStruct = fts5StructureRead(p);
  if( pStruct ){
    int nMin = p->pConfig->nUsermerge;
    fts5StructureInvalidate(p);

@@ -233759,7 +242084,7 @@ if( nMerge<0 ){

      Fts5Structure *pNew = fts5IndexOptimizeStruct(p, pStruct);
      fts5StructureRelease(pStruct);
      pStruct = pNew;
-      nMin = 2;
+      nMin = 1;
      nMerge = nMerge*-1;
    }
    if( pStruct && pStruct->nLevel ){

@@ -233994,7 +242319,7 @@

  /* Initialize a doclist-iterator for each input buffer. Arrange them in
  ** a linked-list starting at pHead in ascending order of rowid. Avoid
  ** linking any iterators already at EOF into the linked list at all. */
-  assert( nBuf+1<=sizeof(aMerger)/sizeof(aMerger[0]) );
+  assert( nBuf+1<=(int)(sizeof(aMerger)/sizeof(aMerger[0])) );
  memset(aMerger, 0, sizeof(PrefixMerger)*(nBuf+1));
  pHead = &aMerger[nBuf];
  fts5DoclistIterInit(p1, &pHead->iter);

@@ -234273,6 +242598,9 @@ }

  p->iWriteRowid = iRowid;
  p->bDelete = bDelete;
+  if( bDelete==0 ){
+    p->nPendingRow++;
+  }
  return fts5IndexReturn(p);
}

@@ -234310,6 +242638,9 @@ Fts5Structure s;

  fts5StructureInvalidate(p);
  fts5IndexDiscardData(p);
  memset(&s, 0, sizeof(Fts5Structure));
+  if( p->pConfig->bContentlessDelete ){
+    s.nOriginCntr = 1;
+  }
  fts5DataWrite(p, FTS5_AVERAGES_ROWID, (const u8*)"", 0);
  fts5StructureWrite(p, &s);
  return fts5IndexReturn(p);

@@ -234374,6 +242705,7 @@ sqlite3_finalize(p->pIdxWriter);

    sqlite3_finalize(p->pIdxDeleter);
    sqlite3_finalize(p->pIdxSelect);
    sqlite3_finalize(p->pDataVersion);
+    sqlite3_finalize(p->pDeleteFromIdx);
    sqlite3Fts5HashFree(p->pHash);
    sqlite3_free(p->zDataTbl);
    sqlite3_free(p);

@@ -234700,6 +243032,347 @@ fts5StructureRelease(pStruct);

return fts5IndexReturn(p); } +/* +** Retrieve the origin value that will be used for the segment currently +** being accumulated in the in-memory hash table when it is flushed to +** disk. If successful, SQLITE_OK is returned and (*piOrigin) set to +** the queried value. Or, if an error occurs, an error code is returned +** and the final value of (*piOrigin) is undefined. +*/ +static int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin){ + Fts5Structure *pStruct; + pStruct = fts5StructureRead(p); + if( pStruct ){ + *piOrigin = pStruct->nOriginCntr; + fts5StructureRelease(pStruct); + } + return fts5IndexReturn(p); +} + +/* +** Buffer pPg contains a page of a tombstone hash table - one of nPg pages +** associated with the same segment. This function adds rowid iRowid to +** the hash table. The caller is required to guarantee that there is at +** least one free slot on the page. +** +** If parameter bForce is false and the hash table is deemed to be full +** (more than half of the slots are occupied), then non-zero is returned +** and iRowid not inserted. Or, if bForce is true or if the hash table page +** is not full, iRowid is inserted and zero returned. +*/ +static int fts5IndexTombstoneAddToPage( + Fts5Data *pPg, + int bForce, + int nPg, + u64 iRowid +){ + const int szKey = TOMBSTONE_KEYSIZE(pPg); + const int nSlot = TOMBSTONE_NSLOT(pPg); + const int nElem = fts5GetU32(&pPg->p[4]); + int iSlot = (iRowid / nPg) % nSlot; + int nCollide = nSlot; + + if( szKey==4 && iRowid>0xFFFFFFFF ) return 2; + if( iRowid==0 ){ + pPg->p[1] = 0x01; + return 0; + } + + if( bForce==0 && nElem>=(nSlot/2) ){ + return 1; + } + + fts5PutU32(&pPg->p[4], nElem+1); + if( szKey==4 ){ + u32 *aSlot = (u32*)&pPg->p[8]; + while( aSlot[iSlot] ){ + iSlot = (iSlot + 1) % nSlot; + if( nCollide--==0 ) return 0; + } + fts5PutU32((u8*)&aSlot[iSlot], (u32)iRowid); + }else{ + u64 *aSlot = (u64*)&pPg->p[8]; + while( aSlot[iSlot] ){ + iSlot = (iSlot + 1) % nSlot; + if( nCollide--==0 ) return 0; + } + fts5PutU64((u8*)&aSlot[iSlot], iRowid); + } + + return 0; +} + +/* +** This function attempts to build a new hash containing all the keys +** currently in the tombstone hash table for segment pSeg. The new +** hash will be stored in the nOut buffers passed in array apOut[]. +** All pages of the new hash use key-size szKey (4 or 8). +** +** Return 0 if the hash is successfully rebuilt into the nOut pages. +** Or non-zero if it is not (because one page became overfull). In this +** case the caller should retry with a larger nOut parameter. +** +** Parameter pData1 is page iPg1 of the hash table being rebuilt. +*/ +static int fts5IndexTombstoneRehash( + Fts5Index *p, + Fts5StructureSegment *pSeg, /* Segment to rebuild hash of */ + Fts5Data *pData1, /* One page of current hash - or NULL */ + int iPg1, /* Which page of the current hash is pData1 */ + int szKey, /* 4 or 8, the keysize */ + int nOut, /* Number of output pages */ + Fts5Data **apOut /* Array of output hash pages */ +){ + int ii; + int res = 0; + + /* Initialize the headers of all the output pages */ + for(ii=0; ii<nOut; ii++){ + apOut[ii]->p[0] = szKey; + fts5PutU32(&apOut[ii]->p[4], 0); + } + + /* Loop through the current pages of the hash table. 
*/ + for(ii=0; res==0 && ii<pSeg->nPgTombstone; ii++){ + Fts5Data *pData = 0; /* Page ii of the current hash table */ + Fts5Data *pFree = 0; /* Free this at the end of the loop */ + + if( iPg1==ii ){ + pData = pData1; + }else{ + pFree = pData = fts5DataRead(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid, ii)); + } + + if( pData ){ + int szKeyIn = TOMBSTONE_KEYSIZE(pData); + int nSlotIn = (pData->nn - 8) / szKeyIn; + int iIn; + for(iIn=0; iIn<nSlotIn; iIn++){ + u64 iVal = 0; + + /* Read the value from slot iIn of the input page into iVal. */ + if( szKeyIn==4 ){ + u32 *aSlot = (u32*)&pData->p[8]; + if( aSlot[iIn] ) iVal = fts5GetU32((u8*)&aSlot[iIn]); + }else{ + u64 *aSlot = (u64*)&pData->p[8]; + if( aSlot[iIn] ) iVal = fts5GetU64((u8*)&aSlot[iIn]); + } + + /* If iVal is not 0 at this point, insert it into the new hash table */ + if( iVal ){ + Fts5Data *pPg = apOut[(iVal % nOut)]; + res = fts5IndexTombstoneAddToPage(pPg, 0, nOut, iVal); + if( res ) break; + } + } + + /* If this is page 0 of the old hash, copy the rowid-0-flag from the + ** old hash to the new. */ + if( ii==0 ){ + apOut[0]->p[1] = pData->p[1]; + } + } + fts5DataRelease(pFree); + } + + return res; +} + +/* +** This is called to rebuild the hash table belonging to segment pSeg. +** If parameter pData1 is not NULL, then one page of the existing hash table +** has already been loaded - pData1, which is page iPg1. The key-size for +** the new hash table is szKey (4 or 8). +** +** If successful, the new hash table is not written to disk. Instead, +** output parameter (*pnOut) is set to the number of pages in the new +** hash table, and (*papOut) to point to an array of buffers containing +** the new page data. +** +** If an error occurs, an error code is left in the Fts5Index object and +** both output parameters set to 0 before returning. +*/ +static void fts5IndexTombstoneRebuild( + Fts5Index *p, + Fts5StructureSegment *pSeg, /* Segment to rebuild hash of */ + Fts5Data *pData1, /* One page of current hash - or NULL */ + int iPg1, /* Which page of the current hash is pData1 */ + int szKey, /* 4 or 8, the keysize */ + int *pnOut, /* OUT: Number of output pages */ + Fts5Data ***papOut /* OUT: Output hash pages */ +){ + const int MINSLOT = 32; + int nSlotPerPage = MAX(MINSLOT, (p->pConfig->pgsz - 8) / szKey); + int nSlot = 0; /* Number of slots in each output page */ + int nOut = 0; + + /* Figure out how many output pages (nOut) and how many slots per + ** page (nSlot). There are three possibilities: + ** + ** 1. The hash table does not yet exist. In this case the new hash + ** table will consist of a single page with MINSLOT slots. + ** + ** 2. The hash table exists but is currently a single page. In this + ** case an attempt is made to grow the page to accommodate the new + ** entry. The page is allowed to grow up to nSlotPerPage (see above) + ** slots. + ** + ** 3. The hash table already consists of more than one page, or of + ** a single page already so large that it cannot be grown. In this + ** case the new hash consists of (nPg*2+1) pages of nSlotPerPage + ** slots each, where nPg is the current number of pages in the + ** hash table. + */ + if( pSeg->nPgTombstone==0 ){ + /* Case 1. */ + nOut = 1; + nSlot = MINSLOT; + }else if( pSeg->nPgTombstone==1 ){ + /* Case 2. */ + int nElem = (int)fts5GetU32(&pData1->p[4]); + assert( pData1 && iPg1==0 ); + nOut = 1; + nSlot = MAX(nElem*4, MINSLOT); + if( nSlot>nSlotPerPage ) nOut = 0; + } + if( nOut==0 ){ + /* Case 3. 
*/ + nOut = (pSeg->nPgTombstone * 2 + 1); + nSlot = nSlotPerPage; + } + + /* Allocate the required array and output pages */ + while( 1 ){ + int res = 0; + int ii = 0; + int szPage = 0; + Fts5Data **apOut = 0; + + /* Allocate space for the new hash table */ + assert( nSlot>=MINSLOT ); + apOut = (Fts5Data**)sqlite3Fts5MallocZero(&p->rc, sizeof(Fts5Data*) * nOut); + szPage = 8 + nSlot*szKey; + for(ii=0; ii<nOut; ii++){ + Fts5Data *pNew = (Fts5Data*)sqlite3Fts5MallocZero(&p->rc, + sizeof(Fts5Data)+szPage + ); + if( pNew ){ + pNew->nn = szPage; + pNew->p = (u8*)&pNew[1]; + apOut[ii] = pNew; + } + } + + /* Rebuild the hash table. */ + if( p->rc==SQLITE_OK ){ + res = fts5IndexTombstoneRehash(p, pSeg, pData1, iPg1, szKey, nOut, apOut); + } + if( res==0 ){ + if( p->rc ){ + fts5IndexFreeArray(apOut, nOut); + apOut = 0; + nOut = 0; + } + *pnOut = nOut; + *papOut = apOut; + break; + } + + /* If control flows to here, it was not possible to rebuild the hash + ** table. Free all buffers and then try again with more pages. */ + assert( p->rc==SQLITE_OK ); + fts5IndexFreeArray(apOut, nOut); + nSlot = nSlotPerPage; + nOut = nOut*2 + 1; + } +} + + +/* +** Add a tombstone for rowid iRowid to segment pSeg. +*/ +static void fts5IndexTombstoneAdd( + Fts5Index *p, + Fts5StructureSegment *pSeg, + u64 iRowid +){ + Fts5Data *pPg = 0; + int iPg = -1; + int szKey = 0; + int nHash = 0; + Fts5Data **apHash = 0; + + p->nContentlessDelete++; + + if( pSeg->nPgTombstone>0 ){ + iPg = iRowid % pSeg->nPgTombstone; + pPg = fts5DataRead(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid,iPg)); + if( pPg==0 ){ + assert( p->rc!=SQLITE_OK ); + return; + } + + if( 0==fts5IndexTombstoneAddToPage(pPg, 0, pSeg->nPgTombstone, iRowid) ){ + fts5DataWrite(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid,iPg), pPg->p, pPg->nn); + fts5DataRelease(pPg); + return; + } + } + + /* Have to rebuild the hash table. First figure out the key-size (4 or 8). */ + szKey = pPg ? TOMBSTONE_KEYSIZE(pPg) : 4; + if( iRowid>0xFFFFFFFF ) szKey = 8; + + /* Rebuild the hash table */ + fts5IndexTombstoneRebuild(p, pSeg, pPg, iPg, szKey, &nHash, &apHash); + assert( p->rc==SQLITE_OK || (nHash==0 && apHash==0) ); + + /* If all has succeeded, write the new rowid into one of the new hash + ** table pages, then write them all out to disk. */ + if( nHash ){ + int ii = 0; + fts5IndexTombstoneAddToPage(apHash[iRowid % nHash], 1, nHash, iRowid); + for(ii=0; ii<nHash; ii++){ + i64 iTombstoneRowid = FTS5_TOMBSTONE_ROWID(pSeg->iSegid, ii); + fts5DataWrite(p, iTombstoneRowid, apHash[ii]->p, apHash[ii]->nn); + } + pSeg->nPgTombstone = nHash; + fts5StructureWrite(p, p->pStruct); + } + + fts5DataRelease(pPg); + fts5IndexFreeArray(apHash, nHash); +} + +/* +** Add iRowid to the tombstone list of the segment or segments that contain +** rows from origin iOrigin. Return SQLITE_OK if successful, or an SQLite +** error code otherwise. +*/ +static int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid){ + Fts5Structure *pStruct; + pStruct = fts5StructureRead(p); + if( pStruct ){ + int bFound = 0; /* True after pSeg->nEntryTombstone incr. 
*/ + int iLvl; + for(iLvl=pStruct->nLevel-1; iLvl>=0; iLvl--){ + int iSeg; + for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){ + Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg]; + if( pSeg->iOrigin1<=(u64)iOrigin && pSeg->iOrigin2>=(u64)iOrigin ){ + if( bFound==0 ){ + pSeg->nEntryTombstone++; + bFound = 1; + } + fts5IndexTombstoneAdd(p, pSeg, iRowid); + } + } + } + fts5StructureRelease(pStruct); + } + return fts5IndexReturn(p); +} /************************************************************************* **************************************************************************
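The origin counters and tombstone hash pages above exist to support tables declared with contentless_delete=1, where the original document text is not stored and so cannot be re-tokenized on DELETE. A minimal usage sketch with placeholder names, assuming a build that contains this change:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Sketch: a contentless FTS5 table that still accepts DELETE. Removing
    ** a row records a tombstone for its rowid (fts5IndexTombstoneAdd above)
    ** against every segment whose origin range covers the row. */
    int main(void){
      sqlite3 *db = 0;
      char *zErr = 0;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      int rc = sqlite3_exec(db,
        "CREATE VIRTUAL TABLE docs USING fts5(body, content='', contentless_delete=1);"
        "INSERT INTO docs(rowid, body) VALUES(1, 'alpha beta'), (2, 'beta gamma');"
        "DELETE FROM docs WHERE rowid=1;"      /* tombstones rowid 1 */
        "SELECT rowid FROM docs('beta');",     /* matches rowid 2 only */
        0, 0, &zErr);
      if( rc!=SQLITE_OK ){
        fprintf(stderr, "fts5 error: %s\n", zErr);
        sqlite3_free(zErr);
      }
      sqlite3_close(db);
      return rc==SQLITE_OK ? 0 : 1;
    }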

@@ -235004,6 +243677,7 @@ Fts5Index *p, /* FTS5 backend object */

  Fts5StructureSegment *pSeg      /* Segment to check internal consistency */
){
  Fts5Config *pConfig = p->pConfig;
+  int bSecureDelete = (pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE);
  sqlite3_stmt *pStmt = 0;
  int rc2;
  int iIdxPrevLeaf = pSeg->pgnoFirst-1;

@@ -235039,7 +243713,19 @@ ** to or larger than the split-key in zIdxTerm. Also check that if there

      ** is also a rowid pointer within the leaf page header, it points to a
      ** location before the term. */
      if( pLeaf->nn<=pLeaf->szLeaf ){
-        p->rc = FTS5_CORRUPT;
+
+        if( nIdxTerm==0
+         && pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE
+         && pLeaf->nn==pLeaf->szLeaf
+         && pLeaf->nn==4
+        ){
+          /* special case - the very first page in a segment keeps its %_idx
+          ** entry even if all the terms are removed from it by secure-delete
+          ** operations. */
+        }else{
+          p->rc = FTS5_CORRUPT;
+        }
+
      }else{
        int iOff;                   /* Offset of first term on leaf */
        int iRowidOff;              /* Offset of first rowid on leaf */

@@ -235103,9 +243789,12 @@ int iRowidOff = fts5LeafFirstRowidOff(pLeaf);

      ASSERT_SZLEAF_OK(pLeaf);
      if( iRowidOff>=pLeaf->szLeaf ){
        p->rc = FTS5_CORRUPT;
-      }else{
+      }else if( bSecureDelete==0 || iRowidOff>0 ){
+        i64 iDlRowid = fts5DlidxIterRowid(pDlidx);
        fts5GetVarint(&pLeaf->p[iRowidOff], (u64*)&iRowid);
-        if( iRowid!=fts5DlidxIterRowid(pDlidx) ) p->rc = FTS5_CORRUPT;
+        if( iRowid<iDlRowid || (bSecureDelete==0 && iRowid!=iDlRowid) ){
+          p->rc = FTS5_CORRUPT;
+        }
      }
      fts5DataRelease(pLeaf);
    }

@@ -235235,13 +243924,14 @@ ** Below this point is the implementation of the fts5_decode() scalar

** function only. */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** Decode a segment-data rowid from the %_data table. This function is ** the opposite of macro FTS5_SEGMENT_ROWID(). */ static void fts5DecodeRowid( i64 iRowid, /* Rowid from %_data table */ + int *pbTombstone, /* OUT: Tombstone hash flag */ int *piSegid, /* OUT: Segment id */ int *pbDlidx, /* OUT: Dlidx flag */ int *piHeight, /* OUT: Height */

@@ -235257,13 +243947,16 @@ *pbDlidx = (int)(iRowid & 0x0001);

  iRowid >>= FTS5_DATA_DLI_B;
  *piSegid = (int)(iRowid & (((i64)1 << FTS5_DATA_ID_B) - 1));
+  iRowid >>= FTS5_DATA_ID_B;
+
+  *pbTombstone = (int)(iRowid & 0x0001);
}
-#endif /* SQLITE_TEST */
+#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */
-#ifdef SQLITE_TEST
+#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG)
static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){
-  int iSegid, iHeight, iPgno, bDlidx;         /* Rowid compenents */
-  fts5DecodeRowid(iKey, &iSegid, &bDlidx, &iHeight, &iPgno);
+  int iSegid, iHeight, iPgno, bDlidx, bTomb;  /* Rowid compenents */
+  fts5DecodeRowid(iKey, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno);
  if( iSegid==0 ){
    if( iKey==FTS5_AVERAGES_ROWID ){

@@ -235273,14 +243966,16 @@ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{structure}");

} } else{ - sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%ssegid=%d h=%d pgno=%d}", - bDlidx ? "dlidx " : "", iSegid, iHeight, iPgno + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%s%ssegid=%d h=%d pgno=%d}", + bDlidx ? "dlidx " : "", + bTomb ? "tombstone " : "", + iSegid, iHeight, iPgno ); } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) static void fts5DebugStructure( int *pRc, /* IN/OUT: error code */ Fts5Buffer *pBuf,

@@ -235295,16 +243990,22 @@ " {lvl=%d nMerge=%d nSeg=%d", iLvl, pLvl->nMerge, pLvl->nSeg

); for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){ Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg]; - sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d}", + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d", pSeg->iSegid, pSeg->pgnoFirst, pSeg->pgnoLast ); + if( pSeg->iOrigin1>0 ){ + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " origin=%lld..%lld", + pSeg->iOrigin1, pSeg->iOrigin2 + ); + } + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}"); } sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}"); } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** This is part of the fts5_decode() debugging aid. **

@@ -235329,9 +244030,9 @@

fts5DebugStructure(pRc, pBuf, p); fts5StructureRelease(p); } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** This is part of the fts5_decode() debugging aid. **

@@ -235354,9 +244055,9 @@ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "%s%d", zSpace, (int)iVal);

zSpace = " "; } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** Buffer (a/n) is assumed to contain a list of serialized varints. Read ** each varint and append its string representation to buffer pBuf. Return

@@ -235373,9 +244074,9 @@ sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " %d", iVal);

} return iOff; } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** The start of buffer (a/n) contains the start of a doclist. The doclist ** may or may not finish within the buffer. This function appends a text

@@ -235408,9 +244109,9 @@ }

return iOff; } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** This function is part of the fts5_decode() debugging function. It is ** only ever used with detail=none tables.

@@ -235451,9 +244152,9 @@

sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " %lld%s", iRowid, zApp); } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** The implementation of user-defined scalar function fts5_decode(). */

@@ -235464,6 +244165,7 @@ sqlite3_value **apVal /* Function arguments */

){ i64 iRowid; /* Rowid for record being decoded */ int iSegid,iHeight,iPgno,bDlidx;/* Rowid components */ + int bTomb; const u8 *aBlob; int n; /* Record to decode */ u8 *a = 0; Fts5Buffer s; /* Build up text to return here */

@@ -235486,7 +244188,7 @@ a = (u8*)sqlite3Fts5MallocZero(&rc, nSpace);

if( a==0 ) goto decode_out; if( n>0 ) memcpy(a, aBlob, n); - fts5DecodeRowid(iRowid, &iSegid, &bDlidx, &iHeight, &iPgno); + fts5DecodeRowid(iRowid, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno); fts5DebugRowid(&rc, &s, iRowid); if( bDlidx ){

@@ -235504,6 +244206,28 @@ for(fts5DlidxLvlNext(&lvl); lvl.bEof==0; fts5DlidxLvlNext(&lvl)){

sqlite3Fts5BufferAppendPrintf(&rc, &s, " %d(%lld)", lvl.iLeafPgno, lvl.iRowid ); + } + }else if( bTomb ){ + u32 nElem = fts5GetU32(&a[4]); + int szKey = (aBlob[0]==4 || aBlob[0]==8) ? aBlob[0] : 8; + int nSlot = (n - 8) / szKey; + int ii; + sqlite3Fts5BufferAppendPrintf(&rc, &s, " nElem=%d", (int)nElem); + if( aBlob[1] ){ + sqlite3Fts5BufferAppendPrintf(&rc, &s, " 0"); + } + for(ii=0; ii<nSlot; ii++){ + u64 iVal = 0; + if( szKey==4 ){ + u32 *aSlot = (u32*)&aBlob[8]; + if( aSlot[ii] ) iVal = fts5GetU32((u8*)&aSlot[ii]); + }else{ + u64 *aSlot = (u64*)&aBlob[8]; + if( aSlot[ii] ) iVal = fts5GetU64((u8*)&aSlot[ii]); + } + if( iVal!=0 ){ + sqlite3Fts5BufferAppendPrintf(&rc, &s, " %lld", (i64)iVal); + } } }else if( iSegid==0 ){ if( iRowid==FTS5_AVERAGES_ROWID ){
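The fts5_decode() scalar function being extended here exists only in SQLITE_TEST or SQLITE_FTS5_DEBUG builds. A hedged sketch of how it is typically pointed at the %_data shadow table (here docs_data for a hypothetical table named docs); with this change, tombstone hash pages are rendered as well:

    #include <sqlite3.h>

    /* Sketch (debug builds only): decode every record in the %_data shadow
    ** table of the FTS5 table "docs". Passing a NULL callback to
    ** sqlite3_exec() just checks that each record decodes without error;
    ** a real caller would supply a callback to print the text. */
    static int fts5_decode_all(sqlite3 *db){
      return sqlite3_exec(db,
          "SELECT id, fts5_decode(id, block) FROM docs_data;", 0, 0, 0);
    }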

@@ -235530,7 +244254,7 @@ }

fts5DecodeRowidList(&rc, &s, &a[4], iTermOff-4); iOff = iTermOff; - while( iOff<szLeaf ){ + while( iOff<szLeaf && rc==SQLITE_OK ){ int nAppend; /* Read the term data for the next term*/

@@ -235550,8 +244274,11 @@ iTermOff += nIncr;

}else{ iTermOff = szLeaf; } - - fts5DecodeRowidList(&rc, &s, &a[iOff], iTermOff-iOff); + if( iTermOff>szLeaf ){ + rc = FTS5_CORRUPT; + }else{ + fts5DecodeRowidList(&rc, &s, &a[iOff], iTermOff-iOff); + } iOff = iTermOff; if( iOff<szLeaf ){ iOff += fts5GetVarint32(&a[iOff], nKeep);

@@ -235662,9 +244389,9 @@ sqlite3_result_error_code(pCtx, rc);

} fts5BufferFree(&s); } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** The implementation of user-defined scalar function fts5_rowid(). */

@@ -235698,7 +244425,235 @@ );

} } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ + +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) + +typedef struct Fts5StructVtab Fts5StructVtab; +struct Fts5StructVtab { + sqlite3_vtab base; +}; + +typedef struct Fts5StructVcsr Fts5StructVcsr; +struct Fts5StructVcsr { + sqlite3_vtab_cursor base; + Fts5Structure *pStruct; + int iLevel; + int iSeg; + int iRowid; +}; + +/* +** Create a new fts5_structure() table-valued function. +*/ +static int fts5structConnectMethod( + sqlite3 *db, + void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVtab, + char **pzErr +){ + Fts5StructVtab *pNew = 0; + int rc = SQLITE_OK; + + rc = sqlite3_declare_vtab(db, + "CREATE TABLE xyz(" + "level, segment, merge, segid, leaf1, leaf2, loc1, loc2, " + "npgtombstone, nentrytombstone, nentry, struct HIDDEN);" + ); + if( rc==SQLITE_OK ){ + pNew = sqlite3Fts5MallocZero(&rc, sizeof(*pNew)); + } + + *ppVtab = (sqlite3_vtab*)pNew; + return rc; +} + +/* +** We must have a single struct=? constraint that will be passed through +** into the xFilter method. If there is no valid stmt=? constraint, +** then return an SQLITE_CONSTRAINT error. +*/ +static int fts5structBestIndexMethod( + sqlite3_vtab *tab, + sqlite3_index_info *pIdxInfo +){ + int i; + int rc = SQLITE_CONSTRAINT; + struct sqlite3_index_constraint *p; + pIdxInfo->estimatedCost = (double)100; + pIdxInfo->estimatedRows = 100; + pIdxInfo->idxNum = 0; + for(i=0, p=pIdxInfo->aConstraint; i<pIdxInfo->nConstraint; i++, p++){ + if( p->usable==0 ) continue; + if( p->op==SQLITE_INDEX_CONSTRAINT_EQ && p->iColumn==11 ){ + rc = SQLITE_OK; + pIdxInfo->aConstraintUsage[i].omit = 1; + pIdxInfo->aConstraintUsage[i].argvIndex = 1; + break; + } + } + return rc; +} + +/* +** This method is the destructor for bytecodevtab objects. +*/ +static int fts5structDisconnectMethod(sqlite3_vtab *pVtab){ + Fts5StructVtab *p = (Fts5StructVtab*)pVtab; + sqlite3_free(p); + return SQLITE_OK; +} + +/* +** Constructor for a new bytecodevtab_cursor object. +*/ +static int fts5structOpenMethod(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCsr){ + int rc = SQLITE_OK; + Fts5StructVcsr *pNew = 0; + + pNew = sqlite3Fts5MallocZero(&rc, sizeof(*pNew)); + *ppCsr = (sqlite3_vtab_cursor*)pNew; + + return SQLITE_OK; +} + +/* +** Destructor for a bytecodevtab_cursor. +*/ +static int fts5structCloseMethod(sqlite3_vtab_cursor *cur){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + fts5StructureRelease(pCsr->pStruct); + sqlite3_free(pCsr); + return SQLITE_OK; +} + + +/* +** Advance a bytecodevtab_cursor to its next row of output. +*/ +static int fts5structNextMethod(sqlite3_vtab_cursor *cur){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + Fts5Structure *p = pCsr->pStruct; + + assert( pCsr->pStruct ); + pCsr->iSeg++; + pCsr->iRowid++; + while( pCsr->iLevel<p->nLevel && pCsr->iSeg>=p->aLevel[pCsr->iLevel].nSeg ){ + pCsr->iLevel++; + pCsr->iSeg = 0; + } + if( pCsr->iLevel>=p->nLevel ){ + fts5StructureRelease(pCsr->pStruct); + pCsr->pStruct = 0; + } + return SQLITE_OK; +} + +/* +** Return TRUE if the cursor has been moved off of the last +** row of output. 
+*/ +static int fts5structEofMethod(sqlite3_vtab_cursor *cur){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + return pCsr->pStruct==0; +} + +static int fts5structRowidMethod( + sqlite3_vtab_cursor *cur, + sqlite_int64 *piRowid +){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + *piRowid = pCsr->iRowid; + return SQLITE_OK; +} + +/* +** Return values of columns for the row at which the bytecodevtab_cursor +** is currently pointing. +*/ +static int fts5structColumnMethod( + sqlite3_vtab_cursor *cur, /* The cursor */ + sqlite3_context *ctx, /* First argument to sqlite3_result_...() */ + int i /* Which column to return */ +){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + Fts5Structure *p = pCsr->pStruct; + Fts5StructureSegment *pSeg = &p->aLevel[pCsr->iLevel].aSeg[pCsr->iSeg]; + + switch( i ){ + case 0: /* level */ + sqlite3_result_int(ctx, pCsr->iLevel); + break; + case 1: /* segment */ + sqlite3_result_int(ctx, pCsr->iSeg); + break; + case 2: /* merge */ + sqlite3_result_int(ctx, pCsr->iSeg < p->aLevel[pCsr->iLevel].nMerge); + break; + case 3: /* segid */ + sqlite3_result_int(ctx, pSeg->iSegid); + break; + case 4: /* leaf1 */ + sqlite3_result_int(ctx, pSeg->pgnoFirst); + break; + case 5: /* leaf2 */ + sqlite3_result_int(ctx, pSeg->pgnoLast); + break; + case 6: /* origin1 */ + sqlite3_result_int64(ctx, pSeg->iOrigin1); + break; + case 7: /* origin2 */ + sqlite3_result_int64(ctx, pSeg->iOrigin2); + break; + case 8: /* npgtombstone */ + sqlite3_result_int(ctx, pSeg->nPgTombstone); + break; + case 9: /* nentrytombstone */ + sqlite3_result_int64(ctx, pSeg->nEntryTombstone); + break; + case 10: /* nentry */ + sqlite3_result_int64(ctx, pSeg->nEntry); + break; + } + return SQLITE_OK; +} + +/* +** Initialize a cursor. +** +** idxNum==0 means show all subprograms +** idxNum==1 means show only the main bytecode and omit subprograms. +*/ +static int fts5structFilterMethod( + sqlite3_vtab_cursor *pVtabCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr *)pVtabCursor; + int rc = SQLITE_OK; + + const u8 *aBlob = 0; + int nBlob = 0; + + assert( argc==1 ); + fts5StructureRelease(pCsr->pStruct); + pCsr->pStruct = 0; + + nBlob = sqlite3_value_bytes(argv[0]); + aBlob = (const u8*)sqlite3_value_blob(argv[0]); + rc = fts5StructureDecode(aBlob, nBlob, 0, &pCsr->pStruct); + if( rc==SQLITE_OK ){ + pCsr->iLevel = 0; + pCsr->iRowid = 0; + pCsr->iSeg = -1; + rc = fts5structNextMethod(pVtabCursor); + } + + return rc; +} + +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ /* ** This is called as part of registering the FTS5 module with database
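The fts5_structure table-valued function implemented above (and registered as a module a few hunks below) is likewise only available in SQLITE_TEST or SQLITE_FTS5_DEBUG builds. A sketch of querying it for a hypothetical table docs, assuming (as elsewhere in this file) that the structure record is stored at id 10 of the %_data shadow table; the column names follow the sqlite3_declare_vtab() call in fts5structConnectMethod() above:

    #include <stdio.h>
    #include <sqlite3.h>

    /* Sketch (debug builds only): print one line per segment of the FTS5
    ** table "docs", including the new tombstone and entry counters. */
    static int print_row(void *pUnused, int nCol, char **azVal, char **azCol){
      int i;
      (void)pUnused;
      for(i=0; i<nCol; i++){
        printf("%s=%s ", azCol[i], azVal[i] ? azVal[i] : "NULL");
      }
      printf("\n");
      return 0;
    }

    static int fts5_dump_structure(sqlite3 *db){
      return sqlite3_exec(db,
          "SELECT level, segment, segid, leaf1, leaf2,"
          "       npgtombstone, nentrytombstone, nentry"
          "  FROM fts5_structure((SELECT block FROM docs_data WHERE id=10));",
          print_row, 0, 0);
    }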

@@ -235709,7 +244664,7 @@ ** If successful, SQLITE_OK is returned. If an error occurs, some other

** SQLite error code is returned instead. */ static int sqlite3Fts5IndexInit(sqlite3 *db){ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) int rc = sqlite3_create_function( db, "fts5_decode", 2, SQLITE_UTF8, 0, fts5DecodeFunction, 0, 0 );

@@ -235726,6 +244681,37 @@ rc = sqlite3_create_function(

db, "fts5_rowid", -1, SQLITE_UTF8, 0, fts5RowidFunction, 0, 0 ); } + + if( rc==SQLITE_OK ){ + static const sqlite3_module fts5structure_module = { + 0, /* iVersion */ + 0, /* xCreate */ + fts5structConnectMethod, /* xConnect */ + fts5structBestIndexMethod, /* xBestIndex */ + fts5structDisconnectMethod, /* xDisconnect */ + 0, /* xDestroy */ + fts5structOpenMethod, /* xOpen */ + fts5structCloseMethod, /* xClose */ + fts5structFilterMethod, /* xFilter */ + fts5structNextMethod, /* xNext */ + fts5structEofMethod, /* xEof */ + fts5structColumnMethod, /* xColumn */ + fts5structRowidMethod, /* xRowid */ + 0, /* xUpdate */ + 0, /* xBegin */ + 0, /* xSync */ + 0, /* xCommit */ + 0, /* xRollback */ + 0, /* xFindFunction */ + 0, /* xRename */ + 0, /* xSavepoint */ + 0, /* xRelease */ + 0, /* xRollbackTo */ + 0, /* xShadowName */ + 0 /* xIntegrity */ + }; + rc = sqlite3_create_module(db, "fts5_structure", &fts5structure_module, 0); + } return rc; #else return SQLITE_OK;

@@ -235861,6 +244847,8 @@ Fts5Table p; /* Public class members from fts5Int.h */

Fts5Storage *pStorage; /* Document store */ Fts5Global *pGlobal; /* Global (connection wide) data */ Fts5Cursor *pSortCsr; /* Sort data from this cursor */ + int iSavepoint; /* Successful xSavepoint()+1 */ + int bInSavepoint; #ifdef SQLITE_DEBUG struct Fts5TransactionState ts; #endif

@@ -236004,7 +244992,7 @@ p->ts.iSavepoint = -1;

      break;

    case FTS5_SYNC:
-      assert( p->ts.eState==1 );
+      assert( p->ts.eState==1 || p->ts.eState==2 );
      p->ts.eState = 2;
      break;

@@ -236019,21 +245007,21 @@ p->ts.eState = 0;

break; case FTS5_SAVEPOINT: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=0 ); assert( iSavepoint>=p->ts.iSavepoint ); p->ts.iSavepoint = iSavepoint; break; case FTS5_RELEASE: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=0 ); assert( iSavepoint<=p->ts.iSavepoint ); p->ts.iSavepoint = iSavepoint-1; break; case FTS5_ROLLBACKTO: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=-1 ); /* The following assert() can fail if another vtab strikes an error ** within an xSavepoint() call then SQLite calls xRollbackTo() - without

@@ -236147,6 +245135,13 @@ pConfig->pzErrmsg = pzErr;

rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); sqlite3Fts5IndexRollback(pTab->p.pIndex); pConfig->pzErrmsg = 0; + } + + if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){ + rc = sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, (int)1); + } + if( rc==SQLITE_OK ){ + rc = sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); } if( rc!=SQLITE_OK ){

@@ -237073,6 +246068,9 @@ pCsr->iLastRowid = fts5GetRowidLimit(pRowidLe, LARGEST_INT64);

pCsr->iFirstRowid = fts5GetRowidLimit(pRowidGe, SMALLEST_INT64); } + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + if( rc!=SQLITE_OK ) goto filter_out; + if( pTab->pSortCsr ){ /* If pSortCsr is non-NULL, then this call is being made as part of ** processing for a "... MATCH <expr> ORDER BY rank" query (ePlan is

@@ -237095,6 +246093,7 @@ pCsr->ePlan = FTS5_PLAN_SOURCE;

pCsr->pExpr = pTab->pSortCsr->pExpr; rc = fts5CursorFirst(pTab, pCsr, bDesc); }else if( pCsr->pExpr ){ + assert( rc==SQLITE_OK ); rc = fts5CursorParseRank(pConfig, pCsr, pRank); if( rc==SQLITE_OK ){ if( bOrderByRank ){

@@ -237266,6 +246265,7 @@ ){

Fts5Config *pConfig = pTab->p.pConfig; int rc = SQLITE_OK; int bError = 0; + int bLoadConfig = 0; if( 0==sqlite3_stricmp("delete-all", zCmd) ){ if( pConfig->eContent==FTS5_CONTENT_NORMAL ){

@@ -237277,6 +246277,7 @@ rc = SQLITE_ERROR;

}else{ rc = sqlite3Fts5StorageDeleteAll(pTab->pStorage); } + bLoadConfig = 1; }else if( 0==sqlite3_stricmp("rebuild", zCmd) ){ if( pConfig->eContent==FTS5_CONTENT_NONE ){ fts5SetVtabError(pTab,

@@ -237286,6 +246287,7 @@ rc = SQLITE_ERROR;

}else{ rc = sqlite3Fts5StorageRebuild(pTab->pStorage); } + bLoadConfig = 1; }else if( 0==sqlite3_stricmp("optimize", zCmd) ){ rc = sqlite3Fts5StorageOptimize(pTab->pStorage); }else if( 0==sqlite3_stricmp("merge", zCmd) ){

@@ -237298,6 +246300,8 @@ #ifdef SQLITE_DEBUG

}else if( 0==sqlite3_stricmp("prefix-index", zCmd) ){ pConfig->bPrefixIndex = sqlite3_value_int(pVal); #endif + }else if( 0==sqlite3_stricmp("flush", zCmd) ){ + rc = sqlite3Fts5FlushToDisk(&pTab->p); }else{ rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); if( rc==SQLITE_OK ){

@@ -237311,6 +246315,12 @@ rc = sqlite3Fts5StorageConfigValue(pTab->pStorage, zCmd, pVal, 0);

} } } + + if( rc==SQLITE_OK && bLoadConfig ){ + pTab->p.pConfig->iCookie--; + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + } + return rc; }

@@ -237367,9 +246377,10 @@ Fts5FullTable *pTab = (Fts5FullTable*)pVtab;

Fts5Config *pConfig = pTab->p.pConfig; int eType0; /* value_type() of apVal[0] */ int rc = SQLITE_OK; /* Return code */ + int bUpdateOrDelete = 0; /* A transaction must be open when this is called. */ - assert( pTab->ts.eState==1 ); + assert( pTab->ts.eState==1 || pTab->ts.eState==2 ); assert( pVtab->zErrMsg==0 ); assert( nArg==1 || nArg==(2+pConfig->nCol+2) );

@@ -237377,6 +246388,11 @@ assert( sqlite3_value_type(apVal[0])==SQLITE_INTEGER

|| sqlite3_value_type(apVal[0])==SQLITE_NULL ); assert( pTab->p.pConfig->pzErrmsg==0 ); + if( pConfig->pgsz==0 ){ + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + if( rc!=SQLITE_OK ) return rc; + } + pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg; /* Put any active cursors into REQUIRE_SEEK state. */

@@ -237391,7 +246407,14 @@ const char *z = (const char*)sqlite3_value_text(apVal[2+pConfig->nCol]);

if( pConfig->eContent!=FTS5_CONTENT_NORMAL && 0==sqlite3_stricmp("delete", z) ){ - rc = fts5SpecialDelete(pTab, apVal); + if( pConfig->bContentlessDelete ){ + fts5SetVtabError(pTab, + "'delete' may not be used with a contentless_delete=1 table" + ); + rc = SQLITE_ERROR; + }else{ + rc = fts5SpecialDelete(pTab, apVal); + } }else{ rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]); }

@@ -237408,7 +246431,7 @@ **

** Cases 3 and 4 may violate the rowid constraint. */ int eConflict = SQLITE_ABORT; - if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent==FTS5_CONTENT_NORMAL || pConfig->bContentlessDelete ){ eConflict = sqlite3_vtab_on_conflict(pConfig->db); }

@@ -237416,8 +246439,12 @@ assert( eType0==SQLITE_INTEGER || eType0==SQLITE_NULL );

assert( nArg!=1 || eType0==SQLITE_INTEGER ); /* Filter out attempts to run UPDATE or DELETE on contentless tables. - ** This is not suported. */ - if( eType0==SQLITE_INTEGER && fts5IsContentless(pTab) ){ + ** This is not suported. Except - they are both supported if the CREATE + ** VIRTUAL TABLE statement contained "contentless_delete=1". */ + if( eType0==SQLITE_INTEGER + && pConfig->eContent==FTS5_CONTENT_NONE + && pConfig->bContentlessDelete==0 + ){ pTab->p.base.zErrMsg = sqlite3_mprintf( "cannot %s contentless fts5 table: %s", (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName

@@ -237429,6 +246456,7 @@ /* DELETE */

else if( nArg==1 ){ i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0); + bUpdateOrDelete = 1; } /* INSERT or UPDATE */

@@ -237440,10 +246468,12 @@ || sqlite3GetInt32(pToken->z, &iValue)==0 ){

} else if( eType0!=SQLITE_INTEGER ){ - /* If this is a REPLACE, first remove the current entry (if any) */ + /* An INSERT statement. If the conflict-mode is REPLACE, first remove + ** the current entry (if any). */ if( eConflict==SQLITE_REPLACE && eType1==SQLITE_INTEGER ){ i64 iNew = sqlite3_value_int64(apVal[1]); /* Rowid to delete */ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0); + bUpdateOrDelete = 1; } fts5StorageInsert(&rc, pTab, apVal, pRowid); }

@@ -237472,10 +246502,24 @@ }else{

rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); fts5StorageInsert(&rc, pTab, apVal, pRowid); } + bUpdateOrDelete = 1; } } } + if( rc==SQLITE_OK + && bUpdateOrDelete + && pConfig->bSecureDelete + && pConfig->iVersion==FTS5_CURRENT_VERSION + ){ + rc = sqlite3Fts5StorageConfigValue( + pTab->pStorage, "version", 0, FTS5_CURRENT_VERSION_SECUREDELETE + ); + if( rc==SQLITE_OK ){ + pConfig->iVersion = FTS5_CURRENT_VERSION_SECUREDELETE; + } + } + pTab->p.pConfig->pzErrmsg = 0; return rc; }

@@ -237488,8 +246532,7 @@ int rc;

  Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
  fts5CheckTransactionState(pTab, FTS5_SYNC, 0);
  pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg;
-  fts5TripCursors(pTab);
-  rc = sqlite3Fts5StorageSync(pTab->pStorage);
+  rc = sqlite3Fts5FlushToDisk(&pTab->p);
  pTab->p.pConfig->pzErrmsg = 0;
  return rc;
}

@@ -238256,6 +247299,12 @@ if( rc==SQLITE_OK ){

sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1)); } pConfig->pzErrmsg = 0; + }else if( pConfig->bContentlessDelete && sqlite3_vtab_nochange(pCtx) ){ + char *zErr = sqlite3_mprintf("cannot UPDATE a subset of " + "columns on fts5 contentless-delete table: %s", pConfig->zName + ); + sqlite3_result_error(pCtx, zErr, -1); + sqlite3_free(zErr); } return rc; }

@@ -238294,8 +247343,12 @@ static int fts5RenameMethod(

sqlite3_vtab *pVtab, /* Virtual table handle */ const char *zName /* New name of table */ ){ + int rc; Fts5FullTable *pTab = (Fts5FullTable*)pVtab; - return sqlite3Fts5StorageRename(pTab->pStorage, zName); + pTab->bInSavepoint = 1; + rc = sqlite3Fts5StorageRename(pTab->pStorage, zName); + pTab->bInSavepoint = 0; + return rc; } static int sqlite3Fts5FlushToDisk(Fts5Table *pTab){

@@ -238309,9 +247362,29 @@ **

** Flush the contents of the pending-terms table to disk. */ static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ - UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */ - fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_SAVEPOINT, iSavepoint); - return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab); + Fts5FullTable *pTab = (Fts5FullTable*)pVtab; + int rc = SQLITE_OK; + char *zSql = 0; + fts5CheckTransactionState(pTab, FTS5_SAVEPOINT, iSavepoint); + + if( pTab->bInSavepoint==0 ){ + zSql = sqlite3_mprintf("INSERT INTO %Q.%Q(%Q) VALUES('flush')", + pTab->p.pConfig->zDb, pTab->p.pConfig->zName, pTab->p.pConfig->zName + ); + if( zSql ){ + pTab->bInSavepoint = 1; + rc = sqlite3_exec(pTab->p.pConfig->db, zSql, 0, 0, 0); + pTab->bInSavepoint = 0; + sqlite3_free(zSql); + }else{ + rc = SQLITE_NOMEM; + } + if( rc==SQLITE_OK ){ + pTab->iSavepoint = iSavepoint+1; + } + } + + return rc; } /*

@@ -238320,9 +247393,16 @@ **

** This is a no-op. */ static int fts5ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ - UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */ - fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_RELEASE, iSavepoint); - return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab); + Fts5FullTable *pTab = (Fts5FullTable*)pVtab; + int rc = SQLITE_OK; + fts5CheckTransactionState(pTab, FTS5_RELEASE, iSavepoint); + if( (iSavepoint+1)<pTab->iSavepoint ){ + rc = sqlite3Fts5FlushToDisk(&pTab->p); + if( rc==SQLITE_OK ){ + pTab->iSavepoint = iSavepoint; + } + } + return rc; } /*

@@ -238332,10 +247412,14 @@ ** Discard the contents of the pending terms table.

*/ static int fts5RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; - UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */ + int rc = SQLITE_OK; fts5CheckTransactionState(pTab, FTS5_ROLLBACKTO, iSavepoint); fts5TripCursors(pTab); - return sqlite3Fts5StorageRollback(pTab->pStorage); + pTab->p.pConfig->pgsz = 0; + if( (iSavepoint+1)<=pTab->iSavepoint ){ + rc = sqlite3Fts5StorageRollback(pTab->pStorage); + } + return rc; } /*

@@ -238537,7 +247621,7 @@ sqlite3_value **apUnused /* Function arguments */

){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2022-11-16 12:10:08 89c459e766ea7e9165d0beeb124708b955a4950d0f4792f457465d71b158d318", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2023-11-24 11:41:44 ebead0e7230cd33bcec9f95d2183069565b9e709bf745c9b5db65cc0cbf92c0f", -1, SQLITE_TRANSIENT); } /*

@@ -238555,9 +247639,46 @@ }

return 0; } +/* +** Run an integrity check on the FTS5 data structures. Return a string +** if anything is found amiss. Return a NULL pointer if everything is +** OK. +*/ +static int fts5Integrity( + sqlite3_vtab *pVtab, /* the FTS5 virtual table to check */ + const char *zSchema, /* Name of schema in which this table lives */ + const char *zTabname, /* Name of the table itself */ + int isQuick, /* True if this is a quick-check */ + char **pzErr /* Write error message here */ +){ + Fts5FullTable *pTab = (Fts5FullTable*)pVtab; + Fts5Config *pConfig = pTab->p.pConfig; + char *zSql; + char *zErr = 0; + int rc; + assert( pzErr!=0 && *pzErr==0 ); + UNUSED_PARAM(isQuick); + zSql = sqlite3_mprintf( + "INSERT INTO \"%w\".\"%w\"(\"%w\") VALUES('integrity-check');", + zSchema, zTabname, pConfig->zName); + if( zSql==0 ) return SQLITE_NOMEM; + rc = sqlite3_exec(pConfig->db, zSql, 0, 0, &zErr); + sqlite3_free(zSql); + if( (rc&0xff)==SQLITE_CORRUPT ){ + *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s", + zSchema, zTabname); + }else if( rc!=SQLITE_OK ){ + *pzErr = sqlite3_mprintf("unable to validate the inverted index for" + " FTS5 table %s.%s: %s", + zSchema, zTabname, zErr); + } + sqlite3_free(zErr); + return SQLITE_OK; +} + static int fts5Init(sqlite3 *db){ static const sqlite3_module fts5Mod = { - /* iVersion */ 3, + /* iVersion */ 4, /* xCreate */ fts5CreateMethod, /* xConnect */ fts5ConnectMethod, /* xBestIndex */ fts5BestIndexMethod,
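With the module's iVersion raised to 4 and the xIntegrity slot filled in below, an ordinary PRAGMA integrity_check can now reach fts5Integrity(), which in turn runs the long-standing 'integrity-check' command. A minimal sketch, assuming a core new enough to call xIntegrity and a placeholder table name docs:

    #include <sqlite3.h>

    /* Sketch: both calls end up exercising the same FTS5 index checks - the
    ** PRAGMA via the new xIntegrity method, the INSERT via the existing
    ** 'integrity-check' special command. Note that the PRAGMA reports
    ** problems as result rows, so a real application would supply a
    ** callback to sqlite3_exec() rather than NULL. */
    static int fts5_check(sqlite3 *db){
      int rc = sqlite3_exec(db, "PRAGMA integrity_check;", 0, 0, 0);
      if( rc==SQLITE_OK ){
        rc = sqlite3_exec(db,
            "INSERT INTO docs(docs) VALUES('integrity-check');", 0, 0, 0);
      }
      return rc;
    }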

@@ -238580,7 +247701,8 @@ /* xRename */ fts5RenameMethod,

/* xSavepoint */ fts5SavepointMethod, /* xRelease */ fts5ReleaseMethod, /* xRollbackTo */ fts5RollbackToMethod, - /* xShadowName */ fts5ShadowName + /* xShadowName */ fts5ShadowName, + /* xIntegrity */ fts5Integrity }; int rc;

@@ -238610,7 +247732,9 @@ );

} if( rc==SQLITE_OK ){ rc = sqlite3_create_function( - db, "fts5_source_id", 0, SQLITE_UTF8, p, fts5SourceIdFunc, 0, 0 + db, "fts5_source_id", 0, + SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, + p, fts5SourceIdFunc, 0, 0 ); } }

@@ -238748,10 +247872,10 @@

"INSERT INTO %Q.'%q_content' VALUES(%s)", /* INSERT_CONTENT */ "REPLACE INTO %Q.'%q_content' VALUES(%s)", /* REPLACE_CONTENT */ "DELETE FROM %Q.'%q_content' WHERE id=?", /* DELETE_CONTENT */ - "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", /* REPLACE_DOCSIZE */ + "REPLACE INTO %Q.'%q_docsize' VALUES(?,?%s)", /* REPLACE_DOCSIZE */ "DELETE FROM %Q.'%q_docsize' WHERE id=?", /* DELETE_DOCSIZE */ - "SELECT sz FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */ + "SELECT sz%s FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */ "REPLACE INTO %Q.'%q_config' VALUES(?,?)", /* REPLACE_CONFIG */ "SELECT %s FROM %s AS T", /* SCAN */

@@ -238799,6 +247923,19 @@ }

break; } + case FTS5_STMT_REPLACE_DOCSIZE: + zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName, + (pC->bContentlessDelete ? ",?" : "") + ); + break; + + case FTS5_STMT_LOOKUP_DOCSIZE: + zSql = sqlite3_mprintf(azStmt[eStmt], + (pC->bContentlessDelete ? ",origin" : ""), + pC->zDb, pC->zName + ); + break; + default: zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName); break;

@@ -238988,9 +248125,11 @@ sqlite3_free(zDefn);

} if( rc==SQLITE_OK && pConfig->bColumnsize ){ - rc = sqlite3Fts5CreateTable( - pConfig, "docsize", "id INTEGER PRIMARY KEY, sz BLOB", 0, pzErr - ); + const char *zCols = "id INTEGER PRIMARY KEY, sz BLOB"; + if( pConfig->bContentlessDelete ){ + zCols = "id INTEGER PRIMARY KEY, sz BLOB, origin INTEGER"; + } + rc = sqlite3Fts5CreateTable(pConfig, "docsize", zCols, 0, pzErr); } if( rc==SQLITE_OK ){ rc = sqlite3Fts5CreateTable(

@@ -239067,7 +248206,7 @@ sqlite3_value **apVal

){ Fts5Config *pConfig = p->pConfig; sqlite3_stmt *pSeek = 0; /* SELECT to read row iDel from %_data */ - int rc; /* Return code */ + int rc = SQLITE_OK; /* Return code */ int rc2; /* sqlite3_reset() return code */ int iCol; Fts5InsertCtx ctx;

@@ -239083,7 +248222,6 @@ }

ctx.pStorage = p; ctx.iCol = -1; - rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel); for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){ if( pConfig->abUnindexed[iCol-1]==0 ){ const char *zText;

@@ -239120,6 +248258,37 @@ if( rc==SQLITE_OK ) rc = rc2;

return rc; } +/* +** This function is called to process a DELETE on a contentless_delete=1 +** table. It adds the tombstone required to delete the entry with rowid +** iDel. If successful, SQLITE_OK is returned. Or, if an error occurs, +** an SQLite error code. +*/ +static int fts5StorageContentlessDelete(Fts5Storage *p, i64 iDel){ + i64 iOrigin = 0; + sqlite3_stmt *pLookup = 0; + int rc = SQLITE_OK; + + assert( p->pConfig->bContentlessDelete ); + assert( p->pConfig->eContent==FTS5_CONTENT_NONE ); + + /* Look up the origin of the document in the %_docsize table. Store + ** this in stack variable iOrigin. */ + rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP_DOCSIZE, &pLookup, 0); + if( rc==SQLITE_OK ){ + sqlite3_bind_int64(pLookup, 1, iDel); + if( SQLITE_ROW==sqlite3_step(pLookup) ){ + iOrigin = sqlite3_column_int64(pLookup, 1); + } + rc = sqlite3_reset(pLookup); + } + + if( rc==SQLITE_OK && iOrigin!=0 ){ + rc = sqlite3Fts5IndexContentlessDelete(p->pIndex, iOrigin, iDel); + } + + return rc; +} /* ** Insert a record into the %_docsize table. Specifically, do:
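The hunks above (the extra 'origin' column in the %_docsize shadow table, the adjusted REPLACE_DOCSIZE/LOOKUP_DOCSIZE statements, and fts5StorageContentlessDelete) implement the contentless-delete feature from upstream SQLite. A minimal sketch of how it is used from SQL, assuming FTS5 is enabled; the table name and rows are illustrative:

```c
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db = 0;
  sqlite3_open(":memory:", &db);
  /* contentless_delete=1 is only valid together with content='' */
  sqlite3_exec(db,
    "CREATE VIRTUAL TABLE idx USING fts5(body, content='', contentless_delete=1);"
    "INSERT INTO idx(rowid, body) VALUES(1, 'alpha beta'), (2, 'beta gamma');"
    /* With contentless_delete=1 an ordinary DELETE is allowed on a
    ** contentless table: a tombstone based on the stored 'origin' value
    ** is recorded instead of re-tokenizing the (absent) content. */
    "DELETE FROM idx WHERE rowid = 1;",
    0, 0, 0);

  sqlite3_stmt *p = 0;
  sqlite3_prepare_v2(db, "SELECT rowid FROM idx WHERE idx MATCH 'beta'", -1, &p, 0);
  while( sqlite3_step(p)==SQLITE_ROW ){
    printf("match rowid=%lld\n", (long long)sqlite3_column_int64(p, 0));
  }
  sqlite3_finalize(p);
  sqlite3_close(db);
  return 0;
}
```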

@@ -239140,10 +248309,17 @@ sqlite3_stmt *pReplace = 0;

rc = fts5StorageGetStmt(p, FTS5_STMT_REPLACE_DOCSIZE, &pReplace, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pReplace, 1, iRowid); - sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); - sqlite3_step(pReplace); - rc = sqlite3_reset(pReplace); - sqlite3_bind_null(pReplace, 2); + if( p->pConfig->bContentlessDelete ){ + i64 iOrigin = 0; + rc = sqlite3Fts5IndexGetOrigin(p->pIndex, &iOrigin); + sqlite3_bind_int64(pReplace, 3, iOrigin); + } + if( rc==SQLITE_OK ){ + sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); + sqlite3_step(pReplace); + rc = sqlite3_reset(pReplace); + sqlite3_bind_null(pReplace, 2); + } } } return rc;

@@ -239207,7 +248383,15 @@ rc = fts5StorageLoadTotals(p, 1);

/* Delete the index records */ if( rc==SQLITE_OK ){ - rc = fts5StorageDeleteFromIndex(p, iDel, apVal); + rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel); + } + + if( rc==SQLITE_OK ){ + if( p->pConfig->bContentlessDelete ){ + rc = fts5StorageContentlessDelete(p, iDel); + }else{ + rc = fts5StorageDeleteFromIndex(p, iDel, apVal); + } } /* Delete the %_docsize record */

@@ -239795,7 +248979,9 @@ int rc = SQLITE_OK;

i64 iLastRowid = sqlite3_last_insert_rowid(p->pConfig->db); if( p->bTotalsValid ){ rc = fts5StorageSaveTotals(p); - p->bTotalsValid = 0; + if( rc==SQLITE_OK ){ + p->bTotalsValid = 0; + } } if( rc==SQLITE_OK ){ rc = sqlite3Fts5IndexSync(p->pIndex);

@@ -243163,7 +252349,8 @@ /* xRename */ 0,

/* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ 0, - /* xShadowName */ 0 + /* xShadowName */ 0, + /* xIntegrity */ 0 }; void *p = (void*)pGlobal;

@@ -243275,6 +252462,10 @@ #define STMT_COLUMN_RUN 9 /* SQLITE_STMTSTATUS_RUN */

#define STMT_COLUMN_MEM 10 /* SQLITE_STMTSTATUS_MEMUSED */ + (void)pAux; + (void)argc; + (void)argv; + (void)pzErr; rc = sqlite3_declare_vtab(db, "CREATE TABLE x(sql,ncol,ro,busy,nscan,nsort,naidx,nstep," "reprep,run,mem)");

@@ -243394,6 +252585,10 @@ sqlite3_stmt *p = 0;

sqlite3_int64 iRowid = 1; StmtRow **ppRow = 0; + (void)idxNum; + (void)idxStr; + (void)argc; + (void)argv; stmtCsrReset(pCur); ppRow = &pCur->pRow; for(p=sqlite3_next_stmt(pCur->db, 0); p; p=sqlite3_next_stmt(pCur->db, p)){

@@ -243449,6 +252644,7 @@ static int stmtBestIndex(

sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo ){ + (void)tab; pIdxInfo->estimatedCost = (double)500; pIdxInfo->estimatedRows = 500; return SQLITE_OK;

@@ -243483,6 +252679,7 @@ 0, /* xSavepoint */

0, /* xRelease */ 0, /* xRollbackTo */ 0, /* xShadowName */ + 0 /* xIntegrity */ }; #endif /* SQLITE_OMIT_VIRTUALTABLE */
M src/litestorepkg/vendor/sqlite/sqlite3.hsrc/litestorepkg/vendor/sqlite/sqlite3.h

@@ -146,9 +146,9 @@ ** See also: [sqlite3_libversion()],

** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.40.0" -#define SQLITE_VERSION_NUMBER 3040000 -#define SQLITE_SOURCE_ID "2022-11-16 12:10:08 89c459e766ea7e9165d0beeb124708b955a4950d0f4792f457465d71b158d318" +#define SQLITE_VERSION "3.44.2" +#define SQLITE_VERSION_NUMBER 3044002 +#define SQLITE_SOURCE_ID "2023-11-24 11:41:44 ebead0e7230cd33bcec9f95d2183069565b9e709bf745c9b5db65cc0cbf92c0f" /* ** CAPI3REF: Run-Time Library Version Numbers
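Since this commit moves the vendored amalgamation from 3.40.0 to 3.44.2, a quick runtime check that the header and the linked library agree can catch the case where an older system libsqlite3 is picked up instead of the vendored copy. A minimal sketch:

```c
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  printf("compiled against: %s (%d)\n", SQLITE_VERSION, SQLITE_VERSION_NUMBER);
  printf("running against:  %s (%d)\n", sqlite3_libversion(),
         sqlite3_libversion_number());
  printf("source id:        %s\n", sqlite3_sourceid());
  /* A mismatch usually means a different SQLite library was linked in
  ** than the one this header was taken from. */
  return sqlite3_libversion_number()==SQLITE_VERSION_NUMBER ? 0 : 1;
}
```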

@@ -528,6 +528,7 @@ #define SQLITE_IOERR_COMMIT_ATOMIC (SQLITE_IOERR | (30<<8))

#define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8)) #define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8)) #define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8)) +#define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8)) #define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) #define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8)) #define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))

@@ -563,6 +564,7 @@ #define SQLITE_CONSTRAINT_PINNED (SQLITE_CONSTRAINT |(11<<8))

#define SQLITE_CONSTRAINT_DATATYPE (SQLITE_CONSTRAINT |(12<<8)) #define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) #define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) +#define SQLITE_NOTICE_RBU (SQLITE_NOTICE | (3<<8)) #define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) #define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) #define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8))

@@ -1175,7 +1177,6 @@ ** The [SQLITE_FCNTL_CKPT_DONE] opcode is invoked from within a checkpoint

** in wal mode after the client has finished copying pages from the wal ** file to the database file, but before the *-shm file is updated to ** record the fact that the pages have been checkpointed. -** </ul> ** ** <li>[[SQLITE_FCNTL_EXTERNAL_READER]] ** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect

@@ -1188,10 +1189,16 @@ ** currently has an SQL transaction open on the database. It is set to 0 if

** the database is not a wal-mode db, or if there is no such connection in any ** other process. This opcode cannot be used to detect transactions opened ** by clients within the current process, only within other processes. -** </ul> ** ** <li>[[SQLITE_FCNTL_CKSM_FILE]] -** Used by the cksmvfs VFS module only. +** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the +** [checksum VFS shim] only. +** +** <li>[[SQLITE_FCNTL_RESET_CACHE]] +** If there is currently no transaction open on the database, and the +** database is not a temp db, then the [SQLITE_FCNTL_RESET_CACHE] file-control +** purges the contents of the in-memory page cache. If there is an open +** transaction, or if the db is a temp-db, this opcode is a no-op, not an error. ** </ul> */ #define SQLITE_FCNTL_LOCKSTATE 1

@@ -1234,6 +1241,7 @@ #define SQLITE_FCNTL_RESERVE_BYTES 38

#define SQLITE_FCNTL_CKPT_START 39 #define SQLITE_FCNTL_EXTERNAL_READER 40 #define SQLITE_FCNTL_CKSM_FILE 41 +#define SQLITE_FCNTL_RESET_CACHE 42 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
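A small sketch of the new SQLITE_FCNTL_RESET_CACHE file control documented above, which purges the in-memory page cache when no transaction is open. The file name is illustrative, and passing a NULL auxiliary argument is an assumption based on the opcode taking no parameters:

```c
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db = 0;
  if( sqlite3_open("test.db", &db)!=SQLITE_OK ) return 1;  /* path is illustrative */
  int rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_RESET_CACHE, 0);
  /* SQLITE_OK means the cache was purged, or the call was a harmless
  ** no-op because a transaction was open or the db is a temp db. */
  printf("reset-cache rc=%d\n", rc);
  sqlite3_close(db);
  return 0;
}
```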

@@ -1648,19 +1656,22 @@ ** <b>The sqlite3_config() interface is not threadsafe. The application

** must ensure that no other SQLite interfaces are invoked by other ** threads while sqlite3_config() is running.</b> ** -** The sqlite3_config() interface +** The first argument to sqlite3_config() is an integer +** [configuration option] that determines +** what property of SQLite is to be configured. Subsequent arguments +** vary depending on the [configuration option] +** in the first argument. +** +** For most configuration options, the sqlite3_config() interface ** may only be invoked prior to library initialization using ** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. +** The exceptional configuration options that may be invoked at any time +** are called "anytime configuration options". ** ^If sqlite3_config() is called after [sqlite3_initialize()] and before -** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. +** [sqlite3_shutdown()] with a first argument that is not an anytime +** configuration option, then the sqlite3_config() call will return SQLITE_MISUSE. ** Note, however, that ^sqlite3_config() can be called as part of the ** implementation of an application-defined [sqlite3_os_init()]. -** -** The first argument to sqlite3_config() is an integer -** [configuration option] that determines -** what property of SQLite is to be configured. Subsequent arguments -** vary depending on the [configuration option] -** in the first argument. ** ** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. ** ^If the option is unknown or SQLite is unable to set the option

@@ -1769,6 +1780,23 @@ **

** These constants are the available integer configuration options that ** can be passed as the first argument to the [sqlite3_config()] interface. ** +** Most of the configuration options for sqlite3_config() +** will only work if invoked prior to [sqlite3_initialize()] or after +** [sqlite3_shutdown()]. The few exceptions to this rule are called +** "anytime configuration options". +** ^Calling [sqlite3_config()] with a first argument that is not an +** anytime configuration option in between calls to [sqlite3_initialize()] and +** [sqlite3_shutdown()] is a no-op that returns SQLITE_MISUSE. +** +** The set of anytime configuration options can change (by insertions +** and/or deletions) from one release of SQLite to the next. +** As of SQLite version 3.42.0, the complete set of anytime configuration +** options is: +** <ul> +** <li> SQLITE_CONFIG_LOG +** <li> SQLITE_CONFIG_PCACHE_HDRSZ +** </ul> +** ** New configuration options may be added in future releases of SQLite. ** Existing configuration options might be discontinued. Applications ** should check the return code from [sqlite3_config()] to make sure that
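The paragraph above introduces "anytime configuration options", currently SQLITE_CONFIG_LOG and SQLITE_CONFIG_PCACHE_HDRSZ, which may be set even between sqlite3_initialize() and sqlite3_shutdown(). A minimal sketch of using both after initialization:

```c
#include <stdio.h>
#include "sqlite3.h"

static void log_cb(void *pArg, int iErrCode, const char *zMsg){
  (void)pArg;
  fprintf(stderr, "sqlite(%d): %s\n", iErrCode, zMsg);
}

int main(void){
  sqlite3_initialize();
  /* Allowed after initialization because SQLITE_CONFIG_LOG is an anytime
  ** option; most other options would return SQLITE_MISUSE here. */
  int rc = sqlite3_config(SQLITE_CONFIG_LOG, log_cb, (void*)0);
  printf("config log rc=%d\n", rc);

  int szHdr = 0;
  rc = sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &szHdr);  /* also anytime */
  printf("pcache header size: %d (rc=%d)\n", szHdr, rc);
  return 0;
}
```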

@@ -2099,7 +2127,7 @@ ** than the configured sorter-reference size threshold - then a reference

** is stored in each sorted record and the required column values loaded ** from the database as records are returned in sorted order. The default ** value for this option is to never use this optimization. Specifying a -** negative value for this option restores the default behaviour. +** negative value for this option restores the default behavior. ** This option is only available if SQLite is compiled with the ** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option. **

@@ -2115,28 +2143,28 @@ ** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that

** compile-time option is not set, then the default maximum is 1073741824. ** </dl> */ -#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ -#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ -#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ -#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ -#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ -#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ -#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ -#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ -#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ -/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ -#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ -#define SQLITE_CONFIG_PCACHE 14 /* no-op */ -#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ -#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ -#define SQLITE_CONFIG_URI 17 /* int */ -#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ +#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ +#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ +#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ +#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ +#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ +#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ +#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ +#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ +#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ +#define SQLITE_CONFIG_PCACHE 14 /* no-op */ +#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ +#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ +#define SQLITE_CONFIG_URI 17 /* int */ +#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ #define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ -#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ -#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ +#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ +#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ #define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ #define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */ #define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */

@@ -2177,7 +2205,7 @@ ** rounded down to the next smaller multiple of 8. ^(The lookaside memory

** configuration for a database connection can only be changed when that ** connection is not currently using lookaside memory, or in other words ** when the "current value" returned by -** [sqlite3_db_status](D,[SQLITE_CONFIG_LOOKASIDE],...) is zero. +** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero. ** Any attempt to change the lookaside memory configuration when lookaside ** memory is in use leaves the configuration unchanged and returns ** [SQLITE_BUSY].)^</dd>

@@ -2274,7 +2302,7 @@ ** <dd> Usually, when a database in wal mode is closed or detached from a

** database handle, SQLite checks if this will mean that there are now no ** connections at all to the database. If so, it performs a checkpoint ** operation before closing the connection. This option may be used to -** override this behaviour. The first parameter passed to this operation +** override this behavior. The first parameter passed to this operation ** is an integer - positive to disable checkpoints-on-close, or zero (the ** default) to enable them, and negative to leave the setting unchanged. ** The second parameter is a pointer to an integer

@@ -2327,8 +2355,12 @@ ** <li> [sqlite3_exec](db, "[VACUUM]", 0, 0, 0);

** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0); ** </ol> ** Because resetting a database is destructive and irreversible, the -** process requires the use of this obscure API and multiple steps to help -** ensure that it does not happen by accident. +** process requires the use of this obscure API and multiple steps to +** help ensure that it does not happen by accident. Because this +** feature must be capable of resetting corrupt databases, and +** shutting down virtual tables may require access to that corrupt +** storage, the library must abandon any installed virtual tables +** without calling their xDestroy() methods. ** ** [[SQLITE_DBCONFIG_DEFENSIVE]] <dt>SQLITE_DBCONFIG_DEFENSIVE</dt> ** <dd>The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the

@@ -2367,7 +2399,7 @@ ** using the [PRAGMA legacy_alter_table] statement.

** </dd> ** ** [[SQLITE_DBCONFIG_DQS_DML]] -** <dt>SQLITE_DBCONFIG_DQS_DML</td> +** <dt>SQLITE_DBCONFIG_DQS_DML</dt> ** <dd>The SQLITE_DBCONFIG_DQS_DML option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DML statements ** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The

@@ -2376,7 +2408,7 @@ ** compile-time option.

** </dd> ** ** [[SQLITE_DBCONFIG_DQS_DDL]] -** <dt>SQLITE_DBCONFIG_DQS_DDL</td> +** <dt>SQLITE_DBCONFIG_DQS_DDL</dt> ** <dd>The SQLITE_DBCONFIG_DQS option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DDL statements, ** such as CREATE TABLE and CREATE INDEX. The

@@ -2385,7 +2417,7 @@ ** compile-time option.

** </dd> ** ** [[SQLITE_DBCONFIG_TRUSTED_SCHEMA]] -** <dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA</td> +** <dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA</dt> ** <dd>The SQLITE_DBCONFIG_TRUSTED_SCHEMA option tells SQLite to ** assume that database schemas are untainted by malicious content. ** When the SQLITE_DBCONFIG_TRUSTED_SCHEMA option is disabled, SQLite

@@ -2405,7 +2437,7 @@ ** can also be controlled using the [PRAGMA trusted_schema] statement.

** </dd> ** ** [[SQLITE_DBCONFIG_LEGACY_FILE_FORMAT]] -** <dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT</td> +** <dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT</dt> ** <dd>The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates ** the legacy file format flag. When activated, this flag causes all newly ** created database file to have a schema format version number (the 4-byte

@@ -2414,7 +2446,7 @@ ** means that the resulting database file will be readable and writable by

** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting, ** newly created databases are generally not understandable by SQLite versions ** prior to 3.3.0 ([dateof:3.3.0]). As these words are written, there -** is now scarcely any need to generated database files that are compatible +** is now scarcely any need to generate database files that are compatible ** all the way back to version 3.0.0, and so this setting is of little ** practical use, but is provided so that SQLite can continue to claim the ** ability to generate new database files that are compatible with version

@@ -2423,8 +2455,40 @@ ** <p>Note that when the SQLITE_DBCONFIG_LEGACY_FILE_FORMAT setting is on,

** the [VACUUM] command will fail with an obscure error when attempting to ** process a table with generated columns and a descending index. This is ** not considered a bug since SQLite versions 3.3.0 and earlier do not support -** either generated columns or decending indexes. +** either generated columns or descending indexes. +** </dd> +** +** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]] +** <dt>SQLITE_DBCONFIG_STMT_SCANSTATUS</dt> +** <dd>The SQLITE_DBCONFIG_STMT_SCANSTATUS option is only useful in +** SQLITE_ENABLE_STMT_SCANSTATUS builds. In this case, it sets or clears +** a flag that enables collection of the sqlite3_stmt_scanstatus_v2() +** statistics. For statistics to be collected, the flag must be set on +** the database handle both when the SQL statement is prepared and when it +** is stepped. The flag is set (collection of statistics is enabled) +** by default. This option takes two arguments: an integer and a pointer to +** an integer.. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the statement scanstatus option. If the second argument +** is not NULL, then the value of the statement scanstatus setting after +** processing the first argument is written into the integer that the second +** argument points to. +** </dd> +** +** [[SQLITE_DBCONFIG_REVERSE_SCANORDER]] +** <dt>SQLITE_DBCONFIG_REVERSE_SCANORDER</dt> +** <dd>The SQLITE_DBCONFIG_REVERSE_SCANORDER option changes the default order +** in which tables and indexes are scanned so that the scans start at the end +** and work toward the beginning rather than starting at the beginning and +** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the +** same as setting [PRAGMA reverse_unordered_selects]. This option takes +** two arguments which are an integer and a pointer to an integer. The first +** argument is 1, 0, or -1 to enable, disable, or leave unchanged the +** reverse scan order flag, respectively. If the second argument is not NULL, +** then 0 or 1 is written into the integer that the second argument points to +** depending on if the reverse scan order flag is set after processing the +** first argument. ** </dd> +** ** </dl> */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */

@@ -2445,7 +2509,9 @@ #define SQLITE_DBCONFIG_DQS_DDL 1014 /* int int* */

#define SQLITE_DBCONFIG_ENABLE_VIEW 1015 /* int int* */ #define SQLITE_DBCONFIG_LEGACY_FILE_FORMAT 1016 /* int int* */ #define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */ -#define SQLITE_DBCONFIG_MAX 1017 /* Largest DBCONFIG */ +#define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */ +#define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */ +#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */ /* ** CAPI3REF: Enable Or Disable Extended Result Codes
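A sketch of the two new sqlite3_db_config() verbs defined above. Both take an int and an int pointer: 1/0/-1 to enable, disable, or leave unchanged, plus an optional pointer that receives the resulting setting.

```c
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db = 0;
  int iNow = -2;
  sqlite3_open(":memory:", &db);

  /* Same effect as PRAGMA reverse_unordered_selects, but per connection
  ** and queryable through the second argument. */
  sqlite3_db_config(db, SQLITE_DBCONFIG_REVERSE_SCANORDER, 1, &iNow);
  printf("reverse scan order now: %d\n", iNow);

  /* Only meaningful in SQLITE_ENABLE_STMT_SCANSTATUS builds; -1 leaves
  ** the flag unchanged and merely reports its current value. */
  sqlite3_db_config(db, SQLITE_DBCONFIG_STMT_SCANSTATUS, -1, &iNow);
  printf("stmt scanstatus collection: %d\n", iNow);

  sqlite3_close(db);
  return 0;
}
```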

@@ -2667,8 +2733,13 @@ ** not effected by the sqlite3_interrupt().

** ^A call to sqlite3_interrupt(D) that occurs when there are no running ** SQL statements is a no-op and has no effect on SQL statements ** that are started after the sqlite3_interrupt() call returns. +** +** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether +** or not an interrupt is currently in effect for [database connection] D. +** It returns 1 if an interrupt is currently in effect, or 0 otherwise. */ SQLITE_API void sqlite3_interrupt(sqlite3*); +SQLITE_API int sqlite3_is_interrupted(sqlite3*); /* ** CAPI3REF: Determine If An SQL Statement Is Complete
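The new sqlite3_is_interrupted() declared above lets long-running extension code notice an sqlite3_interrupt() issued from another thread. A minimal sketch of the polling pattern; do_expensive_work() is a hypothetical helper, not part of the SQLite API:

```c
#include <stdio.h>
#include "sqlite3.h"

/* Hypothetical helper that a custom SQL function or virtual table might
** call between chunks of expensive work. */
static int do_expensive_work(sqlite3 *db){
  for(int chunk=0; chunk<1000; chunk++){
    if( sqlite3_is_interrupted(db) ){
      /* Another thread called sqlite3_interrupt(db); stop early. */
      return SQLITE_INTERRUPT;
    }
    /* ... process one chunk ... */
  }
  return SQLITE_OK;
}

int main(void){
  sqlite3 *db = 0;
  sqlite3_open(":memory:", &db);
  printf("work rc=%d interrupted=%d\n", do_expensive_work(db),
         sqlite3_is_interrupted(db));
  sqlite3_close(db);
  return 0;
}
```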

@@ -3286,8 +3357,8 @@ ** [[SQLITE_TRACE_PROFILE]] <dt>SQLITE_TRACE_PROFILE</dt>

** <dd>^An SQLITE_TRACE_PROFILE callback provides approximately the same ** information as is provided by the [sqlite3_profile()] callback. ** ^The P argument is a pointer to the [prepared statement] and the -** X argument points to a 64-bit integer which is the estimated of -** the number of nanosecond that the prepared statement took to run. +** X argument points to a 64-bit integer which is approximately +** the number of nanoseconds that the prepared statement took to run. ** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes. ** ** [[SQLITE_TRACE_ROW]] <dt>SQLITE_TRACE_ROW</dt>

@@ -3319,8 +3390,10 @@ ** NULL or if the M mask is zero, then tracing is disabled. The

** M argument should be the bitwise OR-ed combination of ** zero or more [SQLITE_TRACE] constants. ** -** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides -** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2(). +** ^Each call to either sqlite3_trace(D,X,P) or sqlite3_trace_v2(D,M,X,P) +** overrides (cancels) all prior calls to sqlite3_trace(D,X,P) or +** sqlite3_trace_v2(D,M,X,P) for the [database connection] D. Each +** database connection may have at most one trace callback. ** ** ^The X callback is invoked whenever any of the events identified by ** mask M occur. ^The integer return value from the callback is currently

@@ -3350,7 +3423,7 @@ ** METHOD: sqlite3

** ** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback ** function X to be invoked periodically during long running calls to -** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for +** [sqlite3_step()] and [sqlite3_prepare()] and similar for ** database connection D. An example use for this ** interface is to keep a GUI updated during a large query. **

@@ -3375,6 +3448,13 @@ ** the database connection that invoked the progress handler.

** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their ** database connections for the meaning of "modify" in this paragraph. ** +** The progress handler callback would originally only be invoked from the +** bytecode engine. It still might be invoked during [sqlite3_prepare()] +** and similar because those routines might force a reparse of the schema +** which involves running the bytecode engine. However, beginning with +** SQLite version 3.41.0, the progress handler callback might also be +** invoked directly from [sqlite3_prepare()] while analyzing and generating +** code for complex queries. */ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);

@@ -3411,13 +3491,18 @@ ** three flag combinations:)^

** ** <dl> ** ^(<dt>[SQLITE_OPEN_READONLY]</dt> -** <dd>The database is opened in read-only mode. If the database does not -** already exist, an error is returned.</dd>)^ +** <dd>The database is opened in read-only mode. If the database does +** not already exist, an error is returned.</dd>)^ ** ** ^(<dt>[SQLITE_OPEN_READWRITE]</dt> -** <dd>The database is opened for reading and writing if possible, or reading -** only if the file is write protected by the operating system. In either -** case the database must already exist, otherwise an error is returned.</dd>)^ +** <dd>The database is opened for reading and writing if possible, or +** reading only if the file is write protected by the operating +** system. In either case the database must already exist, otherwise +** an error is returned. For historical reasons, if opening in +** read-write mode fails due to OS-level permissions, an attempt is +** made to open it in read-only mode. [sqlite3_db_readonly()] can be +** used to determine whether the database is actually +** read-write.</dd>)^ ** ** ^(<dt>[SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]</dt> ** <dd>The database is opened for reading and writing, and is created if
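The revised SQLITE_OPEN_READWRITE wording above spells out the historical fallback to read-only when OS permissions deny write access, and points at sqlite3_db_readonly() to find out what actually happened. A minimal sketch; the file name is illustrative:

```c
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db = 0;
  int rc = sqlite3_open_v2("data.db", &db, SQLITE_OPEN_READWRITE, 0);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
    sqlite3_close(db);
    return 1;
  }
  /* 1 = opened read-only (e.g. the permission fallback), 0 = read/write. */
  int ro = sqlite3_db_readonly(db, "main");
  printf("main is %s\n", ro==1 ? "read-only" : "read-write");
  sqlite3_close(db);
  return 0;
}
```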

@@ -3677,7 +3762,7 @@ ** The first parameter to these interfaces (hereafter referred to

** as F) must be one of: ** <ul> ** <li> A database filename pointer created by the SQLite core and -** passed into the xOpen() method of a VFS implemention, or +** passed into the xOpen() method of a VFS implementation, or ** <li> A filename obtained from [sqlite3_db_filename()], or ** <li> A new filename constructed using [sqlite3_create_filename()]. ** </ul>

@@ -3790,7 +3875,7 @@

/* ** CAPI3REF: Create and Destroy VFS Filenames ** -** These interfces are provided for use by [VFS shim] implementations and +** These interfaces are provided for use by [VFS shim] implementations and ** are not useful outside of that context. ** ** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of

@@ -3870,6 +3955,7 @@ ** </ul>

** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language ** text that describes the error, as either UTF-8 or UTF-16 respectively. +** (See how SQLite handles [invalid UTF] for exceptions to this rule.) ** ^(Memory to hold the error message string is managed internally. ** The application does not need to worry about freeing the result. ** However, the error string might be overwritten or deallocated by

@@ -4338,6 +4424,41 @@ */

SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt); /* +** CAPI3REF: Change The EXPLAIN Setting For A Prepared Statement +** METHOD: sqlite3_stmt +** +** The sqlite3_stmt_explain(S,E) interface changes the EXPLAIN +** setting for [prepared statement] S. If E is zero, then S becomes +** a normal prepared statement. If E is 1, then S behaves as if +** its SQL text began with "[EXPLAIN]". If E is 2, then S behaves as if +** its SQL text began with "[EXPLAIN QUERY PLAN]". +** +** Calling sqlite3_stmt_explain(S,E) might cause S to be reprepared. +** SQLite tries to avoid a reprepare, but a reprepare might be necessary +** on the first transition into EXPLAIN or EXPLAIN QUERY PLAN mode. +** +** Because of the potential need to reprepare, a call to +** sqlite3_stmt_explain(S,E) will fail with SQLITE_ERROR if S cannot be +** reprepared because it was created using [sqlite3_prepare()] instead of +** the newer [sqlite3_prepare_v2()] or [sqlite3_prepare_v3()] interfaces and +** hence has no saved SQL text with which to reprepare. +** +** Changing the explain setting for a prepared statement does not change +** the original SQL text for the statement. Hence, if the SQL text originally +** began with EXPLAIN or EXPLAIN QUERY PLAN, but sqlite3_stmt_explain(S,0) +** is called to convert the statement into an ordinary statement, the EXPLAIN +** or EXPLAIN QUERY PLAN keywords will still appear in the sqlite3_sql(S) +** output, even though the statement now acts like a normal SQL statement. +** +** This routine returns SQLITE_OK if the explain mode is successfully +** changed, or an error code if the explain mode could not be changed. +** The explain mode cannot be changed while a statement is active. +** Hence, it is good practice to call [sqlite3_reset(S)] +** immediately prior to calling sqlite3_stmt_explain(S,E). +*/ +SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode); + +/* ** CAPI3REF: Determine If A Prepared Statement Has Been Reset ** METHOD: sqlite3_stmt **
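A sketch of the sqlite3_stmt_explain() interface added above: the same prepared statement is run once in EXPLAIN QUERY PLAN mode and then as a normal query. The statement must come from sqlite3_prepare_v2()/v3() so it can be reprepared; the schema and query are illustrative.

```c
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db = 0;
  sqlite3_stmt *p = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a INTEGER PRIMARY KEY, b TEXT);", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT b FROM t WHERE a=?1", -1, &p, 0);

  /* Mode 2 = behave as if the SQL began with EXPLAIN QUERY PLAN. */
  if( sqlite3_stmt_explain(p, 2)==SQLITE_OK ){
    while( sqlite3_step(p)==SQLITE_ROW ){
      /* Column 3 of EXPLAIN QUERY PLAN output is the detail text. */
      printf("plan: %s\n", (const char*)sqlite3_column_text(p, 3));
    }
    sqlite3_reset(p);
  }

  /* Back to an ordinary statement (mode 0) and run it for real. */
  sqlite3_stmt_explain(p, 0);
  sqlite3_bind_int(p, 1, 42);
  while( sqlite3_step(p)==SQLITE_ROW ){ /* no rows in this empty table */ }

  sqlite3_finalize(p);
  sqlite3_close(db);
  return 0;
}
```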

@@ -4500,7 +4621,7 @@ ** ^ (1) A destructor to dispose of the BLOB or string after SQLite has finished

** with it may be passed. ^It is called to dispose of the BLOB or string even ** if the call to the bind API fails, except the destructor is not called if ** the third parameter is a NULL pointer or the fourth parameter is negative. -** ^ (2) The special constant, [SQLITE_STATIC], may be passsed to indicate that +** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that ** the application remains responsible for disposing of the object. ^In this ** case, the object and the provided pointer to it must remain valid until ** either the prepared statement is finalized or the same SQL parameter is

@@ -5179,19 +5300,32 @@ **

** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S ** back to the beginning of its program. ** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], -** or if [sqlite3_step(S)] has never before been called on S, -** then [sqlite3_reset(S)] returns [SQLITE_OK]. +** ^The return code from [sqlite3_reset(S)] indicates whether or not +** the previous evaluation of prepared statement S completed successfully. +** ^If [sqlite3_step(S)] has never before been called on S or if +** [sqlite3_step(S)] has not been called since the previous call +** to [sqlite3_reset(S)], then [sqlite3_reset(S)] will return +** [SQLITE_OK]. ** ** ^If the most recent call to [sqlite3_step(S)] for the ** [prepared statement] S indicated an error, then ** [sqlite3_reset(S)] returns an appropriate [error code]. +** ^The [sqlite3_reset(S)] interface might also return an [error code] +** if there were no prior errors but the process of resetting +** the prepared statement caused a new error. ^For example, if an +** [INSERT] statement with a [RETURNING] clause is only stepped one time, +** that one call to [sqlite3_step(S)] might return SQLITE_ROW but +** the overall statement might still fail and the [sqlite3_reset(S)] call +** might return SQLITE_BUSY if locking constraints prevent the +** database change from committing. Therefore, it is important that +** applications check the return code from [sqlite3_reset(S)] even if +** no prior call to [sqlite3_step(S)] indicated a problem. ** ** ^The [sqlite3_reset(S)] interface does not change the values ** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. */ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); + /* ** CAPI3REF: Create Or Redefine SQL Functions
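The sharpened sqlite3_reset() contract above matters most for statements with a RETURNING clause, where sqlite3_step() can report a row before the statement as a whole has committed. A minimal sketch of checking the reset return code:

```c
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db = 0;
  sqlite3_stmt *p = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(id INTEGER PRIMARY KEY, v TEXT);", 0, 0, 0);
  sqlite3_prepare_v2(db, "INSERT INTO t(v) VALUES('x') RETURNING id", -1, &p, 0);

  if( sqlite3_step(p)==SQLITE_ROW ){
    printf("new id = %lld\n", (long long)sqlite3_column_int64(p, 0));
  }
  /* Do not assume success just because SQLITE_ROW was seen: the reset
  ** completes the statement and may still report an error such as
  ** SQLITE_BUSY. */
  int rc = sqlite3_reset(p);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "insert failed on reset: %s\n", sqlite3_errmsg(db));
  }
  sqlite3_finalize(p);
  sqlite3_close(db);
  return 0;
}
```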

@@ -5398,10 +5532,21 @@ ** The SQLITE_DIRECTONLY flag means that the function may only be invoked

** from top-level SQL, and cannot be used in VIEWs or TRIGGERs nor in ** schema structures such as [CHECK constraints], [DEFAULT clauses], ** [expression indexes], [partial indexes], or [generated columns]. -** The SQLITE_DIRECTONLY flags is a security feature which is recommended -** for all [application-defined SQL functions], and especially for functions -** that have side-effects or that could potentially leak sensitive -** information. +** <p> +** The SQLITE_DIRECTONLY flag is recommended for any +** [application-defined SQL function] +** that has side-effects or that could potentially leak sensitive information. +** This will prevent attacks in which an application is tricked +** into using a database file that has had its schema surreptitiously +** modified to invoke the application-defined function in ways that are +** harmful. +** <p> +** Some people say it is good practice to set SQLITE_DIRECTONLY on all +** [application-defined SQL functions], regardless of whether or not they +** are security sensitive, as doing so prevents those functions from being used +** inside of the database schema, and thus ensures that the database +** can be inspected and modified using generic tools (such as the [CLI]) +** that do not have access to the application-defined functions. ** </dd> ** ** [[SQLITE_INNOCUOUS]] <dt>SQLITE_INNOCUOUS</dt><dd>

@@ -5428,13 +5573,27 @@ ** security-adverse side-effects and information-leaks.

** </dd> ** ** [[SQLITE_SUBTYPE]] <dt>SQLITE_SUBTYPE</dt><dd> -** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call +** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call ** [sqlite3_value_subtype()] to inspect the sub-types of its arguments. -** Specifying this flag makes no difference for scalar or aggregate user -** functions. However, if it is not specified for a user-defined window -** function, then any sub-types belonging to arguments passed to the window -** function may be discarded before the window function is called (i.e. -** sqlite3_value_subtype() will always return 0). +** This flag instructs SQLite to omit some corner-case optimizations that +** might disrupt the operation of the [sqlite3_value_subtype()] function, +** causing it to return zero rather than the correct subtype(). +** SQL functions that invokes [sqlite3_value_subtype()] should have this +** property. If the SQLITE_SUBTYPE property is omitted, then the return +** value from [sqlite3_value_subtype()] might sometimes be zero even though +** a non-zero subtype was specified by the function argument expression. +** +** [[SQLITE_RESULT_SUBTYPE]] <dt>SQLITE_RESULT_SUBTYPE</dt><dd> +** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call +** [sqlite3_result_subtype()] to cause a sub-type to be associated with its +** result. +** Every function that invokes [sqlite3_result_subtype()] should have this +** property. If it does not, then the call to [sqlite3_result_subtype()] +** might become a no-op if the function is used as term in an +** [expression index]. On the other hand, SQL functions that never invoke +** [sqlite3_result_subtype()] should avoid setting this property, as the +** purpose of this property is to disable certain optimizations that are +** incompatible with subtypes. ** </dd> ** </dl> */

@@ -5442,6 +5601,7 @@ #define SQLITE_DETERMINISTIC 0x000000800

#define SQLITE_DIRECTONLY 0x000080000 #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 +#define SQLITE_RESULT_SUBTYPE 0x001000000 /* ** CAPI3REF: Deprecated Functions
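A sketch of registering an application-defined function with the flags discussed above, mirroring how this diff adds SQLITE_DETERMINISTIC and SQLITE_INNOCUOUS to the fts5_source_id() registration. The function itself is illustrative; the point is the flag argument to sqlite3_create_function().

```c
#include "sqlite3.h"

/* Illustrative scalar function; imagine it exposes something sensitive. */
static void app_token(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
  (void)nArg; (void)apArg;
  sqlite3_result_text(ctx, "secret-token", -1, SQLITE_TRANSIENT);
}

int register_app_token(sqlite3 *db){
  /* SQLITE_DIRECTONLY keeps the function out of views, triggers and the
  ** schema.  A function that reads argument subtypes would also pass
  ** SQLITE_SUBTYPE, and one that calls sqlite3_result_subtype() would
  ** pass the new SQLITE_RESULT_SUBTYPE flag. */
  return sqlite3_create_function(db, "app_token", 0,
      SQLITE_UTF8 | SQLITE_DETERMINISTIC | SQLITE_DIRECTONLY,
      0, app_token, 0, 0);
}

int main(void){
  sqlite3 *db = 0;
  sqlite3_open(":memory:", &db);
  int rc = register_app_token(db);
  sqlite3_close(db);
  return rc==SQLITE_OK ? 0 : 1;
}
```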

@@ -5542,16 +5702,6 @@ ** words, if the value is a string that looks like a number)

** then the conversion is performed. Otherwise no conversion occurs. ** The [SQLITE_INTEGER | datatype] after conversion is returned.)^ ** -** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8], -** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current encoding -** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X) -** returns something other than SQLITE_TEXT, then the return value from -** sqlite3_value_encoding(X) is meaningless. ^Calls to -** sqlite3_value_text(X), sqlite3_value_text16(X), sqlite3_value_text16be(X), -** sqlite3_value_text16le(X), sqlite3_value_bytes(X), or -** sqlite3_value_bytes16(X) might change the encoding of the value X and -** thus change the return from subsequent calls to sqlite3_value_encoding(X). -** ** ^Within the [xUpdate] method of a [virtual table], the ** sqlite3_value_nochange(X) interface returns true if and only if ** the column corresponding to X is unchanged by the UPDATE operation

@@ -5616,6 +5766,27 @@ SQLITE_API int sqlite3_value_type(sqlite3_value*);

SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*); SQLITE_API int sqlite3_value_nochange(sqlite3_value*); SQLITE_API int sqlite3_value_frombind(sqlite3_value*); + +/* +** CAPI3REF: Report the internal text encoding state of an sqlite3_value object +** METHOD: sqlite3_value +** +** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8], +** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current text encoding +** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X) +** returns something other than SQLITE_TEXT, then the return value from +** sqlite3_value_encoding(X) is meaningless. ^Calls to +** [sqlite3_value_text(X)], [sqlite3_value_text16(X)], [sqlite3_value_text16be(X)], +** [sqlite3_value_text16le(X)], [sqlite3_value_bytes(X)], or +** [sqlite3_value_bytes16(X)] might change the encoding of the value X and +** thus change the return from subsequent calls to sqlite3_value_encoding(X). +** +** This routine is intended for used by applications that test and validate +** the SQLite implementation. This routine is inquiring about the opaque +** internal state of an [sqlite3_value] object. Ordinary applications should +** not need to know what the internal state of an sqlite3_value object is and +** hence should not need to use this interface. +*/ SQLITE_API int sqlite3_value_encoding(sqlite3_value*); /*

@@ -5627,6 +5798,12 @@ ** an [application-defined SQL function] argument V. The subtype

** information can be used to pass a limited amount of context from ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. +** +** Every [application-defined SQL function] that invoke this interface +** should include the [SQLITE_SUBTYPE] property in the text +** encoding argument when the function is [sqlite3_create_function|registered]. +** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() +** might return zero instead of the upstream subtype in some corner cases. */ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);

@@ -5725,48 +5902,56 @@ ** CAPI3REF: Function Auxiliary Data

** METHOD: sqlite3_context ** ** These functions may be used by (non-aggregate) SQL functions to -** associate metadata with argument values. If the same value is passed to -** multiple invocations of the same SQL function during query execution, under -** some circumstances the associated metadata may be preserved. An example -** of where this might be useful is in a regular-expression matching -** function. The compiled version of the regular expression can be stored as -** metadata associated with the pattern string. +** associate auxiliary data with argument values. If the same argument +** value is passed to multiple invocations of the same SQL function during +** query execution, under some circumstances the associated auxiliary data +** might be preserved. An example of where this might be useful is in a +** regular-expression matching function. The compiled version of the regular +** expression can be stored as auxiliary data associated with the pattern string. ** Then as long as the pattern string remains the same, ** the compiled regular expression can be reused on multiple ** invocations of the same function. ** -** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata +** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the auxiliary data ** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument ** value to the application-defined function. ^N is zero for the left-most -** function argument. ^If there is no metadata +** function argument. ^If there is no auxiliary data ** associated with the function argument, the sqlite3_get_auxdata(C,N) interface ** returns a NULL pointer. ** -** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th -** argument of the application-defined function. ^Subsequent +** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as auxiliary data for the +** N-th argument of the application-defined function. ^Subsequent ** calls to sqlite3_get_auxdata(C,N) return P from the most recent -** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or -** NULL if the metadata has been discarded. +** sqlite3_set_auxdata(C,N,P,X) call if the auxiliary data is still valid or +** NULL if the auxiliary data has been discarded. ** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL, ** SQLite will invoke the destructor function X with parameter P exactly -** once, when the metadata is discarded. -** SQLite is free to discard the metadata at any time, including: <ul> +** once, when the auxiliary data is discarded. +** SQLite is free to discard the auxiliary data at any time, including: <ul> ** <li> ^(when the corresponding function parameter changes)^, or ** <li> ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the ** SQL statement)^, or ** <li> ^(when sqlite3_set_auxdata() is invoked again on the same ** parameter)^, or ** <li> ^(during the original sqlite3_set_auxdata() call when a memory -** allocation error occurs.)^ </ul> +** allocation error occurs.)^ +** <li> ^(during the original sqlite3_set_auxdata() call if the function +** is evaluated during query planning instead of during query execution, +** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ </ul> ** -** Note the last bullet in particular. The destructor X in +** Note the last two bullets in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the ** sqlite3_set_auxdata() interface even returns. 
Hence sqlite3_set_auxdata() ** should be called near the end of the function implementation and the ** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. +** sqlite3_set_auxdata() has been called. Furthermore, a call to +** sqlite3_get_auxdata() that occurs immediately after a corresponding call +** to sqlite3_set_auxdata() might still return NULL if an out-of-memory +** condition occurred during the sqlite3_set_auxdata() call or if the +** function is being evaluated during query planning rather than during +** query execution. ** -** ^(In practice, metadata is preserved between function calls for +** ^(In practice, auxiliary data is preserved between function calls for ** function parameters that are compile-time constants, including literal ** values and [parameters] and expressions composed from the same.)^ **

@@ -5776,10 +5961,67 @@ ** kinds of function caching behavior.

** ** These routines must be called from the same thread in which ** the SQL function is running. +** +** See also: [sqlite3_get_clientdata()] and [sqlite3_set_clientdata()]. */ SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N); SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); +/* +** CAPI3REF: Database Connection Client Data +** METHOD: sqlite3 +** +** These functions are used to associate one or more named pointers +** with a [database connection]. +** A call to sqlite3_set_clientdata(D,N,P,X) causes the pointer P +** to be attached to [database connection] D using name N. Subsequent +** calls to sqlite3_get_clientdata(D,N) will return a copy of pointer P +** or a NULL pointer if there were no prior calls to +** sqlite3_set_clientdata() with the same values of D and N. +** Names are compared using strcmp() and are thus case sensitive. +** +** If P and X are both non-NULL, then the destructor X is invoked with +** argument P on the first of the following occurrences: +** <ul> +** <li> An out-of-memory error occurs during the call to +** sqlite3_set_clientdata() which attempts to register pointer P. +** <li> A subsequent call to sqlite3_set_clientdata(D,N,P,X) is made +** with the same D and N parameters. +** <li> The database connection closes. SQLite does not make any guarantees +** about the order in which destructors are called, only that all +** destructors will be called exactly once at some point during the +** database connection closing process. +** </ul> +** +** SQLite does not do anything with client data other than invoke +** destructors on the client data at the appropriate time. The intended +** use for client data is to provide a mechanism for wrapper libraries +** to store additional information about an SQLite database connection. +** +** There is no limit (other than available memory) on the number of different +** client data pointers (with different names) that can be attached to a +** single database connection. However, the implementation is optimized +** for the case of having only one or two different client data names. +** Applications and wrapper libraries are discouraged from using more than +** one client data name each. +** +** There is no way to enumerate the client data pointers +** associated with a database connection. The N parameter can be thought +** of as a secret key such that only code that knows the secret key is able +** to access the associated data. +** +** Security Warning: These interfaces should not be exposed in scripting +** languages or in other circumstances where it might be possible for an +** an attacker to invoke them. Any agent that can invoke these interfaces +** can probably also take control of the process. +** +** Database connection client data is only available for SQLite +** version 3.44.0 ([dateof:3.44.0]) and later. +** +** See also: [sqlite3_set_auxdata()] and [sqlite3_get_auxdata()]. +*/ +SQLITE_API void *sqlite3_get_clientdata(sqlite3*,const char*); +SQLITE_API int sqlite3_set_clientdata(sqlite3*, const char*, void*, void(*)(void*)); /* ** CAPI3REF: Constants Defining Special Destructor Behavior
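The new per-connection client data API added above gives wrapper libraries a place to hang a context struct without global state. A minimal sketch; the name "myapp-ctx" and the AppCtx struct are illustrative:

```c
#include <stdio.h>
#include <stdlib.h>
#include "sqlite3.h"

typedef struct AppCtx { int nQueries; } AppCtx;

static void app_ctx_free(void *p){ free(p); }

int main(void){
  sqlite3 *db = 0;
  sqlite3_open(":memory:", &db);

  AppCtx *pCtx = calloc(1, sizeof(AppCtx));
  if( pCtx ){
    /* The destructor runs exactly once: on OOM inside the call, when the
    ** name is overwritten, or when the connection closes. */
    sqlite3_set_clientdata(db, "myapp-ctx", pCtx, app_ctx_free);
  }

  AppCtx *pGot = (AppCtx*)sqlite3_get_clientdata(db, "myapp-ctx");
  if( pGot ) pGot->nQueries++;
  printf("queries so far: %d\n", pGot ? pGot->nQueries : -1);

  sqlite3_close(db);  /* frees pCtx via app_ctx_free() */
  return 0;
}
```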

@@ -5981,6 +6223,20 @@ ** of the subtype T are preserved in current versions of SQLite;

** higher order bits are discarded. ** The number of subtype bytes preserved by SQLite might increase ** in future releases of SQLite. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_RESULT_SUBTYPE] property in its +** text encoding argument when the SQL function is +** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE] +** property is omitted from the function that invokes sqlite3_result_subtype(), +** then in some cases the sqlite3_result_subtype() might fail to set +** the result subtype. +** +** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any +** SQL function that invokes the sqlite3_result_subtype() interface +** and that does not have the SQLITE_RESULT_SUBTYPE property will raise +** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1 +** by default. */ SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);

@@ -6152,6 +6408,13 @@ ** method of the default [sqlite3_vfs] object. If the xSleep() method

** of the default VFS is not implemented correctly, or not implemented at ** all, then the behavior of sqlite3_sleep() may deviate from the description ** in the previous paragraphs. +** +** If a negative argument is passed to sqlite3_sleep() the results vary by +** VFS and operating system. Some system treat a negative argument as an +** instruction to sleep forever. Others understand it to mean do not sleep +** at all. ^In SQLite version 3.42.0 and later, a negative +** argument passed into sqlite3_sleep() is changed to zero before it is relayed +** down into the xSleep method of the VFS. */ SQLITE_API int sqlite3_sleep(int);

@@ -6405,7 +6668,7 @@ */

SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); /* -** CAPI3REF: Allowed return values from [sqlite3_txn_state()] +** CAPI3REF: Allowed return values from sqlite3_txn_state() ** KEYWORDS: {transaction state} ** ** These constants define the current transaction state of a database file.

@@ -6537,7 +6800,7 @@ ** <p>^There is only one autovacuum pages callback per database connection.

** ^Each call to the sqlite3_autovacuum_pages() interface overrides all ** previous invocations for that database connection. ^If the callback ** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer, -** then the autovacuum steps callback is cancelled. The return value +** then the autovacuum steps callback is canceled. The return value ** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might ** be some other error code if something goes wrong. The current ** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other

@@ -6997,15 +7260,6 @@ */

SQLITE_API void sqlite3_reset_auto_extension(void); /* -** The interface to the virtual-table mechanism is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* ** Structures used by the virtual table interface */ typedef struct sqlite3_vtab sqlite3_vtab;

@@ -7065,6 +7319,10 @@ int (*xRollbackTo)(sqlite3_vtab *pVTab, int);

/* The methods above are in versions 1 and 2 of the sqlite_module object. ** Those below are for version 3 and greater. */ int (*xShadowName)(const char*); + /* The methods above are in versions 1 through 3 of the sqlite_module object. + ** Those below are for version 4 and greater. */ + int (*xIntegrity)(sqlite3_vtab *pVTab, const char *zSchema, + const char *zTabName, int mFlags, char **pzErr); }; /*

@@ -7123,10 +7381,10 @@ ** the constraint may or may not be checked in byte code. In other words,

** when the omit flag is true there is no guarantee that the constraint will ** not be checked again using byte code.)^ ** -** ^The idxNum and idxPtr values are recorded and passed into the +** ^The idxNum and idxStr values are recorded and passed into the ** [xFilter] method. -** ^[sqlite3_free()] is used to free idxPtr if and only if -** needToFreeIdxPtr is true. +** ^[sqlite3_free()] is used to free idxStr if and only if +** needToFreeIdxStr is true. ** ** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in ** the correct order to satisfy the ORDER BY clause so that no separate

@@ -7246,7 +7504,7 @@ ** The collating sequence to be used for comparison can be found using

** the [sqlite3_vtab_collation()] interface. For most real-world virtual ** tables, the collating sequence of constraints does not matter (for example ** because the constraints are numeric) and so the sqlite3_vtab_collation() -** interface is no commonly needed. +** interface is not commonly needed. */ #define SQLITE_INDEX_CONSTRAINT_EQ 2 #define SQLITE_INDEX_CONSTRAINT_GT 4

@@ -7406,16 +7664,6 @@ */

SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); /* -** The interface to the virtual-table mechanism defined above (back up -** to a comment remarkably similar to this one) is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* ** CAPI3REF: A Handle To An Open BLOB ** KEYWORDS: {BLOB handle} {BLOB handles} **

@@ -7562,7 +7810,7 @@ ** committed. ^If an error occurs while committing the transaction, an error

** code is returned and the transaction rolled back. ** ** Calling this function with an argument that is not a NULL pointer or an -** open blob handle results in undefined behaviour. ^Calling this routine +** open blob handle results in undefined behavior. ^Calling this routine ** with a null pointer (such as would be returned by a failed call to ** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function ** is passed a valid open blob handle, the values returned by the

@@ -7798,9 +8046,9 @@ ** previously entered by the same thread. The behavior

** is undefined if the mutex is not currently entered by the ** calling thread or is not currently allocated. ** -** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or -** sqlite3_mutex_leave() is a NULL pointer, then all three routines -** behave as no-ops. +** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), +** sqlite3_mutex_leave(), or sqlite3_mutex_free() is a NULL pointer, +** then any of the four routines behaves as a no-op. ** ** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. */

@@ -8042,6 +8290,7 @@ #define SQLITE_TESTCTRL_FIRST 5

#define SQLITE_TESTCTRL_PRNG_SAVE 5 #define SQLITE_TESTCTRL_PRNG_RESTORE 6 #define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */ +#define SQLITE_TESTCTRL_FK_NO_ACTION 7 #define SQLITE_TESTCTRL_BITVEC_TEST 8 #define SQLITE_TESTCTRL_FAULT_INSTALL 9 #define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10

@@ -8070,7 +8319,8 @@ #define SQLITE_TESTCTRL_SEEK_COUNT 30

#define SQLITE_TESTCTRL_TRACEFLAGS 31 #define SQLITE_TESTCTRL_TUNE 32 #define SQLITE_TESTCTRL_LOGEST 33 -#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */ +#define SQLITE_TESTCTRL_USELONGDOUBLE 34 +#define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */ /* ** CAPI3REF: SQL Keyword Checking

@@ -9526,7 +9776,7 @@ **

** [[SQLITE_VTAB_DIRECTONLY]]<dt>SQLITE_VTAB_DIRECTONLY</dt> ** <dd>Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** the [xConnect] or [xCreate] methods of a [virtual table] implementation ** prohibits that virtual table from being used from within triggers and ** views. ** </dd>

@@ -9534,18 +9784,28 @@ **

** [[SQLITE_VTAB_INNOCUOUS]]<dt>SQLITE_VTAB_INNOCUOUS</dt> ** <dd>Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** the [xConnect] or [xCreate] methods of a [virtual table] implementation ** identify that virtual table as being safe to use from within triggers ** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the ** virtual table can do no serious harm even if it is controlled by a ** malicious hacker. Developers should avoid setting the SQLITE_VTAB_INNOCUOUS ** flag unless absolutely necessary. ** </dd> +** +** [[SQLITE_VTAB_USES_ALL_SCHEMAS]]<dt>SQLITE_VTAB_USES_ALL_SCHEMAS</dt> +** <dd>Calls of the form +** [sqlite3_vtab_config](db,SQLITE_VTAB_USES_ALL_SCHEMA) from within the +** the [xConnect] or [xCreate] methods of a [virtual table] implementation +** instruct the query planner to begin at least a read transaction on +** all schemas ("main", "temp", and any ATTACH-ed databases) whenever the +** virtual table is used. +** </dd> ** </dl> */ #define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 #define SQLITE_VTAB_INNOCUOUS 2 #define SQLITE_VTAB_DIRECTONLY 3 +#define SQLITE_VTAB_USES_ALL_SCHEMAS 4 /* ** CAPI3REF: Determine The Virtual Table Conflict Policy

@@ -9618,7 +9878,7 @@ ** name of that alternative collating sequence is returned.

** <li><p> Otherwise, "BINARY" is returned. ** </ol> */ -SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_info*,int); +SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); /* ** CAPI3REF: Determine if a virtual table query is DISTINCT

@@ -9706,7 +9966,7 @@ ** "[IN operator|column IN (...)]" is

** communicated to the xBestIndex method as a ** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use ** this constraint, it must set the corresponding -** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under +** aConstraintUsage[].argvIndex to a positive integer. ^(Then, under ** the usual mode of handling IN operators, SQLite generates [bytecode] ** that invokes the [xFilter|xFilter() method] once for each value ** on the right-hand side of the IN operator.)^ Thus the virtual table

@@ -9775,21 +10035,20 @@ ** The result of invoking these interfaces from any other context

** is undefined and probably harmful. ** ** The X parameter in a call to sqlite3_vtab_in_first(X,P) or -** sqlite3_vtab_in_next(X,P) must be one of the parameters to the +** sqlite3_vtab_in_next(X,P) should be one of the parameters to the ** xFilter method which invokes these routines, and specifically ** a parameter that was previously selected for all-at-once IN constraint ** processing use the [sqlite3_vtab_in()] interface in the ** [xBestIndex|xBestIndex method]. ^(If the X parameter is not ** an xFilter argument that was selected for all-at-once IN constraint -** processing, then these routines return [SQLITE_MISUSE])^ or perhaps -** exhibit some other undefined or harmful behavior. +** processing, then these routines return [SQLITE_ERROR].)^ ** ** ^(Use these routines to access all values on the right-hand side ** of the IN constraint using code like the following: ** ** <blockquote><pre> ** &nbsp; for(rc=sqlite3_vtab_in_first(pList, &pVal); -** &nbsp; rc==SQLITE_OK && pVal +** &nbsp; rc==SQLITE_OK && pVal; ** &nbsp; rc=sqlite3_vtab_in_next(pList, &pVal) ** &nbsp; ){ ** &nbsp; // do something with pVal
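A slightly fuller sketch of the loop shown above, placed inside a hypothetical xFilter method (myvtabFilter), assuming xBestIndex previously selected this constraint for all-at-once IN processing via sqlite3_vtab_in() and mapped it to argv[0]; error handling is elided:

    /* Sketch: consume every value on the right-hand side of an IN constraint
    ** that xBestIndex marked for all-at-once processing. */
    static int myvtabFilter(
      sqlite3_vtab_cursor *pCur, int idxNum, const char *idxStr,
      int argc, sqlite3_value **argv
    ){
      sqlite3_value *pVal = 0;
      int rc;
      if( argc<1 ) return SQLITE_OK;
      for(rc=sqlite3_vtab_in_first(argv[0], &pVal);
          rc==SQLITE_OK && pVal;
          rc=sqlite3_vtab_in_next(argv[0], &pVal)
      ){
        /* ... position/filter the cursor using pVal ... */
      }
      return SQLITE_OK;  /* real code would propagate error codes from rc */
    }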

@@ -9887,6 +10146,10 @@ ** When the value returned to V is a string, space to hold that string is

** managed by the prepared statement S and will be automatically freed when ** S is finalized. ** +** Not all values are available for all query elements. When a value is +** not available, the output variable is set to -1 if the value is numeric, +** or to NULL if it is a string (SQLITE_SCANSTAT_NAME). +** ** <dl> ** [[SQLITE_SCANSTAT_NLOOP]] <dt>SQLITE_SCANSTAT_NLOOP</dt> ** <dd>^The [sqlite3_int64] variable pointed to by the V parameter will be

@@ -9914,12 +10177,24 @@ ** <dd>^The "const char *" variable pointed to by the V parameter will be set

** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN] ** description for the X-th loop. ** -** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECT</dt> +** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECTID</dt> ** <dd>^The "int" variable pointed to by the V parameter will be set to the -** "select-id" for the X-th loop. The select-id identifies which query or -** subquery the loop is part of. The main query has a select-id of zero. -** The select-id is the same value as is output in the first column -** of an [EXPLAIN QUERY PLAN] query. +** id for the X-th query plan element. The id value is unique within the +** statement. The select-id is the same value as is output in the first +** column of an [EXPLAIN QUERY PLAN] query. +** +** [[SQLITE_SCANSTAT_PARENTID]] <dt>SQLITE_SCANSTAT_PARENTID</dt> +** <dd>The "int" variable pointed to by the V parameter will be set to the +** the id of the parent of the current query element, if applicable, or +** to zero if the query element has no parent. This is the same value as +** returned in the second column of an [EXPLAIN QUERY PLAN] query. +** +** [[SQLITE_SCANSTAT_NCYCLE]] <dt>SQLITE_SCANSTAT_NCYCLE</dt> +** <dd>The sqlite3_int64 output value is set to the number of cycles, +** according to the processor time-stamp counter, that elapsed while the +** query element was being processed. This value is not available for +** all query elements - if it is unavailable the output variable is +** set to -1. ** </dl> */ #define SQLITE_SCANSTAT_NLOOP 0

@@ -9928,12 +10203,14 @@ #define SQLITE_SCANSTAT_EST 2

#define SQLITE_SCANSTAT_NAME 3 #define SQLITE_SCANSTAT_EXPLAIN 4 #define SQLITE_SCANSTAT_SELECTID 5 +#define SQLITE_SCANSTAT_PARENTID 6 +#define SQLITE_SCANSTAT_NCYCLE 7 /* ** CAPI3REF: Prepared Statement Scan Status ** METHOD: sqlite3_stmt ** -** This interface returns information about the predicted and measured +** These interfaces return information about the predicted and measured ** performance for pStmt. Advanced applications can use this ** interface to compare the predicted and the measured performance and ** issue warnings and/or rerun [ANALYZE] if discrepancies are found.

@@ -9944,19 +10221,25 @@ ** compile-time option.

** ** The "iScanStatusOp" parameter determines which status information to return. ** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior -** of this interface is undefined. -** ^The requested measurement is written into a variable pointed to by -** the "pOut" parameter. -** Parameter "idx" identifies the specific loop to retrieve statistics for. -** Loops are numbered starting from zero. ^If idx is out of range - less than -** zero or greater than or equal to the total number of loops used to implement -** the statement - a non-zero value is returned and the variable that pOut -** points to is unchanged. +** of this interface is undefined. ^The requested measurement is written into +** a variable pointed to by the "pOut" parameter. +** +** The "flags" parameter must be passed a mask of flags. At present only +** one flag is defined - SQLITE_SCANSTAT_COMPLEX. If SQLITE_SCANSTAT_COMPLEX +** is specified, then status information is available for all elements +** of a query plan that are reported by "EXPLAIN QUERY PLAN" output. If +** SQLITE_SCANSTAT_COMPLEX is not specified, then only query plan elements +** that correspond to query loops (the "SCAN..." and "SEARCH..." elements of +** the EXPLAIN QUERY PLAN output) are available. Invoking API +** sqlite3_stmt_scanstatus() is equivalent to calling +** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter. ** -** ^Statistics might not be available for all loops in all statements. ^In cases -** where there exist loops with no available statistics, this function behaves -** as if the loop did not exist - it returns non-zero and leave the variable -** that pOut points to unchanged. +** Parameter "idx" identifies the specific query element to retrieve statistics +** for. Query elements are numbered starting from zero. A value of -1 may be +** to query for statistics regarding the entire query. ^If idx is out of range +** - less than -1 or greater than or equal to the total number of query +** elements used to implement the statement - a non-zero value is returned and +** the variable that pOut points to is unchanged. ** ** See also: [sqlite3_stmt_scanstatus_reset()] */

@@ -9966,6 +10249,19 @@ int idx, /* Index of loop to report on */

int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */ void *pOut /* Result written here */ ); +SQLITE_API int sqlite3_stmt_scanstatus_v2( + sqlite3_stmt *pStmt, /* Prepared statement for which info desired */ + int idx, /* Index of loop to report on */ + int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */ + int flags, /* Mask of flags defined below */ + void *pOut /* Result written here */ +); + +/* +** CAPI3REF: Prepared Statement Scan Status +** KEYWORDS: {scan status flags} +*/ +#define SQLITE_SCANSTAT_COMPLEX 0x0001 /* ** CAPI3REF: Zero Scan-Status Counters
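A minimal sketch of the new interface (the dump_scanstatus helper is hypothetical, and a build with SQLITE_ENABLE_STMT_SCANSTATUS is assumed): iterate the query-plan elements with SQLITE_SCANSTAT_COMPLEX and print the EXPLAIN text, parent id and cycle count for each.

    #include <stdio.h>
    #include <sqlite3.h>

    /* Sketch: walk every query-plan element of pStmt, stopping when the
    ** idx parameter runs out of range (non-zero return). */
    static void dump_scanstatus(sqlite3_stmt *pStmt){
      int idx;
      for(idx=0; ; idx++){
        const char *zExplain = 0;
        int iParent = 0;
        sqlite3_int64 nCycle = -1;
        if( sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_EXPLAIN,
                                       SQLITE_SCANSTAT_COMPLEX, &zExplain) ){
          break;  /* no more query elements */
        }
        sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_PARENTID,
                                   SQLITE_SCANSTAT_COMPLEX, &iParent);
        sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_NCYCLE,
                                   SQLITE_SCANSTAT_COMPLEX, &nCycle);
        printf("%d (parent %d): %s [%lld cycles, -1 if unavailable]\n",
               idx, iParent, zExplain ? zExplain : "-", (long long)nCycle);
      }
    }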

@@ -10056,6 +10352,10 @@ ** or updated. The value of the seventh parameter passed to the callback

** function is not defined for operations on WITHOUT ROWID tables, or for ** DELETE operations on rowid tables. ** +** ^The sqlite3_preupdate_hook(D,C,P) function returns the P argument from +** the previous call on the same [database connection] D, or NULL for +** the first call on D. +** ** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()], ** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces ** provide additional information about a preupdate event. These routines
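A minimal sketch of the newly documented return value (requires a build with SQLITE_ENABLE_PREUPDATE_HOOK; my_preupdate and install_hook are hypothetical names):

    /* Sketch: registering a pre-update hook returns the context pointer of
    ** the previous registration on the same connection, or NULL. */
    static void my_preupdate(void *pCtx, sqlite3 *db, int op, const char *zDb,
                             const char *zTab, sqlite3_int64 iKey1,
                             sqlite3_int64 iKey2){
      /* ... inspect the pending change via sqlite3_preupdate_old()/new() ... */
    }

    static void install_hook(sqlite3 *db, void *pNewCtx){
      void *pOldCtx = sqlite3_preupdate_hook(db, my_preupdate, pNewCtx);
      /* pOldCtx is the P argument of the previous call on db, or NULL if
      ** this is the first registration on this connection. */
      (void)pOldCtx;
    }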

@@ -10095,7 +10395,7 @@ **

** When the [sqlite3_blob_write()] API is used to update a blob column, ** the pre-update hook is invoked with SQLITE_DELETE. This is because the ** in this case the new values are not available. In this case, when a -** callback made with op==SQLITE_DELETE is actuall a write using the +** callback made with op==SQLITE_DELETE is actually a write using the ** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns ** the index of the column being written. In other cases, where the ** pre-update hook is being invoked for some other reason, including a

@@ -10356,6 +10656,13 @@ ** The size of the database is written into *P even if the

** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy ** of the database exists. ** +** After the call, if the SQLITE_SERIALIZE_NOCOPY bit had been set, +** the returned buffer content will remain accessible and unchanged +** until either the next write operation on the connection or when +** the connection is closed, and applications must not modify the +** buffer. If the bit had been clear, the returned buffer will not +** be accessed by SQLite after the call. +** ** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the ** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory ** allocation error occurs.
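A minimal sketch of the buffer-lifetime rule described above (the peek_at_image helper is hypothetical):

    /* Sketch: borrow a read-only view of the "main" database image. The
    ** pointer stays valid until the next write on db or until db is closed,
    ** and the application must not modify the buffer. */
    static void peek_at_image(sqlite3 *db){
      sqlite3_int64 sz = 0;
      unsigned char *p =
          sqlite3_serialize(db, "main", &sz, SQLITE_SERIALIZE_NOCOPY);
      if( p ){
        /* read the first sz bytes of the database image here */
      }else{
        /* no contiguous in-memory copy exists (or OOM); sz may still
        ** contain the database size */
      }
    }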

@@ -10404,6 +10711,9 @@ ** connection closes. If the SQLITE_DESERIALIZE_RESIZEABLE bit is set, then

** SQLite will try to increase the buffer size using sqlite3_realloc64() ** if writes on the database cause it to grow larger than M bytes. ** +** Applications must not modify the buffer P or invalidate it before +** the database connection D is closed. +** ** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the ** database is currently in a read transaction or is involved in a backup ** operation.

@@ -10412,6 +10722,13 @@ ** It is not possible to deserialized into the TEMP database. If the

** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the ** function returns SQLITE_ERROR. ** +** The deserialized database should not be in [WAL mode]. If the database +** is in WAL mode, then any attempt to use the database file will result +** in an [SQLITE_CANTOPEN] error. The application can set the +** [file format version numbers] (bytes 18 and 19) of the input database P +** to 0x01 prior to invoking sqlite3_deserialize(D,S,P,N,M,F) to force the +** database file into rollback mode and work around this limitation. +** ** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the ** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then ** [sqlite3_free()] is invoked on argument P prior to returning.
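A minimal sketch of the WAL workaround suggested above (load_image is a hypothetical helper; pImage is assumed to be sqlite3_malloc64()-allocated, e.g. obtained from sqlite3_serialize() without SQLITE_SERIALIZE_NOCOPY, because SQLITE_DESERIALIZE_FREEONCLOSE hands ownership to SQLite):

    /* Sketch: force the image into rollback mode by clearing the file format
    ** version numbers (bytes 18 and 19) to 0x01, then hand it to db. */
    static int load_image(sqlite3 *db, unsigned char *pImage, sqlite3_int64 sz){
      if( sz>=20 ){
        pImage[18] = 0x01;  /* write version: 1 = legacy/rollback */
        pImage[19] = 0x01;  /* read version:  1 = legacy/rollback */
      }
      return sqlite3_deserialize(db, "main", pImage, sz, sz,
          SQLITE_DESERIALIZE_FREEONCLOSE|SQLITE_DESERIALIZE_RESIZEABLE);
    }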

@@ -10459,6 +10776,19 @@ ** builds on processors without floating point support.

*/ #ifdef SQLITE_OMIT_FLOATING_POINT # undef double +#endif + +#if defined(__wasi__) +# undef SQLITE_WASI +# define SQLITE_WASI 1 +# undef SQLITE_OMIT_WAL +# define SQLITE_OMIT_WAL 1/* because it requires shared memory APIs */ +# ifndef SQLITE_OMIT_LOAD_EXTENSION +# define SQLITE_OMIT_LOAD_EXTENSION +# endif +# ifndef SQLITE_THREADSAFE +# define SQLITE_THREADSAFE 0 +# endif #endif #ifdef __cplusplus

@@ -10667,16 +10997,20 @@ */

SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); /* -** CAPIREF: Conigure a Session Object +** CAPI3REF: Configure a Session Object ** METHOD: sqlite3_session ** ** This method is used to configure a session object after it has been -** created. At present the only valid value for the second parameter is -** [SQLITE_SESSION_OBJCONFIG_SIZE]. +** created. At present the only valid values for the second parameter are +** [SQLITE_SESSION_OBJCONFIG_SIZE] and [SQLITE_SESSION_OBJCONFIG_ROWID]. ** -** Arguments for sqlite3session_object_config() +*/ +SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); + +/* +** CAPI3REF: Options for sqlite3session_object_config ** -** The following values may passed as the the 4th parameter to +** The following values may passed as the the 2nd parameter to ** sqlite3session_object_config(). ** ** <dt>SQLITE_SESSION_OBJCONFIG_SIZE <dd>

@@ -10692,12 +11026,21 @@ ** enabled following the current call, or 0 otherwise.

** ** It is an error (SQLITE_MISUSE) to attempt to modify this setting after ** the first table has been attached to the session object. +** +** <dt>SQLITE_SESSION_OBJCONFIG_ROWID <dd> +** This option is used to set, clear or query the flag that enables +** collection of data for tables with no explicit PRIMARY KEY. +** +** Normally, tables with no explicit PRIMARY KEY are simply ignored +** by the sessions module. However, if this flag is set, it behaves +** as if such tables have a column "_rowid_ INTEGER PRIMARY KEY" inserted +** as their leftmost columns. +** +** It is an error (SQLITE_MISUSE) to attempt to modify this setting after +** the first table has been attached to the session object. */ -SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); - -/* -*/ -#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_ROWID 2 /* ** CAPI3REF: Enable Or Disable A Session Object
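A minimal sketch of enabling the new option before any tables are attached (enable_rowid_tracking is hypothetical; the int-argument convention, positive to enable, zero to disable, negative to query, is assumed to mirror SQLITE_SESSION_OBJCONFIG_SIZE):

    /* Sketch: ask the session object to also record changes to tables that
    ** have no explicit PRIMARY KEY, treating _rowid_ as their key. */
    static int enable_rowid_tracking(sqlite3_session *pSession){
      int arg = 1;  /* assumed: 1 = enable, 0 = disable, <0 = query only */
      int rc = sqlite3session_object_config(pSession,
                   SQLITE_SESSION_OBJCONFIG_ROWID, &arg);
      /* rc is SQLITE_MISUSE if a table has already been attached */
      return rc;
    }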

@@ -11459,6 +11802,18 @@ );

/* +** CAPI3REF: Upgrade the Schema of a Changeset/Patchset +*/ +SQLITE_API int sqlite3changeset_upgrade( + sqlite3 *db, + const char *zDb, + int nIn, const void *pIn, /* Input changeset */ + int *pnOut, void **ppOut /* OUT: Inverse of input */ +); + + + +/* ** CAPI3REF: Changegroup Handle ** ** A changegroup is an object used to combine two or more

@@ -11505,6 +11860,38 @@ */

SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp); /* +** CAPI3REF: Add a Schema to a Changegroup +** METHOD: sqlite3_changegroup_schema +** +** This method may be used to optionally enforce the rule that the changesets +** added to the changegroup handle must match the schema of database zDb +** ("main", "temp", or the name of an attached database). If +** sqlite3changegroup_add() is called to add a changeset that is not compatible +** with the configured schema, SQLITE_SCHEMA is returned and the changegroup +** object is left in an undefined state. +** +** A changeset schema is considered compatible with the database schema in +** the same way as for sqlite3changeset_apply(). Specifically, for each +** table in the changeset, there exists a database table with: +** +** <ul> +** <li> The name identified by the changeset, and +** <li> at least as many columns as recorded in the changeset, and +** <li> the primary key columns in the same position as recorded in +** the changeset. +** </ul> +** +** The output of the changegroup object always has the same schema as the +** database nominated using this function. In cases where changesets passed +** to sqlite3changegroup_add() have fewer columns than the corresponding table +** in the database schema, these are filled in using the default column +** values from the database schema. This makes it possible to combined +** changesets that have different numbers of columns for a single table +** within a changegroup, provided that they are otherwise compatible. +*/ +SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const char *zDb); + +/* ** CAPI3REF: Add A Changeset To A Changegroup ** METHOD: sqlite3_changegroup **

@@ -11572,13 +11959,18 @@ **

** If the new changeset contains changes to a table that is already present ** in the changegroup, then the number of columns and the position of the ** primary key columns for the table must be consistent. If this is not the -** case, this function fails with SQLITE_SCHEMA. If the input changeset -** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is -** returned. Or, if an out-of-memory condition occurs during processing, this -** function returns SQLITE_NOMEM. In all cases, if an error occurs the state -** of the final contents of the changegroup is undefined. +** case, this function fails with SQLITE_SCHEMA. Except, if the changegroup +** object has been configured with a database schema using the +** sqlite3changegroup_schema() API, then it is possible to combine changesets +** with different numbers of columns for a single table, provided that +** they are otherwise compatible. +** +** If the input changeset appears to be corrupt and the corruption is +** detected, SQLITE_CORRUPT is returned. Or, if an out-of-memory condition +** occurs during processing, this function returns SQLITE_NOMEM. ** -** If no error occurs, SQLITE_OK is returned. +** In all cases, if an error occurs the state of the final contents of the +** changegroup is undefined. If no error occurs, SQLITE_OK is returned. */ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
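A minimal sketch illustrating both sqlite3changegroup_schema() and the relaxed sqlite3changegroup_add() behaviour described in the two hunks above (combine_changesets is a hypothetical helper): fix the changegroup schema to the "main" database of db, then add changesets that may carry different column counts for the same table. The output buffer must later be released with sqlite3_free().

    /* Sketch: merge two changesets under a fixed database schema. */
    static int combine_changesets(
      sqlite3 *db,
      int n1, void *p1,         /* first input changeset */
      int n2, void *p2,         /* second input changeset */
      int *pnOut, void **ppOut  /* OUT: combined changeset (sqlite3_free()) */
    ){
      sqlite3_changegroup *pGrp = 0;
      int rc = sqlite3changegroup_new(&pGrp);
      if( rc==SQLITE_OK ) rc = sqlite3changegroup_schema(pGrp, db, "main");
      if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, n1, p1);
      if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, n2, p2);
      if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
      sqlite3changegroup_delete(pGrp);
      return rc;
    }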

@@ -11830,9 +12222,30 @@ ** <dt>SQLITE_CHANGESETAPPLY_INVERT <dd>

** Invert the changeset before applying it. This is equivalent to inverting ** a changeset using sqlite3changeset_invert() before applying it. It is ** an error to specify this flag with a patchset. +** +** <dt>SQLITE_CHANGESETAPPLY_IGNORENOOP <dd> +** Do not invoke the conflict handler callback for any changes that +** would not actually modify the database even if they were applied. +** Specifically, this means that the conflict handler is not invoked +** for: +** <ul> +** <li>a delete change if the row being deleted cannot be found, +** <li>an update change if the modified fields are already set to +** their new values in the conflicting row, or +** <li>an insert change if all fields of the conflicting row match +** the row being inserted. +** </ul> +** +** <dt>SQLITE_CHANGESETAPPLY_FKNOACTION <dd> +** If this flag it set, then all foreign key constraints in the target +** database behave as if they were declared with "ON UPDATE NO ACTION ON +** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL +** or SET DEFAULT. */ #define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001 #define SQLITE_CHANGESETAPPLY_INVERT 0x0002 +#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004 +#define SQLITE_CHANGESETAPPLY_FKNOACTION 0x0008 /* ** CAPI3REF: Constants Passed To The Conflict Handler
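A minimal sketch of passing the two new flags to sqlite3changeset_apply_v2() (apply_with_new_flags is hypothetical; the conflict handler and its context are supplied by the caller):

    /* Sketch: apply a changeset, skipping conflict callbacks for changes that
    ** would not modify the database, and treating all foreign keys as
    ** "ON UPDATE NO ACTION ON DELETE NO ACTION" for the duration. */
    static int apply_with_new_flags(
      sqlite3 *db, int nChangeset, void *pChangeset,
      int (*xConflict)(void*, int, sqlite3_changeset_iter*), void *pCtx
    ){
      return sqlite3changeset_apply_v2(
          db, nChangeset, pChangeset,
          0,            /* xFilter: apply changes to every table */
          xConflict,    /* application-supplied conflict handler */
          pCtx,         /* first argument passed to xConflict */
          0, 0,         /* no rebase buffer requested */
          SQLITE_CHANGESETAPPLY_IGNORENOOP|SQLITE_CHANGESETAPPLY_FKNOACTION);
    }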

@@ -12573,7 +12986,7 @@ ** xPhraseNextColumn()

** See xPhraseFirstColumn above. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 2 */ void *(*xUserData)(Fts5Context*);

@@ -12802,8 +13215,8 @@ ** On the other hand, it may require more CPU cycles to run MATCH queries,

** as separate queries of the FTS index are required for each synonym. ** ** When using methods (2) or (3), it is important that the tokenizer only -** provide synonyms when tokenizing document text (method (2)) or query -** text (method (3)), not both. Doing so will not cause any errors, but is +** provide synonyms when tokenizing document text (method (3)) or query +** text (method (2)), not both. Doing so will not cause any errors, but is ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer;

@@ -12851,7 +13264,7 @@ /* Create a new tokenizer */

int (*xCreateTokenizer)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_tokenizer *pTokenizer, void (*xDestroy)(void*) );

@@ -12860,7 +13273,7 @@ /* Find an existing tokenizer */

int (*xFindTokenizer)( fts5_api *pApi, const char *zName, - void **ppContext, + void **ppUserData, fts5_tokenizer *pTokenizer );

@@ -12868,7 +13281,7 @@ /* Create a new auxiliary function */

int (*xCreateFunction)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_extension_function xFunction, void (*xDestroy)(void*) );
M test/http_api.nimtest/http_api.nim

@@ -207,7 +207,7 @@ check(json["results"][2]["data"]["age"] == %31)

check(json["results"][5]["data"]["name"]["first"] == %"Hart") test "GET documents in range": - var rget = jget("docs/?created-after=$1&created-before=$2" % [$t_now, $(t_now+10)]) + var rget = jget("docs/?created-after=$1&created-before=$2" % [$t_now, $(t_now+100)]) var json = rget.body.parseJson check(json["total"] == %8)