Merge branch 'master' into patch-1
@@ -76,13 +76,13 @@ if: matrix.os == 'windows-latest'
       # Build for Linux
       - name: Build (Linux)
-        run: nimble build -v -y --passL:-static -d:release --gcc.exe:musl-gcc --gcc.linkerexe:musl-gcc --gc:orc --opt:size litestore
+        run: nimble build -v -y --passL:-static -d:release --gcc.exe:musl-gcc --gcc.linkerexe:musl-gcc --mm:refc --opt:size litestore
         if: matrix.os == 'ubuntu-latest'
       # Build for macOS/Windows
       - name: Build (macOS, Windows)
         shell: bash
-        run: nimble build -v -y -d:release --gc:orc --opt:size litestore
+        run: nimble build -v -y -d:release --mm:refc --opt:size litestore
         if: matrix.os == 'macos-latest' || matrix.os == 'windows-latest'
       # Import admin directory and create default db
@@ -40,4 +40,4 @@ curl https://nim-lang.org/choosenim/init.sh -sSf > init.sh
           sh init.sh -y
       - name: Build
-        run: nimble build -y -d:release --gcc.exe:musl-gcc --gcc.linkerexe:musl-gcc
+        run: nimble build -y -d:release --gcc.exe:musl-gcc --gcc.linkerexe:musl-gcc --mm:refc
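For context (an editor's note, not part of the patch): Nim 2.0 deprecates the old `--gc` switches in favour of `--mm` (memory management), so `--gc:orc` would normally become `--mm:orc`; this patch instead pins the classic reference-counting collector. A local build equivalent to the CI step above would be:

```bash
# Hypothetical local equivalent of the CI build step above.
# --mm:refc selects Nim's pre-2.0 default deferred reference-counting GC;
# with Nim 2.0 the default memory manager would otherwise be ORC.
nimble build -y -d:release --mm:refc --opt:size litestore
```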
@@ -16,8 +16,13 @@ LiteStore_UserGuide.htm
 jester_integration
 js
 *_backup
-./config.json
+config.json
+*jwks.json
 *.db-shm
 *.db-wal
 *.nim.bak
 litestore_linkerArgs.txt
+token*.txt
+x5c*.cert
+jwt
+middleware
@@ -32,7 +32,7 @@ for page in ${pages[@]}
 do
   (cat "${page}"; printf "\n\n") >> LiteStore_UserGuide.md
 done
-hastyscribe --field/version:1.11.0 LiteStore_UserGuide.md
+hastyscribe --field/version:1.13.0 LiteStore_UserGuide.md
 rm LiteStore_UserGuide.md
 mv LiteStore_UserGuide.htm ../..
 cd ../..
@@ -25,7 +25,7 @@ installExt = @["nim", "c", "h", "json", "ico"]

 # Dependencies

-requires "nim >= 1.4.4", "https://github.com/h3rald/nim-jwt", "nimgen", "duktape"
+requires "nim >= 2.0.0", "db_connector", "nimgen", "duktape"

 # Build
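A note on the new **db_connector** dependency (an editor's sketch, not part of the patch): in Nim 2.0 the database modules were moved out of the standard library into the external db_connector package, so code that previously imported `db_sqlite` directly needs the new module path:

```nim
# Minimal sketch, assuming the standard db_connector package layout.
# Before (Nim 1.x):  import db_sqlite
import db_connector/db_sqlite

let db = open("data.db", "", "", "")  # user/password/schema are unused for SQLite
echo db.getValue(sql"SELECT sqlite_version()")
db.close()
```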
@@ -40,8 +40,13 @@ However, users with the **admin:wiki** scope will be able to access documents located under the /docs/wiki/ folder.

 Finally, specify the public signature to be used to validate JWT tokens using the **signature** property. Typically, its value should be set to the first value of the [x.509 certificate chain](https://auth0.com/docs/tokens/reference/jwt/jwks-properties) specified in the [JSON Web Key Set](https://auth0.com/docs/jwks) of your API.

+> %tip%
+> signature vs. jwks_uri
+>
+> As of version 1.13.0, it is recommended to use the **jwks_uri** property in a LiteStore configuration file instead of the **signature** property.
+
 To use this configuration at runtime, specify it through the **-\-auth** option, like this: `litestore -\-auth:auth.json`

-Once enabled, LiteStore will return HTTP 401 error codes if an invalid token or no token is included in the HTTP Authorization header of the request accessing the resource or HTTP 403 error codes in case an authenticated user does not have a valid scope to access a specified resource.
+Once enabled, LiteStore will return HTTP 401 error codes if an invalid token or no token is included in the HTTP Authorization header of the request accessing the resource or HTTP 403 error codes in case an authenticated user does not have a valid scope to access a specified resource.
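To make the 401/403 behaviour concrete, a hypothetical request against a local instance on the default port (an editor's example, not from the patch):

```bash
# Assumes litestore is running locally with --auth:auth.json loaded.
curl -i -H "Authorization: Bearer $TOKEN" http://localhost:9500/docs/wiki/home
# 200 - token is valid and carries the admin:wiki scope
# 401 - token is missing or invalid
# 403 - token is valid but lacks the required scope
```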
@@ -101,4 +101,13 @@ * **allowed** — If set to **false**, LiteStore will return a [405 - Method not allowed](class:kwd) error code when accessing the resource with the specified method.

 ### signature

-This section must be set to a valid certificate used to validate JWT tokens. Note that the certificate must follow a specific format and start with the appropriate begin/end blocks.
+This section must be set to a valid certificate used to validate JWT tokens. Note that the certificate must follow a specific format and start with the appropriate begin/end blocks.
+
+### jwks_uri
+
+As of version 1.13.0, this property can be set to a URI pointing to a valid [JSON Web Key Sets](https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-key-sets) file. If this property is specified, it will be used instead of **signature** to perform signature verification of JWT tokens.
+
+> %note%
+> How JWKS data is managed
+>
+> If this property is set, LiteStore will attempt to download the specified JWKS file on startup. This file will be cached as a *store-name*_jwks.json file (e.g. `data_jwks.json`) and its contents stored in memory.
@@ -10,8 +10,6 @@ ### $req

 The current HTTP request sent to access the current resource.

-#### Properties
-
 <dl>
 <dt>method: string</dt>
 <dd>The HTTP method used by the request, all uppercase (GET, POST, DELETE, PUT, PATCH, OPTIONS, or HEAD).</dd>
@@ -67,7 +65,7 @@ All methods return a response object containing two String properties, **code** and **content**.
 <dl>
 <dt>function get(resource: string, id: string, parameters: string): object</dt>
-<dd>Retrieves the specified resource(s.).
+<dd>Retrieves the specified resource(s).
 <p>
 Examples:
 <ul>
@@ -125,4 +123,70 @@ <li><code>$store.head('docs')</code></li>
 </ul>
 </p>
 </dd>
-</dl>
+</dl>
+
+### $http
+
+Simple synchronous API to perform HTTP requests.
+
+All methods return a response object containing the following properties:
+* **code** (string)
+* **content** (string)
+* **headers** (object)
+
+<dl>
+<dt>function get(url: string, headers: object): object</dt>
+<dd>Executes a GET request.
+<p>
+Example:
+<ul>
+<li><code>$http.get('https://reqres.in/api/users', {})</code></li>
+</ul>
+</p>
+</dd>
+<dt>function post(url: string, headers: object, body: string): object</dt>
+<dd>Executes a POST request.
+<p>
+Example:
+<ul>
+<li><code>$http.post('https://reqres.in/api/users', {'Content-Type': 'application/json'}, '{"name": "Test", "job": "Tester"}')</code></li>
+</ul>
+</p>
+</dd>
+<dt>function put(url: string, headers: object, body: string): object</dt>
+<dd>Executes a PUT request.
+<p>
+Example:
+<ul>
+<li><code>$http.put('https://reqres.in/api/users/2', {'Content-Type': 'application/json'}, '{"name": "Test", "job": "Tester"}')</code></li>
+</ul>
+</p>
+</dd>
+<dt>function patch(url: string, headers: object, body: string): object</dt>
+<dd>Executes a PATCH request.
+<p>
+Example:
+<ul>
+<li><code>$http.patch('https://reqres.in/api/users/2', {'Content-Type': 'application/json'}, '{"name": "Test", "job": "Tester"}')</code></li>
+</ul>
+</p>
+</dd>
+<dt>function delete(url: string, headers: object): object</dt>
+<dd>Executes a DELETE request.
+<p>
+Example:
+<ul>
+<li><code>$http.delete('https://reqres.in/api/users/2', {})</code></li>
+</ul>
+</p>
+</dd>
+<dt>function head(url: string, headers: object): object</dt>
+<dd>Executes a HEAD request.
+<p>
+Example:
+<ul>
+<li><code>$http.head('https://reqres.in/api/users', {})</code></li>
+</ul>
+</p>
+</dd>
+</dl>
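A hypothetical middleware fragment (an editor's example, not from the patch) combining $http with the $store API documented above; the endpoint is the same placeholder service used in the examples:

```js
// Editor's sketch: fetch a user from the placeholder service used in the
// examples above and store it as a JSON document via $store.
var resp = $http.get('https://reqres.in/api/users/2', {});
if (resp.code == 200) { // code is documented as a string; == coerces
  var user = JSON.parse(resp.content).data;
  $store.post('docs', 'users', JSON.stringify(user), 'application/json');
}
```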
@@ -1,19 +0,0 @@
-define:release
-threads:on
-
-@if nimHasWarningObservableStores:
-  warning[ObservableStores]: off
-@end
-
-# https://blog.filippo.io/easy-windows-and-linux-cross-compilers-for-macos/
-
-amd64.windows.gcc.path = "/usr/local/bin"
-amd64.windows.gcc.exe = "x86_64-w64-mingw32-gcc"
-amd64.windows.gcc.linkerexe = "x86_64-w64-mingw32-gcc"
-
-amd64.linux.gcc.path = "/usr/local/bin"
-amd64.linux.gcc.exe = "x86_64-linux-musl-gcc"
-amd64.linux.gcc.linkerexe = "x86_64-linux-musl-gcc"
-
---gc = "orc"
---opt = "size"
@@ -0,0 +1,23 @@
+
+switch("mm", "refc")
+switch("opt", "size")
+switch("define", "ssl")
+switch("define", "release")
+switch("threadAnalysis", "off")
+
+when defined(windows):
+  switch("dynlibOverride", "sqlite3_64")
+else:
+  switch("dynlibOverride", "sqlite3")
+
+when defined(ssl):
+  switch("define", "useOpenSsl3")
+  when defined(windows):
+    # TODO: change once issue nim#15220 is resolved
+    switch("define", "noOpenSSLHacks")
+    switch("define", "sslVersion:(")
+    switch("dynlibOverride", "ssl-")
+    switch("dynlibOverride", "crypto-")
+  else:
+    switch("dynlibOverride", "ssl")
+    switch("dynlibOverride", "crypto")
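Editor's note (an assumption about the build flow, not stated in the patch): `config.nims` is evaluated automatically by the compiler, so the switches above replace the deleted `nim.cfg` without any change to the build commands:

```bash
# No extra flags needed: nim/nimble picks up config.nims automatically,
# applying --mm:refc, -d:ssl, and the dynlibOverride settings above.
nimble build litestore
```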
@@ -35,7 +35,7 @@ if pair.len < 2 or pair[1] == "":
     raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment)
   try:
     pair[1] = pair[1].replace("+", "%2B").decodeURL
-  except:
+  except CatchableError:
     raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment)
   case pair[0]:
     of "search":
@@ -45,12 +45,12 @@ options.tags = pair[1]
     of "limit":
       try:
         options.limit = pair[1].parseInt
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg())
     of "offset":
       try:
         options.offset = pair[1].parseInt
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg())
     of "sort":
       let orderby = pair[1].orderByClauses()
@@ -139,7 +139,7 @@ result.headers = newHttpHeaders(TAB_HEADERS)
       result.headers["Content-Length"] = "0"
       result.content = ""
      result.code = Http204
-    except:
+    except CatchableError:
      result = resError(Http500, "Unable to delete document '$1'" % id)

 proc getRawDocuments(LS: LiteStore, options: QueryOptions = newQueryOptions()): LSResponse =
@@ -208,7 +208,7 @@ result.content = doc
       result.code = Http201
     else:
       result = resError(Http500, "Unable to create document.")
-  except:
+  except CatchableError:
     result = resError(Http500, "Unable to create document.")

 proc putDocument(LS: LiteStore, id: string, body: string, ct: string): LSResponse =
@@ -232,7 +232,7 @@ result.content = doc
         result.code = Http200
       else:
         result = resError(Http500, "Unable to update document '$1'." % id)
-    except:
+    except CatchableError:
       result = resError(Http500, "Unable to update document '$1'." % id)

 proc patchDocument(LS: LiteStore, id: string, body: string): LSResponse =
@@ -258,7 +258,7 @@ try:
         apply = applyPatchOperation(tags, item["op"].str, item["path"].str, item["value"].str)
         if not apply:
           break
-      except:
+      except CatchableError:
         return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg())
     else:
       return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c)
@@ -270,7 +270,7 @@ discard LS.store.destroyTag(t1.str, id, true)
       for t2 in tags:
         if t2 != "":
           LS.store.createTag(t2, id, true)
-    except:
+    except CatchableError:
       return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()])
   return LS.getRawDocument(id)
@@ -331,7 +331,7 @@ result.content = ""
     else:
       result = LS.getRawDocuments(options)
       result.content = ""
-  except:
+  except CatchableError:
     return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg())

 proc get(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =
@@ -350,7 +350,7 @@ else:
           return LS.getDocument(id, options)
         else:
           return LS.getRawDocuments(options)
-      except:
+      except CatchableError:
         return resError(Http500, "Internal Server Error - $1" % getCurrentExceptionMsg())
   of "info":
     if id != "":
@@ -409,7 +409,7 @@ else:
           result.headers = ctHeader("text/plain")
         result.content = contents
         result.code = Http200
-      except:
+      except CatchableError:
         return resError(Http500, "Unable to read file '$1'." % path)
     else:
       return resError(Http404, "File '$1' not found." % path)
@@ -444,4 +444,4 @@ if LS.readonly:
       return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
     return validate(req, LS, resource, id, patch)
   else:
-    return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
+    return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
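The recurring change in these hunks (here and in the remaining API modules below) is mechanical: every bare `except:` branch becomes `except CatchableError:`. A bare `except` catches everything, including `Defect`s such as out-of-range errors, and recent Nim versions flag it with a `BareExcept` warning; `CatchableError` restricts the handler to recoverable errors. A minimal sketch of the pattern (an editor's example, not from the patch):

```nim
import std/strutils

proc parseLimit(value: string): int =
  try:
    result = value.parseInt
  except CatchableError:
    # Only recoverable errors (e.g. ValueError from parseInt) are handled;
    # Defects are left to terminate the program, whereas a bare `except:`
    # would have silently swallowed them as well.
    raise newException(ValueError, "Invalid limit value: " & getCurrentExceptionMsg())
```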
@@ -34,7 +34,7 @@ if pair.len < 2 or pair[1] == "":
     raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment)
   try:
     pair[1] = pair[1].replace("+", "%2B").decodeURL
-  except:
+  except CatchableError:
     raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment)
   case pair[0]:
     of "search":
@@ -44,12 +44,12 @@ options.tags = pair[1]
     of "limit":
       try:
         options.limit = pair[1].parseInt
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg())
     of "offset":
       try:
         options.offset = pair[1].parseInt
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg())
     of "sort":
       let orderby = pair[1].orderByClauses()
@@ -77,7 +77,7 @@ case ct:
     of "application/json":
       try:
         discard body.parseJson()
-      except:
+      except CatchableError:
         return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg())
     else:
       discard
@@ -156,7 +156,7 @@ result.headers = newHttpHeaders(TAB_HEADERS)
       result.headers["Content-Length"] = "0"
       result.content = ""
       result.code = Http204
-    except:
+    except CatchableError:
       result = resError(Http500, "Unable to delete document '$1'" % id)

 proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions()): LSResponse =
@@ -229,7 +229,7 @@ result.content = doc
       result.code = Http201
     else:
       result = resError(Http500, "Unable to create document.")
-  except:
+  except CatchableError:
     result = resError(Http500, "Unable to create document.")

 proc putDocument*(LS: LiteStore, id: string, body: string, ct: string): LSResponse =
@@ -255,7 +255,7 @@ result.content = doc
         result.code = Http200
       else:
         result = resError(Http500, "Unable to update document '$1'." % id)
-    except:
+    except CatchableError:
       result = resError(Http500, "Unable to update document '$1'." % id)

 proc patchDocument*(LS: LiteStore, id: string, body: string): LSResponse =
@@ -281,7 +281,7 @@ try:
         apply = applyPatchOperation(tags, item["op"].str, item["path"].str, item["value"].str)
         if not apply:
           break
-      except:
+      except CatchableError:
         return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg())
     else:
       return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c)
@@ -293,7 +293,7 @@ discard LS.store.destroyTag(t1.str, id, true)
       for t2 in tags:
         if t2 != "":
           LS.store.createTag(t2, id, true)
-    except:
+    except CatchableError:
       return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()])
   return LS.getRawDocument(id)
@@ -370,7 +370,7 @@ result.content = ""
     else:
       result = LS.getRawDocuments(options)
       result.content = ""
-  except:
+  except CatchableError:
     return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg())

 proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =
@@ -391,7 +391,7 @@ else:
           return LS.getDocument(id, options)
         else:
           return LS.getRawDocuments(options)
-      except:
+      except CatchableError:
         return resError(Http500, "Internal Server Error - $1" % getCurrentExceptionMsg())
   of "info":
     if id != "":
@@ -447,7 +447,7 @@ else:
           result.headers = ctHeader("text/plain")
         result.content = contents
         result.code = Http200
-      except:
+      except CatchableError:
         return resError(Http500, "Unable to read file '$1'." % path)
     else:
       return resError(Http404, "File '$1' not found." % path)
@@ -482,4 +482,4 @@ if LS.readonly:
       return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
     return validate(req, LS, resource, id, patch)
   else:
-    return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
+    return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
@@ -141,7 +141,7 @@ if pair.len < 2 or pair[1] == "":
     raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment)
   try:
     pair[1] = pair[1].replace("+", "%2B").decodeURL
-  except:
+  except CatchableError:
     raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment)
   case pair[0]:
     of "filter":
@@ -159,12 +159,12 @@ options.tags = pair[1]
     of "limit":
       try:
         options.limit = pair[1].parseInt
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg())
     of "offset":
       try:
         options.offset = pair[1].parseInt
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg())
     of "sort":
       let orderby = pair[1].orderByClauses()
@@ -192,7 +192,7 @@ case ct:
     of "application/json":
       try:
         discard body.parseJson()
-      except:
+      except CatchableError:
         return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg())
     else:
       discard
@@ -263,7 +263,7 @@ raise newException(EInvalidRequest, "invalid patch operation: $1" % op)
       else:
         d = d[index]
         dorig = dorig[index]
-    except:
+    except CatchableError:
       raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path])
   else:
     if c >= keys.len:
@@ -350,7 +350,7 @@ result.headers = newHttpHeaders(TAB_HEADERS)
       result.headers["Content-Length"] = "0"
       result.content = ""
       result.code = Http204
-    except:
+    except CatchableError:
       result = resError(Http500, "Unable to delete document '$1'" % id)

 proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions()): LSResponse =
@@ -420,7 +420,7 @@ result.content = doc
       result.code = Http201
     else:
       result = resError(Http500, "Unable to create document.")
-  except:
+  except CatchableError:
     result = resError(Http500, "Unable to create document.")

 proc putDocument*(LS: LiteStore, id: string, body: string, ct: string): LSResponse =
@@ -446,7 +446,7 @@ result.content = doc
         result.code = Http200
       else:
         result = resError(Http500, "Unable to update document '$1'." % id)
-    except:
+    except CatchableError:
       result = resError(Http500, "Unable to update document '$1'." % id)

 proc patchDocument*(LS: LiteStore, id: string, body: string): LSResponse =
@@ -471,7 +471,7 @@ if tags.contains("$subtype:json"):
     try:
       origData = jdoc["data"].getStr.parseJson
       data = origData.copy
-    except:
+    except CatchableError:
       discard
   var c = 1
   for item in jbody.items:
@@ -482,7 +482,7 @@ try:
         apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"])
         if not apply:
           break
-      except:
+      except CatchableError:
         return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg())
     else:
       return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c)
@@ -493,7 +493,7 @@ try:
       var doc = LS.store.updateDocument(id, data.pretty, "application/json")
       if doc == "":
         return resError(Http500, "Unable to patch document '$1'." % id)
-    except:
+    except CatchableError:
       return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg())
   if origTags != tags:
     try:
@@ -502,7 +502,7 @@ discard LS.store.destroyTag(t1.str, id, true)
       for t2 in tags:
         if t2 != "":
           LS.store.createTag(t2, id, true)
-    except:
+    except CatchableError:
       return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()])
   return LS.getRawDocument(id)
@@ -579,7 +579,7 @@ result.content = ""
     else:
       result = LS.getRawDocuments(options)
       result.content = ""
-  except:
+  except CatchableError:
     return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg())

 proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =
@@ -600,7 +600,7 @@ else:
           return LS.getDocument(id, options)
         else:
           return LS.getRawDocuments(options)
-      except:
+      except CatchableError:
         return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg())
   of "info":
     if id != "":
@@ -656,7 +656,7 @@ else:
           result.headers = ctHeader("text/plain")
         result.content = contents
         result.code = Http200
-      except:
+      except CatchableError:
         return resError(Http500, "Unable to read file '$1'." % path)
     else:
       return resError(Http404, "File '$1' not found." % path)
@@ -691,4 +691,4 @@ if LS.readonly:
       return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
     return validate(req, LS, resource, id, patch)
   else:
-    return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
+    return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
@@ -141,7 +141,7 @@ if pair.len < 2 or pair[1] == "":
     raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment)
   try:
     pair[1] = pair[1].replace("+", "%2B").decodeURL
-  except:
+  except CatchableError:
     raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment)
   case pair[0]:
     of "filter":
@@ -161,32 +161,32 @@ options.tags = pair[1]
     of "created-after":
       try:
         options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg())
     of "created-before":
       try:
         options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg())
     of "modified-after":
       try:
         options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg())
     of "modified-before":
       try:
         options.modifiedBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg())
     of "limit":
       try:
         options.limit = pair[1].parseInt
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg())
     of "offset":
       try:
         options.offset = pair[1].parseInt
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg())
     of "sort":
       let orderby = pair[1].orderByClauses()
@@ -216,7 +216,7 @@ case ct:
     of "application/json":
       try:
         discard body.parseJson()
-      except:
+      except CatchableError:
         return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg())
     else:
       discard
@@ -286,7 +286,7 @@ raise newException(EInvalidRequest, "invalid patch operation: $1" % op)
       else:
         d = d[index]
         dorig = dorig[index]
-    except:
+    except CatchableError:
       raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path])
   else:
     if c >= keys.len:
@@ -386,7 +386,7 @@ setOrigin(LS, req, result.headers)
       result.headers["Content-Length"] = "0"
       result.content = ""
       result.code = Http204
-    except:
+    except CatchableError:
       result = resError(Http500, "Unable to delete document '$1'" % id)

 proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =
@@ -484,7 +484,7 @@ result.content = doc
       result.code = Http201
     else:
       result = resError(Http500, "Unable to create document.")
-  except:
+  except CatchableError:
     eWarn()
     result = resError(Http500, "Unable to create document.")

@@ -513,7 +513,7 @@ result.content = doc
         result.code = Http200
       else:
         result = resError(Http500, "Unable to update document '$1'." % id)
-    except:
+    except CatchableError:
       result = resError(Http500, "Unable to update document '$1'." % id)

 proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse =
@@ -538,7 +538,7 @@ if tags.contains("$subtype:json"):
     try:
       origData = jdoc["data"].getStr.parseJson
       data = origData.copy
-    except:
+    except CatchableError:
       discard
   var c = 1
   for item in jbody.items:
@@ -549,7 +549,7 @@ try:
         apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"])
         if not apply:
           break
-      except:
+      except CatchableError:
         return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg())
     else:
       return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c)
@@ -560,7 +560,7 @@ try:
       var doc = LS.store.updateDocument(id, data.pretty, "application/json")
       if doc == "":
         return resError(Http500, "Unable to patch document '$1'." % id)
-    except:
+    except CatchableError:
       return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg())
   if origTags != tags:
     try:
@@ -569,7 +569,7 @@ discard LS.store.destroyTag(t1.str, id, true)
       for t2 in tags:
         if t2 != "":
           LS.store.createTag(t2, id, true)
-    except:
+    except CatchableError:
       return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()])
   return LS.getRawDocument(id, newQueryOptions(), req)
@@ -661,7 +661,7 @@ result.content = ""
     else:
       result = LS.getRawDocuments(options, req)
       result.content = ""
-  except:
+  except CatchableError:
     return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg())

 proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =
@@ -681,7 +681,7 @@ else:
           return LS.getDocument(id, options, req)
         else:
           return LS.getRawDocuments(options, req)
-      except:
+      except CatchableError:
         let e = getCurrentException()
         let trace = e.getStackTrace()
         echo trace
@@ -694,7 +694,7 @@ if id != "":
         return LS.getTag(id, options, req)
       else:
         return LS.getTags(options, req)
-    except:
+    except CatchableError:
       return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg())
   of "info":
     if id != "":
@@ -750,7 +750,7 @@ result.headers = ctHeader("text/plain")
         setOrigin(LS, req, result.headers)
         result.content = contents
         result.code = Http200
-      except:
+      except CatchableError:
         return resError(Http500, "Unable to read file '$1'." % path)
     else:
       return resError(Http404, "File '$1' not found." % path)
@@ -785,4 +785,4 @@ if LS.readonly:
       return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
     return validate(req, LS, resource, id, patch)
   else:
-    return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
+    return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
@@ -145,7 +145,7 @@ if pair.len < 2 or pair[1] == "":
     raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment)
   try:
     pair[1] = pair[1].replace("+", "%2B").decodeURL
-  except:
+  except CatchableError:
     raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment)
   case pair[0]:
     of "filter":
@@ -165,32 +165,32 @@ options.tags = pair[1]
     of "created-after":
       try:
         options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg())
     of "created-before":
       try:
         options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg())
     of "modified-after":
       try:
         options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg())
     of "modified-before":
       try:
         options.modifiedBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg())
     of "limit":
       try:
         options.limit = pair[1].parseInt
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg())
     of "offset":
       try:
         options.offset = pair[1].parseInt
-      except:
+      except CatchableError:
         raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg())
     of "sort":
       let orderby = pair[1].orderByClauses()
@@ -220,7 +220,7 @@ case ct:
     of "application/json":
       try:
         discard body.parseJson()
-      except:
+      except CatchableError:
         return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg())
     else:
       discard
@@ -290,7 +290,7 @@ raise newException(EInvalidRequest, "invalid patch operation: $1" % op)
       else:
         d = d[index]
         dorig = dorig[index]
-    except:
+    except CatchableError:
       raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path])
   else:
     if c >= keys.len:
@@ -400,7 +400,7 @@ setOrigin(LS, req, result.headers)
       result.headers["Content-Length"] = "0"
       result.content = ""
       result.code = Http204
-    except:
+    except CatchableError:
       result = resError(Http500, "Unable to delete document '$1'" % id)

 proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =
@@ -524,7 +524,7 @@ result.headers = ctJsonHeader()
     setOrigin(LS, req, result.headers)
     result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field]
     result.code = Http200
-  except:
+  except CatchableError:
     eWarn()
     result = resError(Http500, "Unable to create index.")

@@ -540,7 +540,7 @@ setOrigin(LS, req, result.headers)
     result.headers["Content-Length"] = "0"
     result.content = ""
     result.code = Http204
-  except:
+  except CatchableError:
     eWarn()
     result = resError(Http500, "Unable to delete index.")

@@ -556,7 +556,7 @@ result.content = doc
       result.code = Http201
     else:
       result = resError(Http500, "Unable to create document.")
-  except:
+  except CatchableError:
     eWarn()
     result = resError(Http500, "Unable to create document.")

@@ -585,7 +585,7 @@ result.content = doc
         result.code = Http200
       else:
         result = resError(Http500, "Unable to update document '$1'." % id)
-    except:
+    except CatchableError:
       result = resError(Http500, "Unable to update document '$1'." % id)

 proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse =
@@ -610,7 +610,7 @@ if tags.contains("$subtype:json"):
     try:
       origData = jdoc["data"].getStr.parseJson
       data = origData.copy
-    except:
+    except CatchableError:
       discard
   var c = 1
   for item in jbody.items:
@@ -621,7 +621,7 @@ try:
         apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"])
         if not apply:
           break
-      except:
+      except CatchableError:
         return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg())
     else:
       return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c)
@@ -632,7 +632,7 @@ try:
       var doc = LS.store.updateDocument(id, data.pretty, "application/json")
      if doc == "":
         return resError(Http500, "Unable to patch document '$1'." % id)
-    except:
+    except CatchableError:
       return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg())
   if origTags != tags:
     try:
@@ -641,7 +641,7 @@ discard LS.store.destroyTag(t1.str, id, true)
       for t2 in tags:
         if t2 != "":
           LS.store.createTag(t2, id, true)
-    except:
+    except CatchableError:
       return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()])
   return LS.getRawDocument(id, newQueryOptions(), req)
@@ -760,7 +760,7 @@ result.content = ""
     else:
       result = LS.getRawDocuments(options, req)
       result.content = ""
-  except:
+  except CatchableError:
     return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg())

 proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =
@@ -780,7 +780,7 @@ else:
           return LS.getDocument(id, options, req)
         else:
           return LS.getRawDocuments(options, req)
-      except:
+      except CatchableError:
         let e = getCurrentException()
         let trace = e.getStackTrace()
         echo trace
@@ -793,7 +793,7 @@ if id != "":
         return LS.getTag(id, options, req)
       else:
         return LS.getTags(options, req)
-    except:
+    except CatchableError:
      return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg())
   of "indexes":
     var options = newQueryOptions()
@@ -803,7 +803,7 @@ if id != "":
         return LS.getIndex(id, options, req)
       else:
         return LS.getIndexes(options, req)
-    except:
+    except CatchableError:
       return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg())
   of "info":
     if id != "":
@@ -824,7 +824,7 @@ if resource == "indexes":
       var field = ""
       try:
         field = parseJson(req.body.strip)["field"].getStr
-      except:
+      except CatchableError:
         return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg())
       return LS.putIndex(id, field, req)
     else: # Assume docs
@@ -870,7 +870,7 @@ result.headers = ctHeader("text/plain")
       setOrigin(LS, req, result.headers)
       result.content = contents
       result.code = Http200
-    except:
+    except CatchableError:
       return resError(Http500, "Unable to read file '$1'." % path)
     else:
       return resError(Http404, "File '$1' not found." % path)
@@ -905,4 +905,4 @@ if LS.readonly:
       return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
     return validate(req, LS, resource, id, patch)
   else:
-    return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
+    return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
@@ -1,1131 +1,1131 @@
-import - asynchttpserver, - strutils, - sequtils, - cgi, - strtabs, - pegs, - json, - os, - uri, - times -import - types, - contenttypes, - core, - utils, - logger, - duktape - -# Helper procs - -proc sqlOp(op: string): string = - let table = newStringTable() - table["not eq"] = "<>" - table["eq"] = "==" - table["gt"] = ">" - table["gte"] = ">=" - table["lt"] = "<" - table["lte"] = "<=" - table["contains"] = "contains" - table["like"] = "like" - return table[op] - -proc orderByClauses*(str: string): string = - var clauses = newSeq[string]() - var fragments = str.split(",") - let clause = peg""" - clause <- {[-+]} {field} - field <- ('id' / 'created' / 'modified' / path) - path <- '$' (objField)+ - ident <- [a-zA-Z0-9_]+ - objField <- '.' ident - """ - for f in fragments: - var matches = @["", ""] - if f.find(clause, matches) != -1: - var field = matches[1] - if field[0] == '$': - field = "json_extract(documents.data, '$1')" % matches[1] - if matches[0] == "-": - clauses.add("$1 COLLATE NOCASE DESC" % field) - else: - clauses.add("$1 COLLATE NOCASE ASC" % field) - return clauses.join(", ") - -proc selectClause*(str: string, options: var QueryOptions) = - let tokens = """ - path <- '$' (objItem / objField)+ - ident <- [a-zA-Z0-9_]+ - objIndex <- '[' \d+ ']' - objField <- '.' ident - objItem <- objField objIndex - """ - let fields = peg(""" - fields <- ^{field} (\s* ',' \s* {field})*$ - field <- path \s+ ('as' / 'AS') \s+ ident - """ & tokens) - let field = peg(""" - field <- ^{path} \s+ ('as' / 'AS') \s+ {ident}$ - """ & tokens) - var fieldMatches = newSeq[string](10) - if str.strip.match(fields, fieldMatches): - for m in fieldMatches: - if m.len > 0: - var rawTuple = newSeq[string](2) - if m.match(field, rawTuple): - options.jsonSelect.add((path: rawTuple[0], alias: rawTuple[1])) - -proc filterClauses*(str: string, options: var QueryOptions) = - let tokens = """ - operator <- 'not eq' / 'eq' / 'gte' / 'gt' / 'lte' / 'lt' / 'contains' / 'like' - value <- string / number / 'null' / 'true' / 'false' - string <- '"' ('\\"' . / [^"])* '"' - number <- '-'? '0' / [1-9] [0-9]* ('.' [0-9]+)? (( 'e' / 'E' ) ( '+' / '-' )? [0-9]+)? - path <- '$' (objItem / objField)+ - ident <- [a-zA-Z0-9_]+ - objIndex <- '[' \d+ ']' - objField <- '.' 
ident - objItem <- objField objIndex - """ - let clause = peg(""" - clause <- {path} \s+ {operator} \s+ {value} - """ & tokens) - let andClauses = peg(""" - andClauses <- ^{clause} (\s+ 'and' \s+ {clause})*$ - clause <- path \s+ operator \s+ value - """ & tokens) - let orClauses = peg(""" - orClauses <- ^{andClauses} (\s+ 'or' \s+ {andClauses})*$ - andClauses <- clause (\s+ 'and' \s+ clause)* - clause <- path \s+ operator \s+ value - """ & tokens) - var orClausesMatches = newSeq[string](10) - discard str.strip.match(orClauses, orClausesMatches) - var parsedClauses = newSeq[seq[seq[string]]]() - for orClause in orClausesMatches: - if orClause.len > 0: - var andClausesMatches = newSeq[string](10) - discard orClause.strip.match(andClauses, andClausesMatches) - var parsedAndClauses = newSeq[seq[string]]() - for andClause in andClausesMatches: - if andClause.len > 0: - var clauses = newSeq[string](3) - discard andClause.strip.match(clause, clauses) - clauses[1] = sqlOp(clauses[1]) - if clauses[2] == "true": - clauses[2] = "1" - elif clauses[2] == "false": - clauses[2] = "0" - parsedAndClauses.add clauses - if parsedAndClauses.len > 0: - parsedClauses.add parsedAndClauses - if parsedClauses.len == 0: - return - var currentArr = 0 - var tables = newSeq[string]() - let resOrClauses = parsedClauses.map do (it: seq[seq[string]]) -> string: - let resAndClauses = it.map do (x: seq[string]) -> string: - if x[1] == "contains": - currentArr = currentArr + 1 - tables.add "json_each(documents.data, '$1') AS arr$2" % [x[0], $currentArr] - return "arr$1.value == $2" % [$currentArr, x[2]] - else: - var arr = @[x[0], x[1], x[2]] - if x[1] == "like": - arr[2] = x[2].replace('*', '%') - return "json_extract(documents.data, '$1') $2 $3 " % arr - return resAndClauses.join(" AND ") - options.tables = options.tables & tables - options.jsonFilter = resOrClauses.join(" OR ") - -proc parseQueryOption*(fragment: string, options: var QueryOptions) = - if fragment == "": - return - var pair = fragment.split('=') - if pair.len < 2 or pair[1] == "": - raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) - try: - pair[1] = pair[1].replace("+", "%2B").decodeURL - except: - raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) - case pair[0]: - of "filter": - filterClauses(pair[1], options) - if options.jsonFilter == "": - raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[1].replace("\"", "\\\"")) - of "select": - selectClause(pair[1], options) - if options.jsonSelect.len == 0: - raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[1].replace("\"", "\\\"")) - of "like": - options.like = pair[1] - of "search": - options.search = pair[1] - of "tags": - options.tags = pair[1] - of "created-after": - try: - options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg()) - of "created-before": - try: - options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg()) - of "modified-after": - try: - options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg()) - of "modified-before": - try: - options.modifiedBefore = 
pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg()) - of "limit": - try: - options.limit = pair[1].parseInt - except: - raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) - of "offset": - try: - options.offset = pair[1].parseInt - except: - raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) - of "sort": - let orderby = pair[1].orderByClauses() - if orderby != "": - options.orderby = orderby - else: - raise newException(EInvalidRequest, "Invalid sort value: $1" % pair[1]) - of "contents", "raw": - discard - else: - discard - -proc parseQueryOptions*(querystring: string, options: var QueryOptions) = - var fragments = querystring.split('&') - for f in fragments: - f.parseQueryOption(options) - -proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse = - if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch: - var ct = "" - let body = req.body.strip - if body == "": - return resError(Http400, "Bad request: No content specified for document.") - if req.headers.hasKey("Content-Type"): - ct = req.headers["Content-Type"] - case ct: - of "application/json": - try: - discard body.parseJson() - except: - return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) - else: - discard - return cb(req, LS, resource, id) - -proc patchTag(tags: var seq[string], index: int, op, path, value: string): bool = - LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." % [op, $index, $value, $tags.len]) - case op: - of "remove": - let tag = tags[index] - if not tag.startsWith("$"): - tags[index] = "" # Not removing element, otherwise subsequent indexes won't work! - else: - raise newException(EInvalidRequest, "cannot remove system tag: $1" % tag) - of "add": - if value.match(PEG_USER_TAG): - tags.insert(value, index) - else: - if value.strip == "": - raise newException(EInvalidRequest, "tag not specified." % value) - else: - raise newException(EInvalidRequest, "invalid tag: $1" % value) - of "replace": - if value.match(PEG_USER_TAG): - if tags[index].startsWith("$"): - raise newException(EInvalidRequest, "cannot replace system tag: $1" % tags[index]) - else: - tags[index] = value - else: - if value.strip == "": - raise newException(EInvalidRequest, "tag not specified." 
% value) - else: - raise newException(EInvalidRequest, "invalid tag: $1" % value) - of "test": - if tags[index] != value: - return false - else: - raise newException(EInvalidRequest, "invalid patch operation: $1" % op) - return true - -proc patchData*(data: var JsonNode, origData: JsonNode, op: string, path: string, value: JsonNode): bool = - LOG.debug("- PATCH -> $1 path $2 with $3" % [op, path, $value]) - var keys = path.replace(peg"^\/data\/", "").split("/") - if keys.len == 0: - raise newException(EInvalidRequest, "no valid path specified: $1" % path) - var d = data - var dorig = origData - var c = 1 - for key in keys: - if d.kind == JArray: - try: - var index = key.parseInt - if c >= keys.len: - d.elems[index] = value - case op: - of "remove": - d.elems.del(index) - of "add": - d.elems.insert(value, index) - of "replace": - d.elems[index] = value - of "test": - if d.elems[index] != value: - return false - else: - raise newException(EInvalidRequest, "invalid patch operation: $1" % op) - else: - d = d[index] - dorig = dorig[index] - except: - raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path]) - else: - if c >= keys.len: - case op: - of "remove": - if d.hasKey(key): - d.delete(key) - else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) - of "add": - d[key] = value - of "replace": - if d.hasKey(key): - d[key] = value - else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) - of "test": - if dorig.hasKey(key): - if dorig[key] != value: - return false - else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) - else: - raise newException(EInvalidRequest, "invalid patch operation: $1" % op) - else: - d = d[key] - dorig = dorig[key] - c += 1 - return true - - -proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[string], op: string, path: string, value: JsonNode): bool = - var matches = @[""] - let p = peg""" - path <- ^tagPath / fieldPath$ - tagPath <- '\/tags\/' {\d+} - fieldPath <- '\/data\/' ident ('\/' ident)* - ident <- [a-zA-Z0-9_]+ / '-' - """ - if path.find(p, matches) == -1: - raise newException(EInvalidRequest, "cannot patch path '$1'" % path) - if path.match(peg"^\/tags\/"): - let index = matches[0].parseInt - if value.kind != JString: - raise newException(EInvalidRequest, "tag '$1' is not a string." 
% $value) - let tag = value.getStr - return patchTag(tags, index, op, path, tag) - elif tags.contains("$subtype:json"): - return patchData(data, origData, op, path, value) - else: - raise newException(EInvalidRequest, "cannot patch data of a non-JSON document.") - -# Low level procs - -proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveTag(id, options) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - if doc == newJNull(): - result = resTagNotFound(id) - else: - result.content = $doc - result.code = Http200 - -proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveIndex(id, options) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - if doc == newJNull(): - result = resIndexNotFound(id) - else: - result.content = $doc - result.code = Http200 - -proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveRawDocument(id, options) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - if doc == "": - result = resDocumentNotFound(id) - else: - result.content = doc - result.code = Http200 - -proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = - let doc = LS.store.retrieveDocument(id, options) - if doc.data == "": - result = resDocumentNotFound(id) - else: - result.headers = doc.contenttype.ctHeader - setOrigin(LS, req, result.headers) - result.content = doc.data - result.code = Http200 - -proc deleteDocument*(LS: LiteStore, id: string, req: LSRequest): LSResponse = - let doc = LS.store.retrieveDocument(id) - if doc.data == "": - result = resDocumentNotFound(id) - else: - try: - let res = LS.store.destroyDocument(id) - if res == 0: - result = resError(Http500, "Unable to delete document '$1'" % id) - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Content-Length"] = "0" - result.content = "" - result.code = Http204 - except: - result = resError(Http500, "Unable to delete document '$1'" % id) - -proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = - var options = options - let t0 = cpuTime() - let docs = LS.store.retrieveTags(options) - let orig_limit = options.limit - let orig_offset = options.offset - options.limit = 0 - options.offset = 0 - options.select = @["COUNT(tag_id)"] - let total = LS.store.countTags(prepareSelectTagsQuery(options), options.like.replace("*", "%")) - var content = newJObject() - if options.like != "": - content["like"] = %(options.like.decodeURL) - if orig_limit > 0: - content["limit"] = %orig_limit - if orig_offset > 0: - content["offset"] = %orig_offset - content["total"] = %total - content["execution_time"] = %(cputime()-t0) - content["results"] = docs - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = - var options = options - let t0 = cpuTime() - let docs = LS.store.retrieveIndexes(options) - let orig_limit = options.limit - let orig_offset = options.offset - options.limit = 0 - options.offset = 0 - options.select = @["COUNT(name)"] - let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), options.like.replace("*", "%")) - var 
content = newJObject() - if options.like != "": - content["like"] = %(options.like.decodeURL) - if orig_limit > 0: - content["limit"] = %orig_limit - if orig_offset > 0: - content["offset"] = %orig_offset - content["total"] = %total - content["execution_time"] = %(cputime()-t0) - content["results"] = docs - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = - var options = options - let t0 = cpuTime() - let docs = LS.store.retrieveRawDocuments(options) - let orig_limit = options.limit - let orig_offset = options.offset - options.limit = 0 - options.offset = 0 - options.select = @["COUNT(docid)"] - let total = LS.store.retrieveRawDocuments(options)[0].num - var content = newJObject() - if options.folder != "": - content["folder"] = %(options.folder) - if options.search != "": - content["search"] = %(options.search.decodeURL) - if options.tags != "": - content["tags"] = newJArray() - for tag in options.tags.replace("+", "%2B").decodeURL.split(","): - content["tags"].add(%tag) - if orig_limit > 0: - content["limit"] = %orig_limit - if orig_offset > 0: - content["offset"] = %orig_offset - if options.orderby != "": - content["sort"] = %options.orderby - content["total"] = %total - content["execution_time"] = %(cputime()-t0) - content["results"] = docs - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc getInfo*(LS: LiteStore, req: LSRequest): LSResponse = - let info = LS.store.retrieveInfo() - let version = info[0] - let total_documents = info[1] - let total_tags = LS.store.countTags() - let tags = LS.store.retrieveTagsWithTotals() - var content = newJObject() - content["version"] = %(LS.appname & " v" & LS.appversion) - content["datastore_version"] = %version - content["size"] = %($((LS.file.getFileSize().float/(1024*1024)).formatFloat(ffDecimal, 2)) & " MB") - content["read_only"] = %LS.readonly - content["log_level"] = %LS.loglevel - if LS.directory.len == 0: - content["directory"] = newJNull() - else: - content["directory"] = %LS.directory - content["mount"] = %LS.mount - content["total_documents"] = %total_documents - content["total_tags"] = %total_tags - content["tags"] = tags - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = content.pretty - result.code = Http200 - -proc putIndex*(LS: LiteStore, id, field: string, req: LSRequest): LSResponse = - try: - if (not id.match(PEG_INDEX)): - return resError(Http400, "invalid index ID: $1" % id) - if (not field.match(PEG_JSON_FIELD)): - return resError(Http400, "invalid field path: $1" % field) - if (LS.store.retrieveIndex(id) != newJNull()): - return resError(Http409, "Index already exists: $1" % id) - LS.store.createIndex(id, field) - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field] - result.code = Http200 - except: - eWarn() - result = resError(Http500, "Unable to create index.") - -proc deleteIndex*(LS: LiteStore, id: string, req: LSRequest): LSResponse = - if (not id.match(PEG_INDEX)): - return resError(Http400, "invalid index ID: $1" % id) - if (LS.store.retrieveIndex(id) == newJNull()): - return resError(Http404, "Index not found: $1" % id) - try: - LS.store.dropIndex(id) - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, 
result.headers) - result.headers["Content-Length"] = "0" - result.content = "" - result.code = Http204 - except: - eWarn() - result = resError(Http500, "Unable to delete index.") - -proc postDocument*(LS: LiteStore, body: string, ct: string, folder="", req: LSRequest): LSResponse = - if not folder.isFolder: - return resError(Http400, "Invalid folder specified when creating document: $1" % folder) - try: - var doc = LS.store.createDocument(folder, body, ct) - if doc != "": - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = doc - result.code = Http201 - else: - result = resError(Http500, "Unable to create document.") - except: - eWarn() - result = resError(Http500, "Unable to create document.") - -proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, req: LSRequest): LSResponse = - if id.isFolder: - return resError(Http400, "Invalid ID '$1' (Document IDs cannot end with '/')." % id) - let doc = LS.store.retrieveDocument(id) - if doc.data == "": - # Create a new document - var doc = LS.store.createDocument(id, body, ct) - if doc != "": - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = doc - result.code = Http201 - else: - result = resError(Http500, "Unable to create document.") - else: - # Update existing document - try: - var doc = LS.store.updateDocument(id, body, ct) - if doc != "": - result.headers = ctJsonHeader() - setOrigin(LS, req, result.headers) - result.content = doc - result.code = Http200 - else: - result = resError(Http500, "Unable to update document '$1'." % id) - except: - result = resError(Http500, "Unable to update document '$1'." % id) - -proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse = - var apply = true - let jbody = body.parseJson - if jbody.kind != JArray: - return resError(Http400, "Bad request: PATCH request body is not an array.") - var options = newQueryOptions() - options.select = @["documents.id AS id", "created", "modified", "data"] - let doc = LS.store.retrieveRawDocument(id, options) - if doc == "": - return resDocumentNotFound(id) - let jdoc = doc.parseJson - var tags = newSeq[string]() - var origTags = newSeq[string]() - for tag in jdoc["tags"].items: - tags.add(tag.str) - origTags.add(tag.str) - var data: JsonNode - var origData: JsonNode - if tags.contains("$subtype:json"): - try: - origData = jdoc["data"].getStr.parseJson - data = origData.copy - except: - discard - var c = 1 - for item in jbody.items: - if item.hasKey("op") and item.hasKey("path"): - if not item.hasKey("value"): - item["value"] = %"" - try: - apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) - if not apply: - break - except: - return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) - else: - return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c) - c.inc - if apply: - if origData.len > 0 and origData != data: - try: - var doc = LS.store.updateDocument(id, data.pretty, "application/json") - if doc == "": - return resError(Http500, "Unable to patch document '$1'." 
% id) - except: - return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg()) - if origTags != tags: - try: - for t1 in jdoc["tags"].items: - discard LS.store.destroyTag(t1.str, id, true) - for t2 in tags: - if t2 != "": - LS.store.createTag(t2, id, true) - except: - return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) - return LS.getRawDocument(id, newQueryOptions(), req) - -# Main routing - -proc options*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - case resource: - of "info": - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - if id != "": - return resError(Http404, "Info '$1' not found." % id) - else: - result.code = Http204 - result.content = "" - of "dir": - result.code = Http204 - result.content = "" - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - of "tags": - result.code = Http204 - result.content = "" - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - of "indexes": - result.code = Http204 - result.content = "" - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - if id != "": - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - else: - result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE" - else: - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - of "docs": - var folder: string - if id.isFolder: - folder = id - if folder.len > 0: - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS, POST, PUT" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST, PUT" - elif id != "": - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" - result.headers["Allow-Patch"] = "application/json-patch+json" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" - 
else: - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "HEAD, GET, OPTIONS, POST" - result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST" - else: - discard # never happens really. - -proc head*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - var options = newQueryOptions() - options.select = @["documents.id AS id", "created", "modified"] - if id.isFolder: - options.folder = id - try: - parseQueryOptions(req.url.query, options); - if id != "" and options.folder == "": - result = LS.getRawDocument(id, options, req) - result.content = "" - else: - result = LS.getRawDocuments(options, req) - result.content = "" - except: - return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) - -proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - case resource: - of "docs": - var options = newQueryOptions() - if id.isFolder: - options.folder = id - if req.url.query.contains("contents=false"): - options.select = @["documents.id AS id", "created", "modified"] - try: - parseQueryOptions(req.url.query, options); - if id != "" and options.folder == "": - if req.url.query.contains("raw=true") or req.headers.hasKey("Accept") and req.headers["Accept"] == "application/json": - return LS.getRawDocument(id, options, req) - else: - return LS.getDocument(id, options, req) - else: - return LS.getRawDocuments(options, req) - except: - let e = getCurrentException() - let trace = e.getStackTrace() - echo trace - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "tags": - var options = newQueryOptions() - try: - parseQueryOptions(req.url.query, options); - if id != "": - return LS.getTag(id, options, req) - else: - return LS.getTags(options, req) - except: - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "indexes": - var options = newQueryOptions() - try: - parseQueryOptions(req.url.query, options); - if id != "": - return LS.getIndex(id, options, req) - else: - return LS.getIndexes(options, req) - except: - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "info": - if id != "": - return resError(Http404, "Info '$1' not found." % id) - return LS.getInfo(req) - else: - discard # never happens really. 
- -proc post*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - var ct = "text/plain" - if req.headers.hasKey("Content-Type"): - ct = req.headers["Content-Type"] - return LS.postDocument(req.body.strip, ct, id, req) - -proc put*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - if id != "": - if resource == "indexes": - var field = "" - try: - field = parseJson(req.body.strip)["field"].getStr - except: - return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) - return LS.putIndex(id, field, req) - else: # Assume docs - var ct = "text/plain" - if req.headers.hasKey("Content-Type"): - ct = req.headers["Content-Type"] - return LS.putDocument(id, req.body.strip, ct, req) - else: - return resError(Http400, "Bad request: document ID must be specified in PUT requests.") - -proc delete*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - if id != "": - if resource == "indexes": - return LS.deleteIndex(id, req) - else: # Assume docs - return LS.deleteDocument(id, req) - else: - return resError(Http400, "Bad request: document ID must be specified in DELETE requests.") - -proc patch*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - if id != "": - return LS.patchDocument(id, req.body, req) - else: - return resError(Http400, "Bad request: document ID must be specified in PATCH requests.") - -proc serveFile*(req: LSRequest, LS: LiteStore, id: string): LSResponse = - let path = LS.directory / id - var reqMethod = $req.reqMethod - if req.headers.hasKey("X-HTTP-Method-Override"): - reqMethod = req.headers["X-HTTP-Method-Override"] - case reqMethod.toUpperAscii: - of "OPTIONS": - return validate(req, LS, "dir", id, options) - of "GET": - if path.fileExists: - try: - let contents = path.readFile - let parts = path.splitFile - if CONTENT_TYPES.hasKey(parts.ext): - result.headers = CONTENT_TYPES[parts.ext].ctHeader - else: - result.headers = ctHeader("text/plain") - setOrigin(LS, req, result.headers) - result.content = contents - result.code = Http200 - except: - return resError(Http500, "Unable to read file '$1'." % path) - else: - return resError(Http404, "File '$1' not found." 
% path) - else: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - -proc route*(req: LSRequest, LS: LiteStore, resource = "docs", id = ""): LSResponse = - var reqMethod = $req.reqMethod - if req.headers.hasKey("X-HTTP-Method-Override"): - reqMethod = req.headers["X-HTTP-Method-Override"] - case reqMethod.toUpperAscii: - of "POST": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, post) - of "PUT": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, put) - of "DELETE": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, delete) - of "HEAD": - return validate(req, LS, resource, id, head) - of "OPTIONS": - return validate(req, LS, resource, id, options) - of "GET": - return validate(req, LS, resource, id, get) - of "PATCH": - if LS.readonly: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - return validate(req, LS, resource, id, patch) - else: - return resError(Http405, "Method not allowed: $1" % $req.reqMethod) - -proc newSimpleLSRequest(meth: HttpMethod, resource, id, body = "", params = "", headers = newHttpHeaders()): LSRequest = - result.reqMethod = meth - result.body = body - result.headers = headers - result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", resource, id, params]) - -proc get(resource, id: string, params = ""): LSResponse = - return newSimpleLSRequest(HttpGet, resource, id, "", params).get(LS, resource, id) - -proc post(resource, folder, body: string, ct = ""): LSResponse = - var headers = newHttpHeaders() - if ct != "": - headers["Content-Type"] = ct - return newSimpleLSRequest(HttpPost, resource, "", body, "", headers).post(LS, resource, folder & "/") - -proc put(resource, id, body: string, ct = ""): LSResponse = - var headers = newHttpHeaders() - if ct != "": - headers["Content-Type"] = ct - return newSimpleLSRequest(HttpPut, resource, id, body, "", headers).put(LS, resource, id) - -proc patch(resource, id, body: string): LSResponse = - var headers = newHttpHeaders() - headers["Content-Type"] = "application/json" - return newSimpleLSRequest(HttpPatch, resource, id, body, "", headers).patch(LS, resource, id) - -proc delete(resource, id: string): LSResponse = - return newSimpleLSRequest(HttpPatch, resource, id).delete(LS, resource, id) - -proc head(resource, id: string): LSResponse = - return newSimpleLSRequest(HttpHead, resource, id).head(LS, resource, id) - -proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, origId: string) = - var api_idx = ctx.duk_push_object() - # GET - var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let params = duk_get_string(ctx, 2) - let resp = get($resource, $id, $params) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, get, 3) - discard ctx.duk_put_prop_string(api_idx, "get") - # POST - var post: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let folder = duk_get_string(ctx, 1) - let body = duk_get_string(ctx, 2) - let ct = duk_get_string(ctx, 3) - let resp = 
post($resource, $folder, $body, $ct) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, post, 4) - discard ctx.duk_put_prop_string(api_idx, "post") - # PUT - var put: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let body = duk_get_string(ctx, 2) - let ct = duk_get_string(ctx, 3) - let resp = put($resource, $id, $body, $ct) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, put, 4) - discard ctx.duk_put_prop_string(api_idx, "put") - # PATCH - var patch: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let body = duk_get_string(ctx, 2) - let resp = patch($resource, $id, $body) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, patch, 3) - discard ctx.duk_put_prop_string(api_idx, "patch") - # DELETE - var delete: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let resp = delete($resource, $id) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, delete, 2) - discard ctx.duk_put_prop_string(api_idx, "delete") - # HEAD - var head: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = - let resource = duk_get_string(ctx, 0) - let id = duk_get_string(ctx, 1) - let resp = head($resource, $id) - var res_idx = ctx.duk_push_object() - ctx.duk_push_int(cast[cint](resp.code)) - discard ctx.duk_put_prop_string(res_idx, "code") - discard ctx.duk_push_string(resp.content.cstring) - discard ctx.duk_put_prop_string(res_idx, "content") - return 1 - ) - discard duk_push_c_function(ctx, head, 2) - discard ctx.duk_put_prop_string(api_idx, "head") - discard ctx.duk_put_global_string("$store") - -proc jError(ctx: DTContext): LSResponse = - return resError(Http500, "Middleware Error: " & $ctx.duk_safe_to_string(-1)) - -proc getMiddleware*(LS: LiteStore, id: string): string = - if not LS.middleware.hasKey(id): - # Attempt to retrieve resource from system documents - let options = newQueryOptions(true) - let doc = LS.store.retrieveDocument("middleware/" & id & ".js", options) - result = doc.data - if result == "": - LOG.warn("Middleware '$1' not found" % id) - else: - result = LS.middleware[id] - -proc getMiddlewareSeq(resource, id, meth: string): seq[string] = - result = newSeq[string]() - if LS.config.kind != JObject or not LS.config.hasKey("resources"): - return - var reqUri = "/" & resource & "/" & id - if reqUri[^1] == '/': - reqUri.removeSuffix({'/'}) - let parts = reqUri.split("/") - let ancestors = parts[1..parts.len-2] - var 
currentPath = "" - var currentPaths = "" - for p in ancestors: - currentPath &= "/" & p - currentPaths = currentPath & "/*" - if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("middleware"): - let mw = LS.config["resources"][currentPaths][meth]["middleware"] - if (mw.kind == JArray): - for m in mw: - result.add m.getStr - if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): - let mw = LS.config["resources"][reqUri][meth]["middleware"] - if (mw.kind == JArray): - for m in mw: - result.add m.getStr - -proc execute*(req: var LSRequest, LS: LiteStore, resource, id: string): LSResponse = - let middleware = getMiddlewareSeq(resource, id, $req.reqMethod) - LOG.debug("Middleware: " & middleware.join(" -> ")); - if middleware.len == 0: - return route(req, LS, resource, id) - var jReq = $(%* req) - LOG.debug("Request: " & jReq) - var jRes = """{ - "code": 200, - "content": {}, - "final": false, - "headers": { - "Access-Control-Allow-Origin": "*", - "Access-Control-Allow-Headers": "Authorization, Content-Type", - "Server": "$1", - "Content-Type": "application/json" - } - }""" % [LS.appname & "/" & LS.appversion] - var context = "{}" - # Create execution context - var ctx = duk_create_heap_default() - duk_console_init(ctx) - duk_print_alert_init(ctx) - LS.registerStoreApi(ctx, resource, id) - if ctx.duk_peval_string(cstring("($1)" % $jReq)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$req") - if ctx.duk_peval_string(cstring("($1)" % $jRes)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$res") - if ctx.duk_peval_string(cstring("($1)" % $context)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$ctx") - # Middleware-specific functions - var i = 0 - var abort = 0 - while abort != 1 and i < middleware.len: - let code = LS.getMiddleware(middleware[i]) - LOG.debug("Evaluating middleware '$1'" % middleware[i]) - if ctx.duk_peval_string(code.cstring) != 0: - return jError(ctx) - abort = ctx.duk_get_boolean(-1) - i.inc - # Retrieve response, and request - if ctx.duk_peval_string("JSON.stringify($res);") != 0: - return jError(ctx) - let fRes = parseJson($(ctx.duk_get_string(-1))).newLSResponse - if ctx.duk_peval_string("JSON.stringify($req);") != 0: - return jError(ctx) - let fReq = parseJson($(ctx.duk_get_string(-1))).newLSRequest() - ctx.duk_destroy_heap(); - LOG.debug("abort: $1", [$abort]) - if abort == 1: - return fRes - return route(fReq, LS, resource, id) +import + asynchttpserver, + strutils, + sequtils, + cgi, + strtabs, + pegs, + json, + os, + uri, + times +import + types, + contenttypes, + core, + utils, + logger, + duktape + +# Helper procs + +proc sqlOp(op: string): string = + let table = newStringTable() + table["not eq"] = "<>" + table["eq"] = "==" + table["gt"] = ">" + table["gte"] = ">=" + table["lt"] = "<" + table["lte"] = "<=" + table["contains"] = "contains" + table["like"] = "like" + return table[op] + +proc orderByClauses*(str: string): string = + var clauses = newSeq[string]() + var fragments = str.split(",") + let clause = peg""" + clause <- {[-+]} {field} + field <- ('id' / 'created' / 'modified' / path) + path <- '$' (objField)+ + ident <- [a-zA-Z0-9_]+ + objField <- '.' 
ident + """ + for f in fragments: + var matches = @["", ""] + if f.find(clause, matches) != -1: + var field = matches[1] + if field[0] == '$': + field = "json_extract(documents.data, '$1')" % matches[1] + if matches[0] == "-": + clauses.add("$1 COLLATE NOCASE DESC" % field) + else: + clauses.add("$1 COLLATE NOCASE ASC" % field) + return clauses.join(", ") + +proc selectClause*(str: string, options: var QueryOptions) = + let tokens = """ + path <- '$' (objItem / objField)+ + ident <- [a-zA-Z0-9_]+ + objIndex <- '[' \d+ ']' + objField <- '.' ident + objItem <- objField objIndex + """ + let fields = peg(""" + fields <- ^{field} (\s* ',' \s* {field})*$ + field <- path \s+ ('as' / 'AS') \s+ ident + """ & tokens) + let field = peg(""" + field <- ^{path} \s+ ('as' / 'AS') \s+ {ident}$ + """ & tokens) + var fieldMatches = newSeq[string](10) + if str.strip.match(fields, fieldMatches): + for m in fieldMatches: + if m.len > 0: + var rawTuple = newSeq[string](2) + if m.match(field, rawTuple): + options.jsonSelect.add((path: rawTuple[0], alias: rawTuple[1])) + +proc filterClauses*(str: string, options: var QueryOptions) = + let tokens = """ + operator <- 'not eq' / 'eq' / 'gte' / 'gt' / 'lte' / 'lt' / 'contains' / 'like' + value <- string / number / 'null' / 'true' / 'false' + string <- '"' ('\\"' . / [^"])* '"' + number <- '-'? '0' / [1-9] [0-9]* ('.' [0-9]+)? (( 'e' / 'E' ) ( '+' / '-' )? [0-9]+)? + path <- '$' (objItem / objField)+ + ident <- [a-zA-Z0-9_]+ + objIndex <- '[' \d+ ']' + objField <- '.' ident + objItem <- objField objIndex + """ + let clause = peg(""" + clause <- {path} \s+ {operator} \s+ {value} + """ & tokens) + let andClauses = peg(""" + andClauses <- ^{clause} (\s+ 'and' \s+ {clause})*$ + clause <- path \s+ operator \s+ value + """ & tokens) + let orClauses = peg(""" + orClauses <- ^{andClauses} (\s+ 'or' \s+ {andClauses})*$ + andClauses <- clause (\s+ 'and' \s+ clause)* + clause <- path \s+ operator \s+ value + """ & tokens) + var orClausesMatches = newSeq[string](10) + discard str.strip.match(orClauses, orClausesMatches) + var parsedClauses = newSeq[seq[seq[string]]]() + for orClause in orClausesMatches: + if orClause.len > 0: + var andClausesMatches = newSeq[string](10) + discard orClause.strip.match(andClauses, andClausesMatches) + var parsedAndClauses = newSeq[seq[string]]() + for andClause in andClausesMatches: + if andClause.len > 0: + var clauses = newSeq[string](3) + discard andClause.strip.match(clause, clauses) + clauses[1] = sqlOp(clauses[1]) + if clauses[2] == "true": + clauses[2] = "1" + elif clauses[2] == "false": + clauses[2] = "0" + parsedAndClauses.add clauses + if parsedAndClauses.len > 0: + parsedClauses.add parsedAndClauses + if parsedClauses.len == 0: + return + var currentArr = 0 + var tables = newSeq[string]() + let resOrClauses = parsedClauses.map do (it: seq[seq[string]]) -> string: + let resAndClauses = it.map do (x: seq[string]) -> string: + if x[1] == "contains": + currentArr = currentArr + 1 + tables.add "json_each(documents.data, '$1') AS arr$2" % [x[0], $currentArr] + return "arr$1.value == $2" % [$currentArr, x[2]] + else: + var arr = @[x[0], x[1], x[2]] + if x[1] == "like": + arr[2] = x[2].replace('*', '%') + return "json_extract(documents.data, '$1') $2 $3 " % arr + return resAndClauses.join(" AND ") + options.tables = options.tables & tables + options.jsonFilter = resOrClauses.join(" OR ") + +proc parseQueryOption*(fragment: string, options: var QueryOptions) = + if fragment == "": + return + var pair = fragment.split('=') + if pair.len < 2 or 
pair[1] == "": + raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) + try: + pair[1] = pair[1].replace("+", "%2B").decodeURL + except CatchableError: + raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) + case pair[0]: + of "filter": + filterClauses(pair[1], options) + if options.jsonFilter == "": + raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[1].replace("\"", "\\\"")) + of "select": + selectClause(pair[1], options) + if options.jsonSelect.len == 0: + raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[1].replace("\"", "\\\"")) + of "like": + options.like = pair[1] + of "search": + options.search = pair[1] + of "tags": + options.tags = pair[1] + of "created-after": + try: + options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg()) + of "created-before": + try: + options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg()) + of "modified-after": + try: + options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid modified-after value: $1" % getCurrentExceptionMsg()) + of "modified-before": + try: + options.modifiedBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg()) + of "limit": + try: + options.limit = pair[1].parseInt + except CatchableError: + raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) + of "offset": + try: + options.offset = pair[1].parseInt + except CatchableError: + raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) + of "sort": + let orderby = pair[1].orderByClauses() + if orderby != "": + options.orderby = orderby + else: + raise newException(EInvalidRequest, "Invalid sort value: $1" % pair[1]) + of "contents", "raw": + discard + else: + discard + +proc parseQueryOptions*(querystring: string, options: var QueryOptions) = + var fragments = querystring.split('&') + for f in fragments: + f.parseQueryOption(options) + +proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse = + if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch: + var ct = "" + let body = req.body.strip + if body == "": + return resError(Http400, "Bad request: No content specified for document.") + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + case ct: + of "application/json": + try: + discard body.parseJson() + except CatchableError: + return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) + else: + discard + return cb(req, LS, resource, id) + +proc patchTag(tags: var seq[string], index: int, op, path, value: string): bool = + LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4."
% [op, $index, $value, $tags.len]) + case op: + of "remove": + let tag = tags[index] + if not tag.startsWith("$"): + tags[index] = "" # Not removing element, otherwise subsequent indexes won't work! + else: + raise newException(EInvalidRequest, "cannot remove system tag: $1" % tag) + of "add": + if value.match(PEG_USER_TAG): + tags.insert(value, index) + else: + if value.strip == "": + raise newException(EInvalidRequest, "tag not specified." % value) + else: + raise newException(EInvalidRequest, "invalid tag: $1" % value) + of "replace": + if value.match(PEG_USER_TAG): + if tags[index].startsWith("$"): + raise newException(EInvalidRequest, "cannot replace system tag: $1" % tags[index]) + else: + tags[index] = value + else: + if value.strip == "": + raise newException(EInvalidRequest, "tag not specified." % value) + else: + raise newException(EInvalidRequest, "invalid tag: $1" % value) + of "test": + if tags[index] != value: + return false + else: + raise newException(EInvalidRequest, "invalid patch operation: $1" % op) + return true + +proc patchData*(data: var JsonNode, origData: JsonNode, op: string, path: string, value: JsonNode): bool = + LOG.debug("- PATCH -> $1 path $2 with $3" % [op, path, $value]) + var keys = path.replace(peg"^\/data\/", "").split("/") + if keys.len == 0: + raise newException(EInvalidRequest, "no valid path specified: $1" % path) + var d = data + var dorig = origData + var c = 1 + for key in keys: + if d.kind == JArray: + try: + var index = key.parseInt + if c >= keys.len: + d.elems[index] = value + case op: + of "remove": + d.elems.del(index) + of "add": + d.elems.insert(value, index) + of "replace": + d.elems[index] = value + of "test": + if d.elems[index] != value: + return false + else: + raise newException(EInvalidRequest, "invalid patch operation: $1" % op) + else: + d = d[index] + dorig = dorig[index] + except CatchableError: + raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path]) + else: + if c >= keys.len: + case op: + of "remove": + if d.hasKey(key): + d.delete(key) + else: + raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + of "add": + d[key] = value + of "replace": + if d.hasKey(key): + d[key] = value + else: + raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + of "test": + if dorig.hasKey(key): + if dorig[key] != value: + return false + else: + raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + else: + raise newException(EInvalidRequest, "invalid patch operation: $1" % op) + else: + d = d[key] + dorig = dorig[key] + c += 1 + return true + + +proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[string], op: string, path: string, value: JsonNode): bool = + var matches = @[""] + let p = peg""" + path <- ^tagPath / fieldPath$ + tagPath <- '\/tags\/' {\d+} + fieldPath <- '\/data\/' ident ('\/' ident)* + ident <- [a-zA-Z0-9_]+ / '-' + """ + if path.find(p, matches) == -1: + raise newException(EInvalidRequest, "cannot patch path '$1'" % path) + if path.match(peg"^\/tags\/"): + let index = matches[0].parseInt + if value.kind != JString: + raise newException(EInvalidRequest, "tag '$1' is not a string." 
% $value) + let tag = value.getStr + return patchTag(tags, index, op, path, tag) + elif tags.contains("$subtype:json"): + return patchData(data, origData, op, path, value) + else: + raise newException(EInvalidRequest, "cannot patch data of a non-JSON document.") + +# Low level procs + +proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveTag(id, options) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + if doc == newJNull(): + result = resTagNotFound(id) + else: + result.content = $doc + result.code = Http200 + +proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveIndex(id, options) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + if doc == newJNull(): + result = resIndexNotFound(id) + else: + result.content = $doc + result.code = Http200 + +proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveRawDocument(id, options) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + if doc == "": + result = resDocumentNotFound(id) + else: + result.content = doc + result.code = Http200 + +proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = + let doc = LS.store.retrieveDocument(id, options) + if doc.data == "": + result = resDocumentNotFound(id) + else: + result.headers = doc.contenttype.ctHeader + setOrigin(LS, req, result.headers) + result.content = doc.data + result.code = Http200 + +proc deleteDocument*(LS: LiteStore, id: string, req: LSRequest): LSResponse = + let doc = LS.store.retrieveDocument(id) + if doc.data == "": + result = resDocumentNotFound(id) + else: + try: + let res = LS.store.destroyDocument(id) + if res == 0: + result = resError(Http500, "Unable to delete document '$1'" % id) + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Content-Length"] = "0" + result.content = "" + result.code = Http204 + except CatchableError: + result = resError(Http500, "Unable to delete document '$1'" % id) + +proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = + var options = options + let t0 = cpuTime() + let docs = LS.store.retrieveTags(options) + let orig_limit = options.limit + let orig_offset = options.offset + options.limit = 0 + options.offset = 0 + options.select = @["COUNT(tag_id)"] + let total = LS.store.countTags(prepareSelectTagsQuery(options), options.like.replace("*", "%")) + var content = newJObject() + if options.like != "": + content["like"] = %(options.like.decodeURL) + if orig_limit > 0: + content["limit"] = %orig_limit + if orig_offset > 0: + content["offset"] = %orig_offset + content["total"] = %total + content["execution_time"] = %(cputime()-t0) + content["results"] = docs + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = + var options = options + let t0 = cpuTime() + let docs = LS.store.retrieveIndexes(options) + let orig_limit = options.limit + let orig_offset = options.offset + options.limit = 0 + options.offset = 0 + options.select = @["COUNT(name)"] + let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), 
options.like.replace("*", "%")) + var content = newJObject() + if options.like != "": + content["like"] = %(options.like.decodeURL) + if orig_limit > 0: + content["limit"] = %orig_limit + if orig_offset > 0: + content["offset"] = %orig_offset + content["total"] = %total + content["execution_time"] = %(cputime()-t0) + content["results"] = docs + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = + var options = options + let t0 = cpuTime() + let docs = LS.store.retrieveRawDocuments(options) + let orig_limit = options.limit + let orig_offset = options.offset + options.limit = 0 + options.offset = 0 + options.select = @["COUNT(docid)"] + let total = LS.store.retrieveRawDocuments(options)[0].num + var content = newJObject() + if options.folder != "": + content["folder"] = %(options.folder) + if options.search != "": + content["search"] = %(options.search.decodeURL) + if options.tags != "": + content["tags"] = newJArray() + for tag in options.tags.replace("+", "%2B").decodeURL.split(","): + content["tags"].add(%tag) + if orig_limit > 0: + content["limit"] = %orig_limit + if orig_offset > 0: + content["offset"] = %orig_offset + if options.orderby != "": + content["sort"] = %options.orderby + content["total"] = %total + content["execution_time"] = %(cputime()-t0) + content["results"] = docs + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc getInfo*(LS: LiteStore, req: LSRequest): LSResponse = + let info = LS.store.retrieveInfo() + let version = info[0] + let total_documents = info[1] + let total_tags = LS.store.countTags() + let tags = LS.store.retrieveTagsWithTotals() + var content = newJObject() + content["version"] = %(LS.appname & " v" & LS.appversion) + content["datastore_version"] = %version + content["size"] = %($((LS.file.getFileSize().float/(1024*1024)).formatFloat(ffDecimal, 2)) & " MB") + content["read_only"] = %LS.readonly + content["log_level"] = %LS.loglevel + if LS.directory.len == 0: + content["directory"] = newJNull() + else: + content["directory"] = %LS.directory + content["mount"] = %LS.mount + content["total_documents"] = %total_documents + content["total_tags"] = %total_tags + content["tags"] = tags + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc putIndex*(LS: LiteStore, id, field: string, req: LSRequest): LSResponse = + try: + if (not id.match(PEG_INDEX)): + return resError(Http400, "invalid index ID: $1" % id) + if (not field.match(PEG_JSON_FIELD)): + return resError(Http400, "invalid field path: $1" % field) + if (LS.store.retrieveIndex(id) != newJNull()): + return resError(Http409, "Index already exists: $1" % id) + LS.store.createIndex(id, field) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field] + result.code = Http200 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to create index.") + +proc deleteIndex*(LS: LiteStore, id: string, req: LSRequest): LSResponse = + if (not id.match(PEG_INDEX)): + return resError(Http400, "invalid index ID: $1" % id) + if (LS.store.retrieveIndex(id) == newJNull()): + return resError(Http404, "Index not found: $1" % id) + try: + LS.store.dropIndex(id) + result.headers = 
newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Content-Length"] = "0" + result.content = "" + result.code = Http204 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to delete index.") + +proc postDocument*(LS: LiteStore, body: string, ct: string, folder="", req: LSRequest): LSResponse = + if not folder.isFolder: + return resError(Http400, "Invalid folder specified when creating document: $1" % folder) + try: + var doc = LS.store.createDocument(folder, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http201 + else: + result = resError(Http500, "Unable to create document.") + except CatchableError: + eWarn() + result = resError(Http500, "Unable to create document.") + +proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, req: LSRequest): LSResponse = + if id.isFolder: + return resError(Http400, "Invalid ID '$1' (Document IDs cannot end with '/')." % id) + let doc = LS.store.retrieveDocument(id) + if doc.data == "": + # Create a new document + var doc = LS.store.createDocument(id, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http201 + else: + result = resError(Http500, "Unable to create document.") + else: + # Update existing document + try: + var doc = LS.store.updateDocument(id, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http200 + else: + result = resError(Http500, "Unable to update document '$1'." % id) + except CatchableError: + result = resError(Http500, "Unable to update document '$1'." % id) + +proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse = + var apply = true + let jbody = body.parseJson + if jbody.kind != JArray: + return resError(Http400, "Bad request: PATCH request body is not an array.") + var options = newQueryOptions() + options.select = @["documents.id AS id", "created", "modified", "data"] + let doc = LS.store.retrieveRawDocument(id, options) + if doc == "": + return resDocumentNotFound(id) + let jdoc = doc.parseJson + var tags = newSeq[string]() + var origTags = newSeq[string]() + for tag in jdoc["tags"].items: + tags.add(tag.str) + origTags.add(tag.str) + var data: JsonNode + var origData: JsonNode + if tags.contains("$subtype:json"): + try: + origData = jdoc["data"].getStr.parseJson + data = origData.copy + except CatchableError: + discard + var c = 1 + for item in jbody.items: + if item.hasKey("op") and item.hasKey("path"): + if not item.hasKey("value"): + item["value"] = %"" + try: + apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) + if not apply: + break + except CatchableError: + return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) + else: + return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c) + c.inc + if apply: + if origData.len > 0 and origData != data: + try: + var doc = LS.store.updateDocument(id, data.pretty, "application/json") + if doc == "": + return resError(Http500, "Unable to patch document '$1'." 
% id) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) + if origTags != tags: + try: + for t1 in jdoc["tags"].items: + discard LS.store.destroyTag(t1.str, id, true) + for t2 in tags: + if t2 != "": + LS.store.createTag(t2, id, true) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) + return LS.getRawDocument(id, newQueryOptions(), req) + +# Main routing + +proc options*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + case resource: + of "info": + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + if id != "": + return resError(Http404, "Info '$1' not found." % id) + else: + result.code = Http204 + result.content = "" + of "dir": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "tags": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "indexes": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + if id != "": + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE" + else: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "docs": + var folder: string + if id.isFolder: + folder = id + if folder.len > 0: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, POST, PUT" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST, PUT" + elif id != "": + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" + result.headers["Allow-Patch"] = "application/json-patch+json" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET,
OPTIONS, PUT, PATCH, DELETE" + else: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, POST" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST" + else: + discard # never happens really. + +proc head*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + var options = newQueryOptions() + options.select = @["documents.id AS id", "created", "modified"] + if id.isFolder: + options.folder = id + try: + parseQueryOptions(req.url.query, options); + if id != "" and options.folder == "": + result = LS.getRawDocument(id, options, req) + result.content = "" + else: + result = LS.getRawDocuments(options, req) + result.content = "" + except CatchableError: + return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) + +proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + case resource: + of "docs": + var options = newQueryOptions() + if id.isFolder: + options.folder = id + if req.url.query.contains("contents=false"): + options.select = @["documents.id AS id", "created", "modified"] + try: + parseQueryOptions(req.url.query, options); + if id != "" and options.folder == "": + if req.url.query.contains("raw=true") or req.headers.hasKey("Accept") and req.headers["Accept"] == "application/json": + return LS.getRawDocument(id, options, req) + else: + return LS.getDocument(id, options, req) + else: + return LS.getRawDocuments(options, req) + except CatchableError: + let e = getCurrentException() + let trace = e.getStackTrace() + echo trace + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "tags": + var options = newQueryOptions() + try: + parseQueryOptions(req.url.query, options); + if id != "": + return LS.getTag(id, options, req) + else: + return LS.getTags(options, req) + except CatchableError: + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "indexes": + var options = newQueryOptions() + try: + parseQueryOptions(req.url.query, options); + if id != "": + return LS.getIndex(id, options, req) + else: + return LS.getIndexes(options, req) + except CatchableError: + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "info": + if id != "": + return resError(Http404, "Info '$1' not found." % id) + return LS.getInfo(req) + else: + discard # never happens really. 
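+ # Editor's note (illustrative comments, not part of the upstream change):
+ # the query grammar handled above accepts fragments such as
+ #   ?filter=$.age gte 18 and $.name eq "Tom"&sort=-modified&limit=10
+ # which filterClauses/orderByClauses translate into SQL along the lines of
+ #   json_extract(documents.data, '$.age') >= 18 AND
+ #   json_extract(documents.data, '$.name') == "Tom"
+ # with "-modified" becoming "modified COLLATE NOCASE DESC".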
+ +proc post*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + var ct = "text/plain" + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + return LS.postDocument(req.body.strip, ct, id, req) + +proc put*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + if resource == "indexes": + var field = "" + try: + field = parseJson(req.body.strip)["field"].getStr + except CatchableError: + return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) + return LS.putIndex(id, field, req) + else: # Assume docs + var ct = "text/plain" + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + return LS.putDocument(id, req.body.strip, ct, req) + else: + return resError(Http400, "Bad request: document ID must be specified in PUT requests.") + +proc delete*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + if resource == "indexes": + return LS.deleteIndex(id, req) + else: # Assume docs + return LS.deleteDocument(id, req) + else: + return resError(Http400, "Bad request: document ID must be specified in DELETE requests.") + +proc patch*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + return LS.patchDocument(id, req.body, req) + else: + return resError(Http400, "Bad request: document ID must be specified in PATCH requests.") + +proc serveFile*(req: LSRequest, LS: LiteStore, id: string): LSResponse = + let path = LS.directory / id + var reqMethod = $req.reqMethod + if req.headers.hasKey("X-HTTP-Method-Override"): + reqMethod = req.headers["X-HTTP-Method-Override"] + case reqMethod.toUpperAscii: + of "OPTIONS": + return validate(req, LS, "dir", id, options) + of "GET": + if path.fileExists: + try: + let contents = path.readFile + let parts = path.splitFile + if CONTENT_TYPES.hasKey(parts.ext): + result.headers = CONTENT_TYPES[parts.ext].ctHeader + else: + result.headers = ctHeader("text/plain") + setOrigin(LS, req, result.headers) + result.content = contents + result.code = Http200 + except CatchableError: + return resError(Http500, "Unable to read file '$1'." % path) + else: + return resError(Http404, "File '$1' not found." 
% path) + else: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + +proc route*(req: LSRequest, LS: LiteStore, resource = "docs", id = ""): LSResponse = + var reqMethod = $req.reqMethod + if req.headers.hasKey("X-HTTP-Method-Override"): + reqMethod = req.headers["X-HTTP-Method-Override"] + case reqMethod.toUpperAscii: + of "POST": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, post) + of "PUT": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, put) + of "DELETE": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, delete) + of "HEAD": + return validate(req, LS, resource, id, head) + of "OPTIONS": + return validate(req, LS, resource, id, options) + of "GET": + return validate(req, LS, resource, id, get) + of "PATCH": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, patch) + else: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + +proc newSimpleLSRequest(meth: HttpMethod, resource = "", id = "", body = "", params = "", headers = newHttpHeaders()): LSRequest = + result.reqMethod = meth + result.body = body + result.headers = headers + result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", resource, id, params]) + +proc get(resource, id: string, params = ""): LSResponse = + return newSimpleLSRequest(HttpGet, resource, id, "", params).get(LS, resource, id) + +proc post(resource, folder, body: string, ct = ""): LSResponse = + var headers = newHttpHeaders() + if ct != "": + headers["Content-Type"] = ct + return newSimpleLSRequest(HttpPost, resource, "", body, "", headers).post(LS, resource, folder & "/") + +proc put(resource, id, body: string, ct = ""): LSResponse = + var headers = newHttpHeaders() + if ct != "": + headers["Content-Type"] = ct + return newSimpleLSRequest(HttpPut, resource, id, body, "", headers).put(LS, resource, id) + +proc patch(resource, id, body: string): LSResponse = + var headers = newHttpHeaders() + headers["Content-Type"] = "application/json" + return newSimpleLSRequest(HttpPatch, resource, id, body, "", headers).patch(LS, resource, id) + +proc delete(resource, id: string): LSResponse = + return newSimpleLSRequest(HttpDelete, resource, id).delete(LS, resource, id) + +proc head(resource, id: string): LSResponse = + return newSimpleLSRequest(HttpHead, resource, id).head(LS, resource, id) + +proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, origId: string) = + var api_idx = ctx.duk_push_object() + # GET + var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let params = duk_get_string(ctx, 2) + let resp = get($resource, $id, $params) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, get, 3) + discard ctx.duk_put_prop_string(api_idx, "get") + # POST + var post: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let folder = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let ct = duk_get_string(ctx, 3) +
let resp = post($resource, $folder, $body, $ct) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, post, 4) + discard ctx.duk_put_prop_string(api_idx, "post") + # PUT + var put: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let ct = duk_get_string(ctx, 3) + let resp = put($resource, $id, $body, $ct) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, put, 4) + discard ctx.duk_put_prop_string(api_idx, "put") + # PATCH + var patch: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let resp = patch($resource, $id, $body) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, patch, 3) + discard ctx.duk_put_prop_string(api_idx, "patch") + # DELETE + var delete: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let resp = delete($resource, $id) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, delete, 2) + discard ctx.duk_put_prop_string(api_idx, "delete") + # HEAD + var head: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let resp = head($resource, $id) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, head, 2) + discard ctx.duk_put_prop_string(api_idx, "head") + discard ctx.duk_put_global_string("$store") + +proc jError(ctx: DTContext): LSResponse = + return resError(Http500, "Middleware Error: " & $ctx.duk_safe_to_string(-1)) + +proc getMiddleware*(LS: LiteStore, id: string): string = + if not LS.middleware.hasKey(id): + # Attempt to retrieve resource from system documents + let options = newQueryOptions(true) + let doc = LS.store.retrieveDocument("middleware/" & id & ".js", options) + result = doc.data + if result == "": + LOG.warn("Middleware '$1' not found" % id) + else: + result = LS.middleware[id] + +proc getMiddlewareSeq(resource, id, meth: string): seq[string] = + result = newSeq[string]() + if LS.config.kind != JObject or not LS.config.hasKey("resources"): + return + var reqUri = "/" & resource & "/" & id + if reqUri[^1] == '/': + reqUri.removeSuffix({'/'}) + let parts = reqUri.split("/") + let ancestors = parts[1..parts.len-2] + 
var currentPath = "" + var currentPaths = "" + for p in ancestors: + currentPath &= "/" & p + currentPaths = currentPath & "/*" + if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("middleware"): + let mw = LS.config["resources"][currentPaths][meth]["middleware"] + if (mw.kind == JArray): + for m in mw: + result.add m.getStr + if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): + let mw = LS.config["resources"][reqUri][meth]["middleware"] + if (mw.kind == JArray): + for m in mw: + result.add m.getStr + +proc execute*(req: var LSRequest, LS: LiteStore, resource, id: string): LSResponse = + let middleware = getMiddlewareSeq(resource, id, $req.reqMethod) + LOG.debug("Middleware: " & middleware.join(" -> ")); + if middleware.len == 0: + return route(req, LS, resource, id) + var jReq = $(%* req) + LOG.debug("Request: " & jReq) + var jRes = """{ + "code": 200, + "content": {}, + "final": false, + "headers": { + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Headers": "Authorization, Content-Type", + "Server": "$1", + "Content-Type": "application/json" + } + }""" % [LS.appname & "/" & LS.appversion] + var context = "{}" + # Create execution context + var ctx = duk_create_heap_default() + duk_console_init(ctx) + duk_print_alert_init(ctx) + LS.registerStoreApi(ctx, resource, id) + if ctx.duk_peval_string(cstring("($1)" % $jReq)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$req") + if ctx.duk_peval_string(cstring("($1)" % $jRes)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$res") + if ctx.duk_peval_string(cstring("($1)" % $context)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$ctx") + # Middleware-specific functions + var i = 0 + var abort = 0 + while abort != 1 and i < middleware.len: + let code = LS.getMiddleware(middleware[i]) + LOG.debug("Evaluating middleware '$1'" % middleware[i]) + if ctx.duk_peval_string(code.cstring) != 0: + return jError(ctx) + abort = ctx.duk_get_boolean(-1) + i.inc + # Retrieve response, and request + if ctx.duk_peval_string("JSON.stringify($res);") != 0: + return jError(ctx) + let fRes = parseJson($(ctx.duk_get_string(-1))).newLSResponse + if ctx.duk_peval_string("JSON.stringify($req);") != 0: + return jError(ctx) + let fReq = parseJson($(ctx.duk_get_string(-1))).newLSRequest() + ctx.duk_destroy_heap(); + LOG.debug("abort: $1", [$abort]) + if abort == 1: + return fRes + return route(fReq, LS, resource, id)
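Most of the churn in the hunk above is mechanical: every bare `except:` handler in the removed version is narrowed to `except CatchableError` in the added one, in line with the Nim 2.0 migration carried by this merge. Apart from a few signature touch-ups (e.g. `newSimpleLSRequest` gaining explicit `= ""` defaults for `resource` and `id`), the logic is unchanged. A minimal, self-contained sketch of the exception-handling pattern (the proc and values below are hypothetical, not taken from LiteStore):

```nim
import std/strutils

# Hypothetical helper mirroring the handlers above: parse a numeric
# query value, turning parse failures into a friendly error.
proc parseLimit(value: string): int =
  try:
    result = value.parseInt
  except CatchableError:
    # Catches ValueError and other recoverable errors; unlike a bare
    # `except:`, it lets Defects propagate, which is safer under Nim 2.x
    # where Defects may not be catchable at all (e.g. with --panics:on).
    raise newException(ValueError, "Invalid limit value: " & getCurrentExceptionMsg())

echo parseLimit("42") # 42
```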
@@ -1,1269 +1,1269 @@
-import - asynchttpserver, - strutils, - sequtils, - cgi, - strtabs, - pegs, - json, - os, - uri, - tables, - times -import - types, - contenttypes, - core, - utils, - logger, - duktape - -# Helper procs - -proc sqlOp(op: string): string = - let table = newStringTable() - table["not eq"] = "<>" - table["eq"] = "==" - table["gt"] = ">" - table["gte"] = ">=" - table["lt"] = "<" - table["lte"] = "<=" - table["contains"] = "contains" - table["like"] = "like" - return table[op] - -proc orderByClauses*(str: string): string = - var clauses = newSeq[string]() - var fragments = str.split(",") - let clause = peg""" - clause <- {[-+]} {field} - field <- ('id' / 'created' / 'modified' / path) - path <- '$' (objField)+ - ident <- [a-zA-Z0-9_]+ - objField <- '.' ident - """ - for f in fragments: - var matches = @["", ""] - if f.find(clause, matches) != -1: - var field = matches[1] - if field[0] == '$': - field = "json_extract(documents.data, '$1')" % matches[1] - if matches[0] == "-": - clauses.add("$1 COLLATE NOCASE DESC" % field) - else: - clauses.add("$1 COLLATE NOCASE ASC" % field) - return clauses.join(", ") - -proc selectClause*(str: string, options: var QueryOptions) = - let tokens = """ - path <- '$' (objItem / objField)+ - ident <- [a-zA-Z0-9_]+ - objIndex <- '[' \d+ ']' - objField <- '.' ident - objItem <- objField objIndex - """ - let fields = peg(""" - fields <- ^{field} (\s* ',' \s* {field})*$ - field <- path \s+ ('as' / 'AS') \s+ ident - """ & tokens) - let field = peg(""" - field <- ^{path} \s+ ('as' / 'AS') \s+ {ident}$ - """ & tokens) - var fieldMatches = newSeq[string](10) - if str.strip.match(fields, fieldMatches): - for m in fieldMatches: - if m.len > 0: - var rawTuple = newSeq[string](2) - if m.match(field, rawTuple): - options.jsonSelect.add((path: rawTuple[0], alias: rawTuple[1])) - -proc filterClauses*(str: string, options: var QueryOptions) = - let tokens = """ - operator <- 'not eq' / 'eq' / 'gte' / 'gt' / 'lte' / 'lt' / 'contains' / 'like' - value <- string / number / 'null' / 'true' / 'false' - string <- '"' ('\\"' . / [^"])* '"' - number <- '-'? '0' / [1-9] [0-9]* ('.' [0-9]+)? (( 'e' / 'E' ) ( '+' / '-' )? [0-9]+)? - path <- '$' (objItem / objField)+ - ident <- [a-zA-Z0-9_]+ - objIndex <- '[' \d+ ']' - objField <- '.' 
ident - objItem <- objField objIndex - """ - let clause = peg(""" - clause <- {path} \s+ {operator} \s+ {value} - """ & tokens) - let andClauses = peg(""" - andClauses <- ^{clause} (\s+ 'and' \s+ {clause})*$ - clause <- path \s+ operator \s+ value - """ & tokens) - let orClauses = peg(""" - orClauses <- ^{andClauses} (\s+ 'or' \s+ {andClauses})*$ - andClauses <- clause (\s+ 'and' \s+ clause)* - clause <- path \s+ operator \s+ value - """ & tokens) - var orClausesMatches = newSeq[string](10) - discard str.strip.match(orClauses, orClausesMatches) - var parsedClauses = newSeq[seq[seq[string]]]() - for orClause in orClausesMatches: - if orClause.len > 0: - var andClausesMatches = newSeq[string](10) - discard orClause.strip.match(andClauses, andClausesMatches) - var parsedAndClauses = newSeq[seq[string]]() - for andClause in andClausesMatches: - if andClause.len > 0: - var clauses = newSeq[string](3) - discard andClause.strip.match(clause, clauses) - clauses[1] = sqlOp(clauses[1]) - if clauses[2] == "true": - clauses[2] = "1" - elif clauses[2] == "false": - clauses[2] = "0" - parsedAndClauses.add clauses - if parsedAndClauses.len > 0: - parsedClauses.add parsedAndClauses - if parsedClauses.len == 0: - return - var currentArr = 0 - var tables = newSeq[string]() - let resOrClauses = parsedClauses.map do (it: seq[seq[string]]) -> string: - let resAndClauses = it.map do (x: seq[string]) -> string: - if x[1] == "contains": - currentArr = currentArr + 1 - tables.add "json_each(documents.data, '$1') AS arr$2" % [x[0], $currentArr] - return "arr$1.value == $2" % [$currentArr, x[2]] - else: - var arr = @[x[0], x[1], x[2]] - if x[1] == "like": - arr[2] = x[2].replace('*', '%') - return "json_extract(documents.data, '$1') $2 $3 " % arr - return resAndClauses.join(" AND ") - options.tables = options.tables & tables - options.jsonFilter = resOrClauses.join(" OR ") - -proc parseQueryOption*(fragment: string, options: var QueryOptions) = - if fragment == "": - return - var pair = fragment.split('=') - if pair.len < 2 or pair[1] == "": - raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) - try: - pair[1] = pair[1].replace("+", "%2B").decodeURL - except: - raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) - case pair[0]: - of "filter": - filterClauses(pair[1], options) - if options.jsonFilter == "": - raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[1].replace("\"", "\\\"")) - of "select": - selectClause(pair[1], options) - if options.jsonSelect.len == 0: - raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[1].replace("\"", "\\\"")) - of "like": - options.like = pair[1] - of "search": - options.search = pair[1] - of "tags": - options.tags = pair[1] - of "created-after": - try: - options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg()) - of "created-before": - try: - options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg()) - of "modified-after": - try: - options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg()) - of "modified-before": - try: - options.modifiedBefore = 
-proc parseQueryOption*(fragment: string, options: var QueryOptions) =
-  if fragment == "":
-    return
-  var pair = fragment.split('=')
-  if pair.len < 2 or pair[1] == "":
-    raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment)
-  try:
-    pair[1] = pair[1].replace("+", "%2B").decodeURL
-  except:
-    raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment)
-  case pair[0]:
-    of "filter":
-      filterClauses(pair[1], options)
-      if options.jsonFilter == "":
-        raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[1].replace("\"", "\\\""))
-    of "select":
-      selectClause(pair[1], options)
-      if options.jsonSelect.len == 0:
-        raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[1].replace("\"", "\\\""))
-    of "like":
-      options.like = pair[1]
-    of "search":
-      options.search = pair[1]
-    of "tags":
-      options.tags = pair[1]
-    of "created-after":
-      try:
-        options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
-        raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg())
-    of "created-before":
-      try:
-        options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
-        raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg())
-    of "modified-after":
-      try:
-        options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
-        raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg())
-    of "modified-before":
-      try:
-        options.modifiedBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'")
-      except:
-        raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg())
-    of "limit":
-      try:
-        options.limit = pair[1].parseInt
-      except:
-        raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg())
-    of "offset":
-      try:
-        options.offset = pair[1].parseInt
-      except:
-        raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg())
-    of "sort":
-      let orderby = pair[1].orderByClauses()
-      if orderby != "":
-        options.orderby = orderby
-      else:
-        raise newException(EInvalidRequest, "Invalid sort value: $1" % pair[1])
-    of "contents", "raw":
-      discard
-    else:
-      discard
-
-proc parseQueryOptions*(querystring: string, options: var QueryOptions) =
-  var q = querystring
-  if q.startsWith("?"):
-    q = q[1 .. q.len - 1]
-  var fragments = q.split('&')
-  for f in fragments:
-    f.parseQueryOption(options)
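parseQueryOptions splits the query string on & and feeds each fragment to parseQueryOption. A sketch with illustrative values:

    var opts = newQueryOptions()
    parseQueryOptions("?limit=10&offset=20&sort=-modified&created-after=1672531200", opts)
    # opts.limit == 10, opts.offset == 20
    # opts.orderby == "modified COLLATE NOCASE DESC"
    # opts.createdAfter == "2023-01-01T00:00:00Z"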
-proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse =
-  if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch:
-    var ct = ""
-    let body = req.body.strip
-    if body == "":
-      return resError(Http400, "Bad request: No content specified for document.")
-    if req.headers.hasKey("Content-Type"):
-      ct = req.headers["Content-Type"]
-      case ct:
-        of "application/json":
-          try:
-            discard body.parseJson()
-          except:
-            return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg())
-        else:
-          discard
-  return cb(req, LS, resource, id)
-
-proc patchTag(tags: var seq[string], index: int, op, path, value: string): bool =
-  LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." % [op, $index, $value, $tags.len])
-  case op:
-    of "remove":
-      let tag = tags[index]
-      if not tag.startsWith("$"):
-        tags[index] = "" # Not removing element, otherwise subsequent indexes won't work!
-      else:
-        raise newException(EInvalidRequest, "cannot remove system tag: $1" % tag)
-    of "add":
-      if value.match(PEG_USER_TAG):
-        tags.insert(value, index)
-      else:
-        if value.strip == "":
-          raise newException(EInvalidRequest, "tag not specified." % value)
-        else:
-          raise newException(EInvalidRequest, "invalid tag: $1" % value)
-    of "replace":
-      if value.match(PEG_USER_TAG):
-        if tags[index].startsWith("$"):
-          raise newException(EInvalidRequest, "cannot replace system tag: $1" % tags[index])
-        else:
-          tags[index] = value
-      else:
-        if value.strip == "":
-          raise newException(EInvalidRequest, "tag not specified." % value)
-        else:
-          raise newException(EInvalidRequest, "invalid tag: $1" % value)
-    of "test":
-      if tags[index] != value:
-        return false
-    else:
-      raise newException(EInvalidRequest, "invalid patch operation: $1" % op)
-  return true
-
-proc patchData*(data: var JsonNode, origData: JsonNode, op: string, path: string, value: JsonNode): bool =
-  LOG.debug("- PATCH -> $1 path $2 with $3" % [op, path, $value])
-  var keys = path.replace(peg"^\/data\/", "").split("/")
-  if keys.len == 0:
-    raise newException(EInvalidRequest, "no valid path specified: $1" % path)
-  var d = data
-  var dorig = origData
-  var c = 1
-  for key in keys:
-    if d.kind == JArray:
-      try:
-        var index = key.parseInt
-        if c >= keys.len:
-          d.elems[index] = value
-          case op:
-            of "remove":
-              d.elems.del(index)
-            of "add":
-              d.elems.insert(value, index)
-            of "replace":
-              d.elems[index] = value
-            of "test":
-              if d.elems[index] != value:
-                return false
-            else:
-              raise newException(EInvalidRequest, "invalid patch operation: $1" % op)
-        else:
-          d = d[index]
-          dorig = dorig[index]
-      except:
-        raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path])
-    else:
-      if c >= keys.len:
-        case op:
-          of "remove":
-            if d.hasKey(key):
-              d.delete(key)
-            else:
-              raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path])
-          of "add":
-            d[key] = value
-          of "replace":
-            if d.hasKey(key):
-              d[key] = value
-            else:
-              raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path])
-          of "test":
-            if dorig.hasKey(key):
-              if dorig[key] != value:
-                return false
-            else:
-              raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path])
-          else:
-            raise newException(EInvalidRequest, "invalid patch operation: $1" % op)
-      else:
-        d = d[key]
-        dorig = dorig[key]
-    c += 1
-  return true
-
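patchData walks the decoded JSON document and applies a single add/remove/replace/test operation, so a PATCH body is an array of such operations. A sketch with a made-up document (the /data/ prefix is stripped by the proc itself):

    var orig = parseJson("""{"name": "test", "age": 42}""")
    var data = orig.copy
    discard patchData(data, orig, "replace", "/data/name", %"renamed")
    # A failing "test" operation returns false rather than raising:
    let matched = patchData(data, orig, "test", "/data/age", %99)  # false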
-
-proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[string], op: string, path: string, value: JsonNode): bool =
-  var matches = @[""]
-  let p = peg"""
-    path <- ^tagPath / fieldPath$
-    tagPath <- '\/tags\/' {\d+}
-    fieldPath <- '\/data\/' ident ('\/' ident)*
-    ident <- [a-zA-Z0-9_]+ / '-'
-  """
-  if path.find(p, matches) == -1:
-    raise newException(EInvalidRequest, "cannot patch path '$1'" % path)
-  if path.match(peg"^\/tags\/"):
-    let index = matches[0].parseInt
-    if value.kind != JString:
-      raise newException(EInvalidRequest, "tag '$1' is not a string." % $value)
-    let tag = value.getStr
-    return patchTag(tags, index, op, path, tag)
-  elif tags.contains("$subtype:json"):
-    return patchData(data, origData, op, path, value)
-  else:
-    raise newException(EInvalidRequest, "cannot patch data of a non-JSON document.")
-
-# Low level procs
-
-proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse =
-  let doc = LS.store.retrieveTag(id, options)
-  result.headers = ctJsonHeader()
-  setOrigin(LS, req, result.headers)
-  if doc == newJNull():
-    result = resTagNotFound(id)
-  else:
-    result.content = $doc
-    result.code = Http200
-
-proc getStore*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse =
-  if (not LSDICT.hasKey(id)):
-    return resStoreNotFound(id)
-  let store = LSDICT[id]
-  var doc = newJObject()
-  doc["id"] = %id
-  doc["file"] = %store.file
-  doc["config"] = store.config
-  result.headers = ctJsonHeader()
-  setOrigin(LS, req, result.headers)
-  result.content = $doc
-  result.code = Http200
-
-proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse =
-  let doc = LS.store.retrieveIndex(id, options)
-  result.headers = ctJsonHeader()
-  setOrigin(LS, req, result.headers)
-  if doc == newJNull():
-    result = resIndexNotFound(id)
-  else:
-    result.content = $doc
-    result.code = Http200
-
-proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse =
-  let doc = LS.store.retrieveRawDocument(id, options)
-  result.headers = ctJsonHeader()
-  setOrigin(LS, req, result.headers)
-  if doc == "":
-    result = resDocumentNotFound(id)
-  else:
-    result.content = doc
-    result.code = Http200
-
-proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse =
-  let doc = LS.store.retrieveDocument(id, options)
-  if doc.data == "":
-    result = resDocumentNotFound(id)
-  else:
-    result.headers = doc.contenttype.ctHeader
-    setOrigin(LS, req, result.headers)
-    result.content = doc.data
-    result.code = Http200
-
-proc deleteDocument*(LS: LiteStore, id: string, req: LSRequest): LSResponse =
-  let doc = LS.store.retrieveDocument(id)
-  if doc.data == "":
-    result = resDocumentNotFound(id)
-  else:
-    try:
-      let res = LS.store.destroyDocument(id)
-      if res == 0:
-        result = resError(Http500, "Unable to delete document '$1'" % id)
-      else:
-        result.headers = newHttpHeaders(TAB_HEADERS)
-        setOrigin(LS, req, result.headers)
-        result.headers["Content-Length"] = "0"
-        result.content = ""
-        result.code = Http204
-    except:
-      result = resError(Http500, "Unable to delete document '$1'" % id)
-
-proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =
-  var options = options
-  let t0 = cpuTime()
-  let docs = LS.store.retrieveTags(options)
-  let orig_limit = options.limit
-  let orig_offset = options.offset
-  options.limit = 0
-  options.offset = 0
-  options.select = @["COUNT(tag_id)"]
-  let total = LS.store.countTags(prepareSelectTagsQuery(options), options.like.replace("*", "%"))
-  var content = newJObject()
-  if options.like != "":
-    content["like"] = %(options.like.decodeURL)
-  if orig_limit > 0:
-    content["limit"] = %orig_limit
-  if orig_offset > 0:
-    content["offset"] = %orig_offset
-  content["total"] = %total
-  content["execution_time"] = %(cputime()-t0)
-  content["results"] = docs
-  result.headers = ctJsonHeader()
-  setOrigin(LS, req, result.headers)
-  result.content = content.pretty
-  result.code = Http200
-
-proc getStores(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =
-  let t0 = cpuTime()
-  var docs = newJArray()
-  for k, v in LSDICT.pairs:
-    var store = newJObject()
-    store["id"] = %k
-    store["file"] = %v.file
-    store["config"] = v.config
-    docs.add(store)
-  var content = newJObject()
-  content["total"] = %LSDICT.len
-  content["execution_time"] = %(cputime()-t0)
-  content["results"] = docs
-  result.headers = ctJsonHeader()
-  setOrigin(LS, req, result.headers)
-  result.content = content.pretty
-  result.code = Http200
-
-proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =
-  var options = options
-  let t0 = cpuTime()
-  let docs = LS.store.retrieveIndexes(options)
-  let orig_limit = options.limit
-  let orig_offset = options.offset
-  options.limit = 0
-  options.offset = 0
-  options.select = @["COUNT(name)"]
-  let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), options.like.replace("*", "%"))
-  var content = newJObject()
-  if options.like != "":
-    content["like"] = %(options.like.decodeURL)
-  if orig_limit > 0:
-    content["limit"] = %orig_limit
-  if orig_offset > 0:
-    content["offset"] = %orig_offset
-  content["total"] = %total
-  content["execution_time"] = %(cputime()-t0)
-  content["results"] = docs
-  result.headers = ctJsonHeader()
-  setOrigin(LS, req, result.headers)
-  result.content = content.pretty
-  result.code = Http200
-
-proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =
-  var options = options
-  let t0 = cpuTime()
-  let docs = LS.store.retrieveRawDocuments(options)
-  let orig_limit = options.limit
-  let orig_offset = options.offset
-  options.limit = 0
-  options.offset = 0
-  options.select = @["COUNT(docid)"]
-  let total = LS.store.retrieveRawDocuments(options)[0].num
-  var content = newJObject()
-  if options.folder != "":
-    content["folder"] = %(options.folder)
-  if options.search != "":
-    content["search"] = %(options.search.decodeURL)
-  if options.tags != "":
-    content["tags"] = newJArray()
-    for tag in options.tags.replace("+", "%2B").decodeURL.split(","):
-      content["tags"].add(%tag)
-  if orig_limit > 0:
-    content["limit"] = %orig_limit
-  if orig_offset > 0:
-    content["offset"] = %orig_offset
-  if options.orderby != "":
-    content["sort"] = %options.orderby
-  content["total"] = %total
-  content["execution_time"] = %(cputime()-t0)
-  content["results"] = docs
-  result.headers = ctJsonHeader()
-  setOrigin(LS, req, result.headers)
-  result.content = content.pretty
-  result.code = Http200
-
-proc getInfo*(LS: LiteStore, req: LSRequest): LSResponse =
-  let info = LS.store.retrieveInfo()
-  let version = info[0]
-  let total_documents = info[1]
-  let total_tags = LS.store.countTags()
-  let tags = LS.store.retrieveTagsWithTotals()
-  var content = newJObject()
-  content["version"] = %(LS.appname & " v" & LS.appversion)
-  content["datastore_version"] = %version
-  content["api_version"] = %7
-  content["size"] = %($((LS.file.getFileSize().float/(1024*1024)).formatFloat(ffDecimal, 2)) & " MB")
-  content["read_only"] = %LS.readonly
-  content["log_level"] = %LS.loglevel
-  if LS.directory.len == 0:
-    content["directory"] = newJNull()
-  else:
-    content["directory"] = %LS.directory
-  content["mount"] = %LS.mount
-  if LS.config != newJNull() and LS.config.hasKey("stores") and LS.config["stores"].len > 0:
-    content["additional_stores"] = %toSeq(LS.config["stores"].keys)
-  else:
-    content["additional_stores"] = newJArray()
-  if LS.auth != newJNull():
-    content["auth"] = %true
-  else:
-    content["auth"] = %false
-  content["total_documents"] = %total_documents
-  content["total_tags"] = %total_tags
-  content["tags"] = tags
-  result.headers = ctJsonHeader()
-  setOrigin(LS, req, result.headers)
-  result.content = content.pretty
-  result.code = Http200
-
-proc putIndex*(LS: LiteStore, id, field: string, req: LSRequest): LSResponse =
-  try:
-    if (not id.match(PEG_INDEX)):
-      return resError(Http400, "invalid index ID: $1" % id)
-    if (not field.match(PEG_JSON_FIELD)):
-      return resError(Http400, "invalid field path: $1" % field)
-    if (LS.store.retrieveIndex(id) != newJNull()):
-      return resError(Http409, "Index already exists: $1" % id)
-    LS.store.createIndex(id, field)
-    result.headers = ctJsonHeader()
-    setOrigin(LS, req, result.headers)
-    result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field]
-    result.code = Http201
-  except:
-    eWarn()
-    result = resError(Http500, "Unable to create index.")
-
-proc putStore*(LS: LiteStore, id: string, config: JsonNode, req: LSRequest): LSResponse =
-  try:
-    if (not id.match(PEG_STORE) or id == "master"):
-      return resError(Http400, "invalid store ID: $1" % id)
-    if (LSDICT.hasKey(id)):
-      return resError(Http409, "Store already exists: $1" % id)
-    let store = LS.addStore(id, id & ".db", config)
-    LS.updateConfig()
-    LSDICT[id] = store
-    result = getStore(LS, id, newQueryOptions(), req)
-    result.code = Http201
-  except:
-    eWarn()
-    result = resError(Http500, "Unable to create store.")
-
-proc deleteIndex*(LS: LiteStore, id: string, req: LSRequest): LSResponse =
-  if (not id.match(PEG_INDEX)):
-    return resError(Http400, "invalid index ID: $1" % id)
-  if (LS.store.retrieveIndex(id) == newJNull()):
-    return resError(Http404, "Index not found: $1" % id)
-  try:
-    LS.store.dropIndex(id)
-    result.headers = newHttpHeaders(TAB_HEADERS)
-    setOrigin(LS, req, result.headers)
-    result.headers["Content-Length"] = "0"
-    result.content = ""
-    result.code = Http204
-  except:
-    eWarn()
-    result = resError(Http500, "Unable to delete index.")
-
-proc deleteStore*(LS: LiteStore, id: string, req: LSRequest): LSResponse =
-  if (not id.match(PEG_STORE)):
-    return resError(Http400, "invalid store ID: $1" % id)
-  if (not LSDICT.hasKey(id)):
-    return resError(Http404, "Store not found: $1" % id)
-  try:
-    LSDICT.del(id)
-    if LS.config.hasKey("stores") and LS.config["stores"].hasKey(id):
-      LS.config["stores"].delete(id)
-      LS.updateConfig()
-    result.headers = newHttpHeaders(TAB_HEADERS)
-    setOrigin(LS, req, result.headers)
-    result.headers["Content-Length"] = "0"
-    result.content = ""
-    result.code = Http204
-  except:
-    eWarn()
-    result = resError(Http500, "Unable to delete index.")
-
-proc postDocument*(LS: LiteStore, body: string, ct: string, folder="", req: LSRequest): LSResponse =
-  if not folder.isFolder:
-    return resError(Http400, "Invalid folder specified when creating document: $1" % folder)
-  try:
-    var doc = LS.store.createDocument(folder, body, ct)
-    if doc != "":
-      result.headers = ctJsonHeader()
-      setOrigin(LS, req, result.headers)
-      result.content = doc
-      result.code = Http201
-    else:
-      result = resError(Http500, "Unable to create document.")
-  except:
-    eWarn()
-    result = resError(Http500, "Unable to create document.")
-
-proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, req: LSRequest): LSResponse =
-  if id.isFolder:
-    return resError(Http400, "Invalid ID '$1' (Document IDs cannot end with '/')." % id)
-  let doc = LS.store.retrieveDocument(id)
-  if doc.data == "":
-    # Create a new document
-    var doc = LS.store.createDocument(id, body, ct)
-    if doc != "":
-      result.headers = ctJsonHeader()
-      setOrigin(LS, req, result.headers)
-      result.content = doc
-      result.code = Http201
-    else:
-      result = resError(Http500, "Unable to create document.")
-  else:
-    # Update existing document
-    try:
-      var doc = LS.store.updateDocument(id, body, ct)
-      if doc != "":
-        result.headers = ctJsonHeader()
-        setOrigin(LS, req, result.headers)
-        result.content = doc
-        result.code = Http200
-      else:
-        result = resError(Http500, "Unable to update document '$1'." % id)
-    except:
-      result = resError(Http500, "Unable to update document '$1'." % id)
-
-proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse =
-  var apply = true
-  let jbody = body.parseJson
-  if jbody.kind != JArray:
-    return resError(Http400, "Bad request: PATCH request body is not an array.")
-  var options = newQueryOptions()
-  options.select = @["documents.id AS id", "created", "modified", "data"]
-  let doc = LS.store.retrieveRawDocument(id, options)
-  if doc == "":
-    return resDocumentNotFound(id)
-  let jdoc = doc.parseJson
-  var tags = newSeq[string]()
-  var origTags = newSeq[string]()
-  for tag in jdoc["tags"].items:
-    tags.add(tag.str)
-    origTags.add(tag.str)
-  var data: JsonNode
-  var origData: JsonNode
-  if tags.contains("$subtype:json"):
-    try:
-      origData = jdoc["data"].getStr.parseJson
-      data = origData.copy
-    except:
-      discard
-  var c = 1
-  for item in jbody.items:
-    if item.hasKey("op") and item.hasKey("path"):
-      if not item.hasKey("value"):
-        item["value"] = %""
-      try:
-        apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"])
-        if not apply:
-          break
-      except:
-        return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg())
-    else:
-      return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c)
-    c.inc
-  if apply:
-    # when document is not JSON the origData is not defined
-    # the extra check allows editing tags for non-JSON documents
-    if origData != nil and origData.len > 0 and origData != data:
-      try:
-        var doc = LS.store.updateDocument(id, data.pretty, "application/json")
-        if doc == "":
-          return resError(Http500, "Unable to patch document '$1'." % id)
-      except:
-        return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg())
-    if origTags != tags:
-      try:
-        for t1 in jdoc["tags"].items:
-          discard LS.store.destroyTag(t1.str, id, true)
-        for t2 in tags:
-          if t2 != "":
-            LS.store.createTag(t2, id, true)
-      except:
-        return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()])
-  return LS.getRawDocument(id, newQueryOptions(), req)
-
-# Main routing
-
-proc options*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =
-  case resource:
-    of "info":
-      result.headers = newHttpHeaders(TAB_HEADERS)
-      setOrigin(LS, req, result.headers)
-      result.headers["Allow"] = "GET, OPTIONS"
-      result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS"
-      if id != "":
-        return resError(Http404, "Info '$1' not found." % id)
-      else:
-        result.code = Http204
-        result.content = ""
-    of "dir":
-      result.code = Http204
-      result.content = ""
-      result.headers = newHttpHeaders(TAB_HEADERS)
-      setOrigin(LS, req, result.headers)
-      result.headers["Allow"] = "GET, OPTIONS"
-      result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS"
-    of "tags":
-      result.code = Http204
-      result.content = ""
-      result.headers = newHttpHeaders(TAB_HEADERS)
-      setOrigin(LS, req, result.headers)
-      result.headers["Allow"] = "GET, OPTIONS"
-      result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS"
-    of "indexes":
-      result.code = Http204
-      result.content = ""
-      result.headers = newHttpHeaders(TAB_HEADERS)
-      setOrigin(LS, req, result.headers)
-      if id != "":
-        result.code = Http204
-        result.content = ""
-        if LS.readonly:
-          result.headers["Allow"] = "GET, OPTIONS"
-          result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS"
-        else:
-          result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE"
-          result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE"
-      else:
-        result.code = Http204
-        result.content = ""
-        if LS.readonly:
-          result.headers = newHttpHeaders(TAB_HEADERS)
-          setOrigin(LS, req, result.headers)
-          result.headers["Allow"] = "GET, OPTIONS"
-          result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS"
-        else:
-          result.headers = newHttpHeaders(TAB_HEADERS)
-          setOrigin(LS, req, result.headers)
-          result.headers["Allow"] = "GET, OPTIONS"
-          result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS"
-    of "docs":
-      var folder: string
-      if id.isFolder:
-        folder = id
-      if folder.len > 0:
-        result.code = Http204
-        result.content = ""
-        if LS.readonly:
-          result.headers = newHttpHeaders(TAB_HEADERS)
-          setOrigin(LS, req, result.headers)
-          result.headers["Allow"] = "HEAD, GET, OPTIONS"
-          result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS"
-        else:
-          result.headers = newHttpHeaders(TAB_HEADERS)
-          setOrigin(LS, req, result.headers)
-          result.headers["Allow"] = "HEAD, GET, OPTIONS, POST, PUT"
-          result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST, PUT"
-      elif id != "":
-        result.code = Http204
-        result.content = ""
-        if LS.readonly:
-          result.headers = newHttpHeaders(TAB_HEADERS)
-          setOrigin(LS, req, result.headers)
-          result.headers["Allow"] = "HEAD, GET, OPTIONS"
-          result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS"
-        else:
-          result.headers = newHttpHeaders(TAB_HEADERS)
-          setOrigin(LS, req, result.headers)
-          result.headers["Allow"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE"
-          result.headers["Allow-Patch"] = "application/json-patch+json"
-          result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE"
-      else:
-        result.code = Http204
-        result.content = ""
-        if LS.readonly:
-          result.headers = newHttpHeaders(TAB_HEADERS)
-          setOrigin(LS, req, result.headers)
-          result.headers["Allow"] = "HEAD, GET, OPTIONS"
-          result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS"
-        else:
-          result.headers = newHttpHeaders(TAB_HEADERS)
-          setOrigin(LS, req, result.headers)
-          result.headers["Allow"] = "HEAD, GET, OPTIONS, POST"
-          result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST"
-    of "stores":
-      result.code = Http204
-      result.content = ""
-      result.headers = newHttpHeaders(TAB_HEADERS)
-      setOrigin(LS, req, result.headers)
-      if id != "":
-        result.code = Http204
-        result.content = ""
-        if LS.readonly:
-          result.headers["Allow"] = "GET, OPTIONS"
-          result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS"
-        else:
result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE" - else: - result.code = Http204 - result.content = "" - if LS.readonly: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - else: - result.headers = newHttpHeaders(TAB_HEADERS) - setOrigin(LS, req, result.headers) - result.headers["Allow"] = "GET, OPTIONS" - result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" - else: - discard # never happens really. - -proc head*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - var options = newQueryOptions() - options.select = @["documents.id AS id", "created", "modified"] - if id.isFolder: - options.folder = id - try: - parseQueryOptions(req.url.query, options); - if id != "" and options.folder == "": - result = LS.getRawDocument(id, options, req) - result.content = "" - else: - result = LS.getRawDocuments(options, req) - result.content = "" - except: - return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) - -proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = - case resource: - of "docs": - var options = newQueryOptions() - if id.isFolder: - options.folder = id - if req.url.query.contains("contents=false"): - options.select = @["documents.id AS id", "created", "modified"] - try: - parseQueryOptions(req.url.query, options); - if id != "" and options.folder == "": - if req.url.query.contains("raw=true") or req.headers.hasKey("Accept") and req.headers["Accept"] == "application/json": - return LS.getRawDocument(id, options, req) - else: - return LS.getDocument(id, options, req) - else: - return LS.getRawDocuments(options, req) - except: - let e = getCurrentException() - let trace = e.getStackTrace() - echo trace - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "tags": - var options = newQueryOptions() - try: - parseQueryOptions(req.url.query, options); - if id != "": - return LS.getTag(id, options, req) - else: - return LS.getTags(options, req) - except: - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "indexes": - var options = newQueryOptions() - try: - parseQueryOptions(req.url.query, options); - if id != "": - return LS.getIndex(id, options, req) - else: - return LS.getIndexes(options, req) - except: - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "stores": - var options = newQueryOptions() - try: - parseQueryOptions(req.url.query, options); - if id != "": - return LS.getStore(id, options, req) - else: - return LS.getStores(options, req) - except: - return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) - of "info": - if id != "": - return resError(Http404, "Info '$1' not found." % id) - return LS.getInfo(req) - else: - discard # never happens really. 
-
-proc post*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =
-  var ct = "text/plain"
-  if req.headers.hasKey("Content-Type"):
-    ct = req.headers["Content-Type"]
-  return LS.postDocument(req.body.strip, ct, id, req)
-
-proc put*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =
-  if id != "":
-    if resource == "indexes":
-      var field = ""
-      try:
-        field = parseJson(req.body.strip)["field"].getStr
-      except:
-        return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg())
-      return LS.putIndex(id, field, req)
-    elif resource == "stores":
-      var config = newJNull()
-      try:
-        config = parseJson(req.body)
-      except:
-        return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg())
-      return LS.putStore(id, config, req)
-    else: # Assume docs
-      var ct = "text/plain"
-      if req.headers.hasKey("Content-Type"):
-        ct = req.headers["Content-Type"]
-      return LS.putDocument(id, req.body.strip, ct, req)
-  else:
-    return resError(Http400, "Bad request: document ID must be specified in PUT requests.")
-
-proc delete*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =
-  if id != "":
-    if resource == "indexes":
-      return LS.deleteIndex(id, req)
-    elif resource == "stores":
-      return LS.deleteStore(id, req)
-    else: # Assume docs
-      return LS.deleteDocument(id, req)
-  else:
-    return resError(Http400, "Bad request: document ID must be specified in DELETE requests.")
-
-proc patch*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse =
-  if id != "":
-    return LS.patchDocument(id, req.body, req)
-  else:
-    return resError(Http400, "Bad request: document ID must be specified in PATCH requests.")
-
-proc serveFile*(req: LSRequest, LS: LiteStore, id: string): LSResponse =
-  let path = LS.directory / id
-  var reqMethod = $req.reqMethod
-  if req.headers.hasKey("X-HTTP-Method-Override"):
-    reqMethod = req.headers["X-HTTP-Method-Override"]
-  case reqMethod.toUpperAscii:
-    of "OPTIONS":
-      return validate(req, LS, "dir", id, options)
-    of "GET":
-      if path.fileExists:
-        try:
-          let contents = path.readFile
-          let parts = path.splitFile
-          if CONTENT_TYPES.hasKey(parts.ext):
-            result.headers = CONTENT_TYPES[parts.ext].ctHeader
-          else:
-            result.headers = ctHeader("text/plain")
-          setOrigin(LS, req, result.headers)
-          result.content = contents
-          result.code = Http200
-        except:
-          return resError(Http500, "Unable to read file '$1'." % path)
-      else:
-        return resError(Http404, "File '$1' not found." % path)
-    else:
-      return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
-
-proc route*(req: LSRequest, LS: LiteStore, resource = "docs", id = ""): LSResponse =
-  var reqMethod = $req.reqMethod
-  if req.headers.hasKey("X-HTTP-Method-Override"):
-    reqMethod = req.headers["X-HTTP-Method-Override"]
-  LOG.debug("ROUTE - resource: " & resource & " id: " & id)
-  case reqMethod.toUpperAscii:
-    of "POST":
-      if LS.readonly:
-        return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
-      return validate(req, LS, resource, id, post)
-    of "PUT":
-      if LS.readonly:
-        return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
-      return validate(req, LS, resource, id, put)
-    of "DELETE":
-      if LS.readonly:
-        return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
-      return validate(req, LS, resource, id, delete)
-    of "HEAD":
-      return validate(req, LS, resource, id, head)
-    of "OPTIONS":
-      return validate(req, LS, resource, id, options)
-    of "GET":
-      return validate(req, LS, resource, id, get)
-    of "PATCH":
-      if LS.readonly:
-        return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
-      return validate(req, LS, resource, id, patch)
-    else:
-      return resError(Http405, "Method not allowed: $1" % $req.reqMethod)
-
-proc multiRoute(req: LSRequest, resource, id: string): LSResponse =
-  var matches = @["", "", ""]
-  if req.url.path.find(PEG_STORE_URL, matches) != -1:
-    let id = matches[0]
-    let path = "/v7/" & matches[1]
-    matches = @["", "", ""]
-    discard path.find(PEG_URL, matches)
-    return req.route(LSDICT[id], matches[1], matches[2])
-  return req.route(LS, resource, id)
-
-proc newSimpleLSRequest(meth: HttpMethod, resource, id, body = "", params = "", headers = newHttpHeaders()): LSRequest =
-  result.reqMethod = meth
-  result.body = body
-  result.headers = headers
-  result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", resource, id, params])
-
-proc get(resource, id: string, params = ""): LSResponse =
-  return newSimpleLSRequest(HttpGet, resource, id, "", params).multiRoute(resource, id)
-
-proc post(resource, folder, body: string, ct = ""): LSResponse =
-  var headers = newHttpHeaders()
-  if ct != "":
-    headers["Content-Type"] = ct
-  return newSimpleLSRequest(HttpPost, resource, folder, body, "", headers).multiRoute(resource, folder & "/")
-
-proc put(resource, id, body: string, ct = ""): LSResponse =
-  var headers = newHttpHeaders()
-  if ct != "":
-    headers["Content-Type"] = ct
-  return newSimpleLSRequest(HttpPut, resource, id, body, "", headers).multiRoute(resource, id)
-
-proc patch(resource, id, body: string): LSResponse =
-  var headers = newHttpHeaders()
-  headers["Content-Type"] = "application/json"
-  return newSimpleLSRequest(HttpPatch, resource, id, body, "", headers).multiRoute(resource, id)
-
-proc delete(resource, id: string): LSResponse =
-  return newSimpleLSRequest(HttpDelete, resource, id).multiRoute(resource, id)
-
-proc head(resource, id: string): LSResponse =
-  return newSimpleLSRequest(HttpHead, resource, id).multiRoute(resource, id)
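These thin get/post/put/patch/delete/head wrappers build a synthetic LSRequest and dispatch it through multiRoute, so server-side JavaScript can perform internal requests without touching a socket. A sketch with invented ids:

    let created = put("docs", "test.json", """{"x": 1}""", "application/json")
    let fetched = get("docs", "test.json", "raw=true")
    let gone    = delete("docs", "test.json")
    # Each call returns an LSResponse whose code and content are set by route().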
-
-proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, origId: string) =
-  var api_idx = ctx.duk_push_object()
-  # GET
-  var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} =
-    let resource = duk_get_string(ctx, 0)
-    let id = duk_get_string(ctx, 1)
-    let params = duk_get_string(ctx, 2)
-    let resp = get($resource, $id, $params)
-    var res_idx = ctx.duk_push_object()
-    ctx.duk_push_int(cast[cint](resp.code))
-    discard ctx.duk_put_prop_string(res_idx, "code")
-    discard ctx.duk_push_string(resp.content.cstring)
-    discard ctx.duk_put_prop_string(res_idx, "content")
-    return 1
-  )
-  discard duk_push_c_function(ctx, get, 3)
-  discard ctx.duk_put_prop_string(api_idx, "get")
-  # POST
-  var post: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} =
-    let resource = duk_get_string(ctx, 0)
-    let folder = duk_get_string(ctx, 1)
-    let body = duk_get_string(ctx, 2)
-    let ct = duk_get_string(ctx, 3)
-    let resp = post($resource, $folder, $body, $ct)
-    var res_idx = ctx.duk_push_object()
-    ctx.duk_push_int(cast[cint](resp.code))
-    discard ctx.duk_put_prop_string(res_idx, "code")
-    discard ctx.duk_push_string(resp.content.cstring)
-    discard ctx.duk_put_prop_string(res_idx, "content")
-    return 1
-  )
-  discard duk_push_c_function(ctx, post, 4)
-  discard ctx.duk_put_prop_string(api_idx, "post")
-  # PUT
-  var put: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} =
-    let resource = duk_get_string(ctx, 0)
-    let id = duk_get_string(ctx, 1)
-    let body = duk_get_string(ctx, 2)
-    let ct = duk_get_string(ctx, 3)
-    let resp = put($resource, $id, $body, $ct)
-    var res_idx = ctx.duk_push_object()
-    ctx.duk_push_int(cast[cint](resp.code))
-    discard ctx.duk_put_prop_string(res_idx, "code")
-    discard ctx.duk_push_string(resp.content.cstring)
-    discard ctx.duk_put_prop_string(res_idx, "content")
-    return 1
-  )
-  discard duk_push_c_function(ctx, put, 4)
-  discard ctx.duk_put_prop_string(api_idx, "put")
-  # PATCH
-  var patch: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} =
-    let resource = duk_get_string(ctx, 0)
-    let id = duk_get_string(ctx, 1)
-    let body = duk_get_string(ctx, 2)
-    let resp = patch($resource, $id, $body)
-    var res_idx = ctx.duk_push_object()
-    ctx.duk_push_int(cast[cint](resp.code))
-    discard ctx.duk_put_prop_string(res_idx, "code")
-    discard ctx.duk_push_string(resp.content.cstring)
-    discard ctx.duk_put_prop_string(res_idx, "content")
-    return 1
-  )
-  discard duk_push_c_function(ctx, patch, 3)
-  discard ctx.duk_put_prop_string(api_idx, "patch")
-  # DELETE
-  var delete: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} =
-    let resource = duk_get_string(ctx, 0)
-    let id = duk_get_string(ctx, 1)
-    let resp = delete($resource, $id)
-    var res_idx = ctx.duk_push_object()
-    ctx.duk_push_int(cast[cint](resp.code))
-    discard ctx.duk_put_prop_string(res_idx, "code")
-    discard ctx.duk_push_string(resp.content.cstring)
-    discard ctx.duk_put_prop_string(res_idx, "content")
-    return 1
-  )
-  discard duk_push_c_function(ctx, delete, 2)
-  discard ctx.duk_put_prop_string(api_idx, "delete")
-  # HEAD
-  var head: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} =
-    let resource = duk_get_string(ctx, 0)
-    let id = duk_get_string(ctx, 1)
-    let resp = head($resource, $id)
-    var res_idx = ctx.duk_push_object()
-    ctx.duk_push_int(cast[cint](resp.code))
-    discard ctx.duk_put_prop_string(res_idx, "code")
-    discard ctx.duk_push_string(resp.content.cstring)
-    discard ctx.duk_put_prop_string(res_idx, "content")
-    return 1
-  )
-  discard duk_push_c_function(ctx, head, 2)
-  discard ctx.duk_put_prop_string(api_idx, "head")
-  discard ctx.duk_put_global_string("$store")
-
-proc jError(ctx: DTContext): LSResponse =
-  return resError(Http500, "Middleware Error: " & $ctx.duk_safe_to_string(-1))
== "": - LOG.warn("Middleware '$1' not found" % id) - else: - result = LS.middleware[id] - -proc getMiddlewareSeq(LS: LiteStore, resource, id, meth: string): seq[string] = - result = newSeq[string]() - if LS.config.kind != JObject or not LS.config.hasKey("resources"): - return - var reqUri = "/" & resource & "/" & id - if reqUri[^1] == '/': - reqUri.removeSuffix({'/'}) - let parts = reqUri.split("/") - let ancestors = parts[1..parts.len-2] - var currentPath = "" - var currentPaths = "" - for p in ancestors: - currentPath &= "/" & p - currentPaths = currentPath & "/*" - if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("middleware"): - let mw = LS.config["resources"][currentPaths][meth]["middleware"] - if (mw.kind == JArray): - for m in mw: - result.add m.getStr - if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): - let mw = LS.config["resources"][reqUri][meth]["middleware"] - if (mw.kind == JArray): - for m in mw: - result.add m.getStr - -proc execute*(req: var LSRequest, LS: LiteStore, resource, id: string): LSResponse = - let middleware = getMiddlewareSeq(LS, resource, id, $req.reqMethod) - if middleware.len > 0: - LOG.debug("Middleware: " & middleware.join(" -> ")); - if middleware.len == 0: - return route(req, LS, resource, id) - var jReq = $(%* req) - LOG.debug("Request: " & jReq) - var jRes = """{ - "code": 200, - "content": {}, - "final": false, - "headers": { - "Access-Control-Allow-Origin": "*", - "Access-Control-Allow-Headers": "Authorization, Content-Type", - "Server": "$1", - "Content-Type": "application/json" - } - }""" % [LS.appname & "/" & LS.appversion] - var context = "{}" - # Create execution context - var ctx = duk_create_heap_default() - duk_console_init(ctx) - duk_print_alert_init(ctx) - LS.registerStoreApi(ctx, resource, id) - if ctx.duk_peval_string(cstring("($1)" % $jReq)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$req") - if ctx.duk_peval_string(cstring("($1)" % $jRes)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$res") - if ctx.duk_peval_string(cstring("($1)" % $context)) != 0: - return jError(ctx) - discard ctx.duk_put_global_string("$ctx") - # Middleware-specific functions - var i = 0 - var abort = 0 - while abort != 1 and i < middleware.len: - let code = LS.getMiddleware(middleware[i]) - LOG.debug("Evaluating middleware '$1'" % middleware[i]) - if ctx.duk_peval_string(code.cstring) != 0: - return jError(ctx) - abort = ctx.duk_get_boolean(-1) - i.inc - # Retrieve response, and request - if ctx.duk_peval_string("JSON.stringify($res);") != 0: - return jError(ctx) - let fRes = parseJson($(ctx.duk_get_string(-1))).newLSResponse - if ctx.duk_peval_string("JSON.stringify($req);") != 0: - return jError(ctx) - let fReq = parseJson($(ctx.duk_get_string(-1))).newLSRequest() - ctx.duk_destroy_heap(); - LOG.debug("abort: $1", [$abort]) - if abort == 1: - return fRes - return route(fReq, LS, resource, id) +import + asynchttpserver, + strutils, + sequtils, + cgi, + strtabs, + pegs, + json, + os, + uri, + tables, + times +import + types, + contenttypes, + core, + utils, + logger, + duktape + +# Helper procs + +proc sqlOp(op: string): string = + let table = newStringTable() + table["not eq"] = "<>" + table["eq"] = "==" + table["gt"] = ">" + table["gte"] = ">=" + table["lt"] = "<" + table["lte"] = "<=" + table["contains"] = "contains" 
+ table["like"] = "like" + return table[op] + +proc orderByClauses*(str: string): string = + var clauses = newSeq[string]() + var fragments = str.split(",") + let clause = peg""" + clause <- {[-+]} {field} + field <- ('id' / 'created' / 'modified' / path) + path <- '$' (objField)+ + ident <- [a-zA-Z0-9_]+ + objField <- '.' ident + """ + for f in fragments: + var matches = @["", ""] + if f.find(clause, matches) != -1: + var field = matches[1] + if field[0] == '$': + field = "json_extract(documents.data, '$1')" % matches[1] + if matches[0] == "-": + clauses.add("$1 COLLATE NOCASE DESC" % field) + else: + clauses.add("$1 COLLATE NOCASE ASC" % field) + return clauses.join(", ") + +proc selectClause*(str: string, options: var QueryOptions) = + let tokens = """ + path <- '$' (objItem / objField)+ + ident <- [a-zA-Z0-9_]+ + objIndex <- '[' \d+ ']' + objField <- '.' ident + objItem <- objField objIndex + """ + let fields = peg(""" + fields <- ^{field} (\s* ',' \s* {field})*$ + field <- path \s+ ('as' / 'AS') \s+ ident + """ & tokens) + let field = peg(""" + field <- ^{path} \s+ ('as' / 'AS') \s+ {ident}$ + """ & tokens) + var fieldMatches = newSeq[string](10) + if str.strip.match(fields, fieldMatches): + for m in fieldMatches: + if m.len > 0: + var rawTuple = newSeq[string](2) + if m.match(field, rawTuple): + options.jsonSelect.add((path: rawTuple[0], alias: rawTuple[1])) + +proc filterClauses*(str: string, options: var QueryOptions) = + let tokens = """ + operator <- 'not eq' / 'eq' / 'gte' / 'gt' / 'lte' / 'lt' / 'contains' / 'like' + value <- string / number / 'null' / 'true' / 'false' + string <- '"' ('\\"' . / [^"])* '"' + number <- '-'? '0' / [1-9] [0-9]* ('.' [0-9]+)? (( 'e' / 'E' ) ( '+' / '-' )? [0-9]+)? + path <- '$' (objItem / objField)+ + ident <- [a-zA-Z0-9_]+ + objIndex <- '[' \d+ ']' + objField <- '.' 
+proc filterClauses*(str: string, options: var QueryOptions) =
+  let tokens = """
+    operator <- 'not eq' / 'eq' / 'gte' / 'gt' / 'lte' / 'lt' / 'contains' / 'like'
+    value <- string / number / 'null' / 'true' / 'false'
+    string <- '"' ('\\"' . / [^"])* '"'
+    number <- '-'? '0' / [1-9] [0-9]* ('.' [0-9]+)? (( 'e' / 'E' ) ( '+' / '-' )? [0-9]+)?
+    path <- '$' (objItem / objField)+
+    ident <- [a-zA-Z0-9_]+
+    objIndex <- '[' \d+ ']'
+    objField <- '.' ident
+    objItem <- objField objIndex
+  """
+  let clause = peg("""
+    clause <- {path} \s+ {operator} \s+ {value}
+  """ & tokens)
+  let andClauses = peg("""
+    andClauses <- ^{clause} (\s+ 'and' \s+ {clause})*$
+    clause <- path \s+ operator \s+ value
+  """ & tokens)
+  let orClauses = peg("""
+    orClauses <- ^{andClauses} (\s+ 'or' \s+ {andClauses})*$
+    andClauses <- clause (\s+ 'and' \s+ clause)*
+    clause <- path \s+ operator \s+ value
+  """ & tokens)
+  var orClausesMatches = newSeq[string](10)
+  discard str.strip.match(orClauses, orClausesMatches)
+  var parsedClauses = newSeq[seq[seq[string]]]()
+  for orClause in orClausesMatches:
+    if orClause.len > 0:
+      var andClausesMatches = newSeq[string](10)
+      discard orClause.strip.match(andClauses, andClausesMatches)
+      var parsedAndClauses = newSeq[seq[string]]()
+      for andClause in andClausesMatches:
+        if andClause.len > 0:
+          var clauses = newSeq[string](3)
+          discard andClause.strip.match(clause, clauses)
+          clauses[1] = sqlOp(clauses[1])
+          if clauses[2] == "true":
+            clauses[2] = "1"
+          elif clauses[2] == "false":
+            clauses[2] = "0"
+          parsedAndClauses.add clauses
+      if parsedAndClauses.len > 0:
+        parsedClauses.add parsedAndClauses
+  if parsedClauses.len == 0:
+    return
+  var currentArr = 0
+  var tables = newSeq[string]()
+  let resOrClauses = parsedClauses.map do (it: seq[seq[string]]) -> string:
+    let resAndClauses = it.map do (x: seq[string]) -> string:
+      if x[1] == "contains":
+        currentArr = currentArr + 1
+        tables.add "json_each(documents.data, '$1') AS arr$2" % [x[0], $currentArr]
+        return "arr$1.value == $2" % [$currentArr, x[2]]
+      else:
+        var arr = @[x[0], x[1], x[2]]
+        if x[1] == "like":
+          arr[2] = x[2].replace('*', '%')
+        return "json_extract(documents.data, '$1') $2 $3 " % arr
+    return resAndClauses.join(" AND ")
+  options.tables = options.tables & tables
+  options.jsonFilter = resOrClauses.join(" OR ")
"modified-before": + try: + options.modifiedBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg()) + of "limit": + try: + options.limit = pair[1].parseInt + except CatchableError: + raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) + of "offset": + try: + options.offset = pair[1].parseInt + except CatchableError: + raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) + of "sort": + let orderby = pair[1].orderByClauses() + if orderby != "": + options.orderby = orderby + else: + raise newException(EInvalidRequest, "Invalid sort value: $1" % pair[1]) + of "contents", "raw": + discard + else: + discard + +proc parseQueryOptions*(querystring: string, options: var QueryOptions) = + var q = querystring + if q.startsWith("?"): + q = q[1 .. q.len - 1] + var fragments = q.split('&') + for f in fragments: + f.parseQueryOption(options) + +proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse = + if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch: + var ct = "" + let body = req.body.strip + if body == "": + return resError(Http400, "Bad request: No content specified for document.") + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + case ct: + of "application/json": + try: + discard body.parseJson() + except CatchableError: + return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) + else: + discard + return cb(req, LS, resource, id) + +proc patchTag(tags: var seq[string], index: int, op, path, value: string): bool = + LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." % [op, $index, $value, $tags.len]) + case op: + of "remove": + let tag = tags[index] + if not tag.startsWith("$"): + tags[index] = "" # Not removing element, otherwise subsequent indexes won't work! + else: + raise newException(EInvalidRequest, "cannot remove system tag: $1" % tag) + of "add": + if value.match(PEG_USER_TAG): + tags.insert(value, index) + else: + if value.strip == "": + raise newException(EInvalidRequest, "tag not specified." % value) + else: + raise newException(EInvalidRequest, "invalid tag: $1" % value) + of "replace": + if value.match(PEG_USER_TAG): + if tags[index].startsWith("$"): + raise newException(EInvalidRequest, "cannot replace system tag: $1" % tags[index]) + else: + tags[index] = value + else: + if value.strip == "": + raise newException(EInvalidRequest, "tag not specified." 
+proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse =
+  if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch:
+    var ct = ""
+    let body = req.body.strip
+    if body == "":
+      return resError(Http400, "Bad request: No content specified for document.")
+    if req.headers.hasKey("Content-Type"):
+      ct = req.headers["Content-Type"]
+      case ct:
+        of "application/json":
+          try:
+            discard body.parseJson()
+          except CatchableError:
+            return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg())
+        else:
+          discard
+  return cb(req, LS, resource, id)
+
+proc patchTag(tags: var seq[string], index: int, op, path, value: string): bool =
+  LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." % [op, $index, $value, $tags.len])
+  case op:
+    of "remove":
+      let tag = tags[index]
+      if not tag.startsWith("$"):
+        tags[index] = "" # Not removing element, otherwise subsequent indexes won't work!
+      else:
+        raise newException(EInvalidRequest, "cannot remove system tag: $1" % tag)
+    of "add":
+      if value.match(PEG_USER_TAG):
+        tags.insert(value, index)
+      else:
+        if value.strip == "":
+          raise newException(EInvalidRequest, "tag not specified." % value)
+        else:
+          raise newException(EInvalidRequest, "invalid tag: $1" % value)
+    of "replace":
+      if value.match(PEG_USER_TAG):
+        if tags[index].startsWith("$"):
+          raise newException(EInvalidRequest, "cannot replace system tag: $1" % tags[index])
+        else:
+          tags[index] = value
+      else:
+        if value.strip == "":
+          raise newException(EInvalidRequest, "tag not specified." % value)
+        else:
+          raise newException(EInvalidRequest, "invalid tag: $1" % value)
+    of "test":
+      if tags[index] != value:
+        return false
+    else:
+      raise newException(EInvalidRequest, "invalid patch operation: $1" % op)
+  return true
+
+proc patchData*(data: var JsonNode, origData: JsonNode, op: string, path: string, value: JsonNode): bool =
+  LOG.debug("- PATCH -> $1 path $2 with $3" % [op, path, $value])
+  var keys = path.replace(peg"^\/data\/", "").split("/")
+  if keys.len == 0:
+    raise newException(EInvalidRequest, "no valid path specified: $1" % path)
+  var d = data
+  var dorig = origData
+  var c = 1
+  for key in keys:
+    if d.kind == JArray:
+      try:
+        var index = key.parseInt
+        if c >= keys.len:
+          d.elems[index] = value
+          case op:
+            of "remove":
+              d.elems.del(index)
+            of "add":
+              d.elems.insert(value, index)
+            of "replace":
+              d.elems[index] = value
+            of "test":
+              if d.elems[index] != value:
+                return false
+            else:
+              raise newException(EInvalidRequest, "invalid patch operation: $1" % op)
+        else:
+          d = d[index]
+          dorig = dorig[index]
+      except CatchableError:
+        raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path])
+    else:
+      if c >= keys.len:
+        case op:
+          of "remove":
+            if d.hasKey(key):
+              d.delete(key)
+            else:
+              raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path])
+          of "add":
+            d[key] = value
+          of "replace":
+            if d.hasKey(key):
+              d[key] = value
+            else:
+              raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path])
+          of "test":
+            if dorig.hasKey(key):
+              if dorig[key] != value:
+                return false
+            else:
+              raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path])
+          else:
+            raise newException(EInvalidRequest, "invalid patch operation: $1" % op)
+      else:
+        d = d[key]
+        dorig = dorig[key]
+    c += 1
+  return true
+
+
+proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[string], op: string, path: string, value: JsonNode): bool =
+  var matches = @[""]
+  let p = peg"""
+    path <- ^tagPath / fieldPath$
+    tagPath <- '\/tags\/' {\d+}
+    fieldPath <- '\/data\/' ident ('\/' ident)*
+    ident <- [a-zA-Z0-9_]+ / '-'
+  """
+  if path.find(p, matches) == -1:
+    raise newException(EInvalidRequest, "cannot patch path '$1'" % path)
+  if path.match(peg"^\/tags\/"):
+    let index = matches[0].parseInt
+    if value.kind != JString:
+      raise newException(EInvalidRequest, "tag '$1' is not a string." % $value)
+    let tag = value.getStr
+    return patchTag(tags, index, op, path, tag)
+  elif tags.contains("$subtype:json"):
+    return patchData(data, origData, op, path, value)
+  else:
+    raise newException(EInvalidRequest, "cannot patch data of a non-JSON document.")
+
+# Low level procs
+
+proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse =
+  let doc = LS.store.retrieveTag(id, options)
+  result.headers = ctJsonHeader()
+  setOrigin(LS, req, result.headers)
+  if doc == newJNull():
+    result = resTagNotFound(id)
+  else:
+    result.content = $doc
+    result.code = Http200
+
+proc getStore*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse =
+  if (not LSDICT.hasKey(id)):
+    return resStoreNotFound(id)
+  let store = LSDICT[id]
+  var doc = newJObject()
+  doc["id"] = %id
+  doc["file"] = %store.file
+  doc["config"] = store.config
+  result.headers = ctJsonHeader()
+  setOrigin(LS, req, result.headers)
+  result.content = $doc
+  result.code = Http200
+
+proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse =
+  let doc = LS.store.retrieveIndex(id, options)
+  result.headers = ctJsonHeader()
+  setOrigin(LS, req, result.headers)
+  if doc == newJNull():
+    result = resIndexNotFound(id)
+  else:
+    result.content = $doc
+    result.code = Http200
+
+proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse =
+  let doc = LS.store.retrieveRawDocument(id, options)
+  result.headers = ctJsonHeader()
+  setOrigin(LS, req, result.headers)
+  if doc == "":
+    result = resDocumentNotFound(id)
+  else:
+    result.content = doc
+    result.code = Http200
+
+proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse =
+  let doc = LS.store.retrieveDocument(id, options)
+  if doc.data == "":
+    result = resDocumentNotFound(id)
+  else:
+    result.headers = doc.contenttype.ctHeader
+    setOrigin(LS, req, result.headers)
+    result.content = doc.data
+    result.code = Http200
+
+proc deleteDocument*(LS: LiteStore, id: string, req: LSRequest): LSResponse =
+  let doc = LS.store.retrieveDocument(id)
+  if doc.data == "":
+    result = resDocumentNotFound(id)
+  else:
+    try:
+      let res = LS.store.destroyDocument(id)
+      if res == 0:
+        result = resError(Http500, "Unable to delete document '$1'" % id)
+      else:
+        result.headers = newHttpHeaders(TAB_HEADERS)
+        setOrigin(LS, req, result.headers)
+        result.headers["Content-Length"] = "0"
+        result.content = ""
+        result.code = Http204
+    except CatchableError:
+      result = resError(Http500, "Unable to delete document '$1'" % id)
+
+proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =
+  var options = options
+  let t0 = cpuTime()
+  let docs = LS.store.retrieveTags(options)
+  let orig_limit = options.limit
+  let orig_offset = options.offset
+  options.limit = 0
+  options.offset = 0
+  options.select = @["COUNT(tag_id)"]
+  let total = LS.store.countTags(prepareSelectTagsQuery(options), options.like.replace("*", "%"))
+  var content = newJObject()
+  if options.like != "":
+    content["like"] = %(options.like.decodeURL)
+  if orig_limit > 0:
+    content["limit"] = %orig_limit
+  if orig_offset > 0:
+    content["offset"] = %orig_offset
+  content["total"] = %total
+  content["execution_time"] = %(cputime()-t0)
+  content["results"] = docs
+  result.headers = ctJsonHeader()
+  setOrigin(LS, req, result.headers)
+  result.content = content.pretty
+  result.code = Http200
+
+proc getStores(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =
+  let t0 = cpuTime()
+  var docs = newJArray()
+  for k, v in LSDICT.pairs:
+    var store = newJObject()
+    store["id"] = %k
+    store["file"] = %v.file
+    store["config"] = v.config
+    docs.add(store)
+  var content = newJObject()
+  content["total"] = %LSDICT.len
+  content["execution_time"] = %(cputime()-t0)
+  content["results"] = docs
+  result.headers = ctJsonHeader()
+  setOrigin(LS, req, result.headers)
+  result.content = content.pretty
+  result.code = Http200
+
+proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =
+  var options = options
+  let t0 = cpuTime()
+  let docs = LS.store.retrieveIndexes(options)
+  let orig_limit = options.limit
+  let orig_offset = options.offset
+  options.limit = 0
+  options.offset = 0
+  options.select = @["COUNT(name)"]
+  let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), options.like.replace("*", "%"))
+  var content = newJObject()
+  if options.like != "":
+    content["like"] = %(options.like.decodeURL)
+  if orig_limit > 0:
+    content["limit"] = %orig_limit
+  if orig_offset > 0:
+    content["offset"] = %orig_offset
+  content["total"] = %total
+  content["execution_time"] = %(cputime()-t0)
+  content["results"] = docs
+  result.headers = ctJsonHeader()
+  setOrigin(LS, req, result.headers)
+  result.content = content.pretty
+  result.code = Http200
+
+proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse =
+  var options = options
+  let t0 = cpuTime()
+  let docs = LS.store.retrieveRawDocuments(options)
+  let orig_limit = options.limit
+  let orig_offset = options.offset
+  options.limit = 0
+  options.offset = 0
+  options.select = @["COUNT(docid)"]
+  let total = LS.store.retrieveRawDocuments(options)[0].num
+  var content = newJObject()
+  if options.folder != "":
+    content["folder"] = %(options.folder)
+  if options.search != "":
+    content["search"] = %(options.search.decodeURL)
+  if options.tags != "":
+    content["tags"] = newJArray()
+    for tag in options.tags.replace("+", "%2B").decodeURL.split(","):
+      content["tags"].add(%tag)
+  if orig_limit > 0:
+    content["limit"] = %orig_limit
+  if orig_offset > 0:
+    content["offset"] = %orig_offset
+  if options.orderby != "":
+    content["sort"] = %options.orderby
+  content["total"] = %total
+  content["execution_time"] = %(cputime()-t0)
+  content["results"] = docs
+  result.headers = ctJsonHeader()
+  setOrigin(LS, req, result.headers)
+  result.content = content.pretty
+  result.code = Http200
content["auth"] = %true + else: + content["auth"] = %false + content["total_documents"] = %total_documents + content["total_tags"] = %total_tags + content["tags"] = tags + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = content.pretty + result.code = Http200 + +proc putIndex*(LS: LiteStore, id, field: string, req: LSRequest): LSResponse = + try: + if (not id.match(PEG_INDEX)): + return resError(Http400, "invalid index ID: $1" % id) + if (not field.match(PEG_JSON_FIELD)): + return resError(Http400, "invalid field path: $1" % field) + if (LS.store.retrieveIndex(id) != newJNull()): + return resError(Http409, "Index already exists: $1" % id) + LS.store.createIndex(id, field) + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field] + result.code = Http201 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to create index.") + +proc putStore*(LS: LiteStore, id: string, config: JsonNode, req: LSRequest): LSResponse = + try: + if (not id.match(PEG_STORE) or id == "master"): + return resError(Http400, "invalid store ID: $1" % id) + if (LSDICT.hasKey(id)): + return resError(Http409, "Store already exists: $1" % id) + let store = LS.addStore(id, id & ".db", config) + LS.updateConfig() + LSDICT[id] = store + result = getStore(LS, id, newQueryOptions(), req) + result.code = Http201 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to create store.") + +proc deleteIndex*(LS: LiteStore, id: string, req: LSRequest): LSResponse = + if (not id.match(PEG_INDEX)): + return resError(Http400, "invalid index ID: $1" % id) + if (LS.store.retrieveIndex(id) == newJNull()): + return resError(Http404, "Index not found: $1" % id) + try: + LS.store.dropIndex(id) + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Content-Length"] = "0" + result.content = "" + result.code = Http204 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to delete index.") + +proc deleteStore*(LS: LiteStore, id: string, req: LSRequest): LSResponse = + if (not id.match(PEG_STORE)): + return resError(Http400, "invalid store ID: $1" % id) + if (not LSDICT.hasKey(id)): + return resError(Http404, "Store not found: $1" % id) + try: + LSDICT.del(id) + if LS.config.hasKey("stores") and LS.config["stores"].hasKey(id): + LS.config["stores"].delete(id) + LS.updateConfig() + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Content-Length"] = "0" + result.content = "" + result.code = Http204 + except CatchableError: + eWarn() + result = resError(Http500, "Unable to delete index.") + +proc postDocument*(LS: LiteStore, body: string, ct: string, folder="", req: LSRequest): LSResponse = + if not folder.isFolder: + return resError(Http400, "Invalid folder specified when creating document: $1" % folder) + try: + var doc = LS.store.createDocument(folder, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http201 + else: + result = resError(Http500, "Unable to create document.") + except CatchableError: + eWarn() + result = resError(Http500, "Unable to create document.") + +proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, req: LSRequest): LSResponse = + if id.isFolder: + return resError(Http400, "Invalid ID '$1' (Document IDs cannot end with '/')." 
% id) + let doc = LS.store.retrieveDocument(id) + if doc.data == "": + # Create a new document + var doc = LS.store.createDocument(id, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http201 + else: + result = resError(Http500, "Unable to create document.") + else: + # Update existing document + try: + var doc = LS.store.updateDocument(id, body, ct) + if doc != "": + result.headers = ctJsonHeader() + setOrigin(LS, req, result.headers) + result.content = doc + result.code = Http200 + else: + result = resError(Http500, "Unable to update document '$1'." % id) + except CatchableError: + result = resError(Http500, "Unable to update document '$1'." % id) + +proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse = + var apply = true + let jbody = body.parseJson + if jbody.kind != JArray: + return resError(Http400, "Bad request: PATCH request body is not an array.") + var options = newQueryOptions() + options.select = @["documents.id AS id", "created", "modified", "data"] + let doc = LS.store.retrieveRawDocument(id, options) + if doc == "": + return resDocumentNotFound(id) + let jdoc = doc.parseJson + var tags = newSeq[string]() + var origTags = newSeq[string]() + for tag in jdoc["tags"].items: + tags.add(tag.str) + origTags.add(tag.str) + var data: JsonNode + var origData: JsonNode + if tags.contains("$subtype:json"): + try: + origData = jdoc["data"].getStr.parseJson + data = origData.copy + except CatchableError: + discard + var c = 1 + for item in jbody.items: + if item.hasKey("op") and item.hasKey("path"): + if not item.hasKey("value"): + item["value"] = %"" + try: + apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) + if not apply: + break + except CatchableError: + return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) + else: + return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c) + c.inc + if apply: + # when document is not JSON the origData is not defined + # the extra check allows editing tags for non-JSON documents + if origData != nil and origData.len > 0 and origData != data: + try: + var doc = LS.store.updateDocument(id, data.pretty, "application/json") + if doc == "": + return resError(Http500, "Unable to patch document '$1'." % id) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) + if origTags != tags: + try: + for t1 in jdoc["tags"].items: + discard LS.store.destroyTag(t1.str, id, true) + for t2 in tags: + if t2 != "": + LS.store.createTag(t2, id, true) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) + return LS.getRawDocument(id, newQueryOptions(), req) + +# Main routing + +proc options*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + case resource: + of "info": + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + if id != "": + return resError(Http404, "Info '$1' not found."
% id) + else: + result.code = Http204 + result.content = "" + of "dir": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "tags": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "indexes": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + if id != "": + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE" + else: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + of "docs": + var folder: string + if id.isFolder: + folder = id + if folder.len > 0: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, POST, PUT" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST, PUT" + elif id != "": + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" + result.headers["Allow-Patch"] = "application/json-patch+json" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, PUT, PATCH, DELETE" + else: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "HEAD, GET, OPTIONS, POST" + result.headers["Access-Control-Allow-Methods"] = "HEAD, GET, OPTIONS, POST" + of "stores": + result.code = Http204 + result.content = "" + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + if id != "": + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + 
result.headers["Allow"] = "GET, OPTIONS, PUT, DELETE" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS, PUT, DELETE" + else: + result.code = Http204 + result.content = "" + if LS.readonly: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + result.headers = newHttpHeaders(TAB_HEADERS) + setOrigin(LS, req, result.headers) + result.headers["Allow"] = "GET, OPTIONS" + result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS" + else: + discard # never happens really. + +proc head*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + var options = newQueryOptions() + options.select = @["documents.id AS id", "created", "modified"] + if id.isFolder: + options.folder = id + try: + parseQueryOptions(req.url.query, options); + if id != "" and options.folder == "": + result = LS.getRawDocument(id, options, req) + result.content = "" + else: + result = LS.getRawDocuments(options, req) + result.content = "" + except CatchableError: + return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) + +proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + case resource: + of "docs": + var options = newQueryOptions() + if id.isFolder: + options.folder = id + if req.url.query.contains("contents=false"): + options.select = @["documents.id AS id", "created", "modified"] + try: + parseQueryOptions(req.url.query, options); + if id != "" and options.folder == "": + if req.url.query.contains("raw=true") or req.headers.hasKey("Accept") and req.headers["Accept"] == "application/json": + return LS.getRawDocument(id, options, req) + else: + return LS.getDocument(id, options, req) + else: + return LS.getRawDocuments(options, req) + except CatchableError: + let e = getCurrentException() + let trace = e.getStackTrace() + echo trace + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "tags": + var options = newQueryOptions() + try: + parseQueryOptions(req.url.query, options); + if id != "": + return LS.getTag(id, options, req) + else: + return LS.getTags(options, req) + except CatchableError: + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "indexes": + var options = newQueryOptions() + try: + parseQueryOptions(req.url.query, options); + if id != "": + return LS.getIndex(id, options, req) + else: + return LS.getIndexes(options, req) + except CatchableError: + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "stores": + var options = newQueryOptions() + try: + parseQueryOptions(req.url.query, options); + if id != "": + return LS.getStore(id, options, req) + else: + return LS.getStores(options, req) + except CatchableError: + return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) + of "info": + if id != "": + return resError(Http404, "Info '$1' not found." % id) + return LS.getInfo(req) + else: + discard # never happens really. 
+ +proc post*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + var ct = "text/plain" + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + return LS.postDocument(req.body.strip, ct, id, req) + +proc put*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + if resource == "indexes": + var field = "" + try: + field = parseJson(req.body.strip)["field"].getStr + except CatchableError: + return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) + return LS.putIndex(id, field, req) + elif resource == "stores": + var config = newJNull() + try: + config = parseJson(req.body) + except CatchableError: + return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) + return LS.putStore(id, config, req) + else: # Assume docs + var ct = "text/plain" + if req.headers.hasKey("Content-Type"): + ct = req.headers["Content-Type"] + return LS.putDocument(id, req.body.strip, ct, req) + else: + return resError(Http400, "Bad request: document ID must be specified in PUT requests.") + +proc delete*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + if resource == "indexes": + return LS.deleteIndex(id, req) + elif resource == "stores": + return LS.deleteStore(id, req) + else: # Assume docs + return LS.deleteDocument(id, req) + else: + return resError(Http400, "Bad request: document ID must be specified in DELETE requests.") + +proc patch*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = + if id != "": + return LS.patchDocument(id, req.body, req) + else: + return resError(Http400, "Bad request: document ID must be specified in PATCH requests.") + +proc serveFile*(req: LSRequest, LS: LiteStore, id: string): LSResponse = + let path = LS.directory / id + var reqMethod = $req.reqMethod + if req.headers.hasKey("X-HTTP-Method-Override"): + reqMethod = req.headers["X-HTTP-Method-Override"] + case reqMethod.toUpperAscii: + of "OPTIONS": + return validate(req, LS, "dir", id, options) + of "GET": + if path.fileExists: + try: + let contents = path.readFile + let parts = path.splitFile + if CONTENT_TYPES.hasKey(parts.ext): + result.headers = CONTENT_TYPES[parts.ext].ctHeader + else: + result.headers = ctHeader("text/plain") + setOrigin(LS, req, result.headers) + result.content = contents + result.code = Http200 + except CatchableError: + return resError(Http500, "Unable to read file '$1'." % path) + else: + return resError(Http404, "File '$1' not found." 
% path) + else: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + +proc route*(req: LSRequest, LS: LiteStore, resource = "docs", id = ""): LSResponse = + var reqMethod = $req.reqMethod + if req.headers.hasKey("X-HTTP-Method-Override"): + reqMethod = req.headers["X-HTTP-Method-Override"] + LOG.debug("ROUTE - resource: " & resource & " id: " & id) + case reqMethod.toUpperAscii: + of "POST": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, post) + of "PUT": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, put) + of "DELETE": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, delete) + of "HEAD": + return validate(req, LS, resource, id, head) + of "OPTIONS": + return validate(req, LS, resource, id, options) + of "GET": + return validate(req, LS, resource, id, get) + of "PATCH": + if LS.readonly: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + return validate(req, LS, resource, id, patch) + else: + return resError(Http405, "Method not allowed: $1" % $req.reqMethod) + +proc multiRoute(req: LSRequest, resource, id: string): LSResponse = + var matches = @["", "", ""] + if req.url.path.find(PEG_STORE_URL, matches) != -1: + let id = matches[0] + let path = "/v7/" & matches[1] + matches = @["", "", ""] + discard path.find(PEG_URL, matches) + return req.route(LSDICT[id], matches[1], matches[2]) + return req.route(LS, resource, id) + +proc newSimpleLSRequest(meth: HttpMethod, resource = "", id = "", body = "", params = "", headers = newHttpHeaders()): LSRequest = + result.reqMethod = meth + result.body = body + result.headers = headers + result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", resource, id, params]) + +proc get(resource, id: string, params = ""): LSResponse = + return newSimpleLSRequest(HttpGet, resource, id, "", params).multiRoute(resource, id) + +proc post(resource, folder, body: string, ct = ""): LSResponse = + var headers = newHttpHeaders() + if ct != "": + headers["Content-Type"] = ct + return newSimpleLSRequest(HttpPost, resource, folder, body, "", headers).multiRoute(resource, folder & "/") + +proc put(resource, id, body: string, ct = ""): LSResponse = + var headers = newHttpHeaders() + if ct != "": + headers["Content-Type"] = ct + return newSimpleLSRequest(HttpPut, resource, id, body, "", headers).multiRoute(resource, id) + +proc patch(resource, id, body: string): LSResponse = + var headers = newHttpHeaders() + headers["Content-Type"] = "application/json" + return newSimpleLSRequest(HttpPatch, resource, id, body, "", headers).multiRoute(resource, id) + +proc delete(resource, id: string): LSResponse = + return newSimpleLSRequest(HttpDelete, resource, id).multiRoute(resource, id) + +proc head(resource, id: string): LSResponse = + return newSimpleLSRequest(HttpHead, resource, id).multiRoute(resource, id) + +proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, origId: string) = + var api_idx = ctx.duk_push_object() + # GET + var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let params = duk_get_string(ctx, 2) + let resp = get($resource, $id, $params) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + 
discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, get, 3) + discard ctx.duk_put_prop_string(api_idx, "get") + # POST + var post: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let folder = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let ct = duk_get_string(ctx, 3) + let resp = post($resource, $folder, $body, $ct) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, post, 4) + discard ctx.duk_put_prop_string(api_idx, "post") + # PUT + var put: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let ct = duk_get_string(ctx, 3) + let resp = put($resource, $id, $body, $ct) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, put, 4) + discard ctx.duk_put_prop_string(api_idx, "put") + # PATCH + var patch: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let body = duk_get_string(ctx, 2) + let resp = patch($resource, $id, $body) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, patch, 3) + discard ctx.duk_put_prop_string(api_idx, "patch") + # DELETE + var delete: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let resp = delete($resource, $id) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, delete, 2) + discard ctx.duk_put_prop_string(api_idx, "delete") + # HEAD + var head: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let resource = duk_get_string(ctx, 0) + let id = duk_get_string(ctx, 1) + let resp = head($resource, $id) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.content.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + return 1 + ) + discard duk_push_c_function(ctx, head, 2) + discard ctx.duk_put_prop_string(api_idx, "head") + discard ctx.duk_put_global_string("$store") + +proc jError(ctx: DTContext): LSResponse = + return resError(Http500, "Middleware Error: " & $ctx.duk_safe_to_string(-1)) + +proc getMiddleware*(LS: LiteStore, id: string): string = + if not LS.middleware.hasKey(id): + # Attempt to retrieve resource from system documents + let options = newQueryOptions(true) + let doc = LS.store.retrieveDocument("middleware/" & id & ".js", options) + result = doc.data + if 
result == "": + LOG.warn("Middleware '$1' not found" % id) + else: + result = LS.middleware[id] + +proc getMiddlewareSeq(LS: LiteStore, resource, id, meth: string): seq[string] = + result = newSeq[string]() + if LS.config.kind != JObject or not LS.config.hasKey("resources"): + return + var reqUri = "/" & resource & "/" & id + if reqUri[^1] == '/': + reqUri.removeSuffix({'/'}) + let parts = reqUri.split("/") + let ancestors = parts[1..parts.len-2] + var currentPath = "" + var currentPaths = "" + for p in ancestors: + currentPath &= "/" & p + currentPaths = currentPath & "/*" + if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("middleware"): + let mw = LS.config["resources"][currentPaths][meth]["middleware"] + if (mw.kind == JArray): + for m in mw: + result.add m.getStr + if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): + let mw = LS.config["resources"][reqUri][meth]["middleware"] + if (mw.kind == JArray): + for m in mw: + result.add m.getStr + +proc execute*(req: var LSRequest, LS: LiteStore, resource, id: string): LSResponse = + let middleware = getMiddlewareSeq(LS, resource, id, $req.reqMethod) + if middleware.len > 0: + LOG.debug("Middleware: " & middleware.join(" -> ")); + if middleware.len == 0: + return route(req, LS, resource, id) + var jReq = $(%* req) + LOG.debug("Request: " & jReq) + var jRes = """{ + "code": 200, + "content": {}, + "final": false, + "headers": { + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Headers": "Authorization, Content-Type", + "Server": "$1", + "Content-Type": "application/json" + } + }""" % [LS.appname & "/" & LS.appversion] + var context = "{}" + # Create execution context + var ctx = duk_create_heap_default() + duk_console_init(ctx) + duk_print_alert_init(ctx) + LS.registerStoreApi(ctx, resource, id) + if ctx.duk_peval_string(cstring("($1)" % $jReq)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$req") + if ctx.duk_peval_string(cstring("($1)" % $jRes)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$res") + if ctx.duk_peval_string(cstring("($1)" % $context)) != 0: + return jError(ctx) + discard ctx.duk_put_global_string("$ctx") + # Middleware-specific functions + var i = 0 + var abort = 0 + while abort != 1 and i < middleware.len: + let code = LS.getMiddleware(middleware[i]) + LOG.debug("Evaluating middleware '$1'" % middleware[i]) + if ctx.duk_peval_string(code.cstring) != 0: + return jError(ctx) + abort = ctx.duk_get_boolean(-1) + i.inc + # Retrieve response, and request + if ctx.duk_peval_string("JSON.stringify($res);") != 0: + return jError(ctx) + let fRes = parseJson($(ctx.duk_get_string(-1))).newLSResponse + if ctx.duk_peval_string("JSON.stringify($req);") != 0: + return jError(ctx) + let fReq = parseJson($(ctx.duk_get_string(-1))).newLSRequest() + ctx.duk_destroy_heap(); + LOG.debug("abort: $1", [$abort]) + if abort == 1: + return fRes + return route(fReq, LS, resource, id)
@@ -1,5 +1,6 @@
import - asynchttpserver, + std/[asynchttpserver, + httpclient, strutils, sequtils, cgi,@@ -9,7 +10,7 @@ json,
os, uri, tables, - times + times] import types, contenttypes,@@ -148,17 +149,19 @@ if pair.len < 2 or pair[1] == "":
raise newException(EInvalidRequest, "Invalid query string fragment '$1'" % fragment) try: pair[1] = pair[1].replace("+", "%2B").decodeURL - except: + except CatchableError: raise newException(EInvalidRequest, "Unable to decode query string fragment '$1'" % fragment) case pair[0]: of "filter": filterClauses(pair[1], options) if options.jsonFilter == "": - raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[1].replace("\"", "\\\"")) + raise newException(EInvalidRequest, "Invalid filter clause: $1" % pair[ + 1].replace("\"", "\\\"")) of "select": selectClause(pair[1], options) if options.jsonSelect.len == 0: - raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[1].replace("\"", "\\\"")) + raise newException(EInvalidRequest, "Invalid select clause: $1" % pair[ + 1].replace("\"", "\\\"")) of "like": options.like = pair[1] of "search":@@ -168,33 +171,39 @@ options.tags = pair[1]
of "created-after": try: options.createdAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-after value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, "Invalid created-after value: $1" % + getCurrentExceptionMsg()) of "created-before": try: options.createdBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid created-before value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, "Invalid created-before value: $1" % + getCurrentExceptionMsg()) of "modified-after": try: options.modifiedAfter = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid modified.after value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, "Invalid modified.after value: $1" % + getCurrentExceptionMsg()) of "modified-before": try: options.modifiedBefore = pair[1].parseInt.fromUnix.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") - except: - raise newException(EInvalidRequest, "Invalid modified-before value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, + "Invalid modified-before value: $1" % getCurrentExceptionMsg()) of "limit": try: options.limit = pair[1].parseInt - except: - raise newException(EInvalidRequest, "Invalid limit value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, "Invalid limit value: $1" % + getCurrentExceptionMsg()) of "offset": try: options.offset = pair[1].parseInt - except: - raise newException(EInvalidRequest, "Invalid offset value: $1" % getCurrentExceptionMsg()) + except CatchableError: + raise newException(EInvalidRequest, "Invalid offset value: $1" % + getCurrentExceptionMsg()) of "sort": let orderby = pair[1].orderByClauses() if orderby != "":@@ -214,9 +223,11 @@ var fragments = q.split('&')
for f in fragments: f.parseQueryOption(options) -proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse = +proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, + cb: proc(req: LSRequest, LS: LiteStore, resource: string, + id: string): LSResponse): LSResponse = if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch: - var ct = "" + var ct = "" let body = req.body.strip if body == "": return resError(Http400, "Bad request: No content specified for document.")@@ -226,14 +237,17 @@ case ct:
of "application/json": try: discard body.parseJson() - except: - return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) + except CatchableError: + return resError(Http400, "Invalid JSON content - $1" % + getCurrentExceptionMsg()) else: discard return cb(req, LS, resource, id) -proc patchTag(tags: var seq[string], index: int, op, path, value: string): bool = - LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." % [op, $index, $value, $tags.len]) +proc patchTag(tags: var seq[string], index: int, op, path, + value: string): bool = + LOG.debug("- PATCH -> $1 tag['$2'] = \"$3\" - Total tags: $4." % [op, $index, + $value, $tags.len]) case op: of "remove": let tag = tags[index]@@ -252,7 +266,8 @@ raise newException(EInvalidRequest, "invalid tag: $1" % value)
of "replace": if value.match(PEG_USER_TAG): if tags[index].startsWith("$"): - raise newException(EInvalidRequest, "cannot replace system tag: $1" % tags[index]) + raise newException(EInvalidRequest, "cannot replace system tag: $1" % + tags[index]) else: tags[index] = value else:@@ -267,7 +282,8 @@ else:
raise newException(EInvalidRequest, "invalid patch operation: $1" % op) return true -proc patchData*(data: var JsonNode, origData: JsonNode, op: string, path: string, value: JsonNode): bool = +proc patchData*(data: var JsonNode, origData: JsonNode, op: string, + path: string, value: JsonNode): bool = LOG.debug("- PATCH -> $1 path $2 with $3" % [op, path, $value]) var keys = path.replace(peg"^\/data\/", "").split("/") if keys.len == 0:@@ -292,12 +308,14 @@ of "test":
if d.elems[index] != value: return false else: - raise newException(EInvalidRequest, "invalid patch operation: $1" % op) + raise newException(EInvalidRequest, + "invalid patch operation: $1" % op) else: d = d[index] dorig = dorig[index] - except: - raise newException(EInvalidRequest, "invalid index key '$1' in path '$2'" % [key, path]) + except CatchableError: + raise newException(EInvalidRequest, + "invalid index key '$1' in path '$2'" % [key, path]) else: if c >= keys.len: case op:@@ -305,20 +323,23 @@ of "remove":
if d.hasKey(key): d.delete(key) else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + raise newException(EInvalidRequest, + "key '$1' not found in path '$2'" % [key, path]) of "add": d[key] = value of "replace": if d.hasKey(key): d[key] = value else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + raise newException(EInvalidRequest, + "key '$1' not found in path '$2'" % [key, path]) of "test": if dorig.hasKey(key): if dorig[key] != value: return false else: - raise newException(EInvalidRequest, "key '$1' not found in path '$2'" % [key, path]) + raise newException(EInvalidRequest, + "key '$1' not found in path '$2'" % [key, path]) else: raise newException(EInvalidRequest, "invalid patch operation: $1" % op) else:@@ -328,7 +349,8 @@ c += 1
return true -proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[string], op: string, path: string, value: JsonNode): bool = +proc applyPatchOperation*(data: var JsonNode, origData: JsonNode, tags: var seq[ + string], op: string, path: string, value: JsonNode): bool = var matches = @[""] let p = peg""" path <- ^tagPath / fieldPath$@@ -351,7 +373,8 @@ raise newException(EInvalidRequest, "cannot patch data of a non-JSON document.")
# Low level procs -proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = +proc getTag*(LS: LiteStore, id: string, options = newQueryOptions(), + req: LSRequest): LSResponse = let doc = LS.store.retrieveTag(id, options) result.headers = ctJsonHeader() setOrigin(LS, req, result.headers)@@ -361,7 +384,8 @@ else:
result.content = $doc result.code = Http200 -proc getStore*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = +proc getStore*(LS: LiteStore, id: string, options = newQueryOptions(), + req: LSRequest): LSResponse = if (not LSDICT.hasKey(id)): return resStoreNotFound(id) let store = LSDICT[id]@@ -374,7 +398,8 @@ setOrigin(LS, req, result.headers)
result.content = $doc result.code = Http200 -proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = +proc getIndex*(LS: LiteStore, id: string, options = newQueryOptions(), + req: LSRequest): LSResponse = let doc = LS.store.retrieveIndex(id, options) result.headers = ctJsonHeader() setOrigin(LS, req, result.headers)@@ -384,7 +409,8 @@ else:
result.content = $doc result.code = Http200 -proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = +proc getRawDocument*(LS: LiteStore, id: string, options = newQueryOptions(), + req: LSRequest): LSResponse = let doc = LS.store.retrieveRawDocument(id, options) result.headers = ctJsonHeader() setOrigin(LS, req, result.headers)@@ -394,7 +420,8 @@ else:
result.content = doc result.code = Http200 -proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), req: LSRequest): LSResponse = +proc getDocument*(LS: LiteStore, id: string, options = newQueryOptions(), + req: LSRequest): LSResponse = let doc = LS.store.retrieveDocument(id, options) if doc.data == "": result = resDocumentNotFound(id)@@ -419,10 +446,11 @@ setOrigin(LS, req, result.headers)
result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: result = resError(Http500, "Unable to delete document '$1'" % id) -proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = +proc getTags*(LS: LiteStore, options: QueryOptions = newQueryOptions(), + req: LSRequest): LSResponse = var options = options let t0 = cpuTime() let docs = LS.store.retrieveTags(options)@@ -431,7 +459,8 @@ let orig_offset = options.offset
options.limit = 0 options.offset = 0 options.select = @["COUNT(tag_id)"] - let total = LS.store.countTags(prepareSelectTagsQuery(options), options.like.replace("*", "%")) + let total = LS.store.countTags(prepareSelectTagsQuery(options), + options.like.replace("*", "%")) var content = newJObject() if options.like != "": content["like"] = %(options.like.decodeURL)@@ -447,7 +476,8 @@ setOrigin(LS, req, result.headers)
result.content = content.pretty result.code = Http200 -proc getStores(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = +proc getStores(LS: LiteStore, options: QueryOptions = newQueryOptions(), + req: LSRequest): LSResponse = let t0 = cpuTime() var docs = newJArray() for k, v in LSDICT.pairs:@@ -465,7 +495,8 @@ setOrigin(LS, req, result.headers)
result.content = content.pretty result.code = Http200 -proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = +proc getIndexes*(LS: LiteStore, options: QueryOptions = newQueryOptions(), + req: LSRequest): LSResponse = var options = options let t0 = cpuTime() let docs = LS.store.retrieveIndexes(options)@@ -474,7 +505,8 @@ let orig_offset = options.offset
options.limit = 0 options.offset = 0 options.select = @["COUNT(name)"] - let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), options.like.replace("*", "%")) + let total = LS.store.countIndexes(prepareSelectIndexesQuery(options), + options.like.replace("*", "%")) var content = newJObject() if options.like != "": content["like"] = %(options.like.decodeURL)@@ -490,7 +522,8 @@ setOrigin(LS, req, result.headers)
result.content = content.pretty result.code = Http200 -proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), req: LSRequest): LSResponse = +proc getRawDocuments*(LS: LiteStore, options: QueryOptions = newQueryOptions(), + req: LSRequest): LSResponse = var options = options let t0 = cpuTime() let docs = LS.store.retrieveRawDocuments(options)@@ -533,7 +566,8 @@ var content = newJObject()
content["version"] = %(LS.appname & " v" & LS.appversion) content["datastore_version"] = %version content["api_version"] = %7 - content["size"] = %($((LS.file.getFileSize().float/(1024*1024)).formatFloat(ffDecimal, 2)) & " MB") + content["size"] = %($((LS.file.getFileSize().float/(1024*1024)).formatFloat( + ffDecimal, 2)) & " MB") content["read_only"] = %LS.readonly content["log_level"] = %LS.loglevel if LS.directory.len == 0:@@ -541,7 +575,8 @@ content["directory"] = newJNull()
else: content["directory"] = %LS.directory content["mount"] = %LS.mount - if LS.config != newJNull() and LS.config.hasKey("stores") and LS.config["stores"].len > 0: + if LS.config != newJNull() and LS.config.hasKey("stores") and LS.config[ + "stores"].len > 0: content["additional_stores"] = %toSeq(LS.config["stores"].keys) else: content["additional_stores"] = newJArray()@@ -570,11 +605,12 @@ result.headers = ctJsonHeader()
setOrigin(LS, req, result.headers) result.content = "{\"id\": \"$1\", \"field\": \"$2\"}" % [id, field] result.code = Http201 - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to create index.") -proc putStore*(LS: LiteStore, id: string, config: JsonNode, req: LSRequest): LSResponse = +proc putStore*(LS: LiteStore, id: string, config: JsonNode, + req: LSRequest): LSResponse = try: if (not id.match(PEG_STORE) or id == "master"): return resError(Http400, "invalid store ID: $1" % id)@@ -585,7 +621,7 @@ LS.updateConfig()
LSDICT[id] = store result = getStore(LS, id, newQueryOptions(), req) result.code = Http201 - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to create store.")@@ -601,7 +637,7 @@ setOrigin(LS, req, result.headers)
result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to delete index.")@@ -620,11 +656,12 @@ setOrigin(LS, req, result.headers)
result.headers["Content-Length"] = "0" result.content = "" result.code = Http204 - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to delete index.") -proc postDocument*(LS: LiteStore, body: string, ct: string, folder="", req: LSRequest): LSResponse = +proc postDocument*(LS: LiteStore, body: string, ct: string, folder = "", + req: LSRequest): LSResponse = if not folder.isFolder: return resError(Http400, "Invalid folder specified when creating document: $1" % folder) try:@@ -636,11 +673,12 @@ result.content = doc
result.code = Http201 else: result = resError(Http500, "Unable to create document.") - except: + except CatchableError: eWarn() result = resError(Http500, "Unable to create document.") -proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, req: LSRequest): LSResponse = +proc putDocument*(LS: LiteStore, id: string, body: string, ct: string, + req: LSRequest): LSResponse = if id.isFolder: return resError(Http400, "Invalid ID '$1' (Document IDs cannot end with '/')." % id) let doc = LS.store.retrieveDocument(id)@@ -665,10 +703,11 @@ result.content = doc
result.code = Http200 else: result = resError(Http500, "Unable to update document '$1'." % id) - except: + except CatchableError: result = resError(Http500, "Unable to update document '$1'." % id) -proc patchDocument*(LS: LiteStore, id: string, body: string, req: LSRequest): LSResponse = +proc patchDocument*(LS: LiteStore, id: string, body: string, + req: LSRequest): LSResponse = var apply = true let jbody = body.parseJson if jbody.kind != JArray:@@ -690,7 +729,7 @@ if tags.contains("$subtype:json"):
try: origData = jdoc["data"].getStr.parseJson data = origData.copy - except: + except CatchableError: discard var c = 1 for item in jbody.items:@@ -698,13 +737,14 @@ if item.hasKey("op") and item.hasKey("path"):
if not item.hasKey("value"): item["value"] = %"" try: - apply = applyPatchOperation(data, origData, tags, item["op"].str, item["path"].str, item["value"]) + apply = applyPatchOperation(data, origData, tags, item["op"].str, item[ + "path"].str, item["value"]) if not apply: break - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) else: - return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c) + return resError(Http400, "Bad request: patch operation #$1 is malformed." % $c) c.inc if apply: # when document is not JSON the origData is not defined@@ -714,8 +754,9 @@ try:
var doc = LS.store.updateDocument(id, data.pretty, "application/json") + if doc == "": + return resError(Http500, "Unable to patch document '$1'." % id) - except: - return resError(Http500, "Unable to patch document '$1' - $2" % id, getCurrentExceptionMsg()) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % [id, + getCurrentExceptionMsg()]) + if origTags != tags: + try: + for t1 in jdoc["tags"].items:@@ -723,13 +764,15 @@ discard LS.store.destroyTag(t1.str, id, true)
for t2 in tags: if t2 != "": LS.store.createTag(t2, id, true) - except: - return resError(Http500, "Unable to patch document '$1' - $2" % [id, getCurrentExceptionMsg()]) + except CatchableError: + return resError(Http500, "Unable to patch document '$1' - $2" % [id, + getCurrentExceptionMsg()]) return LS.getRawDocument(id, newQueryOptions(), req) # Main routing -proc options*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc options*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = case resource: of "info": result.headers = newHttpHeaders(TAB_HEADERS)@@ -863,7 +906,8 @@ result.headers["Access-Control-Allow-Methods"] = "GET, OPTIONS"
else: discard # never happens really. -proc head*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc head*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = var options = newQueryOptions() options.select = @["documents.id AS id", "created", "modified"] if id.isFolder:@@ -876,10 +920,11 @@ result.content = ""
else: result = LS.getRawDocuments(options, req) result.content = "" - except: + except CatchableError: return resError(Http400, "Bad request - $1" % getCurrentExceptionMsg()) -proc get*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc get*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = case resource: of "docs": var options = newQueryOptions()@@ -890,13 +935,14 @@ options.select = @["documents.id AS id", "created", "modified"]
try: parseQueryOptions(req.url.query, options); if id != "" and options.folder == "": - if req.url.query.contains("raw=true") or req.headers.hasKey("Accept") and req.headers["Accept"] == "application/json": + if req.url.query.contains("raw=true") or req.headers.hasKey( + "Accept") and req.headers["Accept"] == "application/json": return LS.getRawDocument(id, options, req) else: return LS.getDocument(id, options, req) else: return LS.getRawDocuments(options, req) - except: + except CatchableError: let e = getCurrentException() let trace = e.getStackTrace() echo trace@@ -913,9 +959,9 @@ else:
options.folder = "" result = LS.getDocument(id & "index.html", options, req) if result.code == Http404: - result = LS.getDocument(id & "index.htm", options, req) + result = LS.getDocument(id & "index.htm", options, req) return result - except: + except CatchableError: let e = getCurrentException() let trace = e.getStackTrace() echo trace@@ -928,7 +974,7 @@ if id != "":
return LS.getTag(id, options, req) else: return LS.getTags(options, req) - except: + except CatchableError: return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) of "indexes": var options = newQueryOptions()@@ -938,7 +984,7 @@ if id != "":
return LS.getIndex(id, options, req) else: return LS.getIndexes(options, req) - except: + except CatchableError: return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) of "stores": var options = newQueryOptions()@@ -948,7 +994,7 @@ if id != "":
return LS.getStore(id, options, req) else: return LS.getStores(options, req) - except: + except CatchableError: return resError(Http400, "Bad Request - $1" % getCurrentExceptionMsg()) of "info": if id != "":@@ -957,27 +1003,31 @@ return LS.getInfo(req)
else: discard # never happens really. -proc post*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc post*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = var ct = "text/plain" if req.headers.hasKey("Content-Type"): ct = req.headers["Content-Type"] return LS.postDocument(req.body.strip, ct, id, req) -proc put*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc put*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = if id != "": if resource == "indexes": var field = "" try: field = parseJson(req.body.strip)["field"].getStr - except: - return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) + except CatchableError: + return resError(Http400, "Bad Request - Invalid JSON body - $1" % + getCurrentExceptionMsg()) return LS.putIndex(id, field, req) elif resource == "stores": var config = newJNull() try: config = parseJson(req.body) - except: - return resError(Http400, "Bad Request - Invalid JSON body - $1" % getCurrentExceptionMsg()) + except CatchableError: + return resError(Http400, "Bad Request - Invalid JSON body - $1" % + getCurrentExceptionMsg()) return LS.putStore(id, config, req) else: # Assume docs var ct = "text/plain"@@ -987,7 +1037,8 @@ return LS.putDocument(id, req.body.strip, ct, req)
else: return resError(Http400, "Bad request: document ID must be specified in PUT requests.") -proc delete*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc delete*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = if id != "": if resource == "indexes": return LS.deleteIndex(id, req)@@ -998,7 +1049,8 @@ return LS.deleteDocument(id, req)
else: return resError(Http400, "Bad request: document ID must be specified in DELETE requests.") -proc patch*(req: LSRequest, LS: LiteStore, resource: string, id = ""): LSResponse = +proc patch*(req: LSRequest, LS: LiteStore, resource: string, + id = ""): LSResponse = if id != "": return LS.patchDocument(id, req.body, req) else:@@ -1024,14 +1076,15 @@ result.headers = ctHeader("text/plain")
setOrigin(LS, req, result.headers) result.content = contents result.code = Http200 - except: + except CatchableError: return resError(Http500, "Unable to read file '$1'." % path) else: return resError(Http404, "File '$1' not found." % path) else: return resError(Http405, "Method not allowed: $1" % $req.reqMethod) -proc route*(req: LSRequest, LS: LiteStore, resource = "docs", id = ""): LSResponse = +proc route*(req: LSRequest, LS: LiteStore, resource = "docs", + id = ""): LSResponse = var reqMethod = $req.reqMethod if req.headers.hasKey("X-HTTP-Method-Override"): reqMethod = req.headers["X-HTTP-Method-Override"]@@ -1072,31 +1125,37 @@ discard path.find(PEG_URL, matches)
return req.route(LSDICT[id], matches[1], matches[2]) return req.route(LS, resource, id) -proc newSimpleLSRequest(meth: HttpMethod, resource, id, body = "", params = "", headers = newHttpHeaders()): LSRequest = +proc newSimpleLSRequest(meth: HttpMethod, resource = "", id = "", body = "", + params = "", headers = newHttpHeaders()): LSRequest = result.reqMethod = meth result.body = body result.headers = headers - result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", resource, id, params]) - + result.url = parseUri("$1://$2:$3/$4/$5?$6" % @["http", "localhost", "9500", + resource, id, params]) + proc get(resource, id: string, params = ""): LSResponse = - return newSimpleLSRequest(HttpGet, resource, id, "", params).multiRoute(resource, id) + return newSimpleLSRequest(HttpGet, resource, id, "", params).multiRoute( + resource, id) proc post(resource, folder, body: string, ct = ""): LSResponse = var headers = newHttpHeaders() if ct != "": headers["Content-Type"] = ct - return newSimpleLSRequest(HttpPost, resource, folder, body, "", headers).multiRoute(resource, folder & "/") + return newSimpleLSRequest(HttpPost, resource, folder, body, "", + headers).multiRoute(resource, folder & "/") proc put(resource, id, body: string, ct = ""): LSResponse = var headers = newHttpHeaders() if ct != "": headers["Content-Type"] = ct - return newSimpleLSRequest(HttpPut, resource, id, body, "", headers).multiRoute(resource, id) + return newSimpleLSRequest(HttpPut, resource, id, body, "", + headers).multiRoute(resource, id) proc patch(resource, id, body: string): LSResponse = var headers = newHttpHeaders() headers["Content-Type"] = "application/json" - return newSimpleLSRequest(HttpPatch, resource, id, body, "", headers).multiRoute(resource, id) + return newSimpleLSRequest(HttpPatch, resource, id, body, "", + headers).multiRoute(resource, id) proc delete(resource, id: string): LSResponse = return newSimpleLSRequest(HttpDelete, resource, id).multiRoute(resource, id)@@ -1104,7 +1163,133 @@
proc head(resource, id: string): LSResponse = + return newSimpleLSRequest(HttpHead, resource, id).multiRoute(resource, id) + -proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, origId: string) = +proc toHeaders(obj: JsonNode): HttpHeaders = + var headers = newSeq[tuple[key: string, val: string]](0) + for k, v in obj.pairs: + headers.add (key: k, val: v.getstr) + return newHttpHeaders(headers) + +proc toJson(headers: HttpHeaders): JsonNode = + result = newJObject() + for k, v in headers.pairs: + result[k] = newJString(v) + +proc getHeadersArg(ctx: DTContext): HttpHeaders = + duk_enum(ctx, 1, 0) + var headers = newSeq[tuple[key: string, val: string]](0) + while duk_next(ctx, -1, 1) == 1: + let key = $duk_safe_to_string(ctx, -2) + let val = $duk_safe_to_string(ctx, -1) + duk_pop_2(ctx) + headers.add (key: key, val: val) + return newHttpHeaders(headers) + + +proc registerHttpApi(LS: LiteStore, ctx: DTContext) = + var api_idx = ctx.duk_push_object() + # GET + var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let resp = client.get(url) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, get, 2) + discard ctx.duk_put_prop_string(api_idx, "get") + # HEAD + var head: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let resp = client.head(url) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, head, 2) + discard ctx.duk_put_prop_string(api_idx, "head") + # DELETE + var delete: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let resp = client.delete(url) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, delete, 2) + discard ctx.duk_put_prop_string(api_idx, "delete") + # POST + var post: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let body = $duk_get_string(ctx, 2) + let resp = client.post(url, body) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard 
ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, post, 3) + discard ctx.duk_put_prop_string(api_idx, "post") + # PUT + var put: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let body = $duk_get_string(ctx, 2) + let resp = client.put(url, body) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, put, 3) + discard ctx.duk_put_prop_string(api_idx, "put") + # PATCH + var patch: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} = + let url = $duk_get_string(ctx, 0) + let client = newHttpClient(headers = ctx.getHeadersArg) + let body = $duk_get_string(ctx, 2) + let resp = client.patch(url, body) + var res_idx = ctx.duk_push_object() + ctx.duk_push_int(cast[cint](resp.code)) + discard ctx.duk_put_prop_string(res_idx, "code") + discard ctx.duk_push_string(resp.body.cstring) + discard ctx.duk_put_prop_string(res_idx, "content") + discard ctx.duk_push_string(resp.headers.toJson.pretty.cstring) + discard ctx.duk_put_prop_string(res_idx, "headers") + return 1 + ) + discard duk_push_c_function(ctx, patch, 3) + discard ctx.duk_put_prop_string(api_idx, "patch") + discard ctx.duk_put_global_string("$http") + +proc registerStoreApi(LS: LiteStore, ctx: DTContext, origResource, + origId: string) = var api_idx = ctx.duk_push_object() # GET var get: DTCFunction = (proc (ctx: DTContext): cint{.stdcall.} =@@ -1198,7 +1383,7 @@ discard duk_push_c_function(ctx, head, 2)
discard ctx.duk_put_prop_string(api_idx, "head") discard ctx.duk_put_global_string("$store") -proc jError(ctx: DTContext): LSResponse = +proc jError(ctx: DTContext): LSResponse = return resError(Http500, "Middleware Error: " & $ctx.duk_safe_to_string(-1)) proc getMiddleware*(LS: LiteStore, id: string): string =@@ -1213,9 +1398,9 @@ else:
result = LS.middleware[id] proc getMiddlewareSeq(LS: LiteStore, resource, id, meth: string): seq[string] = - result = newSeq[string]() + result = newSeq[string]() if LS.config.kind != JObject or not LS.config.hasKey("resources"): - return + return var reqUri = "/" & resource & "/" & id if reqUri[^1] == '/': reqUri.removeSuffix({'/'})@@ -1226,24 +1411,28 @@ var currentPaths = ""
for p in ancestors: currentPath &= "/" & p currentPaths = currentPath & "/*" - if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("middleware"): + if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][ + currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][ + meth].hasKey("middleware"): let mw = LS.config["resources"][currentPaths][meth]["middleware"] if (mw.kind == JArray): for m in mw: result.add m.getStr - if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): + if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][ + reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("middleware"): let mw = LS.config["resources"][reqUri][meth]["middleware"] if (mw.kind == JArray): for m in mw: result.add m.getStr -proc execute*(req: var LSRequest, LS: LiteStore, resource, id: string): LSResponse = +proc execute*(req: var LSRequest, LS: LiteStore, resource, + id: string): LSResponse = let middleware = getMiddlewareSeq(LS, resource, id, $req.reqMethod) if middleware.len > 0: LOG.debug("Middleware: " & middleware.join(" -> ")); if middleware.len == 0: return route(req, LS, resource, id) - var jReq = $(%* req) + var jReq = $( %* req) LOG.debug("Request: " & jReq) var jRes = """{ "code": 200,@@ -1262,6 +1451,7 @@ var ctx = duk_create_heap_default()
duk_console_init(ctx) duk_print_alert_init(ctx) LS.registerStoreApi(ctx, resource, id) + LS.registerHttpApi(ctx) if ctx.duk_peval_string(cstring("($1)" % $jReq)) != 0: return jError(ctx) discard ctx.duk_put_global_string("$req")@@ -1276,7 +1466,7 @@ var i = 0
var abort = 0 while abort != 1 and i < middleware.len: let code = LS.getMiddleware(middleware[i]) - LOG.debug("Evaluating middleware '$1'" % middleware[i]) + LOG.debug("Evaluating middleware '$1'" % middleware[i]) if ctx.duk_peval_string(code.cstring) != 0: return jError(ctx) abort = ctx.duk_get_boolean(-1)
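The $http client registered alongside $store follows the same calling convention from middleware: get, head, and delete take (url, headers), while post, put, and patch take (url, headers, body); each call returns an object whose code and content mirror the HTTP response and whose headers field is the response headers serialized as a JSON string. A sketch of a notification middleware, assuming a hypothetical webhook endpoint:

    // Fire a webhook from middleware; the URL and payload are illustrative.
    var resp = $http.post('https://hooks.example.com/litestore',
      {'Content-Type': 'application/json'},
      JSON.stringify({event: 'request-received'}));
    var respHeaders = JSON.parse(resp.headers); // headers arrive as a JSON string
    false; // never abort: this middleware only notifies an external service
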
@@ -1,12 +1,12 @@
const - pkgName* = "litestore" - pkgVersion* = "1.12.1" - pkgAuthor* = "Fabio Cevasco" + pkgName* = "litestore" + pkgVersion* = "1.13.0" + pkgAuthor* = "Fabio Cevasco" pkgDescription* = "Self-contained, lightweight, RESTful document store." - pkgLicense* = "MIT" - appname* = "LiteStore" + pkgLicense* = "MIT" + appname* = "LiteStore" var - file* = "data.db" - address* = "127.0.0.1" - port* = 9500 + file* = "data.db" + address* = "127.0.0.1" + port* = 9500
@@ -1,15 +1,18 @@
import - x_sqlite3, - x_db_sqlite as db, + db_connector/sqlite3, + db_connector/db_sqlite as db, + std/[ os, + paths, oids, json, pegs, strtabs, strutils, sequtils, + httpclient, base64, - math + math] import types, contenttypes,@@ -52,7 +55,7 @@
proc closeDatastore*(store: Datastore) = try: db.close(store.db) - except: + except CatchableError: raise newException(EDatastoreUnavailable, "Datastore '$1' cannot be closed." % store.path)@@ -61,7 +64,7 @@ try:
if store.path.fileExists(): store.closeDataStore() store.path.removeFile() - except: + except CatchableError: raise newException(EDatastoreUnavailable, "Datastore '$1' cannot destroyed." % store.path)@@ -79,7 +82,7 @@ try:
store.db.exec(SQL_CREATE_SYSTEM_DOCUMENTS_TABLE) store.db.exec(SQL_UPDATE_VERSION, 2) LOG.debug("Done.") - except: + except CatchableError: store.closeDatastore() store.path.removeFile() copyFile(bkp_path, store.path)@@ -106,7 +109,7 @@ discard result.db.tryExec("PRAGMA foreign_keys = ON".sql)
LOG.debug("Done.") result.path = file result.mount = "" - except: + except CatchableError: raise newException(EDatastoreUnavailable, "Datastore '$1' cannot be opened." % file)@@ -136,7 +139,8 @@
# Manage Indexes proc createIndex*(store: Datastore, indexId, field: string) = - let query = sql("CREATE INDEX json_index_$1 ON documents(json_extract(data, ?) COLLATE NOCASE) WHERE json_valid(data)" % [indexId]) + let query = sql("CREATE INDEX json_index_$1 ON documents(json_extract(data, ?) COLLATE NOCASE) WHERE json_valid(data)" % + [indexId]) store.begin() store.db.exec(query, field) store.commit()@@ -147,7 +151,8 @@ store.begin()
store.db.exec(query) store.commit() -proc retrieveIndex*(store: Datastore, id: string, options: QueryOptions = newQueryOptions()): JsonNode = +proc retrieveIndex*(store: Datastore, id: string, + options: QueryOptions = newQueryOptions()): JsonNode = var options = options options.single = true let query = prepareSelectIndexesQuery(options)@@ -167,7 +172,7 @@ if (options.like[options.like.len-1] == '*' and options.like[0] != '*'):
let str = "json_index_" & options.like.substr(0, options.like.len-2) raw_indexes = store.db.getAllRows(query.sql, str, str & "{") else: - let str = "json_index_" & options.like.replace("*", "%") + let str = "json_index_" & options.like.replace("*", "%") raw_indexes = store.db.getAllRows(query.sql, str) else: raw_indexes = store.db.getAllRows(query.sql)@@ -176,7 +181,8 @@ for index in raw_indexes:
var matches: array[0..0, string] let fieldPeg = peg"'CREATE INDEX json_index_test ON documents(json_extract(data, \'' {[^']+}" discard index[1].match(fieldPeg, matches) - indexes.add(%[("id", %index[0].replace("json_index_", "")), ("field", %matches[0])]) + indexes.add(%[("id", %index[0].replace("json_index_", "")), ("field", + %matches[0])]) return %indexes proc countIndexes*(store: Datastore, q = "", like = ""): int64 =@@ -286,7 +292,7 @@ if contenttype == "application/json":
# Validate JSON data try: discard data.parseJson - except: + except CatchableError: raise newException(JsonParsingError, "Invalid JSON content - " & getCurrentExceptionMsg()) if id == "":@@ -318,7 +324,7 @@ raise newException(EFileExists, "File already exists: $1" % filename)
if singleOp: store.commit() return $store.retrieveRawDocument(id) - except: + except CatchableError: store.rollback() eWarn() raise@@ -334,7 +340,7 @@ if contenttype == "application/json":
# Validate JSON data try: discard data.parseJson - except: + except CatchableError: raise newException(JsonParsingError, "Invalid JSON content - " & getCurrentExceptionMsg()) if id == "":@@ -350,7 +356,7 @@ binary, currentTime())
if singleOp: store.commit() return $store.retrieveRawDocument(id) - except: + except CatchableError: store.rollback() eWarn() raise@@ -365,21 +371,21 @@ if contenttype == "application/json":
# Validate JSON data try: discard data.parseJson - except: + except CatchableError: raise newException(JsonParsingError, "Invalid JSON content - " & getCurrentExceptionMsg()) try: LOG.debug("Updating system document '$1'" % id) store.begin() - var res = store.db.execAffectedRows(SQL_UPDATE_SYSTEM_DOCUMENT, data, contenttype, - binary, currentTime(), id) + var res = store.db.execAffectedRows(SQL_UPDATE_SYSTEM_DOCUMENT, data, + contenttype, binary, currentTime(), id) if res > 0: result = $store.retrieveRawDocument(id) else: result = "" if singleOp: store.commit() - except: + except CatchableError: eWarn() store.rollback() raise@@ -394,7 +400,7 @@ if contenttype == "application/json":
# Validate JSON data try: discard data.parseJson - except: + except CatchableError: raise newException(JsonParsingError, "Invalid JSON content - " & getCurrentExceptionMsg()) var searchable = searchable@@ -421,7 +427,7 @@ else:
result = "" if singleOp: store.commit() - except: + except CatchableError: eWarn() store.rollback() raise@@ -446,11 +452,11 @@ else:
raise newException(EFileNotFound, "File not found: $1" % filename) if singleOp: store.commit() - except: + except CatchableError: eWarn() store.rollback() -proc findDocumentId*(store: Datastore, pattern: string): string = +proc findDocumentId*(store: Datastore, pattern: string): string = var select = "SELECT id FROM documents WHERE id LIKE ? ESCAPE '\\' " var raw_document = store.db.getRow(select.sql, pattern) LOG.debug("Retrieving document '$1'" % pattern)@@ -496,7 +502,8 @@
proc countDocuments*(store: Datastore): int64 = return store.db.getRow(SQL_COUNT_DOCUMENTS)[0].parseInt -proc importFile*(store: Datastore, f: string, dir = "/", system = false, notSearchable = false): string = +proc importFile*(store: Datastore, f: string, dir = "/", system = false, + notSearchable = false): string = if not f.fileExists: raise newException(EFileNotFound, "File '$1' not found." % f) let split = f.splitFile@@ -530,7 +537,7 @@ else:
discard store.createDocument(d_id, d_contents, d_ct, d_binary, d_searchable) if dir != "/" and not system: store.db.exec(SQL_INSERT_TAG, "$dir:"&dir, d_id) - except: + except CatchableError: store.rollback() eWarn() raise@@ -544,7 +551,7 @@ store.begin()
try: for tag in tags: store.db.exec(SQL_INSERT_TAG, tag, d_id) - except: + except CatchableError: store.rollback() eWarn() raise@@ -562,7 +569,7 @@ LOG.debug("Optimixing full-text index...")
store.db.exec(SQL_OPTIMIZE) store.commit() LOG.debug("Done") - except: + except CatchableError: eWarn() proc vacuum*(file: string) =@@ -570,7 +577,7 @@ let data = db.open(file, "", "", "")
try: data.exec(SQL_VACUUM) db.close(data) - except: + except CatchableError: eWarn() quit(203) quit(0)@@ -580,21 +587,22 @@ result = newSeq[string]()
let tags_file = f.splitFile.dir / "_tags" if tags_file.fileExists: for tag in tags_file.lines: - result.add(tag) + result.add(tag) -proc importDir*(store: Datastore, dir: string, system = false, importTags = false, notSearchable = false) = +proc importDir*(store: Datastore, dir: string, system = false, + importTags = false, notSearchable = false) = var files = newSeq[string]() if not dir.dirExists: raise newException(EDirectoryNotFound, "Directory '$1' not found." % dir) for f in dir.walkDirRec(): if f.dirExists: continue - let dirs = f.split(DirSep) + let dirs = f.split(DirSep) if dirs.any(proc (s: string): bool = return s.startsWith(".")): # Ignore hidden directories and files - continue - let fileName = f.splitFile.name + continue + let fileName = f.splitFile.name if fileName == "_tags" and not importTags: # Ignore tags file unless the CLI flag was set continue@@ -621,7 +629,7 @@ cBatches.inc
store.commit() LOG.info("Importing batch $1/$2...", cBatches, nBatches) store.begin() - except: + except CatchableError: LOG.warn("Unable to import file: $1", f) eWarn() store.rollback()@@ -685,17 +693,37 @@ LOG.level = lvNone
else: fail(103, "Invalid log level '$1'" % val) -proc processAuthConfig(configuration: JsonNode, auth: var JsonNode) = - if auth == newJNull() and configuration != newJNull() and configuration.hasKey("signature"): - LOG.debug("Authentication: Signature found, processing authentication rules in configuration.") - auth = newJObject(); - auth["access"] = newJObject(); - auth["signature"] = configuration["signature"] - for k, v in configuration["resources"].pairs: - auth["access"][k] = newJObject() +proc downloadJwks*(LS: LiteStore, uri: string) = + let file = LS.jwksPath + let client = newHttpClient() + client.downloadFile(uri, file) + +proc processAuthConfig(LS: var LiteStore) = + if LS.auth == newJNull() and LS.config != newJNull(): + LS.auth = newJObject(); + LS.auth["access"] = newJObject(); + if LS.config.hasKey("jwks_uri"): + LOG.debug("Authentication: Downloading JWKS file.") + try: + LS.downloadJwks(LS.config["jwks_uri"].getStr) + except CatchableError: + LOG.warn "Unable to download JWKS file." + eWarn() + try: + LS.jwks = LS.jwksPath.parseFile + except CatchableError: + LOG.warn "Unable to parse JWKS file." + eWarn() + elif LS.config.hasKey("signature"): + LOG.debug("Authentication: Signature found, processing authentication rules in configuration.") + LS.auth["signature"] = LS.config["signature"].getStr.replace( + "-----BEGIN CERTIFICATE-----\n", "").replace( + "\n-----END CERTIFICATE-----").strip().newJString + for k, v in LS.config["resources"].pairs: + LS.auth["access"][k] = newJObject() for meth, content in v.pairs: if content.hasKey("auth"): - auth["access"][k][meth] = content["auth"] + LS.auth["access"][k][meth] = content["auth"] proc processConfigSettings(LS: var LiteStore) = # Process config settings if present and if no cli settings are set@@ -727,7 +755,7 @@ proc setup*(LS: var LiteStore, open = true) {.gcsafe.} =
if not LS.file.fileExists: try: LS.file.createDatastore() - except: + except CatchableError: eWarn() fail(200, "Unable to create datastore '$1'" % [LS.file]) if (open):@@ -735,15 +763,15 @@ try:
LS.store = LS.file.openDatastore() try: LS.store.upgradeDatastore() - except: + except CatchableError: fail(203, "Unable to upgrade datastore '$1'" % [LS.file]) if LS.mount: try: LS.store.mountDir(LS.directory) - except: + except CatchableError: eWarn() fail(202, "Unable to mount directory '$1'" % [LS.directory]) - except: + except CatchableError: fail(201, "Unable to open datastore '$1'" % [LS.file]) proc initStore*(LS: var LiteStore) =@@ -759,7 +787,7 @@ # Process config settings
LS.processConfigSettings() # Process auth from config settings LOG.debug("Authentication: Checking configuration for auth rules - Store file: " & LS.file) - processAuthConfig(LS.config, LS.auth) + LS.processAuthConfig() if LS.auth == newJNull(): # Attempt to retrieve auth.json from system documents
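The downloadJwks/processAuthConfig changes above fetch the JWKS document once at startup and cache it beside the store file, falling back to the previously cached copy if the download fails. A minimal sketch of that bootstrap, assuming std/httpclient; fetchJwks and the endpoint URL are hypothetical:

```nim
# Sketch of the JWKS bootstrap: refresh "<store-name>_jwks.json" and parse it.
import std/[httpclient, json, os]

proc fetchJwks(storeFile, jwksUri: string): JsonNode =
  let cachePath = getCurrentDir() / storeFile.splitFile.name & "_jwks.json"
  let client = newHttpClient()
  try:
    client.downloadFile(jwksUri, cachePath)  # refresh the cached copy
  except CatchableError:
    discard  # fall back to a previously cached file, if any
  finally:
    client.close()
  result = cachePath.parseFile               # raises if no usable cache exists

when isMainModule:
  # Hypothetical endpoint; replace with your API's jwks_uri.
  let jwks = fetchJwks("data.db", "https://example.com/.well-known/jwks.json")
  echo jwks["keys"].len, " key(s) loaded"
```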
@@ -0,0 +1,142 @@
+import std/[ + openssl, base64, strutils, macros, json, times, pegs, sequtils, os + ] +import types, core + +when defined(windows) and defined(amd64): + {.passL: "-static -L"&getProjectPath()&"/litestorepkg/vendor/openssl/windows -lssl -lcrypto -lbcrypt".} +elif defined(linux) and defined(amd64): + {.passL: "-static -L"&getProjectPath()&"/litestorepkg/vendor/openssl/linux -lssl -lcrypto".} +elif defined(macosx) and defined(amd64): + {.passL: "-Bstatic -L"&getProjectPath()&"/litestorepkg/vendor/openssl/macosx -lssl -lcrypto -Bdynamic".} + + +proc EVP_PKEY_new(): EVP_PKEY {.cdecl, importc.} +proc X509_get_pubkey(cert: PX509): EVP_PKEY {.cdecl, importc.} +proc EVP_DigestVerifyInit(ctx: EVP_MD_CTX; pctx: ptr EVP_PKEY_CTX; typ: EVP_MD; + e: ENGINE; pkey: EVP_PKEY): cint {.cdecl, importc.} +proc EVP_DigestVerifyUpdate(ctx: EVP_MD_CTX; data: pointer; + len: cuint): cint {.cdecl, importc.} +proc EVP_DigestVerifyFinal(ctx: EVP_MD_CTX; data: pointer; + len: cuint): cint {.cdecl, importc.} + +proc getLastError(): string = + return $ERR_error_string(ERR_get_error(), nil) + +proc raiseJwtError(msg: string) = + let err = getLastError() + raise newException(EJwtValidationError, msg&"\n"&err) + +proc raiseX509Error(msg: string) = + let err = getLastError() + raise newException(EX509Error, msg&"\n"&err) + +proc getX5c*(LS: LiteStore; token: JWT): string = + let keys = LS.jwks["keys"] + if token.header.hasKey("kid"): + let kid = token.header["kid"].getStr + return keys.filterIt(it["kid"].getStr == kid)[0]["x5c"][0].getStr + return keys[0]["x5c"][0].getStr + +proc base64UrlDecode(encoded: string): string = + let padding = (4 - encoded.len mod 4) mod 4 + let base64String = encoded.replace("-", "+").replace("_", "/") & repeat("=", padding) + result = base64.decode(base64String) + +proc newJwt*(token: string): JWT = + let parts = token.split(".") + result.token = token + result.payload = parts[0]&"."&parts[1] + result.header = parts[0].base64UrlDecode.parseJson + result.claims = parts[1].base64UrlDecode.parseJson + result.signature = parts[2].base64UrlDecode + +proc verifyTimeClaims*(jwt: JWT) = + let t = now().toTime.toUnix + if jwt.claims.hasKey("nbf") and jwt.claims["nbf"].getInt > t: + raiseJwtError("Token cannot be used yet.") + if jwt.claims.hasKey("exp") and jwt.claims["exp"].getInt < t: + raiseJwtError("Token has expired.") + +proc verifyAlgorithm*(jwt: JWT) = + let alg = jwt.header["alg"].getStr + if alg != "RS256": + raiseJwtError("Algorithm not supported: " & alg) + +proc verifyScope*(jwt: JWT; reqScope: seq[string] = @[]) = + if reqScope.len == 0: + return + var scp = newSeq[string](0) + if jwt.claims.hasKey("scp"): + scp = jwt.claims["scp"].getStr.split(peg"\s") + elif jwt.claims.hasKey("scope"): + scp = jwt.claims["scope"].getStr.split(peg"\s") + if scp.len == 0: + raiseJwtError("No scp or scope claim found in token") + var authorized = "" + for s in scp: + for r in reqScope: + if r == s: + authorized = s + break + if authorized == "": + raise newException(EUnauthorizedError, "Unauthorized") + +proc verifySignature*(jwt: JWT; x5c: string) = + let sig = jwt.signature + let payload = jwt.payload + let cert = x5c.decode + var pkeyctx: EVP_PKEY_CTX + var mdctx: EVP_MD_CTX + let alg = EVP_sha256(); + var x509: PX509 + var pubkey = EVP_PKEY_new() + try: + ### Validate Signature (Only RS256 supported) + x509 = d2i_X509(cert) + if x509.isNil: + raiseX509Error("Invalid X509 certificate") + + pubkey = X509_get_pubkey(x509) + if pubkey.isNil: + raiseX509Error("An error occurred while retrieving the public key") +
+ mdctx = EVP_MD_CTX_create() + if mdctx.isNil: + raiseX509Error("Unable to initialize MD CTX") + + pkeyctx = EVP_PKEY_CTX_new(pubkey, nil) + if pkeyctx.isNil: + raiseX509Error("Unable to initialize PKEY CTX") + + if EVP_DigestVerifyInit(mdctx, addr pkeyctx, alg, nil, pubkey) != 1: + raiseJwtError("Unable to initialize digest verification") + + if EVP_DigestVerifyUpdate(mdctx, addr payload[0], payload.len.cuint) != 1: + raiseJwtError("Unable to update digest verification") + + if EVP_DigestVerifyFinal(mdctx, addr sig[0], sig.len.cuint) != 1: + raiseJwtError("Verification failed") + except CatchableError: + let err = getCurrentException() + if not mdctx.isNil: + EVP_MD_CTX_destroy(mdctx) + if not pkeyctx.isNil: + EVP_PKEY_CTX_free(pkeyctx) + if not pubkey.isNil: + EVP_PKEY_free(pubkey) + if not x509.isNil: + X509_free(x509) + raise err + + +when isMainModule: + + let token = "token.txt".readFile + let x5c = "x5c.cert".readFile + let jwt = token.newJwt + + echo token + echo "---" + echo x5c + jwt.verifySignature(x5c)
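Taken together, the procs in this new module are meant to be called in a fixed order, which the server changes later in this diff follow. A sketch of that sequence, assuming the module is importable as jwt and that the token and x5c strings are already available; validateToken is a hypothetical wrapper:

```nim
# Sketch only: the validation order the auth template applies per request.
import jwt   # the module added in this diff

proc validateToken(token, x5c: string; requiredScope: seq[string]) =
  let t = token.newJwt           # split token, decode header/claims/signature
  t.verifyAlgorithm()            # only RS256 is accepted
  t.verifySignature(x5c)         # EVP_DigestVerify* over "<header>.<payload>"
  t.verifyTimeClaims()           # nbf/exp checked against the current time
  t.verifyScope(requiredScope)   # at least one scp/scope entry must match
```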
@@ -1,4 +1,4 @@
-import x_db_sqlite +import db_connector/db_sqlite # SQL QUERIES
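As elsewhere in this changeset, the vendored x_db_sqlite/x_sqlite3 modules give way to the db_connector package, which exposes the same db_sqlite API. A minimal usage sketch, assuming db_connector has been installed with nimble:

```nim
# Minimal sketch: db_connector/db_sqlite is a drop-in for the removed module.
import db_connector/db_sqlite

let db = open("test.db", "", "", "")  # user/password/database unused by SQLite
db.exec(sql"CREATE TABLE IF NOT EXISTS t (id INTEGER)")
echo db.getValue(sql"SELECT count(*) FROM t")
db.close()
```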
@@ -1,22 +1,19 @@
import asynchttpserver, asyncdispatch, - times, strutils, pegs, logger, cgi, - os, json, tables, strtabs, - base64, asyncnet, - jwt, sequtils -import - types, - utils, +import + types, + utils, + jwt, api_v1, api_v2, api_v3,@@ -27,17 +24,7 @@ api_v7,
api_v8 export - api_v5 - - -proc decodeUrlSafeAsString*(s: string): string = - var s = s.replace('-', '+').replace('_', '/') - while s.len mod 4 > 0: - s &= "=" - base64.decode(s) - -proc decodeUrlSafe*(s: string): seq[byte] = - cast[seq[byte]](decodeUrlSafeAsString(s)) + api_v8 proc getReqInfo(req: LSRequest): string = var url = req.url.path@@ -51,37 +38,44 @@ proc handleCtrlC() {.noconv.} =
echo "" LOG.info("Exiting...") quit() - -template auth(uri: string, jwt: JWT, LS: LiteStore): void = + +template auth(uri: string, LS: LiteStore, jwt: JWT): void = let cfg = access[uri] if cfg.hasKey(reqMethod): LOG.debug("Authenticating: " & reqMethod & " " & uri) - if not req.headers.hasKey("Authorization"): + if not req.headers.hasKey("Authorization"): return resError(Http401, "Unauthorized - No token") let token = req.headers["Authorization"].replace(peg"^ 'Bearer '", "") # Validate token try: - jwt = token.toJwt() - let parts = token.split(".") - var sig = LS.auth["signature"].getStr - discard verifySignature(parts[0] & "." & parts[1], decodeUrlSafe(parts[2]), sig, RS256) - verifyTimeClaims(jwt) - let scopes = cfg[reqMethod] - # Validate scope - var authorized = "" - let reqScopes = ($jwt.claims["scope"].node.str).split(peg"\s+") - LOG.debug("Resource scopes: " & $scopes) - LOG.debug("Request scopes: " & $reqScopes) - for scope in scopes: - for reqScope in reqScopes: - if reqScope == scope.getStr: - authorized = scope.getStr - break - if authorized == "": - return resError(Http403, "Forbidden - You are not permitted to access this resource") - LOG.debug("Authorization successful: " & authorized) - except: - echo getCurrentExceptionMsg() + jwt = token.newJwt + var x5c: string + if LS.config.hasKey("jwks_uri"): + LOG.debug("Selecting x5c...") + x5c = LS.getX5c(jwt) + else: + LOG.debug("Using stored signature...") + x5c = LS.config["signature"].getStr + LOG.debug("Verifying algorithm...") + jwt.verifyAlgorithm() + LOG.debug("Verifying signature...") + try: + jwt.verifySignature(x5c) + except EX509Error: + LOG.warn getCurrentExceptionMsg() + writeStackTrace() + LOG.debug("Verifying claims...") + jwt.verifyTimeClaims() + let scope = cfg[reqMethod].mapIt(it.getStr) + LOG.debug("Verifying scope...") + jwt.verifyScope(scope) + LOG.debug("Authorization successful") + except EUnauthorizedError: + LOG.warn getCurrentExceptionMsg() + writeStackTrace() + return resError(Http403, "Forbidden - You are not permitted to access this resource") + except CatchableError: + LOG.warn getCurrentExceptionMsg() writeStackTrace() return resError(Http401, "Unauthorized - Invalid token")@@ -100,17 +94,21 @@ var currentPaths = ""
for p in ancestors: currentPath &= "/" & p currentPaths = currentPath & "/*" - if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][meth].hasKey("allowed"): + if LS.config["resources"].hasKey(currentPaths) and LS.config["resources"][ + currentPaths].hasKey(meth) and LS.config["resources"][currentPaths][ + meth].hasKey("allowed"): let allowed = LS.config["resources"][currentPaths][meth]["allowed"] if (allowed == %false): return false; - if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("allowed"): + if LS.config["resources"].hasKey(reqUri) and LS.config["resources"][ + reqUri].hasKey(meth) and LS.config["resources"][reqUri][meth].hasKey("allowed"): let allowed = LS.config["resources"][reqUri][meth]["allowed"] if (allowed == %false): return false return true -proc processApiUrl(req: LSRequest, LS: LiteStore, info: ResourceInfo): LSResponse = +proc processApiUrl(req: LSRequest, LS: LiteStore, + info: ResourceInfo): LSResponse = var reqUri = "/" & info.resource & "/" & info.id if reqUri[^1] == '/': reqUri.removeSuffix({'/'})@@ -125,12 +123,12 @@ let access = LS.auth["access"]
while true: # Match exact url if access.hasKey(uri): - auth(uri, jwt, LS) + auth(uri, LS, jwt) break # Match exact url adding /* (e.g. /docs would match also /docs/* in auth.json) elif uri[^1] != '*' and uri[^1] != '/': if access.hasKey(uri & "/*"): - auth(uri & "/*", jwt, LS) + auth(uri & "/*", LS, jwt) break var parts = uri.split("/") if parts[^1] == "*":@@ -143,7 +141,7 @@ else:
# If at the end of the URL, check generic URL uri = "/*" if access.hasKey(uri): - auth(uri, jwt, LS) + auth(uri, LS, jwt) break if info.version == "v8": if info.resource.match(peg"^assets / docs / info / tags / indexes / stores$"):@@ -235,7 +233,8 @@ return resError(Http400, "Bad Request - Not serving any directory." % info.version)
else: return resError(Http404, "Resource Not Found: $1" % info.resource) else: - if info.version == "v1" or info.version == "v2" or info.version == "v3" or info.version == "v4" or info.version == "v5": + if info.version == "v1" or info.version == "v2" or info.version == "v3" or + info.version == "v4" or info.version == "v5": return resError(Http400, "Bad Request - Invalid API version: $1" % info.version) else: if info.resource.decodeURL.strip == "":@@ -243,7 +242,7 @@ return resError(Http400, "Bad Request - No resource specified." % info.resource)
else: return resError(Http404, "Resource Not Found: $1" % info.resource) -proc process*(req: LSRequest, LS: LiteStore): LSResponse {.gcsafe.}= +proc process*(req: LSRequest, LS: LiteStore): LSResponse {.gcsafe.} = var matches = @["", "", ""] template route(req: LSRequest, peg: Peg, op: untyped): untyped = if req.url.path.find(peg, matches) != -1:@@ -273,14 +272,17 @@ raise newException(EInvalidRequest, req.getReqInfo())
except EInvalidRequest: let e = (ref EInvalidRequest)(getCurrentException()) let trace = e.getStackTrace() - return resError(Http404, "Resource Not Found: $1" % getCurrentExceptionMsg().split(" ")[2], trace) - except: + return resError(Http404, "Resource Not Found: $1" % getCurrentExceptionMsg( + ).split(" ")[2], trace) + except CatchableError: let e = getCurrentException() let trace = e.getStackTrace() - return resError(Http500, "Internal Server Error: $1" % getCurrentExceptionMsg(), trace) + return resError(Http500, "Internal Server Error: $1" % + getCurrentExceptionMsg(), trace) -proc process*(req: LSRequest, LSDICT: OrderedTable[string, LiteStore]): LSResponse {.gcsafe.}= +proc process*(req: LSRequest, LSDICT: OrderedTable[string, + LiteStore]): LSResponse {.gcsafe.} = var matches = @["", ""] if req.url.path.find(PEG_STORE_URL, matches) != -1: let id = matches[0]@@ -335,7 +337,8 @@ LOG.info(getReqInfo(req).replace("$", "$$"))
let res = req.process(LSDICT) var newReq = newRequest(req, client) await newReq.respond(res.code, res.content, res.headers) - echo(LS.appname & " v" & LS.appversion & " started on " & LS.address & ":" & $LS.port & ".") + echo(LS.appname & " v" & LS.appversion & " started on " & LS.address & ":" & + $LS.port & ".") printCfg("master") let storeIds = toSeq(LSDICT.keys) if (storeIds.len > 1):
@@ -1,21 +1,21 @@
-import - x_db_sqlite, - asynchttpserver, +import + db_connector/db_sqlite, + std/[asynchttpserver, asyncnet, uri, - pegs, + pegs, json, strtabs, + os, strutils, sequtils, nativesockets, - jwt, - tables + tables] import config type - EDatastoreExists* = object of CatchableError + EDatastoreExists* = object of CatchableError EDatastoreDoesNotExist* = object of CatchableError EDatastoreUnavailable* = object of CatchableError EInvalidTag* = object of CatchableError@@ -23,6 +23,9 @@ EDirectoryNotFound* = object of CatchableError
EFileNotFound* = object of CatchableError EFileExists* = object of CatchableError EInvalidRequest* = object of CatchableError + EJwtValidationError* = object of CatchableError + EUnauthorizedError* = object of CatchableError + EX509Error* = object of CatchableError ConfigFiles* = object auth*: string config*: string@@ -41,8 +44,8 @@ tables*: seq[string]
jsonFilter*: string jsonSelect*: seq[tuple[path: string, alias: string]] select*: seq[string] - single*:bool - system*:bool + single*: bool + system*: bool limit*: int offset*: int orderby*: string@@ -59,6 +62,12 @@ tag*: string
startswith*: bool endswith*: bool negated*: bool + JWT* = object + header*: JsonNode + claims*: JsonNode + signature*: string + payload*: string + token*: string Operation* = enum opRun, opImport,@@ -83,6 +92,7 @@ port*: int
operation*: Operation config*: JsonNode configFile*: string + jwks*: JsonNode cliSettings*: JsonNode directory*: string manageSystemData*: bool@@ -96,15 +106,15 @@ middleware*: StringTableRef
appversion*: string auth*: JsonNode authFile*: string - favicon*:string - loglevel*:string + favicon*: string + loglevel*: string LSRequest* = object reqMethod*: HttpMethod headers*: HttpHeaders protocol*: tuple[orig: string, major, minor: int] url*: Uri jwt*: JWT - hostname*: string + hostname*: string body*: string LSResponse* = object code*: HttpCode@@ -116,7 +126,10 @@ id: string,
version: string ] -proc initLiteStore*(): LiteStore = +proc jwksPath*(LS: LiteStore): string = + return "$#/$#_jwks.json" % [getCurrentDir(), LS.file.splitFile.name] + +proc initLiteStore*(): LiteStore = result.config = newJNull() result.configFile = "" result.cliSettings = newJObject()@@ -147,7 +160,7 @@ of "DELETE":
return HttpDelete else: return HttpGet - + proc `%`*(protocol: tuple[orig: string, major: int, minor: int]): JsonNode = result = newJObject()@@ -201,7 +214,7 @@ result.headers = newHttpHeaders()
for k, v in req["headers"].pairs: result.headers[k] = v.getStr let protocol = req["protocol"].getStr - let parts = protocol.split("/") + let parts = protocol.split("/") let version = parts[1].split(".") result.protocol = (orig: parts[0], major: version[0].parseInt, minor: version[1].parseInt) result.url = initUri()@@ -232,8 +245,8 @@
var PEG_TAG* {.threadvar.}: Peg PEG_USER_TAG* {.threadvar.}: Peg - PEG_INDEX* {.threadvar}: Peg - PEG_STORE* {.threadvar}: Peg + PEG_INDEX* {.threadvar.}: Peg + PEG_STORE* {.threadvar.}: Peg PEG_JSON_FIELD* {.threadvar.}: Peg PEG_DEFAULT_URL* {.threadvar.}: Peg PEG_STORE_URL* {.threadvar.}: Peg@@ -252,7 +265,7 @@ # Initialize LiteStore
var LS* {.threadvar.}: LiteStore var LSDICT* {.threadvar.}: OrderedTable[string, LiteStore] var TAB_HEADERS* {.threadvar.}: array[0..2, (string, string)] -LSDICT = initOrderedTable[string, LiteStore]() +LSDICT = initOrderedTable[string, LiteStore]() LS.appversion = pkgVersion LS.appname = appname@@ -264,9 +277,14 @@ "Server": LS.appname & "/" & LS.appversion
} proc newQueryOptions*(system = false): QueryOptions = - var select = @["documents.id AS id", "documents.data AS data", "content_type", "binary", "searchable", "created", "modified"] + var select = @["documents.id AS id", "documents.data AS data", "content_type", + "binary", "searchable", "created", "modified"] if system: - select = @["system_documents.id AS id", "system_documents.data AS data", "content_type", "binary", "created", "modified"] + select = @["system_documents.id AS id", "system_documents.data AS data", + "content_type", "binary", "created", "modified"] return QueryOptions(select: select, - single: false, limit: 0, offset: 0, orderby: "", tags: "", search: "", folder: "", like: "", system: system, - createdAfter: "", createdBefore: "", modifiedAfter: "", modifiedBefore: "", jsonFilter: "", jsonSelect: newSeq[tuple[path: string, alias: string]](), tables: newSeq[string]()) + single: false, limit: 0, offset: 0, orderby: "", tags: "", search: "", + folder: "", like: "", system: system, + createdAfter: "", createdBefore: "", modifiedAfter: "", modifiedBefore: "", + jsonFilter: "", jsonSelect: newSeq[tuple[path: string, alias: string]](), + tables: newSeq[string]())
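For reference, the jwksPath helper introduced above resolves the JWKS cache file name from the store file name; a tiny example with a hypothetical store file:

```nim
# Hypothetical example of the cache-file naming used by jwksPath:
import std/os

let file = "data.db"                       # store file name
echo getCurrentDir() / file.splitFile.name & "_jwks.json"
# => <current dir>/data_jwks.json
```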
@@ -1,6 +1,6 @@
import - x_sqlite3, - x_db_sqlite, + db_connector/sqlite3, + db_connector/db_sqlite, json, strutils, pegs,@@ -248,7 +248,7 @@ var tags = peg"""'<' [^>]+ '>'"""
var special_chars = peg"""\*\*+ / \_\_+ / \-\-+ / \#\#+ / \+\++ / \~\~+ / \`\`+ """ try: json = s.parseJson() - except: + except CatchableError: discard if not json.isNil: if json.kind == JObject:@@ -324,7 +324,7 @@ var e = getCurrentException()
LOG.warn(e.msg) LOG.debug(getStackTrace(e)) -proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse = +proc validate*(req: LSRequest, LS: LiteStore, resource: string, id: string, cb: proc(req: LSRequest, LS: LiteStore, resource: string, id: string):LSResponse): LSResponse {.gcsafe.} = if req.reqMethod == HttpPost or req.reqMethod == HttpPut or req.reqMethod == HttpPatch: var ct = "" let body = req.body.strip@@ -336,7 +336,7 @@ case ct:
of "application/json": try: discard body.parseJson() - except: + except CatchableError: return resError(Http400, "Invalid JSON content - $1" % getCurrentExceptionMsg()) else: discard
@@ -1,638 +0,0 @@
-# -# -# Nim's Runtime Library -# (c) Copyright 2015 Andreas Rumpf -# -# See the file "copying.txt", included in this -# distribution, for details about the copyright. -# - -## A higher level `SQLite`:idx: database wrapper. This interface -## is implemented for other databases too. -## -## Basic usage -## =========== -## -## The basic flow of using this module is: -## -## 1. Open database connection -## 2. Execute SQL query -## 3. Close database connection -## -## Parameter substitution -## ---------------------- -## -## All ``db_*`` modules support the same form of parameter substitution. -## That is, using the ``?`` (question mark) to signify the place where a -## value should be placed. For example: -## -## .. code-block:: Nim -## -## sql"INSERT INTO my_table (colA, colB, colC) VALUES (?, ?, ?)" -## -## Opening a connection to a database -## ---------------------------------- -## -## .. code-block:: Nim -## -## import db_sqlite -## -## # user, password, database name can be empty. -## # These params are not used on db_sqlite module. -## let db = open("mytest.db", "", "", "") -## db.close() -## -## Creating a table -## ---------------- -## -## .. code-block:: Nim -## -## db.exec(sql"DROP TABLE IF EXISTS my_table") -## db.exec(sql"""CREATE TABLE my_table ( -## id INTEGER, -## name VARCHAR(50) NOT NULL -## )""") -## -## Inserting data -## -------------- -## -## .. code-block:: Nim -## -## db.exec(sql"INSERT INTO my_table (id, name) VALUES (0, ?)", -## "Jack") -## -## Larger example -## -------------- -## -## .. code-block:: nim -## -## import db_sqlite, math -## -## let db = open("mytest.db", "", "", "") -## -## db.exec(sql"DROP TABLE IF EXISTS my_table") -## db.exec(sql"""CREATE TABLE my_table ( -## id INTEGER PRIMARY KEY, -## name VARCHAR(50) NOT NULL, -## i INT(11), -## f DECIMAL(18, 10) -## )""") -## -## db.exec(sql"BEGIN") -## for i in 1..1000: -## db.exec(sql"INSERT INTO my_table (name, i, f) VALUES (?, ?, ?)", -## "Item#" & $i, i, sqrt(i.float)) -## db.exec(sql"COMMIT") -## -## for x in db.fastRows(sql"SELECT * FROM my_table"): -## echo x -## -## let id = db.tryInsertId(sql"""INSERT INTO my_table (name, i, f) -## VALUES (?, ?, ?)""", -## "Item#1001", 1001, sqrt(1001.0)) -## echo "Inserted item: ", db.getValue(sql"SELECT name FROM my_table WHERE id=?", id) -## -## db.close() -## -## See also -## ======== -## -## * `db_odbc module <db_odbc.html>`_ for ODBC database wrapper -## * `db_mysql module <db_mysql.html>`_ for MySQL database wrapper -## * `db_postgres module <db_postgres.html>`_ for PostgreSQL database wrapper - -{.deadCodeElim: on.} # dce option deprecated - -import x_sqlite3 as sqlite3 # h3rald - -import db_common -export db_common - -type - DbConn* = PSqlite3 ## Encapsulates a database connection. - Row* = seq[string] ## A row of a dataset. `NULL` database values will be - ## converted to an empty string. - InstantRow* = PStmt ## A handle that can be used to get a row's column - ## text on demand. - -proc dbError*(db: DbConn) {.noreturn.} = - ## Raises a `DbError` exception. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## if not db.tryExec(sql"SELECT * FROM not_exist_table"): - ## dbError(db) - ## db.close() - var e: ref DbError - new(e) - e.msg = $sqlite3.errmsg(db) - raise e - -proc dbQuote*(s: string): string = - ## Escapes the `'` (single quote) char to `''`. - ## Because single quote is used for defining `VARCHAR` in SQL. 
- runnableExamples: - doAssert dbQuote("'") == "''''" - doAssert dbQuote("A Foobar's pen.") == "'A Foobar''s pen.'" - - result = "'" - for c in items(s): - if c == '\'': add(result, "''") - else: add(result, c) - add(result, '\'') - -proc dbFormat(formatstr: SqlQuery, args: varargs[string]): string = - result = "" - var a = 0 - for c in items(string(formatstr)): - if c == '?': - add(result, dbQuote(args[a])) - inc(a) - else: - add(result, c) - -proc tryExec*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): bool {. - tags: [ReadDbEffect, WriteDbEffect].} = - ## Tries to execute the query and returns `true` if successful, `false` otherwise. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## if not db.tryExec(sql"SELECT * FROM my_table"): - ## dbError(db) - ## db.close() - assert(not db.isNil, "Database not connected.") - var q = dbFormat(query, args) - var stmt: sqlite3.PStmt - if prepare_v2(db, q.cstring, q.cstring.len.cint, stmt, nil) == SQLITE_OK: - let x = step(stmt) - if x in {SQLITE_DONE, SQLITE_ROW}: - result = finalize(stmt) == SQLITE_OK - -proc exec*(db: DbConn, query: SqlQuery, args: varargs[string, `$`]) {. - tags: [ReadDbEffect, WriteDbEffect].} = - ## Executes the query and raises a `DbError` exception if not successful. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## try: - ## db.exec(sql"INSERT INTO my_table (id, name) VALUES (?, ?)", - ## 1, "item#1") - ## except: - ## stderr.writeLine(getCurrentExceptionMsg()) - ## finally: - ## db.close() - if not tryExec(db, query, args): dbError(db) - -proc newRow(L: int): Row = - newSeq(result, L) - for i in 0..L-1: result[i] = "" - -proc setupQuery(db: DbConn, query: SqlQuery, - args: varargs[string]): PStmt = - assert(not db.isNil, "Database not connected.") - var q = dbFormat(query, args) - if prepare_v2(db, q.cstring, q.len.cint, result, nil) != SQLITE_OK: dbError(db) - -proc setRow(stmt: PStmt, r: var Row, cols: cint) = - for col in 0'i32..cols-1: - setLen(r[col], column_bytes(stmt, col)) # set capacity - setLen(r[col], 0) - let x = column_text(stmt, col) - if not isNil(x): add(r[col], x) - -iterator fastRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = - ## Executes the query and iterates over the result dataset. - ## - ## This is very fast, but potentially dangerous. Use this iterator only - ## if you require **ALL** the rows. - ## - ## **Note:** Breaking the `fastRows()` iterator during a loop will cause the - ## next database query to raise a `DbError` exception ``unable to close due - ## to ...``. - ## - ## **Examples:** - ## - ## .. 
code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## for row in db.fastRows(sql"SELECT id, name FROM my_table"): - ## echo row - ## - ## # Output: - ## # @["1", "item#1"] - ## # @["2", "item#2"] - ## - ## db.close() - var stmt = setupQuery(db, query, args) - var L = (column_count(stmt)) - var result = newRow(L) - try: - while step(stmt) == SQLITE_ROW: - setRow(stmt, result, L) - yield result - finally: - if finalize(stmt) != SQLITE_OK: dbError(db) - -iterator instantRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): InstantRow - {.tags: [ReadDbEffect].} = - ## Similar to `fastRows iterator <#fastRows.i,DbConn,SqlQuery,varargs[string,]>`_ - ## but returns a handle that can be used to get column text - ## on demand using `[]`. Returned handle is valid only within the iterator body. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## for row in db.instantRows(sql"SELECT * FROM my_table"): - ## echo "id:" & row[0] - ## echo "name:" & row[1] - ## echo "length:" & $len(row) - ## - ## # Output: - ## # id:1 - ## # name:item#1 - ## # length:2 - ## # id:2 - ## # name:item#2 - ## # length:2 - ## - ## db.close() - var stmt = setupQuery(db, query, args) - try: - while step(stmt) == SQLITE_ROW: - yield stmt - finally: - if finalize(stmt) != SQLITE_OK: dbError(db) - -proc toTypeKind(t: var DbType; x: int32) = - case x - of SQLITE_INTEGER: - t.kind = dbInt - t.size = 8 - of SQLITE_FLOAT: - t.kind = dbFloat - t.size = 8 - of SQLITE_BLOB: t.kind = dbBlob - of SQLITE_NULL: t.kind = dbNull - of SQLITE_TEXT: t.kind = dbVarchar - else: t.kind = dbUnknown - -proc setColumns(columns: var DbColumns; x: PStmt) = - let L = column_count(x) - setLen(columns, L) - for i in 0'i32 ..< L: - columns[i].name = $column_name(x, i) - columns[i].typ.name = $column_decltype(x, i) - toTypeKind(columns[i].typ, column_type(x, i)) - columns[i].tableName = $column_table_name(x, i) - -iterator instantRows*(db: DbConn; columns: var DbColumns; query: SqlQuery, - args: varargs[string, `$`]): InstantRow - {.tags: [ReadDbEffect].} = - ## Similar to `instantRows iterator <#instantRows.i,DbConn,SqlQuery,varargs[string,]>`_, - ## but sets information about columns to `columns`. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## var columns: DbColumns - ## for row in db.instantRows(columns, sql"SELECT * FROM my_table"): - ## discard - ## echo columns[0] - ## - ## # Output: - ## # (name: "id", tableName: "my_table", typ: (kind: dbNull, - ## # notNull: false, name: "INTEGER", size: 0, maxReprLen: 0, precision: 0, - ## # scale: 0, min: 0, max: 0, validValues: @[]), primaryKey: false, - ## # foreignKey: false) - ## - ## db.close() - var stmt = setupQuery(db, query, args) - setColumns(columns, stmt) - try: - while step(stmt) == SQLITE_ROW: - yield stmt - finally: - if finalize(stmt) != SQLITE_OK: dbError(db) - -proc `[]`*(row: InstantRow, col: int32): string {.inline.} = - ## Returns text for given column of the row. 
- ## - ## See also: - ## * `instantRows iterator <#instantRows.i,DbConn,SqlQuery,varargs[string,]>`_ - ## example code - $column_text(row, col) - -proc unsafeColumnAt*(row: InstantRow, index: int32): cstring {.inline.} = - ## Returns cstring for given column of the row. - ## - ## See also: - ## * `instantRows iterator <#instantRows.i,DbConn,SqlQuery,varargs[string,]>`_ - ## example code - column_text(row, index) - -proc len*(row: InstantRow): int32 {.inline.} = - ## Returns number of columns in a row. - ## - ## See also: - ## * `instantRows iterator <#instantRows.i,DbConn,SqlQuery,varargs[string,]>`_ - ## example code - column_count(row) - -proc getRow*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = - ## Retrieves a single row. If the query doesn't return any rows, this proc - ## will return a `Row` with empty strings for each column. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## doAssert db.getRow(sql"SELECT id, name FROM my_table" - ## ) == Row(@["1", "item#1"]) - ## doAssert db.getRow(sql"SELECT id, name FROM my_table WHERE id = ?", - ## 2) == Row(@["2", "item#2"]) - ## - ## # Returns empty. - ## doAssert db.getRow(sql"INSERT INTO my_table (id, name) VALUES (?, ?)", - ## 3, "item#3") == @[] - ## doAssert db.getRow(sql"DELETE FROM my_table WHERE id = ?", 3) == @[] - ## doAssert db.getRow(sql"UPDATE my_table SET name = 'ITEM#1' WHERE id = ?", - ## 1) == @[] - ## db.close() - var stmt = setupQuery(db, query, args) - var L = (column_count(stmt)) - result = newRow(L) - if step(stmt) == SQLITE_ROW: - setRow(stmt, result, L) - if finalize(stmt) != SQLITE_OK: dbError(db) - -proc getAllRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): seq[Row] {.tags: [ReadDbEffect].} = - ## Executes the query and returns the whole result dataset. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## doAssert db.getAllRows(sql"SELECT id, name FROM my_table") == @[Row(@["1", "item#1"]), Row(@["2", "item#2"])] - ## db.close() - result = @[] - for r in fastRows(db, query, args): - result.add(r) - -iterator rows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): Row {.tags: [ReadDbEffect].} = - ## Similar to `fastRows iterator <#fastRows.i,DbConn,SqlQuery,varargs[string,]>`_, - ## but slower and safe. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## for row in db.rows(sql"SELECT id, name FROM my_table"): - ## echo row - ## - ## ## Output: - ## ## @["1", "item#1"] - ## ## @["2", "item#2"] - ## - ## db.close() - for r in fastRows(db, query, args): yield r - -proc getValue*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): string {.tags: [ReadDbEffect].} = - ## Executes the query and returns the first column of the first row of the - ## result dataset. Returns `""` if the dataset contains no rows or the database - ## value is `NULL`. - ## - ## **Examples:** - ## - ## .. 
code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## doAssert db.getValue(sql"SELECT name FROM my_table WHERE id = ?", - ## 2) == "item#2" - ## doAssert db.getValue(sql"SELECT id, name FROM my_table") == "1" - ## doAssert db.getValue(sql"SELECT name, id FROM my_table") == "item#1" - ## - ## db.close() - var stmt = setupQuery(db, query, args) - if step(stmt) == SQLITE_ROW: - let cb = column_bytes(stmt, 0) - if cb == 0: - result = "" - else: - result = newStringOfCap(cb) - add(result, column_text(stmt, 0)) - else: - result = "" - if finalize(stmt) != SQLITE_OK: dbError(db) - -proc tryInsertID*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): int64 - {.tags: [WriteDbEffect], raises: [].} = - ## Executes the query (typically "INSERT") and returns the - ## generated ID for the row or -1 in case of an error. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## db.exec(sql"CREATE TABLE my_table (id INTEGER, name VARCHAR(50) NOT NULL)") - ## - ## doAssert db.tryInsertID(sql"INSERT INTO not_exist_table (id, name) VALUES (?, ?)", - ## 1, "item#1") == -1 - ## db.close() - assert(not db.isNil, "Database not connected.") - var q = dbFormat(query, args) - var stmt: sqlite3.PStmt - result = -1 - if prepare_v2(db, q.cstring, q.cstring.len.cint, stmt, nil) == SQLITE_OK: - if step(stmt) == SQLITE_DONE: - result = last_insert_rowid(db) - if finalize(stmt) != SQLITE_OK: - result = -1 - -proc insertID*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): int64 {.tags: [WriteDbEffect].} = - ## Executes the query (typically "INSERT") and returns the - ## generated ID for the row. - ## - ## Raises a `DbError` exception when failed to insert row. - ## For Postgre this adds ``RETURNING id`` to the query, so it only works - ## if your primary key is named ``id``. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## db.exec(sql"CREATE TABLE my_table (id INTEGER, name VARCHAR(50) NOT NULL)") - ## - ## for i in 0..2: - ## let id = db.insertID(sql"INSERT INTO my_table (id, name) VALUES (?, ?)", i, "item#" & $i) - ## echo "LoopIndex = ", i, ", InsertID = ", id - ## - ## # Output: - ## # LoopIndex = 0, InsertID = 1 - ## # LoopIndex = 1, InsertID = 2 - ## # LoopIndex = 2, InsertID = 3 - ## - ## db.close() - result = tryInsertID(db, query, args) - if result < 0: dbError(db) - -proc execAffectedRows*(db: DbConn, query: SqlQuery, - args: varargs[string, `$`]): int64 {. - tags: [ReadDbEffect, WriteDbEffect].} = - ## Executes the query (typically "UPDATE") and returns the - ## number of affected rows. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## - ## # Records of my_table: - ## # | id | name | - ## # |----|----------| - ## # | 1 | item#1 | - ## # | 2 | item#2 | - ## - ## doAssert db.execAffectedRows(sql"UPDATE my_table SET name = 'TEST'") == 2 - ## - ## db.close() - exec(db, query, args) - result = changes(db) - -proc close*(db: DbConn) {.tags: [DbEffect].} = - ## Closes the database connection. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## let db = open("mytest.db", "", "", "") - ## db.close() - if sqlite3.close(db) != SQLITE_OK: dbError(db) - -proc open*(connection, user, password, database: string): DbConn {. - tags: [DbEffect].} = - ## Opens a database connection. 
Raises a `DbError` exception if the connection - ## could not be established. - ## - ## **Note:** Only the ``connection`` parameter is used for ``sqlite``. - ## - ## **Examples:** - ## - ## .. code-block:: Nim - ## - ## try: - ## let db = open("mytest.db", "", "", "") - ## ## do something... - ## ## db.getAllRows(sql"SELECT * FROM my_table") - ## db.close() - ## except: - ## stderr.writeLine(getCurrentExceptionMsg()) - var db: DbConn - if sqlite3.open(connection, db) == SQLITE_OK: - result = db - else: - dbError(db) - -proc setEncoding*(connection: DbConn, encoding: string): bool {. - tags: [DbEffect].} = - ## Sets the encoding of a database connection, returns `true` for - ## success, `false` for failure. - ## - ## **Note:** The encoding cannot be changed once it's been set. - ## According to SQLite3 documentation, any attempt to change - ## the encoding after the database is created will be silently - ## ignored. - exec(connection, sql"PRAGMA encoding = ?", [encoding]) - result = connection.getValue(sql"PRAGMA encoding") == encoding - -when not defined(testing) and isMainModule: - var db = open("db.sql", "", "", "") - exec(db, sql"create table tbl1(one varchar(10), two smallint)", []) - exec(db, sql"insert into tbl1 values('hello!',10)", []) - exec(db, sql"insert into tbl1 values('goodbye', 20)", []) - #db.query("create table tbl1(one varchar(10), two smallint)") - #db.query("insert into tbl1 values('hello!',10)") - #db.query("insert into tbl1 values('goodbye', 20)") - for r in db.rows(sql"select * from tbl1", []): - echo(r[0], r[1]) - for r in db.instantRows(sql"select * from tbl1", []): - echo(r[0], r[1]) - - x_db_sqlite.close(db) # h3rald
@@ -1,379 +0,0 @@
-# -# -# Nim's Runtime Library -# (c) Copyright 2012 Andreas Rumpf -# -# See the file "copying.txt", included in this -# distribution, for details about the copyright. -# - -{.deadCodeElim: on.} # dce option deprecated - -when defined(nimHasStyleChecks): - {.push styleChecks: off.} - -# START - Removed by h3rald -#when defined(windows): -# when defined(nimOldDlls): -# const Lib = "sqlite3.dll" -# elif defined(cpu64): -# const Lib = "sqlite3_64.dll" -# else: -# const Lib = "sqlite3_32.dll" -#elif defined(macosx): -# const -# Lib = "libsqlite3(|.0).dylib" -#else: -# const -# Lib = "libsqlite3.so(|.0)" -# -#when defined(staticSqlite): -{.pragma: mylib.} -# {.compile: "sqlite3.c".} -#else: -# {.pragma: mylib, dynlib: Lib.} -# END - Removed by h3rald - -const - SQLITE_INTEGER* = 1 - SQLITE_FLOAT* = 2 - SQLITE_BLOB* = 4 - SQLITE_NULL* = 5 - SQLITE_TEXT* = 3 - SQLITE_UTF8* = 1 - SQLITE_UTF16LE* = 2 - SQLITE_UTF16BE* = 3 # Use native byte order - SQLITE_UTF16* = 4 # sqlite3_create_function only - SQLITE_ANY* = 5 #sqlite_exec return values - SQLITE_OK* = 0 - SQLITE_ERROR* = 1 # SQL error or missing database - SQLITE_INTERNAL* = 2 # An internal logic error in SQLite - SQLITE_PERM* = 3 # Access permission denied - SQLITE_ABORT* = 4 # Callback routine requested an abort - SQLITE_BUSY* = 5 # The database file is locked - SQLITE_LOCKED* = 6 # A table in the database is locked - SQLITE_NOMEM* = 7 # A malloc() failed - SQLITE_READONLY* = 8 # Attempt to write a readonly database - SQLITE_INTERRUPT* = 9 # Operation terminated by sqlite3_interrupt() - SQLITE_IOERR* = 10 # Some kind of disk I/O error occurred - SQLITE_CORRUPT* = 11 # The database disk image is malformed - SQLITE_NOTFOUND* = 12 # (Internal Only) Table or record not found - SQLITE_FULL* = 13 # Insertion failed because database is full - SQLITE_CANTOPEN* = 14 # Unable to open the database file - SQLITE_PROTOCOL* = 15 # Database lock protocol error - SQLITE_EMPTY* = 16 # Database is empty - SQLITE_SCHEMA* = 17 # The database schema changed - SQLITE_TOOBIG* = 18 # Too much data for one row of a table - SQLITE_CONSTRAINT* = 19 # Abort due to constraint violation - SQLITE_MISMATCH* = 20 # Data type mismatch - SQLITE_MISUSE* = 21 # Library used incorrectly - SQLITE_NOLFS* = 22 # Uses OS features not supported on host - SQLITE_AUTH* = 23 # Authorization denied - SQLITE_FORMAT* = 24 # Auxiliary database format error - SQLITE_RANGE* = 25 # 2nd parameter to sqlite3_bind out of range - SQLITE_NOTADB* = 26 # File opened that is not a database file - SQLITE_ROW* = 100 # sqlite3_step() has another row ready - SQLITE_DONE* = 101 # sqlite3_step() has finished executing - SQLITE_COPY* = 0 - SQLITE_CREATE_INDEX* = 1 - SQLITE_CREATE_TABLE* = 2 - SQLITE_CREATE_TEMP_INDEX* = 3 - SQLITE_CREATE_TEMP_TABLE* = 4 - SQLITE_CREATE_TEMP_TRIGGER* = 5 - SQLITE_CREATE_TEMP_VIEW* = 6 - SQLITE_CREATE_TRIGGER* = 7 - SQLITE_CREATE_VIEW* = 8 - SQLITE_DELETE* = 9 - SQLITE_DROP_INDEX* = 10 - SQLITE_DROP_TABLE* = 11 - SQLITE_DROP_TEMP_INDEX* = 12 - SQLITE_DROP_TEMP_TABLE* = 13 - SQLITE_DROP_TEMP_TRIGGER* = 14 - SQLITE_DROP_TEMP_VIEW* = 15 - SQLITE_DROP_TRIGGER* = 16 - SQLITE_DROP_VIEW* = 17 - SQLITE_INSERT* = 18 - SQLITE_PRAGMA* = 19 - SQLITE_READ* = 20 - SQLITE_SELECT* = 21 - SQLITE_TRANSACTION* = 22 - SQLITE_UPDATE* = 23 - SQLITE_ATTACH* = 24 - SQLITE_DETACH* = 25 - SQLITE_ALTER_TABLE* = 26 - SQLITE_REINDEX* = 27 - SQLITE_DENY* = 1 - SQLITE_IGNORE* = 2 # Original from sqlite3.h: - #define SQLITE_STATIC ((void(*)(void *))0) - #define SQLITE_TRANSIENT ((void(*)(void *))-1) - 
SQLITE_DETERMINISTIC* = 0x800 - -type - Sqlite3 {.pure, final.} = object - PSqlite3* = ptr Sqlite3 - PPSqlite3* = ptr PSqlite3 - Context{.pure, final.} = object - Pcontext* = ptr Context - TStmt{.pure, final.} = object - PStmt* = ptr TStmt - Value{.pure, final.} = object - PValue* = ptr Value - PValueArg* = array[0..127, PValue] - - Callback* = proc (para1: pointer, para2: int32, para3, - para4: cstringArray): int32{.cdecl.} - Tbind_destructor_func* = proc (para1: pointer){.cdecl, locks: 0, tags: [], gcsafe.} - Create_function_step_func* = proc (para1: Pcontext, para2: int32, - para3: PValueArg){.cdecl.} - Create_function_func_func* = proc (para1: Pcontext, para2: int32, - para3: PValueArg){.cdecl.} - Create_function_final_func* = proc (para1: Pcontext){.cdecl.} - Result_func* = proc (para1: pointer){.cdecl.} - Create_collation_func* = proc (para1: pointer, para2: int32, para3: pointer, - para4: int32, para5: pointer): int32{.cdecl.} - Collation_needed_func* = proc (para1: pointer, para2: PSqlite3, eTextRep: int32, - para4: cstring){.cdecl.} - -const - SQLITE_STATIC* = nil - SQLITE_TRANSIENT* = cast[Tbind_destructor_func](-1) - -proc close*(para1: PSqlite3): int32{.cdecl, mylib, importc: "sqlite3_close".} -proc exec*(para1: PSqlite3, sql: cstring, para3: Callback, para4: pointer, - errmsg: var cstring): int32{.cdecl, mylib, - importc: "sqlite3_exec".} -proc last_insert_rowid*(para1: PSqlite3): int64{.cdecl, mylib, - importc: "sqlite3_last_insert_rowid".} -proc changes*(para1: PSqlite3): int32{.cdecl, mylib, importc: "sqlite3_changes".} -proc total_changes*(para1: PSqlite3): int32{.cdecl, mylib, - importc: "sqlite3_total_changes".} -proc interrupt*(para1: PSqlite3){.cdecl, mylib, importc: "sqlite3_interrupt".} -proc complete*(sql: cstring): int32{.cdecl, mylib, - importc: "sqlite3_complete".} -proc complete16*(sql: pointer): int32{.cdecl, mylib, - importc: "sqlite3_complete16".} -proc busy_handler*(para1: PSqlite3, - para2: proc (para1: pointer, para2: int32): int32{.cdecl.}, - para3: pointer): int32{.cdecl, mylib, - importc: "sqlite3_busy_handler".} -proc busy_timeout*(para1: PSqlite3, ms: int32): int32{.cdecl, mylib, - importc: "sqlite3_busy_timeout".} -proc get_table*(para1: PSqlite3, sql: cstring, resultp: var cstringArray, - nrow, ncolumn: var cint, errmsg: ptr cstring): int32{.cdecl, - mylib, importc: "sqlite3_get_table".} -proc free_table*(result: cstringArray){.cdecl, mylib, - importc: "sqlite3_free_table".} - # Todo: see how translate sqlite3_mprintf, sqlite3_vmprintf, sqlite3_snprintf - # function sqlite3_mprintf(_para1:Pchar; args:array of const):Pchar;cdecl; external Sqlite3Lib name 'sqlite3_mprintf'; -proc mprintf*(para1: cstring): cstring{.cdecl, varargs, mylib, - importc: "sqlite3_mprintf".} - #function sqlite3_vmprintf(_para1:Pchar; _para2:va_list):Pchar;cdecl; external Sqlite3Lib name 'sqlite3_vmprintf'; -proc free*(z: cstring){.cdecl, mylib, importc: "sqlite3_free".} - #function sqlite3_snprintf(_para1:longint; _para2:Pchar; _para3:Pchar; args:array of const):Pchar;cdecl; external Sqlite3Lib name 'sqlite3_snprintf'; -proc snprintf*(para1: int32, para2: cstring, para3: cstring): cstring{.cdecl, - mylib, varargs, importc: "sqlite3_snprintf".} -proc set_authorizer*(para1: PSqlite3, xAuth: proc (para1: pointer, para2: int32, - para3: cstring, para4: cstring, para5: cstring, para6: cstring): int32{. 
- cdecl.}, pUserData: pointer): int32{.cdecl, mylib, - importc: "sqlite3_set_authorizer".} -proc trace*(para1: PSqlite3, xTrace: proc (para1: pointer, para2: cstring){.cdecl.}, - para3: pointer): pointer{.cdecl, mylib, - importc: "sqlite3_trace".} -proc progress_handler*(para1: PSqlite3, para2: int32, - para3: proc (para1: pointer): int32{.cdecl.}, - para4: pointer){.cdecl, mylib, - importc: "sqlite3_progress_handler".} -proc commit_hook*(para1: PSqlite3, para2: proc (para1: pointer): int32{.cdecl.}, - para3: pointer): pointer{.cdecl, mylib, - importc: "sqlite3_commit_hook".} -proc open*(filename: cstring, ppDb: var PSqlite3): int32{.cdecl, mylib, - importc: "sqlite3_open".} -proc open16*(filename: pointer, ppDb: var PSqlite3): int32{.cdecl, mylib, - importc: "sqlite3_open16".} -proc errcode*(db: PSqlite3): int32{.cdecl, mylib, importc: "sqlite3_errcode".} -proc errmsg*(para1: PSqlite3): cstring{.cdecl, mylib, importc: "sqlite3_errmsg".} -proc errmsg16*(para1: PSqlite3): pointer{.cdecl, mylib, - importc: "sqlite3_errmsg16".} -proc prepare*(db: PSqlite3, zSql: cstring, nBytes: int32, ppStmt: var PStmt, - pzTail: ptr cstring): int32{.cdecl, mylib, - importc: "sqlite3_prepare".} - -proc prepare_v2*(db: PSqlite3, zSql: cstring, nByte: cint, ppStmt: var PStmt, - pzTail: ptr cstring): cint {. - importc: "sqlite3_prepare_v2", cdecl, mylib.} - -proc prepare16*(db: PSqlite3, zSql: pointer, nBytes: int32, ppStmt: var PStmt, - pzTail: var pointer): int32{.cdecl, mylib, - importc: "sqlite3_prepare16".} -proc bind_blob*(para1: PStmt, para2: int32, para3: pointer, n: int32, - para5: Tbind_destructor_func): int32{.cdecl, mylib, - importc: "sqlite3_bind_blob".} -proc bind_double*(para1: PStmt, para2: int32, para3: float64): int32{.cdecl, - mylib, importc: "sqlite3_bind_double".} -proc bind_int*(para1: PStmt, para2: int32, para3: int32): int32{.cdecl, - mylib, importc: "sqlite3_bind_int".} -proc bind_int64*(para1: PStmt, para2: int32, para3: int64): int32{.cdecl, - mylib, importc: "sqlite3_bind_int64".} -proc bind_null*(para1: PStmt, para2: int32): int32{.cdecl, mylib, - importc: "sqlite3_bind_null".} -proc bind_text*(para1: PStmt, para2: int32, para3: cstring, n: int32, - para5: Tbind_destructor_func): int32{.cdecl, mylib, - importc: "sqlite3_bind_text".} -proc bind_text16*(para1: PStmt, para2: int32, para3: pointer, para4: int32, - para5: Tbind_destructor_func): int32{.cdecl, mylib, - importc: "sqlite3_bind_text16".} - #function sqlite3_bind_value(_para1:Psqlite3_stmt; _para2:longint; _para3:Psqlite3_value):longint;cdecl; external Sqlite3Lib name 'sqlite3_bind_value'; - #These overloaded functions were introduced to allow the use of SQLITE_STATIC and SQLITE_TRANSIENT - #It's the c world man ;-) -proc bind_blob*(para1: PStmt, para2: int32, para3: pointer, n: int32, - para5: int32): int32{.cdecl, mylib, - importc: "sqlite3_bind_blob".} -proc bind_text*(para1: PStmt, para2: int32, para3: cstring, n: int32, - para5: int32): int32{.cdecl, mylib, - importc: "sqlite3_bind_text".} -proc bind_text16*(para1: PStmt, para2: int32, para3: pointer, para4: int32, - para5: int32): int32{.cdecl, mylib, - importc: "sqlite3_bind_text16".} -proc bind_parameter_count*(para1: PStmt): int32{.cdecl, mylib, - importc: "sqlite3_bind_parameter_count".} -proc bind_parameter_name*(para1: PStmt, para2: int32): cstring{.cdecl, - mylib, importc: "sqlite3_bind_parameter_name".} -proc bind_parameter_index*(para1: PStmt, zName: cstring): int32{.cdecl, - mylib, importc: "sqlite3_bind_parameter_index".} -proc clear_bindings*(para1: PStmt): 
int32 {.cdecl,
-    mylib, importc: "sqlite3_clear_bindings".}
-proc column_count*(PStmt: PStmt): int32{.cdecl, mylib,
-    importc: "sqlite3_column_count".}
-proc column_name*(para1: PStmt, para2: int32): cstring{.cdecl, mylib,
-    importc: "sqlite3_column_name".}
-proc column_table_name*(para1: PStmt; para2: int32): cstring{.cdecl, mylib,
-    importc: "sqlite3_column_table_name".}
-proc column_name16*(para1: PStmt, para2: int32): pointer{.cdecl, mylib,
-    importc: "sqlite3_column_name16".}
-proc column_decltype*(para1: PStmt, i: int32): cstring{.cdecl, mylib,
-    importc: "sqlite3_column_decltype".}
-proc column_decltype16*(para1: PStmt, para2: int32): pointer{.cdecl,
-    mylib, importc: "sqlite3_column_decltype16".}
-proc step*(para1: PStmt): int32{.cdecl, mylib, importc: "sqlite3_step".}
-proc data_count*(PStmt: PStmt): int32{.cdecl, mylib,
-    importc: "sqlite3_data_count".}
-proc column_blob*(para1: PStmt, iCol: int32): pointer{.cdecl, mylib,
-    importc: "sqlite3_column_blob".}
-proc column_bytes*(para1: PStmt, iCol: int32): int32{.cdecl, mylib,
-    importc: "sqlite3_column_bytes".}
-proc column_bytes16*(para1: PStmt, iCol: int32): int32{.cdecl, mylib,
-    importc: "sqlite3_column_bytes16".}
-proc column_double*(para1: PStmt, iCol: int32): float64{.cdecl, mylib,
-    importc: "sqlite3_column_double".}
-proc column_int*(para1: PStmt, iCol: int32): int32{.cdecl, mylib,
-    importc: "sqlite3_column_int".}
-proc column_int64*(para1: PStmt, iCol: int32): int64{.cdecl, mylib,
-    importc: "sqlite3_column_int64".}
-proc column_text*(para1: PStmt, iCol: int32): cstring{.cdecl, mylib,
-    importc: "sqlite3_column_text".}
-proc column_text16*(para1: PStmt, iCol: int32): pointer{.cdecl, mylib,
-    importc: "sqlite3_column_text16".}
-proc column_type*(para1: PStmt, iCol: int32): int32{.cdecl, mylib,
-    importc: "sqlite3_column_type".}
-proc finalize*(PStmt: PStmt): int32{.cdecl, mylib,
-    importc: "sqlite3_finalize".}
-proc reset*(PStmt: PStmt): int32{.cdecl, mylib, importc: "sqlite3_reset".}
-proc create_function*(para1: PSqlite3, zFunctionName: cstring, nArg: int32,
-                      eTextRep: int32, para5: pointer,
-                      xFunc: Create_function_func_func,
-                      xStep: Create_function_step_func,
-                      xFinal: Create_function_final_func): int32{.cdecl,
-    mylib, importc: "sqlite3_create_function".}
-proc create_function16*(para1: PSqlite3, zFunctionName: pointer, nArg: int32,
-                        eTextRep: int32, para5: pointer,
-                        xFunc: Create_function_func_func,
-                        xStep: Create_function_step_func,
-                        xFinal: Create_function_final_func): int32{.cdecl,
-    mylib, importc: "sqlite3_create_function16".}
-proc aggregate_count*(para1: Pcontext): int32{.cdecl, mylib,
-    importc: "sqlite3_aggregate_count".}
-proc value_blob*(para1: PValue): pointer{.cdecl, mylib,
-    importc: "sqlite3_value_blob".}
-proc value_bytes*(para1: PValue): int32{.cdecl, mylib,
-    importc: "sqlite3_value_bytes".}
-proc value_bytes16*(para1: PValue): int32{.cdecl, mylib,
-    importc: "sqlite3_value_bytes16".}
-proc value_double*(para1: PValue): float64{.cdecl, mylib,
-    importc: "sqlite3_value_double".}
-proc value_int*(para1: PValue): int32{.cdecl, mylib,
-    importc: "sqlite3_value_int".}
-proc value_int64*(para1: PValue): int64{.cdecl, mylib,
-    importc: "sqlite3_value_int64".}
-proc value_text*(para1: PValue): cstring{.cdecl, mylib,
-    importc: "sqlite3_value_text".}
-proc value_text16*(para1: PValue): pointer{.cdecl, mylib,
-    importc: "sqlite3_value_text16".}
-proc value_text16le*(para1: PValue): pointer{.cdecl, mylib,
-    importc: "sqlite3_value_text16le".}
-proc value_text16be*(para1: PValue): pointer{.cdecl, mylib,
-    importc: "sqlite3_value_text16be".}
-proc value_type*(para1: PValue): int32{.cdecl, mylib,
-    importc: "sqlite3_value_type".}
-proc aggregate_context*(para1: Pcontext, nBytes: int32): pointer{.cdecl,
-    mylib, importc: "sqlite3_aggregate_context".}
-proc user_data*(para1: Pcontext): pointer{.cdecl, mylib,
-    importc: "sqlite3_user_data".}
-proc get_auxdata*(para1: Pcontext, para2: int32): pointer{.cdecl, mylib,
-    importc: "sqlite3_get_auxdata".}
-proc set_auxdata*(para1: Pcontext, para2: int32, para3: pointer,
-                  para4: proc (para1: pointer){.cdecl.}){.cdecl, mylib,
-    importc: "sqlite3_set_auxdata".}
-proc result_blob*(para1: Pcontext, para2: pointer, para3: int32,
-                  para4: Result_func){.cdecl, mylib,
-    importc: "sqlite3_result_blob".}
-proc result_double*(para1: Pcontext, para2: float64){.cdecl, mylib,
-    importc: "sqlite3_result_double".}
-proc result_error*(para1: Pcontext, para2: cstring, para3: int32){.cdecl,
-    mylib, importc: "sqlite3_result_error".}
-proc result_error16*(para1: Pcontext, para2: pointer, para3: int32){.cdecl,
-    mylib, importc: "sqlite3_result_error16".}
-proc result_int*(para1: Pcontext, para2: int32){.cdecl, mylib,
-    importc: "sqlite3_result_int".}
-proc result_int64*(para1: Pcontext, para2: int64){.cdecl, mylib,
-    importc: "sqlite3_result_int64".}
-proc result_null*(para1: Pcontext){.cdecl, mylib,
-    importc: "sqlite3_result_null".}
-proc result_text*(para1: Pcontext, para2: cstring, para3: int32,
-                  para4: Result_func){.cdecl, mylib,
-    importc: "sqlite3_result_text".}
-proc result_text16*(para1: Pcontext, para2: pointer, para3: int32,
-                    para4: Result_func){.cdecl, mylib,
-    importc: "sqlite3_result_text16".}
-proc result_text16le*(para1: Pcontext, para2: pointer, para3: int32,
-                      para4: Result_func){.cdecl, mylib,
-    importc: "sqlite3_result_text16le".}
-proc result_text16be*(para1: Pcontext, para2: pointer, para3: int32,
-                      para4: Result_func){.cdecl, mylib,
-    importc: "sqlite3_result_text16be".}
-proc result_value*(para1: Pcontext, para2: PValue){.cdecl, mylib,
-    importc: "sqlite3_result_value".}
-proc create_collation*(para1: PSqlite3, zName: cstring, eTextRep: int32,
-                       para4: pointer, xCompare: Create_collation_func): int32{.
-    cdecl, mylib, importc: "sqlite3_create_collation".}
-proc create_collation16*(para1: PSqlite3, zName: cstring, eTextRep: int32,
-                         para4: pointer, xCompare: Create_collation_func): int32{.
-    cdecl, mylib, importc: "sqlite3_create_collation16".}
-proc collation_needed*(para1: PSqlite3, para2: pointer, para3: Collation_needed_func): int32{.
-    cdecl, mylib, importc: "sqlite3_collation_needed".}
-proc collation_needed16*(para1: PSqlite3, para2: pointer, para3: Collation_needed_func): int32{.
-    cdecl, mylib, importc: "sqlite3_collation_needed16".}
-proc libversion*(): cstring{.cdecl, mylib, importc: "sqlite3_libversion".}
-  #Alias for allowing better code portability (win32 is not working with external variables)
-proc version*(): cstring{.cdecl, mylib, importc: "sqlite3_libversion".}
-  # Not published functions
-proc libversion_number*(): int32{.cdecl, mylib,
-    importc: "sqlite3_libversion_number".}
-  #function sqlite3_key(db:Psqlite3; pKey:pointer; nKey:longint):longint;cdecl; external Sqlite3Lib name 'sqlite3_key';
-  #function sqlite3_rekey(db:Psqlite3; pKey:pointer; nKey:longint):longint;cdecl; external Sqlite3Lib name 'sqlite3_rekey';
-  #function sqlite3_sleep(_para1:longint):longint;cdecl; external Sqlite3Lib name 'sqlite3_sleep';
-  #function sqlite3_expired(_para1:Psqlite3_stmt):longint;cdecl; external Sqlite3Lib name 'sqlite3_expired';
-  #function sqlite3_global_recover:longint;cdecl; external Sqlite3Lib name 'sqlite3_global_recover';
-# implementation
-
-when defined(nimHasStyleChecks):
-  {.pop.}
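The deleted wrapper exposed sqlite3_libversion twice (as `libversion` and as `version`) because, per its own comment, Win32 builds could not import the `sqlite3_version` variable directly. The same runtime check remains useful for verifying which amalgamation actually got linked after the upgrade below; a minimal C sketch:

#include <stdio.h>
#include "sqlite3.h"

int main(void) {
  /* Compile-time header version vs. run-time library version:
     a mismatch usually means a stale sqlite3.c/sqlite3.h pair was linked. */
  printf("header : %s (%d)\n", SQLITE_VERSION, SQLITE_VERSION_NUMBER);
  printf("library: %s (%d)\n", sqlite3_libversion(), sqlite3_libversion_number());
  return sqlite3_libversion_number() == SQLITE_VERSION_NUMBER ? 0 : 1;
}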
@@ -1,6 +1,6 @@
/****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.40.0. By combining all the individual C code files into this +** version 3.44.2. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements@@ -16,6 +16,9 @@ ** of the embedded sqlite3.h header file.) Additional code files may be needed
** if you want a wrapper to interface SQLite with your choice of programming ** language. The code for the "sqlite3" command-line shell is also in a ** separate file. This file contains only code for the core SQLite library. +** +** The content in this amalgamation comes from Fossil check-in +** ebead0e7230cd33bcec9f95d2183069565b9. */ #define SQLITE_CORE 1 #define SQLITE_AMALGAMATION 1@@ -50,11 +53,11 @@ ** measured by branch coverage. This is
** used on lines of code that actually ** implement parts of coverage testing. ** -** OPTIMIZATION-IF-TRUE - This branch is allowed to alway be false +** OPTIMIZATION-IF-TRUE - This branch is allowed to always be false ** and the correct answer is still obtained, ** though perhaps more slowly. ** -** OPTIMIZATION-IF-FALSE - This branch is allowed to alway be true +** OPTIMIZATION-IF-FALSE - This branch is allowed to always be true ** and the correct answer is still obtained, ** though perhaps more slowly. **@@ -123,6 +126,10 @@ #undef SQLITE_4_BYTE_ALIGNED_MALLOC
#define SQLITE_4_BYTE_ALIGNED_MALLOC #endif /* defined(_MSC_VER) && !defined(_WIN64) */ +#if !defined(HAVE_LOG2) && defined(_MSC_VER) && _MSC_VER<1800 +#define HAVE_LOG2 0 +#endif /* !defined(HAVE_LOG2) && defined(_MSC_VER) && _MSC_VER<1800 */ + #endif /* SQLITE_MSVC_H */ /************** End of msvc.h ************************************************/@@ -452,9 +459,9 @@ ** See also: [sqlite3_libversion()],
** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.40.0" -#define SQLITE_VERSION_NUMBER 3040000 -#define SQLITE_SOURCE_ID "2022-11-16 12:10:08 89c459e766ea7e9165d0beeb124708b955a4950d0f4792f457465d71b158d318" +#define SQLITE_VERSION "3.44.2" +#define SQLITE_VERSION_NUMBER 3044002 +#define SQLITE_SOURCE_ID "2023-11-24 11:41:44 ebead0e7230cd33bcec9f95d2183069565b9e709bf745c9b5db65cc0cbf92c0f" /* ** CAPI3REF: Run-Time Library Version Numbers@@ -834,6 +841,7 @@ #define SQLITE_IOERR_COMMIT_ATOMIC (SQLITE_IOERR | (30<<8))
#define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8)) #define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8)) #define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8)) +#define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8)) #define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) #define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8)) #define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))@@ -869,6 +877,7 @@ #define SQLITE_CONSTRAINT_PINNED (SQLITE_CONSTRAINT |(11<<8))
#define SQLITE_CONSTRAINT_DATATYPE (SQLITE_CONSTRAINT |(12<<8)) #define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) #define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) +#define SQLITE_NOTICE_RBU (SQLITE_NOTICE | (3<<8)) #define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) #define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) #define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8))@@ -1481,7 +1490,6 @@ ** The [SQLITE_FCNTL_CKPT_DONE] opcode is invoked from within a checkpoint
** in wal mode after the client has finished copying pages from the wal ** file to the database file, but before the *-shm file is updated to ** record the fact that the pages have been checkpointed. -** </ul> ** ** <li>[[SQLITE_FCNTL_EXTERNAL_READER]] ** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect@@ -1494,10 +1502,16 @@ ** currently has an SQL transaction open on the database. It is set to 0 if
** the database is not a wal-mode db, or if there is no such connection in any ** other process. This opcode cannot be used to detect transactions opened ** by clients within the current process, only within other processes. -** </ul> ** ** <li>[[SQLITE_FCNTL_CKSM_FILE]] -** Used by the cksmvfs VFS module only. +** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the +** [checksum VFS shim] only. +** +** <li>[[SQLITE_FCNTL_RESET_CACHE]] +** If there is currently no transaction open on the database, and the +** database is not a temp db, then the [SQLITE_FCNTL_RESET_CACHE] file-control +** purges the contents of the in-memory page cache. If there is an open +** transaction, or if the db is a temp-db, this opcode is a no-op, not an error. ** </ul> */ #define SQLITE_FCNTL_LOCKSTATE 1@@ -1540,6 +1554,7 @@ #define SQLITE_FCNTL_RESERVE_BYTES 38
#define SQLITE_FCNTL_CKPT_START 39 #define SQLITE_FCNTL_EXTERNAL_READER 40 #define SQLITE_FCNTL_CKSM_FILE 41 +#define SQLITE_FCNTL_RESET_CACHE 42 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE@@ -1954,19 +1969,22 @@ ** <b>The sqlite3_config() interface is not threadsafe. The application
** must ensure that no other SQLite interfaces are invoked by other ** threads while sqlite3_config() is running.</b> ** -** The sqlite3_config() interface +** The first argument to sqlite3_config() is an integer +** [configuration option] that determines +** what property of SQLite is to be configured. Subsequent arguments +** vary depending on the [configuration option] +** in the first argument. +** +** For most configuration options, the sqlite3_config() interface ** may only be invoked prior to library initialization using ** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. +** The exceptional configuration options that may be invoked at any time +** are called "anytime configuration options". ** ^If sqlite3_config() is called after [sqlite3_initialize()] and before -** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. +** [sqlite3_shutdown()] with a first argument that is not an anytime +** configuration option, then the sqlite3_config() call will return SQLITE_MISUSE. ** Note, however, that ^sqlite3_config() can be called as part of the ** implementation of an application-defined [sqlite3_os_init()]. -** -** The first argument to sqlite3_config() is an integer -** [configuration option] that determines -** what property of SQLite is to be configured. Subsequent arguments -** vary depending on the [configuration option] -** in the first argument. ** ** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. ** ^If the option is unknown or SQLite is unable to set the option@@ -2074,6 +2092,23 @@ ** KEYWORDS: {configuration option}
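A short sketch of the "anytime configuration option" rule spelled out in the hunk above: after sqlite3_initialize(), SQLITE_CONFIG_LOG is still accepted, while switching the threading mode is refused with SQLITE_MISUSE.

#include <stdio.h>
#include "sqlite3.h"

static void log_cb(void *pArg, int code, const char *msg) {
  (void)pArg;
  fprintf(stderr, "sqlite(%d): %s\n", code, msg);
}

int main(void) {
  int rc1 = sqlite3_config(SQLITE_CONFIG_MULTITHREAD);  /* before init: SQLITE_OK */
  sqlite3_initialize();
  int rc2 = sqlite3_config(SQLITE_CONFIG_LOG, log_cb, (void*)0); /* "anytime": SQLITE_OK */
  int rc3 = sqlite3_config(SQLITE_CONFIG_SERIALIZED);   /* after init: SQLITE_MISUSE */
  printf("%d %d %d\n", rc1, rc2, rc3);
  sqlite3_shutdown();
  return 0;
}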
** ** These constants are the available integer configuration options that ** can be passed as the first argument to the [sqlite3_config()] interface. +** +** Most of the configuration options for sqlite3_config() +** will only work if invoked prior to [sqlite3_initialize()] or after +** [sqlite3_shutdown()]. The few exceptions to this rule are called +** "anytime configuration options". +** ^Calling [sqlite3_config()] with a first argument that is not an +** anytime configuration option in between calls to [sqlite3_initialize()] and +** [sqlite3_shutdown()] is a no-op that returns SQLITE_MISUSE. +** +** The set of anytime configuration options can change (by insertions +** and/or deletions) from one release of SQLite to the next. +** As of SQLite version 3.42.0, the complete set of anytime configuration +** options is: +** <ul> +** <li> SQLITE_CONFIG_LOG +** <li> SQLITE_CONFIG_PCACHE_HDRSZ +** </ul> ** ** New configuration options may be added in future releases of SQLite. ** Existing configuration options might be discontinued. Applications@@ -2405,7 +2440,7 @@ ** than the configured sorter-reference size threshold - then a reference
** is stored in each sorted record and the required column values loaded ** from the database as records are returned in sorted order. The default ** value for this option is to never use this optimization. Specifying a -** negative value for this option restores the default behaviour. +** negative value for this option restores the default behavior. ** This option is only available if SQLite is compiled with the ** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option. **@@ -2421,28 +2456,28 @@ ** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that
** compile-time option is not set, then the default maximum is 1073741824. ** </dl> */ -#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ -#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ -#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ -#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ -#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ -#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ -#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ -#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ -#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ -/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ -#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ -#define SQLITE_CONFIG_PCACHE 14 /* no-op */ -#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ -#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ -#define SQLITE_CONFIG_URI 17 /* int */ -#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ +#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ +#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ +#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ +#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ +#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ +#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ +#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ +#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ +#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ +#define SQLITE_CONFIG_PCACHE 14 /* no-op */ +#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ +#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ +#define SQLITE_CONFIG_URI 17 /* int */ +#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ #define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ -#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ -#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ +#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ +#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ #define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ #define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */ #define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */@@ -2483,7 +2518,7 @@ ** rounded down to the next smaller multiple of 8. ^(The lookaside memory
** configuration for a database connection can only be changed when that ** connection is not currently using lookaside memory, or in other words ** when the "current value" returned by -** [sqlite3_db_status](D,[SQLITE_CONFIG_LOOKASIDE],...) is zero. +** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero. ** Any attempt to change the lookaside memory configuration when lookaside ** memory is in use leaves the configuration unchanged and returns ** [SQLITE_BUSY].)^</dd>@@ -2580,7 +2615,7 @@ ** <dd> Usually, when a database in wal mode is closed or detached from a
** database handle, SQLite checks if this will mean that there are now no ** connections at all to the database. If so, it performs a checkpoint ** operation before closing the connection. This option may be used to -** override this behaviour. The first parameter passed to this operation +** override this behavior. The first parameter passed to this operation ** is an integer - positive to disable checkpoints-on-close, or zero (the ** default) to enable them, and negative to leave the setting unchanged. ** The second parameter is a pointer to an integer@@ -2633,8 +2668,12 @@ ** <li> [sqlite3_exec](db, "[VACUUM]", 0, 0, 0);
** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0); ** </ol> ** Because resetting a database is destructive and irreversible, the -** process requires the use of this obscure API and multiple steps to help -** ensure that it does not happen by accident. +** process requires the use of this obscure API and multiple steps to +** help ensure that it does not happen by accident. Because this +** feature must be capable of resetting corrupt databases, and +** shutting down virtual tables may require access to that corrupt +** storage, the library must abandon any installed virtual tables +** without calling their xDestroy() methods. ** ** [[SQLITE_DBCONFIG_DEFENSIVE]] <dt>SQLITE_DBCONFIG_DEFENSIVE</dt> ** <dd>The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the@@ -2673,7 +2712,7 @@ ** using the [PRAGMA legacy_alter_table] statement.
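The destructive reset sequence listed above, wrapped in a helper. A sketch only; it assumes `db` is a valid open connection and stops at the first failing step.

#include "sqlite3.h"

/* Erase everything in the database, per the documented three-step sequence. */
int reset_database(sqlite3 *db) {
  int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 1, 0);
  if (rc == SQLITE_OK) rc = sqlite3_exec(db, "VACUUM", 0, 0, 0);
  if (rc == SQLITE_OK) rc = sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0);
  return rc;
}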
** </dd> ** ** [[SQLITE_DBCONFIG_DQS_DML]] -** <dt>SQLITE_DBCONFIG_DQS_DML</td> +** <dt>SQLITE_DBCONFIG_DQS_DML</dt> ** <dd>The SQLITE_DBCONFIG_DQS_DML option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DML statements ** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The@@ -2682,7 +2721,7 @@ ** compile-time option.
** </dd> ** ** [[SQLITE_DBCONFIG_DQS_DDL]] -** <dt>SQLITE_DBCONFIG_DQS_DDL</td> +** <dt>SQLITE_DBCONFIG_DQS_DDL</dt> ** <dd>The SQLITE_DBCONFIG_DQS option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DDL statements, ** such as CREATE TABLE and CREATE INDEX. The@@ -2691,7 +2730,7 @@ ** compile-time option.
** </dd> ** ** [[SQLITE_DBCONFIG_TRUSTED_SCHEMA]] -** <dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA</td> +** <dt>SQLITE_DBCONFIG_TRUSTED_SCHEMA</dt> ** <dd>The SQLITE_DBCONFIG_TRUSTED_SCHEMA option tells SQLite to ** assume that database schemas are untainted by malicious content. ** When the SQLITE_DBCONFIG_TRUSTED_SCHEMA option is disabled, SQLite@@ -2711,7 +2750,7 @@ ** can also be controlled using the [PRAGMA trusted_schema] statement.
** </dd> ** ** [[SQLITE_DBCONFIG_LEGACY_FILE_FORMAT]] -** <dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT</td> +** <dt>SQLITE_DBCONFIG_LEGACY_FILE_FORMAT</dt> ** <dd>The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates ** the legacy file format flag. When activated, this flag causes all newly ** created database file to have a schema format version number (the 4-byte@@ -2720,7 +2759,7 @@ ** means that the resulting database file will be readable and writable by
** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting, ** newly created databases are generally not understandable by SQLite versions ** prior to 3.3.0 ([dateof:3.3.0]). As these words are written, there -** is now scarcely any need to generated database files that are compatible +** is now scarcely any need to generate database files that are compatible ** all the way back to version 3.0.0, and so this setting is of little ** practical use, but is provided so that SQLite can continue to claim the ** ability to generate new database files that are compatible with version@@ -2729,8 +2768,40 @@ ** <p>Note that when the SQLITE_DBCONFIG_LEGACY_FILE_FORMAT setting is on,
** the [VACUUM] command will fail with an obscure error when attempting to ** process a table with generated columns and a descending index. This is ** not considered a bug since SQLite versions 3.3.0 and earlier do not support -** either generated columns or decending indexes. +** either generated columns or descending indexes. ** </dd> +** +** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]] +** <dt>SQLITE_DBCONFIG_STMT_SCANSTATUS</dt> +** <dd>The SQLITE_DBCONFIG_STMT_SCANSTATUS option is only useful in +** SQLITE_ENABLE_STMT_SCANSTATUS builds. In this case, it sets or clears +** a flag that enables collection of the sqlite3_stmt_scanstatus_v2() +** statistics. For statistics to be collected, the flag must be set on +** the database handle both when the SQL statement is prepared and when it +** is stepped. The flag is set (collection of statistics is enabled) +** by default. This option takes two arguments: an integer and a pointer to +** an integer.. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the statement scanstatus option. If the second argument +** is not NULL, then the value of the statement scanstatus setting after +** processing the first argument is written into the integer that the second +** argument points to. +** </dd> +** +** [[SQLITE_DBCONFIG_REVERSE_SCANORDER]] +** <dt>SQLITE_DBCONFIG_REVERSE_SCANORDER</dt> +** <dd>The SQLITE_DBCONFIG_REVERSE_SCANORDER option changes the default order +** in which tables and indexes are scanned so that the scans start at the end +** and work toward the beginning rather than starting at the beginning and +** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the +** same as setting [PRAGMA reverse_unordered_selects]. This option takes +** two arguments which are an integer and a pointer to an integer. The first +** argument is 1, 0, or -1 to enable, disable, or leave unchanged the +** reverse scan order flag, respectively. If the second argument is not NULL, +** then 0 or 1 is written into the integer that the second argument points to +** depending on if the reverse scan order flag is set after processing the +** first argument. +** </dd> +** ** </dl> */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */@@ -2751,7 +2822,9 @@ #define SQLITE_DBCONFIG_DQS_DDL 1014 /* int int* */
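Most SQLITE_DBCONFIG_* verbs above share the same (int, int*) shape: pass 1 or 0 to change the setting, -1 to leave it alone, and read the resulting state back through the pointer. A sketch using the newly added SQLITE_DBCONFIG_REVERSE_SCANORDER:

#include <stdio.h>
#include "sqlite3.h"

void enable_reverse_scans(sqlite3 *db) {
  int now = -1;
  /* Equivalent to PRAGMA reverse_unordered_selects, per the text above. */
  sqlite3_db_config(db, SQLITE_DBCONFIG_REVERSE_SCANORDER, 1, &now);
  printf("reverse scan order: %d\n", now);
}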
#define SQLITE_DBCONFIG_ENABLE_VIEW 1015 /* int int* */ #define SQLITE_DBCONFIG_LEGACY_FILE_FORMAT 1016 /* int int* */ #define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */ -#define SQLITE_DBCONFIG_MAX 1017 /* Largest DBCONFIG */ +#define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */ +#define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */ +#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */ /* ** CAPI3REF: Enable Or Disable Extended Result Codes@@ -2973,8 +3046,13 @@ ** not effected by the sqlite3_interrupt().
** ^A call to sqlite3_interrupt(D) that occurs when there are no running
** SQL statements is a no-op and has no effect on SQL statements
** that are started after the sqlite3_interrupt() call returns.
+**
+** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether
+** or not an interrupt is currently in effect for [database connection] D.
+** It returns 1 if an interrupt is currently in effect, or 0 otherwise.
*/
SQLITE_API void sqlite3_interrupt(sqlite3*);
+SQLITE_API int sqlite3_is_interrupted(sqlite3*);

/*
** CAPI3REF: Determine If An SQL Statement Is Complete
@@ -3592,8 +3670,8 @@ ** [[SQLITE_TRACE_PROFILE]] <dt>SQLITE_TRACE_PROFILE</dt>
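A minimal cooperative-cancellation sketch pairing the long-standing sqlite3_interrupt() with the sqlite3_is_interrupted() probe added in the hunk above:

#include "sqlite3.h"

/* Called from a watchdog thread to abort whatever db is doing. */
void cancel_work(sqlite3 *db) { sqlite3_interrupt(db); }

/* Workers can ask whether an interrupt is still in effect (1) or not (0)
   instead of tracking a separate flag of their own. */
int work_cancelled(sqlite3 *db) { return sqlite3_is_interrupted(db); }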
** <dd>^An SQLITE_TRACE_PROFILE callback provides approximately the same ** information as is provided by the [sqlite3_profile()] callback. ** ^The P argument is a pointer to the [prepared statement] and the -** X argument points to a 64-bit integer which is the estimated of -** the number of nanosecond that the prepared statement took to run. +** X argument points to a 64-bit integer which is approximately +** the number of nanoseconds that the prepared statement took to run. ** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes. ** ** [[SQLITE_TRACE_ROW]] <dt>SQLITE_TRACE_ROW</dt>@@ -3625,8 +3703,10 @@ ** NULL or if the M mask is zero, then tracing is disabled. The
** M argument should be the bitwise OR-ed combination of ** zero or more [SQLITE_TRACE] constants. ** -** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides -** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2(). +** ^Each call to either sqlite3_trace(D,X,P) or sqlite3_trace_v2(D,M,X,P) +** overrides (cancels) all prior calls to sqlite3_trace(D,X,P) or +** sqlite3_trace_v2(D,M,X,P) for the [database connection] D. Each +** database connection may have at most one trace callback. ** ** ^The X callback is invoked whenever any of the events identified by ** mask M occur. ^The integer return value from the callback is currently@@ -3656,7 +3736,7 @@ ** METHOD: sqlite3
** ** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback ** function X to be invoked periodically during long running calls to -** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for +** [sqlite3_step()] and [sqlite3_prepare()] and similar for ** database connection D. An example use for this ** interface is to keep a GUI updated during a large query. **@@ -3681,6 +3761,13 @@ ** the database connection that invoked the progress handler.
** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their ** database connections for the meaning of "modify" in this paragraph. ** +** The progress handler callback would originally only be invoked from the +** bytecode engine. It still might be invoked during [sqlite3_prepare()] +** and similar because those routines might force a reparse of the schema +** which involves running the bytecode engine. However, beginning with +** SQLite version 3.41.0, the progress handler callback might also be +** invoked directly from [sqlite3_prepare()] while analyzing and generating +** code for complex queries. */ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);@@ -3717,13 +3804,18 @@ ** three flag combinations:)^
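A sketch of the progress-handler idiom the paragraphs above describe. Note the wrinkle documented in this hunk: since 3.41.0 the callback may also fire from inside sqlite3_prepare() and similar while complex queries are analyzed.

#include "sqlite3.h"

/* Runs every ~500 bytecode operations; returning non-zero aborts the
   current statement with SQLITE_INTERRUPT. */
static int keep_ui_alive(void *pArg) {
  (void)pArg;
  /* ...pump the event loop / repaint a progress bar here... */
  return 0;  /* 0 = keep going */
}

void install_progress_handler(sqlite3 *db) {
  sqlite3_progress_handler(db, 500, keep_ui_alive, 0);
}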
** ** <dl> ** ^(<dt>[SQLITE_OPEN_READONLY]</dt> -** <dd>The database is opened in read-only mode. If the database does not -** already exist, an error is returned.</dd>)^ +** <dd>The database is opened in read-only mode. If the database does +** not already exist, an error is returned.</dd>)^ ** ** ^(<dt>[SQLITE_OPEN_READWRITE]</dt> -** <dd>The database is opened for reading and writing if possible, or reading -** only if the file is write protected by the operating system. In either -** case the database must already exist, otherwise an error is returned.</dd>)^ +** <dd>The database is opened for reading and writing if possible, or +** reading only if the file is write protected by the operating +** system. In either case the database must already exist, otherwise +** an error is returned. For historical reasons, if opening in +** read-write mode fails due to OS-level permissions, an attempt is +** made to open it in read-only mode. [sqlite3_db_readonly()] can be +** used to determine whether the database is actually +** read-write.</dd>)^ ** ** ^(<dt>[SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]</dt> ** <dd>The database is opened for reading and writing, and is created if@@ -3983,7 +4075,7 @@ ** The first parameter to these interfaces (hereafter referred to
** as F) must be one of: ** <ul> ** <li> A database filename pointer created by the SQLite core and -** passed into the xOpen() method of a VFS implemention, or +** passed into the xOpen() method of a VFS implementation, or ** <li> A filename obtained from [sqlite3_db_filename()], or ** <li> A new filename constructed using [sqlite3_create_filename()]. ** </ul>@@ -4096,7 +4188,7 @@
/* ** CAPI3REF: Create and Destroy VFS Filenames ** -** These interfces are provided for use by [VFS shim] implementations and +** These interfaces are provided for use by [VFS shim] implementations and ** are not useful outside of that context. ** ** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of@@ -4176,6 +4268,7 @@ ** </ul>
** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language ** text that describes the error, as either UTF-8 or UTF-16 respectively. +** (See how SQLite handles [invalid UTF] for exceptions to this rule.) ** ^(Memory to hold the error message string is managed internally. ** The application does not need to worry about freeing the result. ** However, the error string might be overwritten or deallocated by@@ -4644,6 +4737,41 @@ */
SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt);

/*
+** CAPI3REF: Change The EXPLAIN Setting For A Prepared Statement
+** METHOD: sqlite3_stmt
+**
+** The sqlite3_stmt_explain(S,E) interface changes the EXPLAIN
+** setting for [prepared statement] S. If E is zero, then S becomes
+** a normal prepared statement. If E is 1, then S behaves as if
+** its SQL text began with "[EXPLAIN]". If E is 2, then S behaves as if
+** its SQL text began with "[EXPLAIN QUERY PLAN]".
+**
+** Calling sqlite3_stmt_explain(S,E) might cause S to be reprepared.
+** SQLite tries to avoid a reprepare, but a reprepare might be necessary
+** on the first transition into EXPLAIN or EXPLAIN QUERY PLAN mode.
+**
+** Because of the potential need to reprepare, a call to
+** sqlite3_stmt_explain(S,E) will fail with SQLITE_ERROR if S cannot be
+** reprepared because it was created using [sqlite3_prepare()] instead of
+** the newer [sqlite3_prepare_v2()] or [sqlite3_prepare_v3()] interfaces and
+** hence has no saved SQL text with which to reprepare.
+**
+** Changing the explain setting for a prepared statement does not change
+** the original SQL text for the statement. Hence, if the SQL text originally
+** began with EXPLAIN or EXPLAIN QUERY PLAN, but sqlite3_stmt_explain(S,0)
+** is called to convert the statement into an ordinary statement, the EXPLAIN
+** or EXPLAIN QUERY PLAN keywords will still appear in the sqlite3_sql(S)
+** output, even though the statement now acts like a normal SQL statement.
+**
+** This routine returns SQLITE_OK if the explain mode is successfully
+** changed, or an error code if the explain mode could not be changed.
+** The explain mode cannot be changed while a statement is active.
+** Hence, it is good practice to call [sqlite3_reset(S)]
+** immediately prior to calling sqlite3_stmt_explain(S,E).
+*/
+SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode);
+
+/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
** METHOD: sqlite3_stmt
**
@@ -4806,7 +4934,7 @@ ** ^ (1) A destructor to dispose of the BLOB or string after SQLite has finished
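A short sketch of the usage contract quoted above: reset first, flip into EXPLAIN QUERY PLAN mode, then restore. It assumes the statement was prepared with sqlite3_prepare_v2() or _v3(), so a reprepare is possible.

#include "sqlite3.h"

int explain_then_restore(sqlite3_stmt *stmt) {
  sqlite3_reset(stmt);                     /* mode cannot change while active */
  int rc = sqlite3_stmt_explain(stmt, 2);  /* 2 = EXPLAIN QUERY PLAN */
  while (rc == SQLITE_OK && sqlite3_step(stmt) == SQLITE_ROW) {
    /* ...consume the query-plan rows... */
  }
  sqlite3_reset(stmt);
  return sqlite3_stmt_explain(stmt, 0);    /* back to a normal statement */
}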
** with it may be passed. ^It is called to dispose of the BLOB or string even ** if the call to the bind API fails, except the destructor is not called if ** the third parameter is a NULL pointer or the fourth parameter is negative. -** ^ (2) The special constant, [SQLITE_STATIC], may be passsed to indicate that +** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that ** the application remains responsible for disposing of the object. ^In this ** case, the object and the provided pointer to it must remain valid until ** either the prepared statement is finalized or the same SQL parameter is@@ -5485,19 +5613,32 @@ **
** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S ** back to the beginning of its program. ** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], -** or if [sqlite3_step(S)] has never before been called on S, -** then [sqlite3_reset(S)] returns [SQLITE_OK]. +** ^The return code from [sqlite3_reset(S)] indicates whether or not +** the previous evaluation of prepared statement S completed successfully. +** ^If [sqlite3_step(S)] has never before been called on S or if +** [sqlite3_step(S)] has not been called since the previous call +** to [sqlite3_reset(S)], then [sqlite3_reset(S)] will return +** [SQLITE_OK]. ** ** ^If the most recent call to [sqlite3_step(S)] for the ** [prepared statement] S indicated an error, then ** [sqlite3_reset(S)] returns an appropriate [error code]. +** ^The [sqlite3_reset(S)] interface might also return an [error code] +** if there were no prior errors but the process of resetting +** the prepared statement caused a new error. ^For example, if an +** [INSERT] statement with a [RETURNING] clause is only stepped one time, +** that one call to [sqlite3_step(S)] might return SQLITE_ROW but +** the overall statement might still fail and the [sqlite3_reset(S)] call +** might return SQLITE_BUSY if locking constraints prevent the +** database change from committing. Therefore, it is important that +** applications check the return code from [sqlite3_reset(S)] even if +** no prior call to [sqlite3_step(S)] indicated a problem. ** ** ^The [sqlite3_reset(S)] interface does not change the values ** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. */ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); + /* ** CAPI3REF: Create Or Redefine SQL Functions@@ -5704,10 +5845,21 @@ ** The SQLITE_DIRECTONLY flag means that the function may only be invoked
** from top-level SQL, and cannot be used in VIEWs or TRIGGERs nor in ** schema structures such as [CHECK constraints], [DEFAULT clauses], ** [expression indexes], [partial indexes], or [generated columns]. -** The SQLITE_DIRECTONLY flags is a security feature which is recommended -** for all [application-defined SQL functions], and especially for functions -** that have side-effects or that could potentially leak sensitive -** information. +** <p> +** The SQLITE_DIRECTONLY flag is recommended for any +** [application-defined SQL function] +** that has side-effects or that could potentially leak sensitive information. +** This will prevent attacks in which an application is tricked +** into using a database file that has had its schema surreptitiously +** modified to invoke the application-defined function in ways that are +** harmful. +** <p> +** Some people say it is good practice to set SQLITE_DIRECTONLY on all +** [application-defined SQL functions], regardless of whether or not they +** are security sensitive, as doing so prevents those functions from being used +** inside of the database schema, and thus ensures that the database +** can be inspected and modified using generic tools (such as the [CLI]) +** that do not have access to the application-defined functions. ** </dd> ** ** [[SQLITE_INNOCUOUS]] <dt>SQLITE_INNOCUOUS</dt><dd>@@ -5734,13 +5886,27 @@ ** security-adverse side-effects and information-leaks.
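Registering a sensitive function the way the paragraph above recommends, so it can only be invoked from top-level SQL and never from the schema, views, or triggers. A sketch; the function body is a stand-in.

#include "sqlite3.h"

static void secret_fn(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
  (void)argc; (void)argv;
  sqlite3_result_text(ctx, "sensitive", -1, SQLITE_STATIC);
}

int register_secret(sqlite3 *db) {
  return sqlite3_create_function(db, "secret", 0,
                                 SQLITE_UTF8 | SQLITE_DIRECTONLY,
                                 0, secret_fn, 0, 0);
}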
** </dd> ** ** [[SQLITE_SUBTYPE]] <dt>SQLITE_SUBTYPE</dt><dd> -** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call +** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call ** [sqlite3_value_subtype()] to inspect the sub-types of its arguments. -** Specifying this flag makes no difference for scalar or aggregate user -** functions. However, if it is not specified for a user-defined window -** function, then any sub-types belonging to arguments passed to the window -** function may be discarded before the window function is called (i.e. -** sqlite3_value_subtype() will always return 0). +** This flag instructs SQLite to omit some corner-case optimizations that +** might disrupt the operation of the [sqlite3_value_subtype()] function, +** causing it to return zero rather than the correct subtype(). +** SQL functions that invokes [sqlite3_value_subtype()] should have this +** property. If the SQLITE_SUBTYPE property is omitted, then the return +** value from [sqlite3_value_subtype()] might sometimes be zero even though +** a non-zero subtype was specified by the function argument expression. +** +** [[SQLITE_RESULT_SUBTYPE]] <dt>SQLITE_RESULT_SUBTYPE</dt><dd> +** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call +** [sqlite3_result_subtype()] to cause a sub-type to be associated with its +** result. +** Every function that invokes [sqlite3_result_subtype()] should have this +** property. If it does not, then the call to [sqlite3_result_subtype()] +** might become a no-op if the function is used as term in an +** [expression index]. On the other hand, SQL functions that never invoke +** [sqlite3_result_subtype()] should avoid setting this property, as the +** purpose of this property is to disable certain optimizations that are +** incompatible with subtypes. ** </dd> ** </dl> */@@ -5748,6 +5914,7 @@ #define SQLITE_DETERMINISTIC 0x000000800
#define SQLITE_DIRECTONLY 0x000080000 #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 +#define SQLITE_RESULT_SUBTYPE 0x001000000 /* ** CAPI3REF: Deprecated Functions@@ -5848,16 +6015,6 @@ ** words, if the value is a string that looks like a number)
** then the conversion is performed. Otherwise no conversion occurs. ** The [SQLITE_INTEGER | datatype] after conversion is returned.)^ ** -** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8], -** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current encoding -** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X) -** returns something other than SQLITE_TEXT, then the return value from -** sqlite3_value_encoding(X) is meaningless. ^Calls to -** sqlite3_value_text(X), sqlite3_value_text16(X), sqlite3_value_text16be(X), -** sqlite3_value_text16le(X), sqlite3_value_bytes(X), or -** sqlite3_value_bytes16(X) might change the encoding of the value X and -** thus change the return from subsequent calls to sqlite3_value_encoding(X). -** ** ^Within the [xUpdate] method of a [virtual table], the ** sqlite3_value_nochange(X) interface returns true if and only if ** the column corresponding to X is unchanged by the UPDATE operation@@ -5922,6 +6079,27 @@ SQLITE_API int sqlite3_value_type(sqlite3_value*);
SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*); SQLITE_API int sqlite3_value_nochange(sqlite3_value*); SQLITE_API int sqlite3_value_frombind(sqlite3_value*); + +/* +** CAPI3REF: Report the internal text encoding state of an sqlite3_value object +** METHOD: sqlite3_value +** +** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8], +** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current text encoding +** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X) +** returns something other than SQLITE_TEXT, then the return value from +** sqlite3_value_encoding(X) is meaningless. ^Calls to +** [sqlite3_value_text(X)], [sqlite3_value_text16(X)], [sqlite3_value_text16be(X)], +** [sqlite3_value_text16le(X)], [sqlite3_value_bytes(X)], or +** [sqlite3_value_bytes16(X)] might change the encoding of the value X and +** thus change the return from subsequent calls to sqlite3_value_encoding(X). +** +** This routine is intended for used by applications that test and validate +** the SQLite implementation. This routine is inquiring about the opaque +** internal state of an [sqlite3_value] object. Ordinary applications should +** not need to know what the internal state of an sqlite3_value object is and +** hence should not need to use this interface. +*/ SQLITE_API int sqlite3_value_encoding(sqlite3_value*); /*@@ -5933,6 +6111,12 @@ ** an [application-defined SQL function] argument V. The subtype
** information can be used to pass a limited amount of context from ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. +** +** Every [application-defined SQL function] that invoke this interface +** should include the [SQLITE_SUBTYPE] property in the text +** encoding argument when the function is [sqlite3_create_function|registered]. +** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() +** might return zero instead of the upstream subtype in some corner cases. */ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);@@ -6031,48 +6215,56 @@ ** CAPI3REF: Function Auxiliary Data
** METHOD: sqlite3_context ** ** These functions may be used by (non-aggregate) SQL functions to -** associate metadata with argument values. If the same value is passed to -** multiple invocations of the same SQL function during query execution, under -** some circumstances the associated metadata may be preserved. An example -** of where this might be useful is in a regular-expression matching -** function. The compiled version of the regular expression can be stored as -** metadata associated with the pattern string. +** associate auxiliary data with argument values. If the same argument +** value is passed to multiple invocations of the same SQL function during +** query execution, under some circumstances the associated auxiliary data +** might be preserved. An example of where this might be useful is in a +** regular-expression matching function. The compiled version of the regular +** expression can be stored as auxiliary data associated with the pattern string. ** Then as long as the pattern string remains the same, ** the compiled regular expression can be reused on multiple ** invocations of the same function. ** -** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata +** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the auxiliary data ** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument ** value to the application-defined function. ^N is zero for the left-most -** function argument. ^If there is no metadata +** function argument. ^If there is no auxiliary data ** associated with the function argument, the sqlite3_get_auxdata(C,N) interface ** returns a NULL pointer. ** -** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th -** argument of the application-defined function. ^Subsequent +** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as auxiliary data for the +** N-th argument of the application-defined function. ^Subsequent ** calls to sqlite3_get_auxdata(C,N) return P from the most recent -** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or -** NULL if the metadata has been discarded. +** sqlite3_set_auxdata(C,N,P,X) call if the auxiliary data is still valid or +** NULL if the auxiliary data has been discarded. ** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL, ** SQLite will invoke the destructor function X with parameter P exactly -** once, when the metadata is discarded. -** SQLite is free to discard the metadata at any time, including: <ul> +** once, when the auxiliary data is discarded. +** SQLite is free to discard the auxiliary data at any time, including: <ul> ** <li> ^(when the corresponding function parameter changes)^, or ** <li> ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the ** SQL statement)^, or ** <li> ^(when sqlite3_set_auxdata() is invoked again on the same ** parameter)^, or ** <li> ^(during the original sqlite3_set_auxdata() call when a memory -** allocation error occurs.)^ </ul> +** allocation error occurs.)^ +** <li> ^(during the original sqlite3_set_auxdata() call if the function +** is evaluated during query planning instead of during query execution, +** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ </ul> ** -** Note the last bullet in particular. The destructor X in +** Note the last two bullets in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the ** sqlite3_set_auxdata() interface even returns. 
Hence sqlite3_set_auxdata() ** should be called near the end of the function implementation and the ** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. +** sqlite3_set_auxdata() has been called. Furthermore, a call to +** sqlite3_get_auxdata() that occurs immediately after a corresponding call +** to sqlite3_set_auxdata() might still return NULL if an out-of-memory +** condition occurred during the sqlite3_set_auxdata() call or if the +** function is being evaluated during query planning rather than during +** query execution. ** -** ^(In practice, metadata is preserved between function calls for +** ^(In practice, auxiliary data is preserved between function calls for ** function parameters that are compile-time constants, including literal ** values and [parameters] and expressions composed from the same.)^ **@@ -6082,10 +6274,67 @@ ** kinds of function caching behavior.
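The compiled-pattern idiom described above, sketched with a stand-in "compiled" form (a copied pattern and substring matching instead of a real regex engine). Note the re-fetch after sqlite3_set_auxdata(): per the caveats above, the data may be discarded before the call even returns.

#include <string.h>
#include "sqlite3.h"

static void free_pattern(void *p) { sqlite3_free(p); }

static void regexp_func(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
  (void)argc;
  char *pat = (char *)sqlite3_get_auxdata(ctx, 0);
  if (pat == 0) {
    pat = sqlite3_mprintf("%s", (const char *)sqlite3_value_text(argv[0]));
    if (pat == 0) { sqlite3_result_error_nomem(ctx); return; }
    sqlite3_set_auxdata(ctx, 0, pat, free_pattern);
    pat = (char *)sqlite3_get_auxdata(ctx, 0); /* may already be gone (OOM,
                                                  or query-planning evaluation) */
    if (pat == 0) { sqlite3_result_error_nomem(ctx); return; }
  }
  const char *text = (const char *)sqlite3_value_text(argv[1]);
  /* Stand-in match: substring search instead of a real regex. */
  sqlite3_result_int(ctx, text != 0 && strstr(text, pat) != 0);
}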
** ** These routines must be called from the same thread in which ** the SQL function is running. +** +** See also: [sqlite3_get_clientdata()] and [sqlite3_set_clientdata()]. */ SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N); SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); +/* +** CAPI3REF: Database Connection Client Data +** METHOD: sqlite3 +** +** These functions are used to associate one or more named pointers +** with a [database connection]. +** A call to sqlite3_set_clientdata(D,N,P,X) causes the pointer P +** to be attached to [database connection] D using name N. Subsequent +** calls to sqlite3_get_clientdata(D,N) will return a copy of pointer P +** or a NULL pointer if there were no prior calls to +** sqlite3_set_clientdata() with the same values of D and N. +** Names are compared using strcmp() and are thus case sensitive. +** +** If P and X are both non-NULL, then the destructor X is invoked with +** argument P on the first of the following occurrences: +** <ul> +** <li> An out-of-memory error occurs during the call to +** sqlite3_set_clientdata() which attempts to register pointer P. +** <li> A subsequent call to sqlite3_set_clientdata(D,N,P,X) is made +** with the same D and N parameters. +** <li> The database connection closes. SQLite does not make any guarantees +** about the order in which destructors are called, only that all +** destructors will be called exactly once at some point during the +** database connection closing process. +** </ul> +** +** SQLite does not do anything with client data other than invoke +** destructors on the client data at the appropriate time. The intended +** use for client data is to provide a mechanism for wrapper libraries +** to store additional information about an SQLite database connection. +** +** There is no limit (other than available memory) on the number of different +** client data pointers (with different names) that can be attached to a +** single database connection. However, the implementation is optimized +** for the case of having only one or two different client data names. +** Applications and wrapper libraries are discouraged from using more than +** one client data name each. +** +** There is no way to enumerate the client data pointers +** associated with a database connection. The N parameter can be thought +** of as a secret key such that only code that knows the secret key is able +** to access the associated data. +** +** Security Warning: These interfaces should not be exposed in scripting +** languages or in other circumstances where it might be possible for an +** an attacker to invoke them. Any agent that can invoke these interfaces +** can probably also take control of the process. +** +** Database connection client data is only available for SQLite +** version 3.44.0 ([dateof:3.44.0]) and later. +** +** See also: [sqlite3_set_auxdata()] and [sqlite3_get_auxdata()]. +*/ +SQLITE_API void *sqlite3_get_clientdata(sqlite3*,const char*); +SQLITE_API int sqlite3_set_clientdata(sqlite3*, const char*, void*, void(*)(void*)); /* ** CAPI3REF: Constants Defining Special Destructor Behavior@@ -6287,6 +6536,20 @@ ** of the subtype T are preserved in current versions of SQLite;
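A sketch of the new per-connection client-data API documented in the hunk above, as a wrapper library might use it; the key "mywrapper-state" is an arbitrary, private name.

#include "sqlite3.h"

typedef struct { int request_count; } WrapperState;

static void free_state(void *p) { sqlite3_free(p); }

int attach_state(sqlite3 *db) {
  WrapperState *st = (WrapperState *)sqlite3_malloc(sizeof(WrapperState));
  if (st == 0) return SQLITE_NOMEM;
  st->request_count = 0;
  /* The destructor runs on OOM, on re-registration under the same name,
     or when the connection closes. */
  return sqlite3_set_clientdata(db, "mywrapper-state", st, free_state);
}

WrapperState *get_state(sqlite3 *db) {
  return (WrapperState *)sqlite3_get_clientdata(db, "mywrapper-state");
}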
** higher order bits are discarded. ** The number of subtype bytes preserved by SQLite might increase ** in future releases of SQLite. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_RESULT_SUBTYPE] property in its +** text encoding argument when the SQL function is +** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE] +** property is omitted from the function that invokes sqlite3_result_subtype(), +** then in some cases the sqlite3_result_subtype() might fail to set +** the result subtype. +** +** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any +** SQL function that invokes the sqlite3_result_subtype() interface +** and that does not have the SQLITE_RESULT_SUBTYPE property will raise +** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1 +** by default. */ SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);@@ -6458,6 +6721,13 @@ ** method of the default [sqlite3_vfs] object. If the xSleep() method
** of the default VFS is not implemented correctly, or not implemented at ** all, then the behavior of sqlite3_sleep() may deviate from the description ** in the previous paragraphs. +** +** If a negative argument is passed to sqlite3_sleep() the results vary by +** VFS and operating system. Some system treat a negative argument as an +** instruction to sleep forever. Others understand it to mean do not sleep +** at all. ^In SQLite version 3.42.0 and later, a negative +** argument passed into sqlite3_sleep() is changed to zero before it is relayed +** down into the xSleep method of the VFS. */ SQLITE_API int sqlite3_sleep(int);@@ -6711,7 +6981,7 @@ */
SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); /* -** CAPI3REF: Allowed return values from [sqlite3_txn_state()] +** CAPI3REF: Allowed return values from sqlite3_txn_state() ** KEYWORDS: {transaction state} ** ** These constants define the current transaction state of a database file.@@ -6843,7 +7113,7 @@ ** <p>^There is only one autovacuum pages callback per database connection.
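The transaction-state probe declared above, mapped to readable names; the SQLITE_TXN_* constants are its documented return values, and -1 is returned for an unknown schema.

#include <stdio.h>
#include "sqlite3.h"

void report_txn_state(sqlite3 *db) {
  switch (sqlite3_txn_state(db, "main")) {
    case SQLITE_TXN_NONE:  puts("no transaction open");    break;
    case SQLITE_TXN_READ:  puts("read transaction open");  break;
    case SQLITE_TXN_WRITE: puts("write transaction open"); break;
    default:               puts("unknown schema");         break;
  }
}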
** ^Each call to the sqlite3_autovacuum_pages() interface overrides all ** previous invocations for that database connection. ^If the callback ** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer, -** then the autovacuum steps callback is cancelled. The return value +** then the autovacuum steps callback is canceled. The return value ** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might ** be some other error code if something goes wrong. The current ** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other@@ -7303,15 +7573,6 @@ */
SQLITE_API void sqlite3_reset_auto_extension(void); /* -** The interface to the virtual-table mechanism is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* ** Structures used by the virtual table interface */ typedef struct sqlite3_vtab sqlite3_vtab;@@ -7371,6 +7632,10 @@ int (*xRollbackTo)(sqlite3_vtab *pVTab, int);
/* The methods above are in versions 1 and 2 of the sqlite_module object. ** Those below are for version 3 and greater. */ int (*xShadowName)(const char*); + /* The methods above are in versions 1 through 3 of the sqlite_module object. + ** Those below are for version 4 and greater. */ + int (*xIntegrity)(sqlite3_vtab *pVTab, const char *zSchema, + const char *zTabName, int mFlags, char **pzErr); }; /*@@ -7429,10 +7694,10 @@ ** the constraint may or may not be checked in byte code. In other words,
** when the omit flag is true there is no guarantee that the constraint will ** not be checked again using byte code.)^ ** -** ^The idxNum and idxPtr values are recorded and passed into the +** ^The idxNum and idxStr values are recorded and passed into the ** [xFilter] method. -** ^[sqlite3_free()] is used to free idxPtr if and only if -** needToFreeIdxPtr is true. +** ^[sqlite3_free()] is used to free idxStr if and only if +** needToFreeIdxStr is true. ** ** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in ** the correct order to satisfy the ORDER BY clause so that no separate@@ -7552,7 +7817,7 @@ ** The collating sequence to be used for comparison can be found using
** the [sqlite3_vtab_collation()] interface. For most real-world virtual ** tables, the collating sequence of constraints does not matter (for example ** because the constraints are numeric) and so the sqlite3_vtab_collation() -** interface is no commonly needed. +** interface is not commonly needed. */ #define SQLITE_INDEX_CONSTRAINT_EQ 2 #define SQLITE_INDEX_CONSTRAINT_GT 4@@ -7712,16 +7977,6 @@ */
SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); /* -** The interface to the virtual-table mechanism defined above (back up -** to a comment remarkably similar to this one) is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - -/* ** CAPI3REF: A Handle To An Open BLOB ** KEYWORDS: {BLOB handle} {BLOB handles} **@@ -7868,7 +8123,7 @@ ** committed. ^If an error occurs while committing the transaction, an error
** code is returned and the transaction rolled back. ** ** Calling this function with an argument that is not a NULL pointer or an -** open blob handle results in undefined behaviour. ^Calling this routine +** open blob handle results in undefined behavior. ^Calling this routine ** with a null pointer (such as would be returned by a failed call to ** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function ** is passed a valid open blob handle, the values returned by the@@ -8104,9 +8359,9 @@ ** previously entered by the same thread. The behavior
** is undefined if the mutex is not currently entered by the ** calling thread or is not currently allocated. ** -** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or -** sqlite3_mutex_leave() is a NULL pointer, then all three routines -** behave as no-ops. +** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), +** sqlite3_mutex_leave(), or sqlite3_mutex_free() is a NULL pointer, +** then any of the four routines behaves as a no-op. ** ** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. */@@ -8348,6 +8603,7 @@ #define SQLITE_TESTCTRL_FIRST 5
#define SQLITE_TESTCTRL_PRNG_SAVE 5 #define SQLITE_TESTCTRL_PRNG_RESTORE 6 #define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */ +#define SQLITE_TESTCTRL_FK_NO_ACTION 7 #define SQLITE_TESTCTRL_BITVEC_TEST 8 #define SQLITE_TESTCTRL_FAULT_INSTALL 9 #define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10@@ -8376,7 +8632,8 @@ #define SQLITE_TESTCTRL_SEEK_COUNT 30
#define SQLITE_TESTCTRL_TRACEFLAGS 31 #define SQLITE_TESTCTRL_TUNE 32 #define SQLITE_TESTCTRL_LOGEST 33 -#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */ +#define SQLITE_TESTCTRL_USELONGDOUBLE 34 +#define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */ /* ** CAPI3REF: SQL Keyword Checking@@ -9832,7 +10089,7 @@ **
** [[SQLITE_VTAB_DIRECTONLY]]<dt>SQLITE_VTAB_DIRECTONLY</dt> ** <dd>Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** the [xConnect] or [xCreate] methods of a [virtual table] implementation ** prohibits that virtual table from being used from within triggers and ** views. ** </dd>@@ -9840,18 +10097,28 @@ **
** [[SQLITE_VTAB_INNOCUOUS]]<dt>SQLITE_VTAB_INNOCUOUS</dt> ** <dd>Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** the [xConnect] or [xCreate] methods of a [virtual table] implementation ** identify that virtual table as being safe to use from within triggers ** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the ** virtual table can do no serious harm even if it is controlled by a ** malicious hacker. Developers should avoid setting the SQLITE_VTAB_INNOCUOUS ** flag unless absolutely necessary. ** </dd> +** +** [[SQLITE_VTAB_USES_ALL_SCHEMAS]]<dt>SQLITE_VTAB_USES_ALL_SCHEMAS</dt> +** <dd>Calls of the form +** [sqlite3_vtab_config](db,SQLITE_VTAB_USES_ALL_SCHEMA) from within the +** the [xConnect] or [xCreate] methods of a [virtual table] implementation +** instruct the query planner to begin at least a read transaction on +** all schemas ("main", "temp", and any ATTACH-ed databases) whenever the +** virtual table is used. +** </dd> ** </dl> */ #define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 #define SQLITE_VTAB_INNOCUOUS 2 #define SQLITE_VTAB_DIRECTONLY 3 +#define SQLITE_VTAB_USES_ALL_SCHEMAS 4 /* ** CAPI3REF: Determine The Virtual Table Conflict Policy@@ -9924,7 +10191,7 @@ ** name of that alternative collating sequence is returned.
** <li><p> Otherwise, "BINARY" is returned. ** </ol> */ -SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_info*,int); +SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); /* ** CAPI3REF: Determine if a virtual table query is DISTINCT@@ -10012,7 +10279,7 @@ ** "[IN operator|column IN (...)]" is
** communicated to the xBestIndex method as a ** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use ** this constraint, it must set the corresponding -** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under +** aConstraintUsage[].argvIndex to a positive integer. ^(Then, under ** the usual mode of handling IN operators, SQLite generates [bytecode] ** that invokes the [xFilter|xFilter() method] once for each value ** on the right-hand side of the IN operator.)^ Thus the virtual table@@ -10081,21 +10348,20 @@ ** The result of invoking these interfaces from any other context
** is undefined and probably harmful.
**
** The X parameter in a call to sqlite3_vtab_in_first(X,P) or
-** sqlite3_vtab_in_next(X,P) must be one of the parameters to the
+** sqlite3_vtab_in_next(X,P) should be one of the parameters to the
** xFilter method which invokes these routines, and specifically
** a parameter that was previously selected for all-at-once IN constraint
** processing using the [sqlite3_vtab_in()] interface in the
** [xBestIndex|xBestIndex method]. ^(If the X parameter is not
** an xFilter argument that was selected for all-at-once IN constraint
-** processing, then these routines return [SQLITE_MISUSE])^ or perhaps
-** exhibit some other undefined or harmful behavior.
+** processing, then these routines return [SQLITE_ERROR].)^
**
** ^(Use these routines to access all values on the right-hand side
** of the IN constraint using code like the following:
**
** <blockquote><pre>
**   for(rc=sqlite3_vtab_in_first(pList, &pVal);
-**      rc==SQLITE_OK && pVal
+**      rc==SQLITE_OK && pVal;
**      rc=sqlite3_vtab_in_next(pList, &pVal)
**   ){
**     // do something with pVal
@@ -10193,6 +10459,10 @@ ** When the value returned to V is a string, space to hold that string is
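/* [Editor's illustrative sketch, not part of the upstream diff] The two
** halves of all-at-once IN processing. Function names are hypothetical;
** only the documented sqlite3_index_info members and the sqlite3_vtab_in*()
** interfaces described above are used.
*/
/* In xBestIndex: ask for the whole IN list in a single xFilter call. */
static int exampleBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
  int i;
  (void)pVTab;
  for(i=0; i<pInfo->nConstraint; i++){
    if( pInfo->aConstraint[i].usable
     && pInfo->aConstraint[i].op==SQLITE_INDEX_CONSTRAINT_EQ
     && sqlite3_vtab_in(pInfo, i, 1)  /* true only for IN; the 1 requests
                                      ** all-at-once processing */
    ){
      pInfo->aConstraintUsage[i].argvIndex = 1;
      break;
    }
  }
  return SQLITE_OK;
}
/* In xFilter: walk the right-hand side of the IN constraint. */
static void exampleFilterIn(sqlite3_value *pList){
  sqlite3_value *pVal;
  int rc;
  for(rc=sqlite3_vtab_in_first(pList, &pVal);
      rc==SQLITE_OK && pVal;
      rc=sqlite3_vtab_in_next(pList, &pVal)
  ){
    /* ... do something with pVal ... */
  }
  /* on a clean finish, rc==SQLITE_OK and pVal==0 */
}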
** managed by the prepared statement S and will be automatically freed when ** S is finalized. ** +** Not all values are available for all query elements. When a value is +** not available, the output variable is set to -1 if the value is numeric, +** or to NULL if it is a string (SQLITE_SCANSTAT_NAME). +** ** <dl> ** [[SQLITE_SCANSTAT_NLOOP]] <dt>SQLITE_SCANSTAT_NLOOP</dt> ** <dd>^The [sqlite3_int64] variable pointed to by the V parameter will be@@ -10220,12 +10490,24 @@ ** <dd>^The "const char *" variable pointed to by the V parameter will be set
** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN]
** description for the X-th loop.
**
-** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECT</dt>
+** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECTID</dt>
** <dd>^The "int" variable pointed to by the V parameter will be set to the
-** "select-id" for the X-th loop. The select-id identifies which query or
-** subquery the loop is part of. The main query has a select-id of zero.
-** The select-id is the same value as is output in the first column
-** of an [EXPLAIN QUERY PLAN] query.
+** id for the X-th query plan element. The id value is unique within the
+** statement. The select-id is the same value as is output in the first
+** column of an [EXPLAIN QUERY PLAN] query.
+**
+** [[SQLITE_SCANSTAT_PARENTID]] <dt>SQLITE_SCANSTAT_PARENTID</dt>
+** <dd>The "int" variable pointed to by the V parameter will be set to
+** the id of the parent of the current query element, if applicable, or
+** to zero if the query element has no parent. This is the same value as
+** returned in the second column of an [EXPLAIN QUERY PLAN] query.
+**
+** [[SQLITE_SCANSTAT_NCYCLE]] <dt>SQLITE_SCANSTAT_NCYCLE</dt>
+** <dd>The sqlite3_int64 output value is set to the number of cycles,
+** according to the processor time-stamp counter, that elapsed while the
+** query element was being processed. This value is not available for
+** all query elements - if it is unavailable the output variable is
+** set to -1.
** </dl>
*/
#define SQLITE_SCANSTAT_NLOOP 0
@@ -10234,12 +10516,14 @@ #define SQLITE_SCANSTAT_EST 2
#define SQLITE_SCANSTAT_NAME 3 #define SQLITE_SCANSTAT_EXPLAIN 4 #define SQLITE_SCANSTAT_SELECTID 5 +#define SQLITE_SCANSTAT_PARENTID 6 +#define SQLITE_SCANSTAT_NCYCLE 7 /* ** CAPI3REF: Prepared Statement Scan Status ** METHOD: sqlite3_stmt ** -** This interface returns information about the predicted and measured +** These interfaces return information about the predicted and measured ** performance for pStmt. Advanced applications can use this ** interface to compare the predicted and the measured performance and ** issue warnings and/or rerun [ANALYZE] if discrepancies are found.@@ -10250,19 +10534,25 @@ ** compile-time option.
** ** The "iScanStatusOp" parameter determines which status information to return. ** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior -** of this interface is undefined. -** ^The requested measurement is written into a variable pointed to by -** the "pOut" parameter. -** Parameter "idx" identifies the specific loop to retrieve statistics for. -** Loops are numbered starting from zero. ^If idx is out of range - less than -** zero or greater than or equal to the total number of loops used to implement -** the statement - a non-zero value is returned and the variable that pOut -** points to is unchanged. +** of this interface is undefined. ^The requested measurement is written into +** a variable pointed to by the "pOut" parameter. ** -** ^Statistics might not be available for all loops in all statements. ^In cases -** where there exist loops with no available statistics, this function behaves -** as if the loop did not exist - it returns non-zero and leave the variable -** that pOut points to unchanged. +** The "flags" parameter must be passed a mask of flags. At present only +** one flag is defined - SQLITE_SCANSTAT_COMPLEX. If SQLITE_SCANSTAT_COMPLEX +** is specified, then status information is available for all elements +** of a query plan that are reported by "EXPLAIN QUERY PLAN" output. If +** SQLITE_SCANSTAT_COMPLEX is not specified, then only query plan elements +** that correspond to query loops (the "SCAN..." and "SEARCH..." elements of +** the EXPLAIN QUERY PLAN output) are available. Invoking API +** sqlite3_stmt_scanstatus() is equivalent to calling +** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter. +** +** Parameter "idx" identifies the specific query element to retrieve statistics +** for. Query elements are numbered starting from zero. A value of -1 may be +** to query for statistics regarding the entire query. ^If idx is out of range +** - less than -1 or greater than or equal to the total number of query +** elements used to implement the statement - a non-zero value is returned and +** the variable that pOut points to is unchanged. ** ** See also: [sqlite3_stmt_scanstatus_reset()] */@@ -10272,6 +10562,19 @@ int idx, /* Index of loop to report on */
int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */ void *pOut /* Result written here */ ); +SQLITE_API int sqlite3_stmt_scanstatus_v2( + sqlite3_stmt *pStmt, /* Prepared statement for which info desired */ + int idx, /* Index of loop to report on */ + int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */ + int flags, /* Mask of flags defined below */ + void *pOut /* Result written here */ +); + +/* +** CAPI3REF: Prepared Statement Scan Status +** KEYWORDS: {scan status flags} +*/ +#define SQLITE_SCANSTAT_COMPLEX 0x0001 /* ** CAPI3REF: Zero Scan-Status Counters@@ -10362,6 +10665,10 @@ ** or updated. The value of the seventh parameter passed to the callback
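/* [Editor's illustrative sketch, not part of the upstream diff] Dumping
** statistics for every query-plan element with the v2 interface declared
** above. Requires a build with SQLITE_ENABLE_STMT_SCANSTATUS and assumes
** <stdio.h>; the loop stops when idx runs past the last element and the
** API returns non-zero.
*/
static void dumpScanStatus(sqlite3_stmt *pStmt){
  int idx;
  for(idx=0; ; idx++){
    sqlite3_int64 nLoop = -1, nCycle = -1;
    const char *zExplain = 0;
    if( sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_NLOOP,
            SQLITE_SCANSTAT_COMPLEX, (void*)&nLoop) ) break;
    sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_NCYCLE,
            SQLITE_SCANSTAT_COMPLEX, (void*)&nCycle);
    sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_EXPLAIN,
            SQLITE_SCANSTAT_COMPLEX, (void*)&zExplain);
    printf("%d: nLoop=%lld nCycle=%lld %s\n", idx,
           (long long)nLoop, (long long)nCycle, zExplain ? zExplain : "");
  }
}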
** function is not defined for operations on WITHOUT ROWID tables, or for ** DELETE operations on rowid tables. ** +** ^The sqlite3_preupdate_hook(D,C,P) function returns the P argument from +** the previous call on the same [database connection] D, or NULL for +** the first call on D. +** ** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()], ** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces ** provide additional information about a preupdate event. These routines@@ -10401,7 +10708,7 @@ **
** When the [sqlite3_blob_write()] API is used to update a blob column,
** the pre-update hook is invoked with SQLITE_DELETE. This is because
** in this case the new values are not available. In this case, when a
-** callback made with op==SQLITE_DELETE is actuall a write using the
+** callback made with op==SQLITE_DELETE is actually a write using the
** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns
** the index of the column being written. In other cases, where the
** pre-update hook is being invoked for some other reason, including a
@@ -10662,6 +10969,13 @@ ** The size of the database is written into *P even if the
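/* [Editor's illustrative sketch, not part of the upstream diff] A pre-update
** hook applying the rule described above: in SQLITE_ENABLE_PREUPDATE_HOOK
** builds, an op==SQLITE_DELETE callback may really be a blob write, which
** sqlite3_preupdate_blobwrite() reports as a column index >= 0.
*/
static void examplePreUpdate(
  void *pCtx, sqlite3 *db, int op,
  const char *zDb, const char *zTab,
  sqlite3_int64 iKey1, sqlite3_int64 iKey2
){
  (void)pCtx; (void)zDb; (void)zTab; (void)iKey1; (void)iKey2;
  if( op==SQLITE_DELETE && sqlite3_preupdate_blobwrite(db)>=0 ){
    return;  /* really an sqlite3_blob_write() on the reported column */
  }
  if( op!=SQLITE_INSERT ){
    int i, n = sqlite3_preupdate_count(db);
    for(i=0; i<n; i++){
      sqlite3_value *pOld = 0;
      if( sqlite3_preupdate_old(db, i, &pOld)==SQLITE_OK ){
        /* ... examine the old value of column i ... */
      }
    }
  }
}
/* Registration; per the text above, the return value is the context pointer
** from the previous registration on the same connection:
**
**   void *pPrev = sqlite3_preupdate_hook(db, examplePreUpdate, 0);
*/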
** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy
** of the database exists.
**
+** After the call, if the SQLITE_SERIALIZE_NOCOPY bit had been set,
+** the returned buffer content will remain accessible and unchanged
+** until the next write operation on the connection or until the
+** connection is closed, and applications must not modify the
+** buffer. If the bit had been clear, the returned buffer will not
+** be accessed by SQLite after the call.
+**
** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the
** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory
** allocation error occurs.
@@ -10710,6 +11024,9 @@ ** connection closes. If the SQLITE_DESERIALIZE_RESIZEABLE bit is set, then
** SQLite will try to increase the buffer size using sqlite3_realloc64()
** if writes on the database cause it to grow larger than M bytes.
**
+** Applications must not modify the buffer P or invalidate it before
+** the database connection D is closed.
+**
** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the
** database is currently in a read transaction or is involved in a backup
** operation.
@@ -10718,6 +11035,13 @@ ** It is not possible to deserialize into the TEMP database. If the
** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the ** function returns SQLITE_ERROR. ** +** The deserialized database should not be in [WAL mode]. If the database +** is in WAL mode, then any attempt to use the database file will result +** in an [SQLITE_CANTOPEN] error. The application can set the +** [file format version numbers] (bytes 18 and 19) of the input database P +** to 0x01 prior to invoking sqlite3_deserialize(D,S,P,N,M,F) to force the +** database file into rollback mode and work around this limitation. +** ** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the ** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then ** [sqlite3_free()] is invoked on argument P prior to returning.@@ -10765,6 +11089,19 @@ ** builds on processors without floating point support.
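/* [Editor's illustrative sketch, not part of the upstream diff] Combining
** the points above: copy "main" into a new in-memory connection, first
** forcing the image into rollback mode (bytes 18 and 19 are the file
** format version numbers) so a WAL-mode source does not later trip
** SQLITE_CANTOPEN. Function name is hypothetical.
*/
static int cloneIntoMemory(sqlite3 *dbSrc, sqlite3 **ppDbDst){
  sqlite3_int64 sz = 0;
  unsigned char *p = sqlite3_serialize(dbSrc, "main", &sz, 0);
  int rc;
  if( p==0 ) return SQLITE_NOMEM;
  if( sz>=20 ){ p[18] = 0x01; p[19] = 0x01; }   /* force rollback mode */
  rc = sqlite3_open(":memory:", ppDbDst);
  if( rc==SQLITE_OK ){
    /* FREEONCLOSE transfers ownership of p to the destination connection */
    rc = sqlite3_deserialize(*ppDbDst, "main", p, sz, sz,
            SQLITE_DESERIALIZE_FREEONCLOSE|SQLITE_DESERIALIZE_RESIZEABLE);
  }else{
    sqlite3_free(p);
  }
  return rc;
}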
*/ #ifdef SQLITE_OMIT_FLOATING_POINT # undef double +#endif + +#if defined(__wasi__) +# undef SQLITE_WASI +# define SQLITE_WASI 1 +# undef SQLITE_OMIT_WAL +# define SQLITE_OMIT_WAL 1/* because it requires shared memory APIs */ +# ifndef SQLITE_OMIT_LOAD_EXTENSION +# define SQLITE_OMIT_LOAD_EXTENSION +# endif +# ifndef SQLITE_THREADSAFE +# define SQLITE_THREADSAFE 0 +# endif #endif #if 0@@ -10973,16 +11310,20 @@ */
SQLITE_API void sqlite3session_delete(sqlite3_session *pSession);
/*
-** CAPIREF: Conigure a Session Object
+** CAPI3REF: Configure a Session Object
** METHOD: sqlite3_session
**
** This method is used to configure a session object after it has been
-** created. At present the only valid value for the second parameter is
-** [SQLITE_SESSION_OBJCONFIG_SIZE].
+** created. At present the only valid values for the second parameter are
+** [SQLITE_SESSION_OBJCONFIG_SIZE] and [SQLITE_SESSION_OBJCONFIG_ROWID].
**
-** Arguments for sqlite3session_object_config()
+*/
+SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg);
+
+/*
+** CAPI3REF: Options for sqlite3session_object_config
**
-** The following values may passed as the the 4th parameter to
+** The following values may be passed as the 2nd parameter to
** sqlite3session_object_config().
**
** <dt>SQLITE_SESSION_OBJCONFIG_SIZE <dd>
@@ -10998,12 +11339,21 @@ ** enabled following the current call, or 0 otherwise.
** ** It is an error (SQLITE_MISUSE) to attempt to modify this setting after ** the first table has been attached to the session object. -*/ -SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); - -/* +** +** <dt>SQLITE_SESSION_OBJCONFIG_ROWID <dd> +** This option is used to set, clear or query the flag that enables +** collection of data for tables with no explicit PRIMARY KEY. +** +** Normally, tables with no explicit PRIMARY KEY are simply ignored +** by the sessions module. However, if this flag is set, it behaves +** as if such tables have a column "_rowid_ INTEGER PRIMARY KEY" inserted +** as their leftmost columns. +** +** It is an error (SQLITE_MISUSE) to attempt to modify this setting after +** the first table has been attached to the session object. */ -#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_ROWID 2 /* ** CAPI3REF: Enable Or Disable A Session Object@@ -11765,6 +12115,18 @@ );
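/* [Editor's illustrative sketch, not part of the upstream diff] Enabling
** rowid collection on a new session object (SQLITE_ENABLE_SESSION builds);
** per the text above, the option must be set before the first table is
** attached. Function name is hypothetical.
*/
static int startSession(sqlite3 *db, sqlite3_session **ppSession){
  int rc = sqlite3session_create(db, "main", ppSession);
  if( rc==SQLITE_OK ){
    int bRowid = 1;   /* written back with the resulting state */
    rc = sqlite3session_object_config(*ppSession,
             SQLITE_SESSION_OBJCONFIG_ROWID, &bRowid);
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3session_attach(*ppSession, 0);   /* 0 => all tables */
  }
  return rc;
}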
/*
+** CAPI3REF: Upgrade the Schema of a Changeset/Patchset
+*/
+SQLITE_API int sqlite3changeset_upgrade(
+  sqlite3 *db,
+  const char *zDb,
+  int nIn, const void *pIn,      /* Input changeset */
+  int *pnOut, void **ppOut       /* OUT: Upgraded changeset */
+);
+
+
+
+/*
** CAPI3REF: Changegroup Handle
**
** A changegroup is an object used to combine two or more
@@ -11811,6 +12173,38 @@ */
SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
/*
+** CAPI3REF: Add a Schema to a Changegroup
+** METHOD: sqlite3_changegroup_schema
+**
+** This method may be used to optionally enforce the rule that the changesets
+** added to the changegroup handle must match the schema of database zDb
+** ("main", "temp", or the name of an attached database). If
+** sqlite3changegroup_add() is called to add a changeset that is not compatible
+** with the configured schema, SQLITE_SCHEMA is returned and the changegroup
+** object is left in an undefined state.
+**
+** A changeset schema is considered compatible with the database schema in
+** the same way as for sqlite3changeset_apply(). Specifically, for each
+** table in the changeset, there exists a database table with:
+**
+** <ul>
+**   <li> The name identified by the changeset, and
+**   <li> at least as many columns as recorded in the changeset, and
+**   <li> the primary key columns in the same position as recorded in
+**        the changeset.
+** </ul>
+**
+** The output of the changegroup object always has the same schema as the
+** database nominated using this function. In cases where changesets passed
+** to sqlite3changegroup_add() have fewer columns than the corresponding table
+** in the database schema, these are filled in using the default column
+** values from the database schema. This makes it possible to combine
+** changesets that have different numbers of columns for a single table
+** within a changegroup, provided that they are otherwise compatible.
+*/
+SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const char *zDb);
+
+/*
** CAPI3REF: Add A Changeset To A Changegroup
** METHOD: sqlite3_changegroup
**
@@ -11878,13 +12272,18 @@ **
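/* [Editor's illustrative sketch, not part of the upstream diff] Combining
** two changesets under the schema of "main", as the new function above
** allows. The buffers (nA,pA) and (nB,pB) are assumed to hold existing
** changesets; the function name is hypothetical.
*/
static int combineUnderSchema(
  sqlite3 *db,
  int nA, void *pA, int nB, void *pB,
  int *pnOut, void **ppOut
){
  sqlite3_changegroup *pGrp = 0;
  int rc = sqlite3changegroup_new(&pGrp);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_schema(pGrp, db, "main");
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
  sqlite3changegroup_delete(pGrp);
  return rc;
}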
** If the new changeset contains changes to a table that is already present ** in the changegroup, then the number of columns and the position of the ** primary key columns for the table must be consistent. If this is not the -** case, this function fails with SQLITE_SCHEMA. If the input changeset -** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is -** returned. Or, if an out-of-memory condition occurs during processing, this -** function returns SQLITE_NOMEM. In all cases, if an error occurs the state -** of the final contents of the changegroup is undefined. +** case, this function fails with SQLITE_SCHEMA. Except, if the changegroup +** object has been configured with a database schema using the +** sqlite3changegroup_schema() API, then it is possible to combine changesets +** with different numbers of columns for a single table, provided that +** they are otherwise compatible. +** +** If the input changeset appears to be corrupt and the corruption is +** detected, SQLITE_CORRUPT is returned. Or, if an out-of-memory condition +** occurs during processing, this function returns SQLITE_NOMEM. ** -** If no error occurs, SQLITE_OK is returned. +** In all cases, if an error occurs the state of the final contents of the +** changegroup is undefined. If no error occurs, SQLITE_OK is returned. */ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);@@ -12136,9 +12535,30 @@ ** <dt>SQLITE_CHANGESETAPPLY_INVERT <dd>
** Invert the changeset before applying it. This is equivalent to inverting
** a changeset using sqlite3changeset_invert() before applying it. It is
** an error to specify this flag with a patchset.
+**
+** <dt>SQLITE_CHANGESETAPPLY_IGNORENOOP <dd>
+** Do not invoke the conflict handler callback for any changes that
+** would not actually modify the database even if they were applied.
+** Specifically, this means that the conflict handler is not invoked
+** for:
+** <ul>
+**   <li>a delete change if the row being deleted cannot be found,
+**   <li>an update change if the modified fields are already set to
+**       their new values in the conflicting row, or
+**   <li>an insert change if all fields of the conflicting row match
+**       the row being inserted.
+** </ul>
+**
+** <dt>SQLITE_CHANGESETAPPLY_FKNOACTION <dd>
+** If this flag is set, then all foreign key constraints in the target
+** database behave as if they were declared with "ON UPDATE NO ACTION ON
+** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL
+** or SET DEFAULT.
*/
#define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001
#define SQLITE_CHANGESETAPPLY_INVERT 0x0002
+#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004
+#define SQLITE_CHANGESETAPPLY_FKNOACTION 0x0008
/*
** CAPI3REF: Constants Passed To The Conflict Handler
@@ -12879,7 +13299,7 @@ ** xPhraseNextColumn()
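/* [Editor's illustrative sketch, not part of the upstream diff] Applying a
** changeset with both new flags; xConflict is an application-defined
** conflict handler and the wrapper name is hypothetical.
*/
static int applyQuietly(
  sqlite3 *db, int nChangeset, void *pChangeset,
  int(*xConflict)(void*,int,sqlite3_changeset_iter*)
){
  return sqlite3changeset_apply_v2(
      db, nChangeset, pChangeset,
      0,                /* xFilter: apply changes to all tables */
      xConflict, 0,     /* conflict handler and its context pointer */
      0, 0,             /* no rebase blob required */
      SQLITE_CHANGESETAPPLY_IGNORENOOP|SQLITE_CHANGESETAPPLY_FKNOACTION
  );
}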
** See xPhraseFirstColumn above. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 2 */ void *(*xUserData)(Fts5Context*);@@ -13108,8 +13528,8 @@ ** On the other hand, it may require more CPU cycles to run MATCH queries,
** as separate queries of the FTS index are required for each synonym. ** ** When using methods (2) or (3), it is important that the tokenizer only -** provide synonyms when tokenizing document text (method (2)) or query -** text (method (3)), not both. Doing so will not cause any errors, but is +** provide synonyms when tokenizing document text (method (3)) or query +** text (method (2)), not both. Doing so will not cause any errors, but is ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer;@@ -13157,7 +13577,7 @@ /* Create a new tokenizer */
int (*xCreateTokenizer)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_tokenizer *pTokenizer, void (*xDestroy)(void*) );@@ -13166,7 +13586,7 @@ /* Find an existing tokenizer */
int (*xFindTokenizer)( fts5_api *pApi, const char *zName, - void **ppContext, + void **ppUserData, fts5_tokenizer *pTokenizer );@@ -13174,7 +13594,7 @@ /* Create a new auxiliary function */
int (*xCreateFunction)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_extension_function xFunction, void (*xDestroy)(void*) );@@ -13285,7 +13705,7 @@ ** The code generator for compound SELECT statements does one
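/* [Editor's illustrative sketch, not part of the upstream diff] The
** documented way to obtain the fts5_api pointer needed by the registration
** methods above, using sqlite3_bind_pointer() with the "fts5_api_ptr" type
** string. Function name is hypothetical.
*/
static fts5_api *fts5ApiFromDb(sqlite3 *db){
  fts5_api *pApi = 0;
  sqlite3_stmt *pStmt = 0;
  if( SQLITE_OK==sqlite3_prepare_v2(db, "SELECT fts5(?1)", -1, &pStmt, 0) ){
    sqlite3_bind_pointer(pStmt, 1, (void*)&pApi, "fts5_api_ptr", 0);
    sqlite3_step(pStmt);
  }
  sqlite3_finalize(pStmt);
  return pApi;  /* NULL if FTS5 is not available in this build */
}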
** level of recursion for each term. A stack overflow can result ** if the number of terms is too large. In practice, most SQL ** never has more than 3 or 4 terms. Use a value of 0 to disable -** any limit on the number of terms in a compount SELECT. +** any limit on the number of terms in a compound SELECT. */ #ifndef SQLITE_MAX_COMPOUND_SELECT # define SQLITE_MAX_COMPOUND_SELECT 500@@ -13435,8 +13855,8 @@ #pragma warn -spa /* Suspicious pointer arithmetic */
#endif /* -** WAL mode depends on atomic aligned 32-bit loads and stores in a few -** places. The following macros try to make this explicit. +** A few places in the code require atomic load/store of aligned +** integer values. */ #ifndef __has_extension # define __has_extension(x) 0 /* compatibility with non-clang compilers */@@ -13492,15 +13912,22 @@ # define SQLITE_PTR_TO_INT(X) ((int)(X))
#endif /* -** A macro to hint to the compiler that a function should not be +** Macros to hint to the compiler that a function should or should not be ** inlined. */ #if defined(__GNUC__) # define SQLITE_NOINLINE __attribute__((noinline)) +# define SQLITE_INLINE __attribute__((always_inline)) inline #elif defined(_MSC_VER) && _MSC_VER>=1310 # define SQLITE_NOINLINE __declspec(noinline) +# define SQLITE_INLINE __forceinline #else # define SQLITE_NOINLINE +# define SQLITE_INLINE +#endif +#if defined(SQLITE_COVERAGE_TEST) || defined(__STRICT_ANSI__) +# undef SQLITE_INLINE +# define SQLITE_INLINE #endif /*@@ -13523,6 +13950,16 @@ # endif
#endif /* +** Enable SQLITE_USE_SEH by default on MSVC builds. Only omit +** SEH support if the -DSQLITE_OMIT_SEH option is given. +*/ +#if defined(_MSC_VER) && !defined(SQLITE_OMIT_SEH) +# define SQLITE_USE_SEH 1 +#else +# undef SQLITE_USE_SEH +#endif + +/* ** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2. ** 0 means mutexes are permanently disable and the library is never ** threadsafe. 1 means the library is serialized which is the highest@@ -14318,15 +14755,9 @@ #define SQLITE_MAX_U32 ((((u64)1)<<32)-1)
/* ** The datatype used to store estimates of the number of rows in a -** table or index. This is an unsigned integer type. For 99.9% of -** the world, a 32-bit integer is sufficient. But a 64-bit integer -** can be used at compile-time if desired. +** table or index. */ -#ifdef SQLITE_64BIT_STATS - typedef u64 tRowcnt; /* 64-bit only if requested at compile-time */ -#else - typedef u32 tRowcnt; /* 32-bit is the default */ -#endif +typedef u64 tRowcnt; /* ** Estimated quantities used for query planning are stored as 16-bit@@ -14387,8 +14818,31 @@ ** In other words, S is a buffer and E is a pointer to the first byte after
** the end of buffer S. This macro returns true if P points to something
** contained within the buffer S.
*/
-#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E)))
+#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E)))
+/*
+** P is one byte past the end of a large buffer. Return true if a span of bytes
+** between S..E crosses the end of that buffer. In other words, return true
+** if the sub-buffer S..E-1 overflows the buffer whose last byte is P-1.
+**
+** S is the start of the span. E is one byte past the end of the span.
+**
+**                       P
+**     |----------------|              FALSE
+**             |-------|
+**             S       E
+**
+**                       P
+**     |----------------|
+**                   |-------|         TRUE
+**                   S       E
+**
+**                       P
+**     |----------------|
+**                        |-------|    FALSE
+**                        S       E
+*/
+#define SQLITE_OVERFLOW(P,S,E) (((uptr)(S)<(uptr)(P))&&((uptr)(E)>(uptr)(P)))
/*
** Macros to determine whether the machine is big or little endian,
@@ -14398,16 +14852,33 @@ ** For best performance, an attempt is made to guess at the byte-order
** using C-preprocessor macros. If that is unsuccessful, or if ** -DSQLITE_BYTEORDER=0 is set, then byte-order is determined ** at run-time. +** +** If you are building SQLite on some obscure platform for which the +** following ifdef magic does not work, you can always include either: +** +** -DSQLITE_BYTEORDER=1234 +** +** or +** +** -DSQLITE_BYTEORDER=4321 +** +** to cause the build to work for little-endian or big-endian processors, +** respectively. */ -#ifndef SQLITE_BYTEORDER -# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \ +#ifndef SQLITE_BYTEORDER /* Replicate changes at tag-20230904a */ +# if defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__ +# define SQLITE_BYTEORDER 4321 +# elif defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__ +# define SQLITE_BYTEORDER 1234 +# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__==1 +# define SQLITE_BYTEORDER 4321 +# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \ defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64) -# define SQLITE_BYTEORDER 1234 -# elif defined(sparc) || defined(__ppc__) || \ - defined(__ARMEB__) || defined(__AARCH64EB__) -# define SQLITE_BYTEORDER 4321 +# define SQLITE_BYTEORDER 1234 +# elif defined(sparc) || defined(__ARMEB__) || defined(__AARCH64EB__) +# define SQLITE_BYTEORDER 4321 # else # define SQLITE_BYTEORDER 0 # endif@@ -14472,9 +14943,9 @@ ** underlying malloc() implementation might return us 4-byte aligned
** pointers. In that case, only verify 4-byte alignment. */ #ifdef SQLITE_4_BYTE_ALIGNED_MALLOC -# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&3)==0) +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&3)==0) #else -# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0) +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&7)==0) #endif /*@@ -14528,15 +14999,38 @@ #if defined(SQLITE_DEBUG) \
&& (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_SELECTTRACE) \ || defined(SQLITE_ENABLE_TREETRACE)) # define TREETRACE_ENABLED 1 -# define SELECTTRACE(K,P,S,X) \ +# define TREETRACE(K,P,S,X) \ if(sqlite3TreeTrace&(K)) \ sqlite3DebugPrintf("%u/%d/%p: ",(S)->selId,(P)->addrExplain,(S)),\ sqlite3DebugPrintf X #else -# define SELECTTRACE(K,P,S,X) +# define TREETRACE(K,P,S,X) # define TREETRACE_ENABLED 0 #endif +/* TREETRACE flag meanings: +** +** 0x00000001 Beginning and end of SELECT processing +** 0x00000002 WHERE clause processing +** 0x00000004 Query flattener +** 0x00000008 Result-set wildcard expansion +** 0x00000010 Query name resolution +** 0x00000020 Aggregate analysis +** 0x00000040 Window functions +** 0x00000080 Generated column names +** 0x00000100 Move HAVING terms into WHERE +** 0x00000200 Count-of-view optimization +** 0x00000400 Compound SELECT processing +** 0x00000800 Drop superfluous ORDER BY +** 0x00001000 LEFT JOIN simplifies to JOIN +** 0x00002000 Constant propagation +** 0x00004000 Push-down optimization +** 0x00008000 After all FROM-clause analysis +** 0x00010000 Beginning of DELETE/INSERT/UPDATE processing +** 0x00020000 Transform DISTINCT into GROUP BY +** 0x00040000 SELECT tree dump after all code has been generated +*/ + /* ** Macros for "wheretrace" */@@ -14549,6 +15043,36 @@ #else
# define WHERETRACE(K,X)
#endif
+/*
+** Bits for the sqlite3WhereTrace mask:
+**
+** (---any--)  Top-level block structure
+** 0x-------F  High-level debug messages
+** 0x----FFF-  More detail
+** 0xFFFF----  Low-level debug messages
+**
+** 0x00000001  Code generation
+** 0x00000002  Solver
+** 0x00000004  Solver costs
+** 0x00000008  WhereLoop inserts
+**
+** 0x00000010  Display sqlite3_index_info xBestIndex calls
+** 0x00000020  Range and equality scan metrics
+** 0x00000040  IN operator decisions
+** 0x00000080  WhereLoop cost adjustments
+** 0x00000100
+** 0x00000200  Covering index decisions
+** 0x00000400  OR optimization
+** 0x00000800  Index scanner
+** 0x00001000  More details associated with code generation
+** 0x00002000
+** 0x00004000  Show all WHERE terms at key points
+** 0x00008000  Show the full SELECT statement at key places
+**
+** 0x00010000  Show more detail when printing WHERE terms
+** 0x00020000  Show WHERE terms returned from whereScanNext()
+*/
+
/*
** An instance of the following structure is used to store the busy-handler
@@ -14569,7 +15093,7 @@
/* ** Name of table that holds the database schema. ** -** The PREFERRED names are used whereever possible. But LEGACY is also +** The PREFERRED names are used wherever possible. But LEGACY is also ** used for backwards compatibility. ** ** 1. Queries can use either the PREFERRED or the LEGACY names@@ -14678,11 +15202,13 @@ typedef struct Column Column;
typedef struct Cte Cte; typedef struct CteUse CteUse; typedef struct Db Db; +typedef struct DbClientData DbClientData; typedef struct DbFixer DbFixer; typedef struct Schema Schema; typedef struct Expr Expr; typedef struct ExprList ExprList; typedef struct FKey FKey; +typedef struct FpDecode FpDecode; typedef struct FuncDestructor FuncDestructor; typedef struct FuncDef FuncDef; typedef struct FuncDefHash FuncDefHash;@@ -14701,6 +15227,7 @@ typedef struct Parse Parse;
typedef struct ParseCleanup ParseCleanup; typedef struct PreUpdate PreUpdate; typedef struct PrintfArguments PrintfArguments; +typedef struct RCStr RCStr; typedef struct RenameToken RenameToken; typedef struct Returning Returning; typedef struct RowSet RowSet;@@ -15338,6 +15865,10 @@ # define disable_simulated_io_errors()
# define enable_simulated_io_errors() #endif +#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL) +SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager*); +#endif + #endif /* SQLITE_PAGER_H */ /************** End of pager.h ***********************************************/@@ -15529,7 +16060,7 @@ ** implementations with limits on what needs to be prefetched and thereby
** reduce network bandwidth. ** ** Note that BTREE_HINT_FLAGS with BTREE_BULKLOAD is the only hint used by -** standard SQLite. The other hints are provided for extentions that use +** standard SQLite. The other hints are provided for extensions that use ** the SQLite parser and code generator but substitute their own storage ** engine. */@@ -15667,15 +16198,21 @@ SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int flags);
SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor*); SQLITE_PRIVATE void sqlite3BtreeCursorPin(BtCursor*); SQLITE_PRIVATE void sqlite3BtreeCursorUnpin(BtCursor*); -#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC SQLITE_PRIVATE i64 sqlite3BtreeOffset(BtCursor*); -#endif SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor*, u32 offset, u32 amt, void*); SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt); SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*); SQLITE_PRIVATE sqlite3_int64 sqlite3BtreeMaxRecordSize(BtCursor*); -SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(sqlite3*,Btree*,Pgno*aRoot,int nRoot,int,int*); +SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( + sqlite3 *db, /* Database connection that is running the check */ + Btree *p, /* The btree to be checked */ + Pgno *aRoot, /* An array of root pages numbers for individual trees */ + int nRoot, /* Number of entries in aRoot[] */ + int mxErr, /* Stop reporting errors after this many */ + int *pnErr, /* OUT: Write number of errors seen to this variable */ + char **pzOut /* OUT: Write the error message string here */ +); SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*); SQLITE_PRIVATE i64 sqlite3BtreeRowCountEst(BtCursor*);@@ -15713,6 +16250,8 @@ SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *);
#endif SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor*, BtCursor*, i64); + +SQLITE_PRIVATE void sqlite3BtreeClearCache(Btree*); /* ** If we are not using shared cache, then there is no need to@@ -15830,13 +16369,13 @@ } p4;
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS char *zComment; /* Comment to improve readability */ #endif -#ifdef VDBE_PROFILE - u32 cnt; /* Number of times this instruction was executed */ - u64 cycles; /* Total time spent executing this instruction */ -#endif #ifdef SQLITE_VDBE_COVERAGE u32 iSrcLine; /* Source-code line that generated this opcode ** with flags in the upper 8 bits */ +#endif +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE) + u64 nExec; + u64 nCycle; #endif }; typedef struct VdbeOp VdbeOp;@@ -16104,19 +16643,20 @@ #define OP_VBegin 170
#define OP_VCreate 171 #define OP_VDestroy 172 #define OP_VOpen 173 -#define OP_VInitIn 174 /* synopsis: r[P2]=ValueList(P1,P3) */ -#define OP_VColumn 175 /* synopsis: r[P3]=vcolumn(P2) */ -#define OP_VRename 176 -#define OP_Pagecount 177 -#define OP_MaxPgcnt 178 -#define OP_ClrSubtype 179 /* synopsis: r[P1].subtype = 0 */ -#define OP_FilterAdd 180 /* synopsis: filter(P1) += key(P3@P4) */ -#define OP_Trace 181 -#define OP_CursorHint 182 -#define OP_ReleaseReg 183 /* synopsis: release r[P1@P2] mask P3 */ -#define OP_Noop 184 -#define OP_Explain 185 -#define OP_Abortable 186 +#define OP_VCheck 174 +#define OP_VInitIn 175 /* synopsis: r[P2]=ValueList(P1,P3) */ +#define OP_VColumn 176 /* synopsis: r[P3]=vcolumn(P2) */ +#define OP_VRename 177 +#define OP_Pagecount 178 +#define OP_MaxPgcnt 179 +#define OP_ClrSubtype 180 /* synopsis: r[P1].subtype = 0 */ +#define OP_FilterAdd 181 /* synopsis: filter(P1) += key(P3@P4) */ +#define OP_Trace 182 +#define OP_CursorHint 183 +#define OP_ReleaseReg 184 /* synopsis: release r[P1@P2] mask P3 */ +#define OP_Noop 185 +#define OP_Explain 186 +#define OP_Abortable 187 /* Properties such as "out2" or "jump" that are specified in ** comments following the "case" for each opcode in the vdbe.c@@ -16128,31 +16668,32 @@ #define OPFLG_IN2 0x04 /* in2: P2 is an input */
#define OPFLG_IN3 0x08 /* in3: P3 is an input */ #define OPFLG_OUT2 0x10 /* out2: P2 is an output */ #define OPFLG_OUT3 0x20 /* out3: P3 is an output */ +#define OPFLG_NCYCLE 0x40 /* ncycle:Cycles count against P1 */ #define OPFLG_INITIALIZER {\ -/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x01, 0x00,\ +/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x41, 0x00,\ /* 8 */ 0x01, 0x01, 0x01, 0x01, 0x03, 0x03, 0x01, 0x01,\ -/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x09, 0x09, 0x09,\ -/* 24 */ 0x09, 0x01, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,\ -/* 32 */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,\ -/* 40 */ 0x01, 0x01, 0x01, 0x26, 0x26, 0x01, 0x23, 0x0b,\ +/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x49, 0x49, 0x49,\ +/* 24 */ 0x49, 0x01, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49,\ +/* 32 */ 0x41, 0x01, 0x41, 0x41, 0x41, 0x01, 0x41, 0x41,\ +/* 40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x23, 0x0b,\ /* 48 */ 0x01, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ -/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x01,\ +/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x41,\ /* 64 */ 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10,\ /* 72 */ 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10, 0x00,\ /* 80 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x02, 0x02,\ -/* 88 */ 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x00, 0x00,\ -/* 96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x26, 0x26,\ +/* 88 */ 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x40, 0x00,\ +/* 96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x40, 0x26, 0x26,\ /* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\ -/* 112 */ 0x00, 0x00, 0x12, 0x00, 0x00, 0x10, 0x00, 0x00,\ -/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10,\ -/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,\ -/* 136 */ 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x10, 0x00,\ +/* 112 */ 0x40, 0x00, 0x12, 0x40, 0x40, 0x10, 0x40, 0x00,\ +/* 120 */ 0x00, 0x00, 0x40, 0x00, 0x40, 0x40, 0x10, 0x10,\ +/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x50,\ +/* 136 */ 0x00, 0x40, 0x04, 0x04, 0x00, 0x40, 0x50, 0x40,\ /* 144 */ 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\ /* 152 */ 0x00, 0x10, 0x00, 0x00, 0x06, 0x10, 0x00, 0x04,\ /* 160 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,\ -/* 176 */ 0x00, 0x10, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00,\ -/* 184 */ 0x00, 0x00, 0x00,} +/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x50,\ +/* 176 */ 0x40, 0x00, 0x10, 0x10, 0x02, 0x00, 0x00, 0x00,\ +/* 184 */ 0x00, 0x00, 0x00, 0x00,} /* The resolve3P2Values() routine is able to run faster if it knows ** the value of the largest JUMP opcode. The smaller the maximum@@ -16205,14 +16746,20 @@ # define sqlite3VdbeNoJumpsOutsideSubrtn(A,B,C,D)
#endif SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp,int iLineno); #ifndef SQLITE_OMIT_EXPLAIN -SQLITE_PRIVATE void sqlite3VdbeExplain(Parse*,u8,const char*,...); +SQLITE_PRIVATE int sqlite3VdbeExplain(Parse*,u8,const char*,...); SQLITE_PRIVATE void sqlite3VdbeExplainPop(Parse*); SQLITE_PRIVATE int sqlite3VdbeExplainParent(Parse*); # define ExplainQueryPlan(P) sqlite3VdbeExplain P +# ifdef SQLITE_ENABLE_STMT_SCANSTATUS +# define ExplainQueryPlan2(V,P) (V = sqlite3VdbeExplain P) +# else +# define ExplainQueryPlan2(V,P) ExplainQueryPlan(P) +# endif # define ExplainQueryPlanPop(P) sqlite3VdbeExplainPop(P) # define ExplainQueryPlanParent(P) sqlite3VdbeExplainParent(P) #else # define ExplainQueryPlan(P) +# define ExplainQueryPlan2(V,P) # define ExplainQueryPlanPop(P) # define ExplainQueryPlanParent(P) 0 # define sqlite3ExplainBreakpoint(A,B) /*no-op*/@@ -16321,7 +16868,7 @@ /*
** The VdbeCoverage macros are used to set a coverage testing point
** for VDBE branch instructions. The coverage testing points are line
** numbers in the sqlite3.c source file. VDBE branch coverage testing
-** only works with an amalagmation build. That's ok since a VDBE branch
+** only works with an amalgamation build. That's ok since a VDBE branch
** coverage build is designed for testing the test suite only. No application
** should ever ship with VDBE branch coverage measuring turned on.
**
@@ -16339,7 +16886,7 @@ ** // taken on the first two ways. The
** // NULL option is not possible ** ** VdbeCoverageEqNe(v) // Previous OP_Jump is only interested -** // in distingishing equal and not-equal. +** // in distinguishing equal and not-equal. ** ** Every VDBE branch operation must be tagged with one of the macros above. ** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and@@ -16349,7 +16896,7 @@ **
** During testing, the test application will invoke ** sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE,...) to set a callback ** routine that is invoked as each bytecode branch is taken. The callback -** contains the sqlite3.c source line number ov the VdbeCoverage macro and +** contains the sqlite3.c source line number of the VdbeCoverage macro and ** flags to indicate whether or not the branch was taken. The test application ** is responsible for keeping track of this and reporting byte-code branches ** that are never taken.@@ -16385,14 +16932,22 @@ #endif
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const char*); +SQLITE_PRIVATE void sqlite3VdbeScanStatusRange(Vdbe*, int, int, int); +SQLITE_PRIVATE void sqlite3VdbeScanStatusCounters(Vdbe*, int, int, int); #else -# define sqlite3VdbeScanStatus(a,b,c,d,e) +# define sqlite3VdbeScanStatus(a,b,c,d,e,f) +# define sqlite3VdbeScanStatusRange(a,b,c,d) +# define sqlite3VdbeScanStatusCounters(a,b,c,d) #endif #if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, VdbeOp*); #endif +#if defined(SQLITE_ENABLE_CURSOR_HINTS) && defined(SQLITE_DEBUG) +SQLITE_PRIVATE int sqlite3CursorRangeHintExprCheck(Walker *pWalker, Expr *pExpr); +#endif + #endif /* SQLITE_VDBE_H */ /************** End of vdbe.h ************************************************/@@ -16441,7 +16996,7 @@ ** Elements above, except pCache, are public. All that follow are
** private to pcache.c and should not be accessed by other modules. ** pCache is grouped with the public elements for efficiency. */ - i16 nRef; /* Number of users of this page */ + i64 nRef; /* Number of users of this page */ PgHdr *pDirtyNext; /* Next element in list of dirty pages */ PgHdr *pDirtyPrev; /* Previous element in list of dirty pages */ /* NB: pDirtyNext and pDirtyPrev are undefined if the@@ -16522,12 +17077,12 @@ /* Discard the contents of the cache */
SQLITE_PRIVATE void sqlite3PcacheClear(PCache*); /* Return the total number of outstanding page references */ -SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache*); +SQLITE_PRIVATE i64 sqlite3PcacheRefCount(PCache*); /* Increment the reference count of an existing page */ SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr*); -SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr*); +SQLITE_PRIVATE i64 sqlite3PcachePageRefcount(PgHdr*); /* Return the total number of pages stored in the cache */ SQLITE_PRIVATE int sqlite3PcachePagecount(PCache*);@@ -16680,7 +17235,7 @@
/* ** Default synchronous levels. ** -** Note that (for historcal reasons) the PAGER_SYNCHRONOUS_* macros differ +** Note that (for historical reasons) the PAGER_SYNCHRONOUS_* macros differ ** from the SQLITE_DEFAULT_SYNCHRONOUS value by 1. ** ** PAGER_SYNCHRONOUS DEFAULT_SYNCHRONOUS@@ -16719,7 +17274,7 @@ /*
** An instance of the following structure stores a database schema. ** ** Most Schema objects are associated with a Btree. The exception is -** the Schema for the TEMP databaes (sqlite3.aDb[1]) which is free-standing. +** the Schema for the TEMP database (sqlite3.aDb[1]) which is free-standing. ** In shared cache mode, a single Schema object can be shared by multiple ** Btrees that refer to the same underlying BtShared object. **@@ -16830,7 +17385,7 @@ u32 anStat[3]; /* 0: hits. 1: size misses. 2: full misses */
LookasideSlot *pInit; /* List of buffers not previously used */ LookasideSlot *pFree; /* List of available buffers */ #ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE - LookasideSlot *pSmallInit; /* List of small buffers not prediously used */ + LookasideSlot *pSmallInit; /* List of small buffers not previously used */ LookasideSlot *pSmallFree; /* List of available small buffers */ void *pMiddle; /* First byte past end of full-size buffers and ** the first byte of LOOKASIDE_SMALL buffers */@@ -16847,7 +17402,7 @@ #define DisableLookaside db->lookaside.bDisable++;db->lookaside.sz=0
#define EnableLookaside db->lookaside.bDisable--;\ db->lookaside.sz=db->lookaside.bDisable?0:db->lookaside.szTrue -/* Size of the smaller allocations in two-size lookside */ +/* Size of the smaller allocations in two-size lookaside */ #ifdef SQLITE_OMIT_TWOSIZE_LOOKASIDE # define LOOKASIDE_SMALL 0 #else@@ -17047,6 +17602,7 @@ int nStatement; /* Number of nested statement-transactions */
i64 nDeferredCons; /* Net deferred constraints this transaction. */ i64 nDeferredImmCons; /* Net deferred immediate constraints */ int *pnBytesFreed; /* If not NULL, increment this in DbFree() */ + DbClientData *pDbData; /* sqlite3_set_clientdata() content */ #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY /* The following variables are all protected by the STATIC_MAIN ** mutex, not by sqlite3.mutex. They are used by code in notify.c.@@ -17102,7 +17658,7 @@ ** vtabs in the schema definition */
#define SQLITE_NullCallback 0x00000100 /* Invoke the callback once if the */ /* result set is empty */ #define SQLITE_IgnoreChecks 0x00000200 /* Do not enforce check constraints */ -#define SQLITE_ReadUncommit 0x00000400 /* READ UNCOMMITTED in shared-cache */ +#define SQLITE_StmtScanStatus 0x00000400 /* Enable stmt_scanstats() counters */ #define SQLITE_NoCkptOnClose 0x00000800 /* No checkpoint on close()/DETACH */ #define SQLITE_ReverseOrder 0x00001000 /* Reverse unordered SELECTs */ #define SQLITE_RecTriggers 0x00002000 /* Enable recursive triggers */@@ -17128,6 +17684,8 @@ #define SQLITE_CountRows HI(0x00001) /* Count rows changed by INSERT, */
/* DELETE, or UPDATE and return */ /* the count using a callback. */ #define SQLITE_CorruptRdOnly HI(0x00002) /* Prohibit writes due to error */ +#define SQLITE_ReadUncommit HI(0x00004) /* READ UNCOMMITTED in shared-cache */ +#define SQLITE_FkNoAction HI(0x00008) /* Treat all FK as NO ACTION */ /* Flags used only if debugging */ #ifdef SQLITE_DEBUG@@ -17183,6 +17741,9 @@ #define SQLITE_ReleaseReg 0x00400000 /* Use OP_ReleaseReg for testing */
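/* [Editor's illustrative sketch, not part of the upstream diff] The
** SQLITE_StmtScanStatus flag introduced above mirrors a per-connection
** setting toggled from the public API. This sketch assumes the
** SQLITE_DBCONFIG_STMT_SCANSTATUS verb from matching releases is
** available in this build.
*/
static void enableScanStatusCounters(sqlite3 *db){
  int bOld = 0;
  sqlite3_db_config(db, SQLITE_DBCONFIG_STMT_SCANSTATUS, 1, &bOld);
  /* bOld receives the previous setting */
}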
#define SQLITE_FlttnUnionAll 0x00800000 /* Disable the UNION ALL flattener */ /* TH3 expects this value ^^^^^^^^^^ See flatten04.test */ #define SQLITE_IndexedExpr 0x01000000 /* Pull exprs from index when able */ +#define SQLITE_Coroutines 0x02000000 /* Co-routines for subqueries */ +#define SQLITE_NullUnusedCols 0x04000000 /* NULL unused columns in subqueries */ +#define SQLITE_OnePass 0x08000000 /* Single-pass DELETE and UPDATE */ #define SQLITE_AllOpts 0xffffffff /* All optimizations */ /*@@ -17265,10 +17826,17 @@ ** SQLITE_FUNC_MINMAX == NC_MinMaxAgg == SF_MinMaxAgg
** SQLITE_FUNC_ANYORDER == NC_OrderAgg == SF_OrderByReqd ** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG ** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG +** SQLITE_FUNC_BYTELEN == OPFLAG_BYTELENARG ** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API ** SQLITE_FUNC_DIRECT == SQLITE_DIRECTONLY from the API -** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS +** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS -- opposite meanings!!! ** SQLITE_FUNC_ENCMASK depends on SQLITE_UTF* macros in the API +** +** Note that even though SQLITE_FUNC_UNSAFE and SQLITE_INNOCUOUS have the +** same bit value, their meanings are inverted. SQLITE_FUNC_UNSAFE is +** used internally and if set means that the function has side effects. +** SQLITE_INNOCUOUS is used by application code and means "not unsafe". +** See multiple instances of tag-20230109-1. */ #define SQLITE_FUNC_ENCMASK 0x0003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */ #define SQLITE_FUNC_LIKE 0x0004 /* Candidate for the LIKE optimization */@@ -17277,6 +17845,7 @@ #define SQLITE_FUNC_EPHEM 0x0010 /* Ephemeral. Delete with VDBE */
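/* [Editor's illustrative sketch, not part of the upstream diff] The
** inverted-meaning note above concerns the public SQLITE_INNOCUOUS flag;
** registering a deterministic, innocuous scalar function looks like this
** (the function and its SQL name are hypothetical):
*/
static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
}
static int registerHalf(sqlite3 *db){
  return sqlite3_create_function_v2(db, "half", 1,
      SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS,
      0, halfFunc, 0, 0, 0);
}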
#define SQLITE_FUNC_NEEDCOLL 0x0020 /* sqlite3GetFuncCollSeq() might be called*/ #define SQLITE_FUNC_LENGTH 0x0040 /* Built-in length() function */ #define SQLITE_FUNC_TYPEOF 0x0080 /* Built-in typeof() function */ +#define SQLITE_FUNC_BYTELEN 0x00c0 /* Built-in octet_length() function */ #define SQLITE_FUNC_COUNT 0x0100 /* Built-in count(*) aggregate */ /* 0x0200 -- available for reuse */ #define SQLITE_FUNC_UNLIKELY 0x0400 /* Built-in unlikely() function */@@ -17285,14 +17854,15 @@ #define SQLITE_FUNC_MINMAX 0x1000 /* True for min() and max() aggregates */
#define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". Value constant during a ** single query - might change over time */ #define SQLITE_FUNC_TEST 0x4000 /* Built-in testing functions */ -/* 0x8000 -- available for reuse */ +#define SQLITE_FUNC_RUNONLY 0x8000 /* Cannot be used by valueFromFunction */ #define SQLITE_FUNC_WINDOW 0x00010000 /* Built-in window-only function */ #define SQLITE_FUNC_INTERNAL 0x00040000 /* For use by NestedParse() only */ #define SQLITE_FUNC_DIRECT 0x00080000 /* Not for use in TRIGGERs or VIEWs */ -#define SQLITE_FUNC_SUBTYPE 0x00100000 /* Result likely to have sub-type */ +/* SQLITE_SUBTYPE 0x00100000 // Consumer of subtypes */ #define SQLITE_FUNC_UNSAFE 0x00200000 /* Function has side effects */ #define SQLITE_FUNC_INLINE 0x00400000 /* Functions implemented in-line */ #define SQLITE_FUNC_BUILTIN 0x00800000 /* This is a built-in function */ +/* SQLITE_RESULT_SUBTYPE 0x01000000 // Generator of subtypes */ #define SQLITE_FUNC_ANYORDER 0x08000000 /* count/min/max aggregate */ /* Identifier numbers for each in-line function */@@ -17384,9 +17954,10 @@ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
#define MFUNCTION(zName, nArg, xPtr, xFunc) \ {nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ xPtr, 0, xFunc, 0, 0, 0, #zName, {0} } -#define JFUNCTION(zName, nArg, iArg, xFunc) \ - {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS|\ - SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ +#define JFUNCTION(zName, nArg, bUseCache, bWS, bRS, iArg, xFunc) \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_FUNC_CONSTANT|\ + SQLITE_UTF8|((bUseCache)*SQLITE_FUNC_RUNONLY)|\ + ((bRS)*SQLITE_SUBTYPE)|((bWS)*SQLITE_RESULT_SUBTYPE), \ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } #define INLINE_FUNC(zName, nArg, iArg, mFlags) \ {nArg, SQLITE_FUNC_BUILTIN|\@@ -17577,6 +18148,7 @@ #define SQLITE_AFF_TEXT 0x42 /* 'B' */
#define SQLITE_AFF_NUMERIC 0x43 /* 'C' */ #define SQLITE_AFF_INTEGER 0x44 /* 'D' */ #define SQLITE_AFF_REAL 0x45 /* 'E' */ +#define SQLITE_AFF_FLEXNUM 0x46 /* 'F' */ #define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC)@@ -17647,6 +18219,7 @@ Module *pMod; /* Pointer to module implementation */
sqlite3_vtab *pVtab; /* Pointer to vtab instance */ int nRef; /* Number of pointers to this structure */ u8 bConstraint; /* True if constraints are supported */ + u8 bAllSchemas; /* True if might use any attached schema */ u8 eVtabRisk; /* Riskiness of allowing hacker access */ int iSavepoint; /* Depth of the SAVEPOINT stack */ VTable *pNext; /* Next in linked list (see above) */@@ -17854,7 +18427,7 @@ ** referenced table row is propagated into the row that holds the
** foreign key. ** ** The OE_Default value is a place holder that means to use whatever -** conflict resolution algorthm is required from context. +** conflict resolution algorithm is required from context. ** ** The following symbolic values are used to record which type ** of conflict resolution action to take.@@ -18027,6 +18600,7 @@ unsigned bHasExpr:1; /* Index contains an expression, either a literal
** expression, or a reference to a VIRTUAL column */ #ifdef SQLITE_ENABLE_STAT4 int nSample; /* Number of elements in aSample[] */ + int mxSample; /* Number of slots allocated to aSample[] */ int nSampleCol; /* Size of IndexSample.anEq[] and so on */ tRowcnt *aAvgEq; /* Average nEq values for keys not in aSample */ IndexSample *aSample; /* Samples of the left-most key */@@ -18108,16 +18682,15 @@ u8 directMode; /* Direct rendering mode means take data directly
** from source tables rather than from accumulators */ u8 useSortingIdx; /* In direct mode, reference the sorting index rather ** than the source table */ + u16 nSortingColumn; /* Number of columns in the sorting index */ int sortingIdx; /* Cursor number of the sorting index */ int sortingIdxPTab; /* Cursor number of pseudo-table */ - int nSortingColumn; /* Number of columns in the sorting index */ - int mnReg, mxReg; /* Range of registers allocated for aCol and aFunc */ + int iFirstReg; /* First register in range for aCol[] and aFunc[] */ ExprList *pGroupBy; /* The group by clause */ struct AggInfo_col { /* For each column used in source tables */ Table *pTab; /* Source table */ Expr *pCExpr; /* The original expression */ int iTable; /* Cursor number of the source table */ - int iMem; /* Memory location that acts as accumulator */ i16 iColumn; /* Column number within the source table */ i16 iSorterColumn; /* Column number in the sorting index */ } *aCol;@@ -18128,15 +18701,31 @@ ** aggregate functions */
struct AggInfo_func { /* For each aggregate function */ Expr *pFExpr; /* Expression encoding the function */ FuncDef *pFunc; /* The aggregate function implementation */ - int iMem; /* Memory location that acts as accumulator */ int iDistinct; /* Ephemeral table used to enforce DISTINCT */ int iDistAddr; /* Address of OP_OpenEphemeral */ + int iOBTab; /* Ephemeral table to implement ORDER BY */ + u8 bOBPayload; /* iOBTab has payload columns separate from key */ + u8 bOBUnique; /* Enforce uniqueness on iOBTab keys */ } *aFunc; int nFunc; /* Number of entries in aFunc[] */ u32 selId; /* Select to which this AggInfo belongs */ +#ifdef SQLITE_DEBUG + Select *pSelect; /* SELECT statement that this AggInfo supports */ +#endif }; /* +** Macros to compute aCol[] and aFunc[] register numbers. +** +** These macros should not be used prior to the call to +** assignAggregateRegisters() that computes the value of pAggInfo->iFirstReg. +** The assert()s that are part of this macro verify that constraint. +*/ +#define AggInfoColumnReg(A,I) (assert((A)->iFirstReg),(A)->iFirstReg+(I)) +#define AggInfoFuncReg(A,I) \ + (assert((A)->iFirstReg),(A)->iFirstReg+(A)->nColumn+(I)) + +/* ** The datatype ynVar is a signed integer, either 16-bit or 32-bit. ** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater ** than 32767 we have to make it 32-bit. 16-bit is preferred because@@ -18255,7 +18844,7 @@ int iTable; /* TK_COLUMN: cursor number of table holding column
** TK_REGISTER: register number ** TK_TRIGGER: 1 -> new, 0 -> old ** EP_Unlikely: 134217728 times likelihood - ** TK_IN: ephemerial table holding RHS + ** TK_IN: ephemeral table holding RHS ** TK_SELECT_COLUMN: Number of columns on the LHS ** TK_SELECT: 1st register of result vector */ ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid.@@ -18301,7 +18890,7 @@ #define EP_Skip 0x002000 /* Operator does not contribute to affinity */
#define EP_Reduced 0x004000 /* Expr struct EXPR_REDUCEDSIZE bytes only */ #define EP_Win 0x008000 /* Contains window functions */ #define EP_TokenOnly 0x010000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */ - /* 0x020000 // Available for reuse */ +#define EP_FullSize 0x020000 /* Expr structure must remain full sized */ #define EP_IfNullRow 0x040000 /* The TK_IF_NULL_ROW opcode */ #define EP_Unlikely 0x080000 /* unlikely() or likelihood() function */ #define EP_ConstFunc 0x100000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */@@ -18331,12 +18920,15 @@ #define ExprSetProperty(E,P) (E)->flags|=(P)
#define ExprClearProperty(E,P) (E)->flags&=~(P) #define ExprAlwaysTrue(E) (((E)->flags&(EP_OuterON|EP_IsTrue))==EP_IsTrue) #define ExprAlwaysFalse(E) (((E)->flags&(EP_OuterON|EP_IsFalse))==EP_IsFalse) +#define ExprIsFullSize(E) (((E)->flags&(EP_Reduced|EP_TokenOnly))==0) /* Macros used to ensure that the correct members of unions are accessed ** in Expr. */ #define ExprUseUToken(E) (((E)->flags&EP_IntValue)==0) #define ExprUseUValue(E) (((E)->flags&EP_IntValue)!=0) +#define ExprUseWOfst(E) (((E)->flags&(EP_InnerON|EP_OuterON))==0) +#define ExprUseWJoin(E) (((E)->flags&(EP_InnerON|EP_OuterON))!=0) #define ExprUseXList(E) (((E)->flags&EP_xIsSelect)==0) #define ExprUseXSelect(E) (((E)->flags&EP_xIsSelect)!=0) #define ExprUseYTab(E) (((E)->flags&(EP_WinFunc|EP_Subrtn))==0)@@ -18446,6 +19038,7 @@ */
#define ENAME_NAME 0 /* The AS clause of a result set */ #define ENAME_SPAN 1 /* Complete text of the result set expression */ #define ENAME_TAB 2 /* "DB.TABLE.NAME" for the result set */ +#define ENAME_ROWID 3 /* "DB.TABLE._rowid_" for * expansion of rowid */ /* ** An instance of this structure can hold a simple list of identifiers,@@ -18525,7 +19118,7 @@ unsigned isCte :1; /* This is a CTE */
unsigned notCte :1; /* This item may not match a CTE */ unsigned isUsing :1; /* u3.pUsing is valid */ unsigned isOn :1; /* u3.pOn was once valid and non-NULL */ - unsigned isSynthUsing :1; /* u3.pUsing is synthensized from NATURAL */ + unsigned isSynthUsing :1; /* u3.pUsing is synthesized from NATURAL */ unsigned isNestedFrom :1; /* pSelect is a SF_NestedFrom subquery */ } fg; int iCursor; /* The VDBE cursor number used to access this table */@@ -18666,7 +19259,7 @@ #define NC_GenCol 0x000008 /* True for a GENERATED ALWAYS AS clause */
#define NC_HasAgg 0x000010 /* One or more aggregate functions seen */ #define NC_IdxExpr 0x000020 /* True if resolving columns of CREATE INDEX */ #define NC_SelfRef 0x00002e /* Combo: PartIdx, isCheck, GenCol, and IdxExpr */ -#define NC_VarSelect 0x000040 /* A correlated subquery has been seen */ +#define NC_Subquery 0x000040 /* A subquery has been seen */ #define NC_UEList 0x000080 /* True if uNC.pEList is used */ #define NC_UAggInfo 0x000100 /* True if uNC.pAggInfo is used */ #define NC_UUpsert 0x000200 /* True if uNC.pUpsert is used */@@ -18795,6 +19388,7 @@ #define SF_PushDown 0x1000000 /* SELECT has be modified by push-down opt */
#define SF_MultiPart 0x2000000 /* Has multiple incompatible PARTITIONs */ #define SF_CopyCte 0x4000000 /* SELECT statement is a copy of a CTE */ #define SF_OrderByReqd 0x8000000 /* The ORDER BY clause may not be omitted */ +#define SF_UpdateFrom 0x10000000 /* Query originates with UPDATE FROM */ /* True if S exists and has SF_NestedFrom */ #define IsNestedFrom(S) ((S)!=0 && ((S)->selFlags&SF_NestedFrom)!=0)@@ -18903,7 +19497,7 @@ int iSDParm; /* A parameter used by the eDest disposal method */
int iSDParm2; /* A second parameter for the eDest disposal method */ int iSdst; /* Base register where results are written */ int nSdst; /* Number of registers allocated */ - char *zAffSdst; /* Affinity used for SRT_Set, SRT_Table, and similar */ + char *zAffSdst; /* Affinity used for SRT_Set */ ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */ };@@ -18962,10 +19556,10 @@ # define DbMaskNonZero(M) (sqlite3DbMaskAllZero(M)==0)
#else typedef unsigned int yDbMask; # define DbMaskTest(M,I) (((M)&(((yDbMask)1)<<(I)))!=0) -# define DbMaskZero(M) (M)=0 -# define DbMaskSet(M,I) (M)|=(((yDbMask)1)<<(I)) -# define DbMaskAllZero(M) (M)==0 -# define DbMaskNonZero(M) (M)!=0 +# define DbMaskZero(M) ((M)=0) +# define DbMaskSet(M,I) ((M)|=(((yDbMask)1)<<(I))) +# define DbMaskAllZero(M) ((M)==0) +# define DbMaskNonZero(M) ((M)!=0) #endif /*@@ -18984,6 +19578,7 @@ int iDataCur; /* The data cursor associated with the index */
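/* [Editor's note, not part of the upstream diff] The added parentheses
** matter for macro hygiene. With the old expansion, a call site such as
**
**     if( !DbMaskAllZero(m) ){ ... }
**
** expanded to "!(m)==0", which C parses as "(!(m))==0", not the intended
** negation of "((m)==0)". The fully parenthesized forms make such call
** sites safe.
*/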
int iIdxCur; /* The index cursor */ int iIdxCol; /* The index column that contains value of pExpr */ u8 bMaybeNullRow; /* True if we need an OP_IfNullRow check */ + u8 aff; /* Affinity of the pExpr expression */ IndexedExpr *pIENext; /* Next in a list of all indexed expressions */ #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS const char *zIdxName; /* Name of index, used only for bytecode comments */@@ -19036,6 +19631,9 @@ u8 withinRJSubrtn; /* Nesting level for RIGHT JOIN body subroutines */
#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) u8 earlyCleanup; /* OOM inside sqlite3ParserAddCleanup() */ #endif +#ifdef SQLITE_DEBUG + u8 ifNotExists; /* Might be true if IF NOT EXISTS. Assert()s only */ +#endif int nRangeReg; /* Size of the temporary register block */ int iRangeReg; /* First register in temporary register block */ int nErr; /* Number of errors seen */@@ -19048,7 +19646,8 @@ int nLabel; /* The *negative* of the number of labels used */
int nLabelAlloc; /* Number of slots in aLabel */ int *aLabel; /* Space to hold the labels */ ExprList *pConstExpr;/* Constant expressions */ - IndexedExpr *pIdxExpr;/* List of expressions used by active indexes */ + IndexedExpr *pIdxEpr;/* List of expressions used by active indexes */ + IndexedExpr *pIdxPartExpr; /* Exprs constrained by index WHERE clauses */ Token constraintName;/* Name of the constraint currently being parsed */ yDbMask writeMask; /* Start a write transaction on these databases */ yDbMask cookieMask; /* Bitmask of schema verified databases */@@ -19056,6 +19655,9 @@ int regRowid; /* Register holding rowid of CREATE TABLE entry */
int regRoot; /* Register holding root page number for new objects */ int nMaxArg; /* Max args passed to user function by sub-program */ int nSelect; /* Number of SELECT stmts. Counter for Select.selId */ +#ifndef SQLITE_OMIT_PROGRESS_CALLBACK + u32 nProgressSteps; /* xProgress steps taken during sqlite3_prepare() */ +#endif #ifndef SQLITE_OMIT_SHARED_CACHE int nTableLock; /* Number of locks in aTableLock */ TableLock *aTableLock; /* Required table locks for shared-cache mode */@@ -19069,9 +19671,9 @@ union {
int addrCrTab; /* Address of OP_CreateBtree on CREATE TABLE */ Returning *pReturning; /* The RETURNING clause */ } u1; - u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */ u32 oldmask; /* Mask of old.* columns referenced */ u32 newmask; /* Mask of new.* columns referenced */ + LogEst nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */ u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */ u8 bReturning; /* Coding a RETURNING trigger */ u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */@@ -19195,6 +19797,7 @@ #define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */
#define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */ #define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */ #define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */ +#define OPFLAG_BYTELENARG 0xc0 /* OP_Column only for octet_length() */ #define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */ #define OPFLAG_SEEKEQ 0x02 /* OP_Open** cursor uses EQ seek only */ #define OPFLAG_FORDELETE 0x08 /* OP_Open should use BTREE_FORDELETE */@@ -19316,6 +19919,7 @@ TriggerStep retTStep; /* The trigger step */
int iRetCur; /* Transient table holding RETURNING results */
int nRetCol; /* Number of columns in pReturnEL after expansion */
int iRetReg; /* Register array for holding a row of RETURNING */
+ char zName[40]; /* Name of trigger: "sqlite_returning_%p" */
};

/*@@ -19337,6 +19941,25 @@ #define SQLITE_PRINTF_MALLOCED 0x04 /* True if xText is allocated space */
#define isMalloced(X) (((X)->printfFlags & SQLITE_PRINTF_MALLOCED)!=0) +/* +** The following object is the header for an "RCStr" or "reference-counted +** string". An RCStr is passed around and used like any other char* +** that has been dynamically allocated. The important interface +** differences: +** +** 1. RCStr strings are reference counted. They are deallocated +** when the reference count reaches zero. +** +** 2. Use sqlite3RCStrUnref() to free an RCStr string rather than +** sqlite3_free() +** +** 3. Make a (read-only) copy of a read-only RCStr string using +** sqlite3RCStrRef(). +*/ +struct RCStr { + u64 nRCRef; /* Number of references */ + /* Total structure size should be a multiple of 8 bytes for alignment */ +}; /* ** A pointer to this structure is used to communicate information@@ -19363,7 +19986,7 @@
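/*
** A minimal usage sketch for the RCStr interface described above
** (rcStrFromZ() is a hypothetical helper, not part of the amalgamation).
** Because the RCStr header sits immediately before the char* handed to
** the caller, ordinary string routines work on the pointer unchanged;
** only allocation, reference, and release go through the RCStr entry
** points.
*/
static char *rcStrFromZ(const char *zSrc){
  u64 n = (u64)sqlite3Strlen30(zSrc);
  char *z = sqlite3RCStrNew(n);     /* reference count 1; n bytes plus NUL */
  if( z ) memcpy(z, zSrc, n+1);
  return z;
}
/* A second holder takes its own reference with sqlite3RCStrRef(z).  Every
** holder eventually calls sqlite3RCStrUnref(z); the buffer is freed when
** the last reference is released. */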
/* Tuning parameters are set using SQLITE_TESTCTRL_TUNE and are controlled
** on debug-builds of the CLI using ".testctrl tune ID VALUE". Tuning
** parameters are for temporary use during development, to help find
-** optimial values for parameters in the query planner. The should not
+** optimal values for parameters in the query planner. They should not
** be used on trunk check-ins. They are a temporary mechanism available
** for transient development builds only.
**@@ -19389,6 +20012,7 @@ u8 bOpenUri; /* True to interpret filenames as URIs */
u8 bUseCis; /* Use covering indices for full-scans */ u8 bSmallMalloc; /* Avoid large memory allocations if true */ u8 bExtraSchemaChecks; /* Verify type,name,tbl_name in schema */ + u8 bUseLongDouble; /* Make use of long double */ int mxStrlen; /* Maximum string length */ int neverCorrupt; /* Database is always well-formed */ int szLookaside; /* Default lookaside buffer size */@@ -19475,6 +20099,7 @@ int (*xSelectCallback)(Walker*,Select*); /* Callback for SELECTs */
void (*xSelectCallback2)(Walker*,Select*);/* Second callback for SELECTs */ int walkerDepth; /* Number of subqueries */ u16 eCode; /* A small processing code */ + u16 mWFlags; /* Use-dependent flags */ union { /* Extra data for callback */ NameContext *pNC; /* Naming context */ int n; /* A counter */@@ -19493,6 +20118,7 @@ struct Table *pTab; /* Table of generated column */
struct CoveringIndexCheck *pCovIdxCk; /* Check for covering index */ SrcItem *pSrcItem; /* A single FROM clause item */ DbFixer *pFix; /* See sqlite3FixSelect() */ + Mem *aMem; /* See sqlite3BtreeCursorHint() */ } u; };@@ -19513,6 +20139,7 @@ };
/* Forward declarations */ SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*); +SQLITE_PRIVATE int sqlite3WalkExprNN(Walker*, Expr*); SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*); SQLITE_PRIVATE int sqlite3WalkSelect(Walker*, Select*); SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker*, Select*);@@ -19593,6 +20220,16 @@ u8 eM10d; /* The MATERIALIZED flag */
}; +/* Client data associated with sqlite3_set_clientdata() and +** sqlite3_get_clientdata(). +*/ +struct DbClientData { + DbClientData *pNext; /* Next in a linked list */ + void *pData; /* The data */ + void (*xDestructor)(void*); /* Destructor. Might be NULL */ + char zName[1]; /* Name of this client data. MUST BE LAST */ +}; + #ifdef SQLITE_DEBUG /* ** An instance of the TreeView object is used for printing the content of@@ -19762,6 +20399,8 @@ # define sqlite3Isdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x04)
# define sqlite3Isxdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x08) # define sqlite3Tolower(x) (sqlite3UpperToLower[(unsigned char)(x)]) # define sqlite3Isquote(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x80) +# define sqlite3JsonId1(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x42) +# define sqlite3JsonId2(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x46) #else # define sqlite3Toupper(x) toupper((unsigned char)(x)) # define sqlite3Isspace(x) isspace((unsigned char)(x))@@ -19771,6 +20410,8 @@ # define sqlite3Isdigit(x) isdigit((unsigned char)(x))
# define sqlite3Isxdigit(x) isxdigit((unsigned char)(x)) # define sqlite3Tolower(x) tolower((unsigned char)(x)) # define sqlite3Isquote(x) ((x)=='"'||(x)=='\''||(x)=='['||(x)=='`') +# define sqlite3JsonId1(x) (sqlite3IsIdChar(x)&&(x)<'0') +# define sqlite3JsonId2(x) sqlite3IsIdChar(x) #endif SQLITE_PRIVATE int sqlite3IsIdChar(u8);@@ -19820,13 +20461,11 @@ */
#ifdef SQLITE_USE_ALLOCA # define sqlite3StackAllocRaw(D,N) alloca(N) # define sqlite3StackAllocRawNN(D,N) alloca(N) -# define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N) # define sqlite3StackFree(D,P) # define sqlite3StackFreeNN(D,P) #else # define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N) # define sqlite3StackAllocRawNN(D,N) sqlite3DbMallocRawNN(D,N) -# define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N) # define sqlite3StackFree(D,P) sqlite3DbFree(D,P) # define sqlite3StackFreeNN(D,P) sqlite3DbFreeNN(D,P) #endif@@ -19892,6 +20531,20 @@ int nUsed; /* Number of arguments used so far */
sqlite3_value **apArg; /* The argument values */ }; +/* +** An instance of this object receives the decoding of a floating point +** value into an approximate decimal representation. +*/ +struct FpDecode { + char sign; /* '+' or '-' */ + char isSpecial; /* 1: Infinity 2: NaN */ + int n; /* Significant digits in the decode */ + int iDP; /* Location of the decimal point */ + char *z; /* Start of significant digits */ + char zBuf[24]; /* Storage for significant digits */ +}; + +SQLITE_PRIVATE void sqlite3FpDecode(FpDecode*,double,int,int); SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3*,const char*, ...); SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3*,const char*, va_list); #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)@@ -19951,6 +20604,7 @@ #endif
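/*
** A minimal sketch of consuming a decode (fpDecodeToText() is
** hypothetical; the meaning of the two integer arguments is inferred from
** the printf code below, which calls sqlite3FpDecode(&s,r,iRound,mxRound)).
** The decode represents the value as 0.<digits> scaled by 10^iDP, so for
** 3.14159 one would expect sign '+', leading digits "314159", and iDP==1.
*/
static void fpDecodeToText(double r, char *zOut, int nOut){
  FpDecode s;
  sqlite3FpDecode(&s, r, 16, 16);
  if( s.isSpecial ){
    sqlite3_snprintf(nOut, zOut, s.isSpecial==2 ? "NaN" : "Inf");
  }else{
    sqlite3_snprintf(nOut, zOut, "%c0.%.*se%+03d", s.sign, s.n, s.z, s.iDP);
  }
}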
#endif SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*); +SQLITE_PRIVATE void sqlite3ProgressCheck(Parse*); SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...); SQLITE_PRIVATE int sqlite3ErrorToParser(sqlite3*,int); SQLITE_PRIVATE void sqlite3Dequote(char*);@@ -19965,6 +20619,10 @@ SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse*,int);
SQLITE_PRIVATE int sqlite3GetTempRange(Parse*,int); SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse*,int,int); SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse*); +SQLITE_PRIVATE void sqlite3TouchRegister(Parse*,int); +#if defined(SQLITE_ENABLE_STAT4) || defined(SQLITE_DEBUG) +SQLITE_PRIVATE int sqlite3FirstAvailableRegister(Parse*,int); +#endif #ifdef SQLITE_DEBUG SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse*,int,int); #endif@@ -19976,6 +20634,8 @@ SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*);
SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse*,Expr*, Expr*); SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr*); SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, const Token*, int); +SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy(Parse*,Expr*,ExprList*); +SQLITE_PRIVATE void sqlite3ExprOrderByAggregateError(Parse*,Expr*); SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*); SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*);@@ -20008,7 +20668,7 @@ SQLITE_PRIVATE const char *sqlite3ColumnColl(Column*);
SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3*,Table*); SQLITE_PRIVATE void sqlite3GenerateColumnNames(Parse *pParse, Select *pSelect); SQLITE_PRIVATE int sqlite3ColumnsFromExprList(Parse*,ExprList*,i16*,Column**); -SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation(Parse*,Table*,Select*,char); +SQLITE_PRIVATE void sqlite3SubqueryColumnTypes(Parse*,Table*,Select*,char); SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*,char); SQLITE_PRIVATE void sqlite3OpenSchemaTable(Parse *, int); SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table*);@@ -20115,7 +20775,7 @@ SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*,
Expr*,ExprList*,u32,Expr*); SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*); SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*); -SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, int); +SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, Trigger*); SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); #if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) SQLITE_PRIVATE Expr *sqlite3LimitWhere(Parse*,SrcList*,Expr*,ExprList*,Expr*,char*);@@ -20177,7 +20837,7 @@ SQLITE_PRIVATE int sqlite3ExprCompare(const Parse*,const Expr*,const Expr*, int);
SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr*,Expr*,int); SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList*,const ExprList*, int); SQLITE_PRIVATE int sqlite3ExprImpliesExpr(const Parse*,const Expr*,const Expr*, int); -SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int); +SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int,int); SQLITE_PRIVATE void sqlite3AggInfoPersistWalkerInit(Walker*,Parse*); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*);@@ -20204,7 +20864,7 @@ SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*);
SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8); SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse*, Expr*, ExprList*); SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int); -SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr*,const SrcItem*); +SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint(Expr*,const SrcList*,int); #ifdef SQLITE_ENABLE_CURSOR_HINTS SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*); #endif@@ -20212,6 +20872,7 @@ SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr*, int*);
SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*); SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char); SQLITE_PRIVATE int sqlite3IsRowid(const char*); +SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab); SQLITE_PRIVATE void sqlite3GenerateRowDelete( Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8,int); SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*, int);@@ -20326,9 +20987,10 @@ SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*);
SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*); SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*); SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*); + SQLITE_PRIVATE int sqlite3RealSameAsInt(double,sqlite3_int64); SQLITE_PRIVATE i64 sqlite3RealToI64(double); -SQLITE_PRIVATE void sqlite3Int64ToText(i64,char*); +SQLITE_PRIVATE int sqlite3Int64ToText(i64,char*); SQLITE_PRIVATE int sqlite3AtoF(const char *z, double*, int, u8); SQLITE_PRIVATE int sqlite3GetInt32(const char *, int*); SQLITE_PRIVATE int sqlite3GetUInt32(const char*, u32*);@@ -20379,6 +21041,7 @@ SQLITE_PRIVATE char sqlite3CompareAffinity(const Expr *pExpr, char aff2);
SQLITE_PRIVATE int sqlite3IndexAffinityOk(const Expr *pExpr, char idx_affinity); SQLITE_PRIVATE char sqlite3TableColumnAffinity(const Table*,int); SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr); +SQLITE_PRIVATE int sqlite3ExprDataType(const Expr *pExpr); SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8); SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*); SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...);@@ -20395,6 +21058,9 @@ #endif
#ifndef SQLITE_OMIT_DESERIALIZE SQLITE_PRIVATE int sqlite3MemdbInit(void); +SQLITE_PRIVATE int sqlite3IsMemdb(const sqlite3_vfs*); +#else +# define sqlite3IsMemdb(X) 0 #endif SQLITE_PRIVATE const char *sqlite3ErrStr(int);@@ -20426,6 +21092,7 @@ #endif
SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,u8); SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value*, u8); +SQLITE_PRIVATE int sqlite3ValueIsOfClass(const sqlite3_value*, void(*)(void*)); SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value*, u8); SQLITE_PRIVATE void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8, void(*)(void*));@@ -20477,7 +21144,8 @@ SQLITE_PRIVATE int sqlite3MatchEName(
const struct ExprList_item*, const char*, const char*, - const char* + const char*, + int* ); SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr*); SQLITE_PRIVATE u8 sqlite3StrIHash(const char*);@@ -20533,8 +21201,13 @@ SQLITE_PRIVATE void sqlite3OomClear(sqlite3*);
SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int); SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *); +SQLITE_PRIVATE char *sqlite3RCStrRef(char*); +SQLITE_PRIVATE void sqlite3RCStrUnref(void*); +SQLITE_PRIVATE char *sqlite3RCStrNew(u64); +SQLITE_PRIVATE char *sqlite3RCStrResize(char*,u64); + SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int); -SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum*, int); +SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum*, i64); SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*); SQLITE_PRIVATE void sqlite3StrAccumSetError(StrAccum*, u8); SQLITE_PRIVATE void sqlite3ResultStrAccum(sqlite3_context*,StrAccum*);@@ -20648,10 +21321,7 @@ SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3*, int, const char *);
SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *, VTable *); SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*); -#if (defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)) \ - && !defined(SQLITE_OMIT_VIRTUALTABLE) -SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(sqlite3_index_info*); -#endif +SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(Parse*); SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*); SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int); SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *);@@ -20787,6 +21457,7 @@ #else
#define sqlite3SelectExprHeight(x) 0 #define sqlite3ExprCheckHeight(x,y) #endif +SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr*,int); SQLITE_PRIVATE u32 sqlite3Get4byte(const u8*); SQLITE_PRIVATE void sqlite3Put4byte(u8*, u32);@@ -20892,6 +21563,18 @@ #if SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL)
SQLITE_PRIVATE int sqlite3KvvfsInit(void); #endif +#if defined(VDBE_PROFILE) \ + || defined(SQLITE_PERFORMANCE_TRACE) \ + || defined(SQLITE_ENABLE_STMT_SCANSTATUS) +SQLITE_PRIVATE sqlite3_uint64 sqlite3Hwtime(void); +#endif + +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS +# define IS_STMT_SCANSTATUS(db) (db->flags & SQLITE_StmtScanStatus) +#else +# define IS_STMT_SCANSTATUS(db) 0 +#endif + #endif /* SQLITEINT_H */ /************** End of sqliteInt.h *******************************************/@@ -20933,101 +21616,6 @@ ** on i486 hardware.
*/ #ifdef SQLITE_PERFORMANCE_TRACE -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. -*/ -/************** Include hwtime.h in the middle of os_common.h ****************/ -/************** Begin file hwtime.h ******************************************/ -/* -** 2008 May 27 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains inline asm code for retrieving "high-performance" -** counters for x86 and x86_64 class CPUs. -*/ -#ifndef SQLITE_HWTIME_H -#define SQLITE_HWTIME_H - -/* -** The following routine only works on pentium-class (or newer) processors. -** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. -*/ -#if !defined(__STRICT_ANSI__) && \ - (defined(__GNUC__) || defined(_MSC_VER)) && \ - (defined(i386) || defined(__i386__) || defined(_M_IX86)) - - #if defined(__GNUC__) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - - #elif defined(_MSC_VER) - - __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ - __asm { - rdtsc - ret ; return value at EDX:EAX - } - } - - #endif - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long val; - __asm__ __volatile__ ("rdtsc" : "=A" (val)); - return val; - } - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long long retval; - unsigned long junk; - __asm__ __volatile__ ("\n\ - 1: mftbu %1\n\ - mftb %L0\n\ - mftbu %0\n\ - cmpw %0,%1\n\ - bne 1b" - : "=r" (retval), "=r" (junk)); - return retval; - } - -#else - - /* - ** asm() is needed for hardware timing support. Without asm(), - ** disable the sqlite3Hwtime() routine. - ** - ** sqlite3Hwtime() is only used for some obscure debugging - ** and analysis configurations, not in any deliverable, so this - ** should not be a great loss. - */ -SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } - -#endif - -#endif /* !defined(SQLITE_HWTIME_H) */ - -/************** End of hwtime.h **********************************************/ -/************** Continuing where we left off in os_common.h ******************/ - static sqlite_uint64 g_start; static sqlite_uint64 g_elapsed; #define TIMER_START g_start=sqlite3Hwtime()@@ -21155,9 +21743,6 @@ #endif
#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC "4_BYTE_ALIGNED_MALLOC", #endif -#ifdef SQLITE_64BIT_STATS - "64BIT_STATS", -#endif #ifdef SQLITE_ALLOW_COVERING_INDEX_SCAN # if SQLITE_ALLOW_COVERING_INDEX_SCAN != 1 "ALLOW_COVERING_INDEX_SCAN=" CTIMEOPT_VAL(SQLITE_ALLOW_COVERING_INDEX_SCAN),@@ -21453,6 +22038,9 @@ #endif
#ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS "EXPLAIN_ESTIMATED_ROWS", #endif +#ifdef SQLITE_EXTRA_AUTOEXT + "EXTRA_AUTOEXT=" CTIMEOPT_VAL(SQLITE_EXTRA_AUTOEXT), +#endif #ifdef SQLITE_EXTRA_IFNULLROW "EXTRA_IFNULLROW", #endif@@ -21493,6 +22081,9 @@ "INT64_TYPE",
#endif #ifdef SQLITE_INTEGRITY_CHECK_ERROR_MAX "INTEGRITY_CHECK_ERROR_MAX=" CTIMEOPT_VAL(SQLITE_INTEGRITY_CHECK_ERROR_MAX), +#endif +#ifdef SQLITE_LEGACY_JSON_VALID + "LEGACY_JSON_VALID", #endif #ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS "LIKE_DOESNT_MATCH_BLOBS",@@ -21731,6 +22322,9 @@ #endif
#ifdef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS "OMIT_SCHEMA_VERSION_PRAGMAS", #endif +#ifdef SQLITE_OMIT_SEH + "OMIT_SEH", +#endif #ifdef SQLITE_OMIT_SHARED_CACHE "OMIT_SHARED_CACHE", #endif@@ -21982,7 +22576,7 @@ ** isdigit() 0x04
** isalnum() 0x06 ** isxdigit() 0x08 ** toupper() 0x20 -** SQLite identifier character 0x40 +** SQLite identifier character 0x40 $, _, or non-ascii ** Quote character 0x80 ** ** Bit 0x20 is set if the mapped character requires translation to upper@@ -22128,6 +22722,7 @@ SQLITE_USE_URI, /* bOpenUri */
SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */ 0, /* bSmallMalloc */ 1, /* bExtraSchemaChecks */ + sizeof(LONGDOUBLE_TYPE)>8, /* bUseLongDouble */ 0x7ffffffe, /* mxStrlen */ 0, /* neverCorrupt */ SQLITE_DEFAULT_LOOKASIDE, /* szLookaside, nLookaside */@@ -22176,7 +22771,7 @@ 0x7ffffffe, /* iOnceResetThreshold */
SQLITE_DEFAULT_SORTERREF_SIZE, /* szSorterRef */ 0, /* iPrngSeed */ #ifdef SQLITE_DEBUG - {0,0,0,0,0,0} /* aTune */ + {0,0,0,0,0,0}, /* aTune */ #endif };@@ -22357,6 +22952,9 @@
/* Elements of the linked list at Vdbe.pAuxData */ typedef struct AuxData AuxData; +/* A cache of large TEXT or BLOB values in a VdbeCursor */ +typedef struct VdbeTxtBlbCache VdbeTxtBlbCache; + /* Types of VDBE cursors */ #define CURTYPE_BTREE 0 #define CURTYPE_SORTER 1@@ -22388,6 +22986,7 @@ Bool isEphemeral:1; /* True for an ephemeral table */
Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */
Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */
Bool noReuse:1; /* OpenEphemeral may not reuse this cursor */
+ Bool colCache:1; /* pCache pointer is initialized and non-NULL */
u16 seekHit; /* See the OP_SeekHit and OP_IfNoHope opcodes */
union { /* pBtx for isEphemeral. pAltMap otherwise */
Btree *pBtx; /* Separate file holding temporary table */@@ -22428,6 +23027,7 @@ u32 szRow; /* Bytes available in aRow */
#ifdef SQLITE_ENABLE_COLUMN_USED_MASK u64 maskUsed; /* Mask of columns used by this cursor */ #endif + VdbeTxtBlbCache *pCache; /* Cache of large TEXT or BLOB values */ /* 2*nField extra array elements allocated for aType[], beyond the one ** static element declared in the structure. nField total array slots for@@ -22440,13 +23040,26 @@ */
#define IsNullCursor(P) \ ((P)->eCurType==CURTYPE_PSEUDO && (P)->nullRow && (P)->seekResult==0) - /* ** A value for VdbeCursor.cacheStatus that means the cache is always invalid. */ #define CACHE_STALE 0 /* +** Large TEXT or BLOB values can be slow to load, so we want to avoid +** loading them more than once. For that reason, large TEXT and BLOB values +** can be stored in a cache defined by this object, and attached to the +** VdbeCursor using the pCache field. +*/ +struct VdbeTxtBlbCache { + char *pCValue; /* A RCStr buffer to hold the value */ + i64 iOffset; /* File offset of the row being cached */ + int iCol; /* Column for which the cache is valid */ + u32 cacheStatus; /* Vdbe.cacheCtr value */ + u32 colCacheCtr; /* Column cache counter */ +}; + +/* ** When a sub-program is executed (OP_Program), a structure of this type ** is allocated to store the current value of the program counter, as ** well as the current memory cell array and various other frame specific@@ -22472,7 +23085,6 @@ struct VdbeFrame {
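/*
** A hypothetical sketch (not the actual OP_Column logic) of how a cursor
** would consult the cache described above: the cached buffer is reusable
** only when it still belongs to the same column of the same row version,
** as tracked by the two counters.
*/
static char *tryCachedValue(VdbeCursor *pC, int iCol, u32 cacheCtr, u32 colCacheCtr){
  VdbeTxtBlbCache *p;
  if( !pC->colCache ) return 0;        /* no cache attached to this cursor */
  p = pC->pCache;
  if( p->iCol==iCol && p->cacheStatus==cacheCtr && p->colCacheCtr==colCacheCtr ){
    return p->pCValue;                 /* reuse the cached RCStr buffer */
  }
  return 0;                            /* caller reloads the TEXT/BLOB */
}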
Vdbe *v; /* VM this frame belongs to */ VdbeFrame *pParent; /* Parent of this frame, or NULL if parent is main */ Op *aOp; /* Program instructions for parent frame */ - i64 *anExec; /* Event counters from parent frame */ Mem *aMem; /* Array of memory cells for parent frame */ VdbeCursor **apCsr; /* Array of Vdbe cursors for parent frame */ u8 *aOnce; /* Bitmask used by OP_Once */@@ -22688,10 +23300,19 @@ typedef unsigned bft; /* Bit Field Type */
/* The ScanStatus object holds a single value for the
** sqlite3_stmt_scanstatus() interface.
+**
+** aAddrRange[]:
+** This array is used by ScanStatus elements associated with EQP
+** notes that make an SQLITE_SCANSTAT_NCYCLE value available. It is
+** an array of up to 3 ranges of VM addresses for which the Vdbe.anCycle[]
+** values should be summed to calculate the NCYCLE value. Each pair of
+** integer addresses is a start and end address (both inclusive) for a range
+** of instructions. A start value of 0 indicates an empty range.
*/
typedef struct ScanStatus ScanStatus;
struct ScanStatus {
int addrExplain; /* OP_Explain for loop */
+ int aAddrRange[6];
int addrLoop; /* Address of "loops" counter */
int addrVisit; /* Address of "rows visited" counter */
int iSelectID; /* The "Select-ID" for this loop */@@ -22747,7 +23368,7 @@ Op *aOp; /* Space to hold the virtual machine's program */
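/*
** A hypothetical sketch of consuming the aAddrRange[] pairs described
** above (the aCycle[] argument stands in for per-opcode cycle counters;
** the real accounting is internal to sqlite3_stmt_scanstatus_v2()).
*/
static sqlite3_uint64 sumNCycle(const ScanStatus *pScan, const sqlite3_uint64 *aCycle){
  sqlite3_uint64 nCycle = 0;
  int i, iAddr;
  for(i=0; i<6; i+=2){
    int iStart = pScan->aAddrRange[i];
    int iEnd = pScan->aAddrRange[i+1];
    if( iStart==0 ) continue;                 /* 0 marks an empty range */
    for(iAddr=iStart; iAddr<=iEnd; iAddr++){  /* both ends are inclusive */
      nCycle += aCycle[iAddr];
    }
  }
  return nCycle;
}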
int nOp; /* Number of instructions in the program */ int nOpAlloc; /* Slots allocated for aOp[] */ Mem *aColName; /* Column names to return */ - Mem *pResultSet; /* Pointer to an array of results */ + Mem *pResultRow; /* Current output row */ char *zErrMsg; /* Error message written here */ VList *pVList; /* Name of variables */ #ifndef SQLITE_OMIT_TRACE@@ -22758,16 +23379,18 @@ int rcApp; /* errcode set by sqlite3_result_error_code() */
u32 nWrite; /* Number of write operations that have occurred */
#endif
u16 nResColumn; /* Number of columns in one row of the result set */
+ u16 nResAlloc; /* Column slots allocated to aColName[] */
u8 errorAction; /* Recovery action to do in case of an error */
u8 minWriteFileFormat; /* Minimum file format for writable database files */
u8 prepFlags; /* SQLITE_PREPARE_* flags */
u8 eVdbeState; /* One of the VDBE_*_STATE values */
bft expired:2; /* 1: recompile VM immediately 2: when convenient */
- bft explain:2; /* True if EXPLAIN present on SQL command */
+ bft explain:2; /* 0: normal, 1: EXPLAIN, 2: EXPLAIN QUERY PLAN */
bft changeCntOn:1; /* True to update the change-counter */
bft usesStmtJournal:1; /* True if it uses a statement journal */
bft readOnly:1; /* True for statements that do not write */
bft bIsReader:1; /* True for statements that read */
+ bft haveEqpOps:1; /* Bytecode supports EXPLAIN QUERY PLAN */
yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */
yDbMask lockMask; /* Subset of btreeMask that requires a lock */
u32 aCounter[9]; /* Counters used by sqlite3_stmt_status() */@@ -22784,7 +23407,6 @@ u32 expmask; /* Binding to these vars invalidates VM */
SubProgram *pProgram; /* Linked list of all sub-programs used by VM */ AuxData *pAuxData; /* Linked list of auxdata allocations */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS - i64 *anExec; /* Number of times each op has been executed */ int nScan; /* Entries in aScan[] */ ScanStatus *aScan; /* Scan definitions for sqlite3_stmt_scanstatus() */ #endif@@ -22815,7 +23437,7 @@ int iBlobWrite; /* Value returned by preupdate_blobwrite() */
i64 iKey1; /* First key value passed to hook */ i64 iKey2; /* Second key value passed to hook */ Mem *aNew; /* Array of new.* values */ - Table *pTab; /* Schema object being upated */ + Table *pTab; /* Schema object being updated */ Index *pPk; /* PK index if pTab is WITHOUT ROWID */ };@@ -22905,6 +23527,7 @@ #ifdef SQLITE_DEBUG
SQLITE_PRIVATE int sqlite3VdbeMemIsRowSet(const Mem*); #endif SQLITE_PRIVATE int sqlite3VdbeMemSetRowSet(Mem*); +SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8); SQLITE_PRIVATE int sqlite3IntFloatCompare(i64,double);@@ -22950,6 +23573,8 @@ SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *, const VdbeCursor *);
SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *, int *); SQLITE_PRIVATE int sqlite3VdbeSorterWrite(const VdbeCursor *, Mem *); SQLITE_PRIVATE int sqlite3VdbeSorterCompare(const VdbeCursor *, Mem *, int, int *); + +SQLITE_PRIVATE void sqlite3VdbeValueListFree(void*); #ifdef SQLITE_DEBUG SQLITE_PRIVATE void sqlite3VdbeIncrWriteCounter(Vdbe*, VdbeCursor*);@@ -23466,6 +24091,7 @@ char validHMS; /* True (1) if h,m,s are valid */
char validTZ; /* True (1) if tz is valid */ char tzSet; /* Timezone was set explicitly */ char isError; /* An overflow has occurred */ + char useSubsec; /* Display subsecond precision */ };@@ -23498,8 +24124,8 @@ ** The function returns the number of successful conversions.
*/ static int getDigits(const char *zDate, const char *zFormat, ...){ /* The aMx[] array translates the 3rd character of each format - ** spec into a max size: a b c d e f */ - static const u16 aMx[] = { 12, 14, 24, 31, 59, 9999 }; + ** spec into a max size: a b c d e f */ + static const u16 aMx[] = { 12, 14, 24, 31, 59, 14712 }; va_list ap; int cnt = 0; char nextC;@@ -23780,6 +24406,11 @@ return setDateTimeToCurrent(context, p);
}else if( sqlite3AtoF(zDate, &r, sqlite3Strlen30(zDate), SQLITE_UTF8)>0 ){ setRawDateNumber(p, r); return 0; + }else if( (sqlite3StrICmp(zDate,"subsec")==0 + || sqlite3StrICmp(zDate,"subsecond")==0) + && sqlite3NotPureFunc(context) ){ + p->useSubsec = 1; + return setDateTimeToCurrent(context, p); } return 1; }@@ -23835,17 +24466,14 @@ /*
** Compute the Hour, Minute, and Seconds from the julian day number. */ static void computeHMS(DateTime *p){ - int s; + int day_ms, day_min; /* milliseconds, minutes into the day */ if( p->validHMS ) return; computeJD(p); - s = (int)((p->iJD + 43200000) % 86400000); - p->s = s/1000.0; - s = (int)p->s; - p->s -= s; - p->h = s/3600; - s -= p->h*3600; - p->m = s/60; - p->s += s - p->m*60; + day_ms = (int)((p->iJD + 43200000) % 86400000); + p->s = (day_ms % 60000)/1000.0; + day_min = day_ms/60000; + p->m = day_min % 60; + p->h = day_min / 60; p->rawS = 0; p->validHMS = 1; }@@ -24025,6 +24653,25 @@ { 4, "year", 14713.0, 31536000.0 },
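/*
** A worked example of the arithmetic above: for a time of day of
** 12:34:56.789, day_ms = 45296789, and then:
**
**   p->s    = (45296789 % 60000)/1000.0  =  56.789
**   day_min =  45296789 / 60000          =  754
**   p->m    =  754 % 60                  =  34
**   p->h    =  754 / 60                  =  12
**
** A single division by 60000 thus replaces the old seconds-based two-step
** computation while producing the same h/m/s values.
*/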
};

/*
+** If the DateTime p is a raw number, try to figure out if it is
+** a julian day number or a unix timestamp. Set the p value
+** appropriately.
+*/
+static void autoAdjustDate(DateTime *p){
+  if( !p->rawS || p->validJD ){
+    p->rawS = 0;
+  }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */
+         && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */
+  ){
+    double r = p->s*1000.0 + 210866760000000.0;
+    clearYMD_HMS_TZ(p);
+    p->iJD = (sqlite3_int64)(r + 0.5);
+    p->validJD = 1;
+    p->rawS = 0;
+  }
+}
+
+/*
** Process a modifier to a date-time stamp. The modifiers are
** as follows:
**@@ -24067,19 +24714,8 @@ ** a unix timestamp, depending on its magnitude.
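**
** A worked example of the autoAdjustDate() heuristic above: a raw value
** of 0 lies inside the accepted range, so it is treated as the unix
** timestamp 1970-01-01 00:00:00 and converted to milliseconds:
**
**     r      = 0*1000.0 + 210866760000000.0
**     p->iJD = 210866760000000    (== 2440587.5 days, the julian day
**                                  number of the unix epoch)
**
** Raw values outside [-210866760000, 253402300799] seconds are not
** adjusted and remain interpreted as julian day numbers.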
*/ if( sqlite3_stricmp(z, "auto")==0 ){ if( idx>1 ) return 1; /* IMP: R-33611-57934 */ - if( !p->rawS || p->validJD ){ - rc = 0; - p->rawS = 0; - }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */ - && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */ - ){ - r = p->s*1000.0 + 210866760000000.0; - clearYMD_HMS_TZ(p); - p->iJD = (sqlite3_int64)(r + 0.5); - p->validJD = 1; - p->rawS = 0; - rc = 0; - } + autoAdjustDate(p); + rc = 0; } break; }@@ -24138,7 +24774,7 @@ if( p->tzSet==0 ){
i64 iOrigJD; /* Original localtime */ i64 iGuess; /* Guess at the corresponding utc time */ int cnt = 0; /* Safety to prevent infinite loop */ - int iErr; /* Guess is off by this much */ + i64 iErr; /* Guess is off by this much */ computeJD(p); iGuess = iOrigJD = p->iJD;@@ -24194,8 +24830,22 @@ ** start of TTTTT
** ** Move the date backwards to the beginning of the current day, ** or month or year. + ** + ** subsecond + ** subsec + ** + ** Show subsecond precision in the output of datetime() and + ** unixepoch() and strftime('%s'). */ - if( sqlite3_strnicmp(z, "start of ", 9)!=0 ) break; + if( sqlite3_strnicmp(z, "start of ", 9)!=0 ){ + if( sqlite3_stricmp(z, "subsec")==0 + || sqlite3_stricmp(z, "subsecond")==0 + ){ + p->useSubsec = 1; + rc = 0; + } + break; + } if( !p->validJD && !p->validYMD && !p->validHMS ) break; z += 9; computeYMD(p);@@ -24231,18 +24881,73 @@ case '8':
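/*
** A usage sketch for the new subsec modifier (literal outputs are
** illustrative only):
**
**     SELECT time('now');                    -- '12:34:56'
**     SELECT time('now','subsec');           -- '12:34:56.789'
**     SELECT unixepoch('now','subsec');      -- floating point result
**     SELECT strftime('%s','now','subsec');  -- '%s' with milliseconds
**
** The standalone date-value form, as in datetime('subsec'), implies 'now'
** and is therefore only accepted where non-pure functions are allowed
** (see the sqlite3NotPureFunc() check above).
*/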
case '9': { double rRounder; int i; - for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){} + int Y,M,D,h,m,x; + const char *z2 = z; + char z0 = z[0]; + for(n=1; z[n]; n++){ + if( z[n]==':' ) break; + if( sqlite3Isspace(z[n]) ) break; + if( z[n]=='-' ){ + if( n==5 && getDigits(&z[1], "40f", &Y)==1 ) break; + if( n==6 && getDigits(&z[1], "50f", &Y)==1 ) break; + } + } if( sqlite3AtoF(z, &r, n, SQLITE_UTF8)<=0 ){ - rc = 1; + assert( rc==1 ); break; } - if( z[n]==':' ){ + if( z[n]=='-' ){ + /* A modifier of the form (+|-)YYYY-MM-DD adds or subtracts the + ** specified number of years, months, and days. MM is limited to + ** the range 0-11 and DD is limited to 0-30. + */ + if( z0!='+' && z0!='-' ) break; /* Must start with +/- */ + if( n==5 ){ + if( getDigits(&z[1], "40f-20a-20d", &Y, &M, &D)!=3 ) break; + }else{ + assert( n==6 ); + if( getDigits(&z[1], "50f-20a-20d", &Y, &M, &D)!=3 ) break; + z++; + } + if( M>=12 ) break; /* M range 0..11 */ + if( D>=31 ) break; /* D range 0..30 */ + computeYMD_HMS(p); + p->validJD = 0; + if( z0=='-' ){ + p->Y -= Y; + p->M -= M; + D = -D; + }else{ + p->Y += Y; + p->M += M; + } + x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; + p->Y += x; + p->M -= x*12; + computeJD(p); + p->validHMS = 0; + p->validYMD = 0; + p->iJD += (i64)D*86400000; + if( z[11]==0 ){ + rc = 0; + break; + } + if( sqlite3Isspace(z[11]) + && getDigits(&z[12], "20c:20e", &h, &m)==2 + ){ + z2 = &z[12]; + n = 2; + }else{ + break; + } + } + if( z2[n]==':' ){ /* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the ** specified number of hours, minutes, seconds, and fractional seconds ** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be ** omitted. */ - const char *z2 = z; + DateTime tx; sqlite3_int64 day; if( !sqlite3Isdigit(*z2) ) z2++;@@ -24252,7 +24957,7 @@ computeJD(&tx);
tx.iJD -= 43200000; day = tx.iJD/86400000; tx.iJD -= day*86400000; - if( z[0]=='-' ) tx.iJD = -tx.iJD; + if( z0=='-' ) tx.iJD = -tx.iJD; computeJD(p); clearYMD_HMS_TZ(p); p->iJD += tx.iJD;@@ -24268,7 +24973,7 @@ n = sqlite3Strlen30(z);
if( n>10 || n<3 ) break; if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--; computeJD(p); - rc = 1; + assert( rc==1 ); rRounder = r<0 ? -0.5 : +0.5; for(i=0; i<ArraySize(aXformType); i++){ if( aXformType[i].nName==n@@ -24277,7 +24982,6 @@ && r>-aXformType[i].rLimit && r<aXformType[i].rLimit
){ switch( i ){ case 4: { /* Special processing to add months */ - int x; assert( strcmp(aXformType[i].zName,"month")==0 ); computeYMD_HMS(p); p->M += (int)r;@@ -24393,7 +25097,11 @@ ){
DateTime x; if( isDate(context, argc, argv, &x)==0 ){ computeJD(&x); - sqlite3_result_int64(context, x.iJD/1000 - 21086676*(i64)10000); + if( x.useSubsec ){ + sqlite3_result_double(context, (x.iJD - 21086676*(i64)10000000)/1000.0); + }else{ + sqlite3_result_int64(context, x.iJD/1000 - 21086676*(i64)10000); + } } }@@ -24409,8 +25117,8 @@ sqlite3_value **argv
){ DateTime x; if( isDate(context, argc, argv, &x)==0 ){ - int Y, s; - char zBuf[24]; + int Y, s, n; + char zBuf[32]; computeYMD_HMS(&x); Y = x.Y; if( Y<0 ) Y = -Y;@@ -24431,15 +25139,28 @@ zBuf[14] = ':';
zBuf[15] = '0' + (x.m/10)%10; zBuf[16] = '0' + (x.m)%10; zBuf[17] = ':'; - s = (int)x.s; - zBuf[18] = '0' + (s/10)%10; - zBuf[19] = '0' + (s)%10; - zBuf[20] = 0; + if( x.useSubsec ){ + s = (int)(1000.0*x.s + 0.5); + zBuf[18] = '0' + (s/10000)%10; + zBuf[19] = '0' + (s/1000)%10; + zBuf[20] = '.'; + zBuf[21] = '0' + (s/100)%10; + zBuf[22] = '0' + (s/10)%10; + zBuf[23] = '0' + (s)%10; + zBuf[24] = 0; + n = 24; + }else{ + s = (int)x.s; + zBuf[18] = '0' + (s/10)%10; + zBuf[19] = '0' + (s)%10; + zBuf[20] = 0; + n = 20; + } if( x.Y<0 ){ zBuf[0] = '-'; - sqlite3_result_text(context, zBuf, 20, SQLITE_TRANSIENT); + sqlite3_result_text(context, zBuf, n, SQLITE_TRANSIENT); }else{ - sqlite3_result_text(context, &zBuf[1], 19, SQLITE_TRANSIENT); + sqlite3_result_text(context, &zBuf[1], n-1, SQLITE_TRANSIENT); } } }@@ -24456,7 +25177,7 @@ sqlite3_value **argv
){ DateTime x; if( isDate(context, argc, argv, &x)==0 ){ - int s; + int s, n; char zBuf[16]; computeHMS(&x); zBuf[0] = '0' + (x.h/10)%10;@@ -24465,11 +25186,24 @@ zBuf[2] = ':';
zBuf[3] = '0' + (x.m/10)%10; zBuf[4] = '0' + (x.m)%10; zBuf[5] = ':'; - s = (int)x.s; - zBuf[6] = '0' + (s/10)%10; - zBuf[7] = '0' + (s)%10; - zBuf[8] = 0; - sqlite3_result_text(context, zBuf, 8, SQLITE_TRANSIENT); + if( x.useSubsec ){ + s = (int)(1000.0*x.s + 0.5); + zBuf[6] = '0' + (s/10000)%10; + zBuf[7] = '0' + (s/1000)%10; + zBuf[8] = '.'; + zBuf[9] = '0' + (s/100)%10; + zBuf[10] = '0' + (s/10)%10; + zBuf[11] = '0' + (s)%10; + zBuf[12] = 0; + n = 12; + }else{ + s = (int)x.s; + zBuf[6] = '0' + (s/10)%10; + zBuf[7] = '0' + (s)%10; + zBuf[8] = 0; + n = 8; + } + sqlite3_result_text(context, zBuf, n, SQLITE_TRANSIENT); } }@@ -24524,7 +25258,7 @@ ** %m month 01-12
** %M minute 00-59 ** %s seconds since 1970-01-01 ** %S seconds 00-59 -** %w day of week 0-6 sunday==0 +** %w day of week 0-6 Sunday==0 ** %W week of year 00-53 ** %Y year 0000-9999 ** %% %@@ -24550,13 +25284,16 @@
computeJD(&x); computeYMD_HMS(&x); for(i=j=0; zFmt[i]; i++){ + char cf; if( zFmt[i]!='%' ) continue; if( j<i ) sqlite3_str_append(&sRes, zFmt+j, (int)(i-j)); i++; j = i + 1; - switch( zFmt[i] ){ - case 'd': { - sqlite3_str_appendf(&sRes, "%02d", x.D); + cf = zFmt[i]; + switch( cf ){ + case 'd': /* Fall thru */ + case 'e': { + sqlite3_str_appendf(&sRes, cf=='d' ? "%02d" : "%2d", x.D); break; } case 'f': {@@ -24565,8 +25302,21 @@ if( s>59.999 ) s = 59.999;
sqlite3_str_appendf(&sRes, "%06.3f", s); break; } - case 'H': { - sqlite3_str_appendf(&sRes, "%02d", x.h); + case 'F': { + sqlite3_str_appendf(&sRes, "%04d-%02d-%02d", x.Y, x.M, x.D); + break; + } + case 'H': + case 'k': { + sqlite3_str_appendf(&sRes, cf=='H' ? "%02d" : "%2d", x.h); + break; + } + case 'I': /* Fall thru */ + case 'l': { + int h = x.h; + if( h>12 ) h -= 12; + if( h==0 ) h = 12; + sqlite3_str_appendf(&sRes, cf=='I' ? "%02d" : "%2d", h); break; } case 'W': /* Fall thru */@@ -24578,7 +25328,7 @@ y.M = 1;
y.D = 1; computeJD(&y); nDay = (int)((x.iJD-y.iJD+43200000)/86400000); - if( zFmt[i]=='W' ){ + if( cf=='W' ){ int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ wd = (int)(((x.iJD+43200000)/86400000)%7); sqlite3_str_appendf(&sRes,"%02d",(nDay+7-wd)/7);@@ -24599,18 +25349,42 @@ case 'M': {
sqlite3_str_appendf(&sRes,"%02d",x.m); break; } + case 'p': /* Fall thru */ + case 'P': { + if( x.h>=12 ){ + sqlite3_str_append(&sRes, cf=='p' ? "PM" : "pm", 2); + }else{ + sqlite3_str_append(&sRes, cf=='p' ? "AM" : "am", 2); + } + break; + } + case 'R': { + sqlite3_str_appendf(&sRes, "%02d:%02d", x.h, x.m); + break; + } case 's': { - i64 iS = (i64)(x.iJD/1000 - 21086676*(i64)10000); - sqlite3_str_appendf(&sRes,"%lld",iS); + if( x.useSubsec ){ + sqlite3_str_appendf(&sRes,"%.3f", + (x.iJD - 21086676*(i64)10000000)/1000.0); + }else{ + i64 iS = (i64)(x.iJD/1000 - 21086676*(i64)10000); + sqlite3_str_appendf(&sRes,"%lld",iS); + } break; } case 'S': { sqlite3_str_appendf(&sRes,"%02d",(int)x.s); break; } + case 'T': { + sqlite3_str_appendf(&sRes,"%02d:%02d:%02d", x.h, x.m, (int)x.s); + break; + } + case 'u': /* Fall thru */ case 'w': { - sqlite3_str_appendchar(&sRes, 1, - (char)(((x.iJD+129600000)/86400000) % 7) + '0'); + char c = (char)(((x.iJD+129600000)/86400000) % 7) + '0'; + if( c=='0' && cf=='u' ) c = '7'; + sqlite3_str_appendchar(&sRes, 1, c); break; } case 'Y': {@@ -24660,6 +25434,117 @@ dateFunc(context, 0, 0);
}

/*
+** timediff(DATE1, DATE2)
+**
+** Return the amount of time that must be added to DATE2 in order to
+** convert it into DATE1. The time difference format is:
+**
+** +YYYY-MM-DD HH:MM:SS.SSS
+**
+** The initial "+" becomes "-" if DATE1 occurs before DATE2. For
+** date/time values A and B, the following invariant should hold:
+**
+** datetime(A) == datetime(B, timediff(A,B))
+**
+** Both DATE arguments must be either a julian day number, or an
+** ISO-8601 string. Unix timestamps are not supported by this
+** routine.
+*/
+static void timediffFunc(
+  sqlite3_context *context,
+  int NotUsed1,
+  sqlite3_value **argv
+){
+  char sign;
+  int Y, M;
+  DateTime d1, d2;
+  sqlite3_str sRes;
+  UNUSED_PARAMETER(NotUsed1);
+  if( isDate(context, 1, &argv[0], &d1) ) return;
+  if( isDate(context, 1, &argv[1], &d2) ) return;
+  computeYMD_HMS(&d1);
+  computeYMD_HMS(&d2);
+  if( d1.iJD>=d2.iJD ){
+    sign = '+';
+    Y = d1.Y - d2.Y;
+    if( Y ){
+      d2.Y = d1.Y;
+      d2.validJD = 0;
+      computeJD(&d2);
+    }
+    M = d1.M - d2.M;
+    if( M<0 ){
+      Y--;
+      M += 12;
+    }
+    if( M!=0 ){
+      d2.M = d1.M;
+      d2.validJD = 0;
+      computeJD(&d2);
+    }
+    while( d1.iJD<d2.iJD ){
+      M--;
+      if( M<0 ){
+        M = 11;
+        Y--;
+      }
+      d2.M--;
+      if( d2.M<1 ){
+        d2.M = 12;
+        d2.Y--;
+      }
+      d2.validJD = 0;
+      computeJD(&d2);
+    }
+    d1.iJD -= d2.iJD;
+    d1.iJD += (u64)1486995408 * (u64)100000;
+  }else /* d1<d2 */{
+    sign = '-';
+    Y = d2.Y - d1.Y;
+    if( Y ){
+      d2.Y = d1.Y;
+      d2.validJD = 0;
+      computeJD(&d2);
+    }
+    M = d2.M - d1.M;
+    if( M<0 ){
+      Y--;
+      M += 12;
+    }
+    if( M!=0 ){
+      d2.M = d1.M;
+      d2.validJD = 0;
+      computeJD(&d2);
+    }
+    while( d1.iJD>d2.iJD ){
+      M--;
+      if( M<0 ){
+        M = 11;
+        Y--;
+      }
+      d2.M++;
+      if( d2.M>12 ){
+        d2.M = 1;
+        d2.Y++;
+      }
+      d2.validJD = 0;
+      computeJD(&d2);
+    }
+    d1.iJD = d2.iJD - d1.iJD;
+    d1.iJD += (u64)1486995408 * (u64)100000;
+  }
+  d1.validYMD = 0;
+  d1.validHMS = 0;
+  d1.validTZ = 0;
+  computeYMD_HMS(&d1);
+  sqlite3StrAccumInit(&sRes, 0, 0, 0, 100);
+  sqlite3_str_appendf(&sRes, "%c%04d-%02d-%02d %02d:%02d:%06.3f",
+     sign, Y, M, d1.D-1, d1.h, d1.m, d1.s);
+  sqlite3ResultStrAccum(context, &sRes);
+}
+
+
/*
** current_timestamp()
**
** This function returns the same value as datetime('now').@@ -24733,6 +25618,7 @@ PURE_DATE(date, -1, 0, 0, dateFunc ),
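/*
** A usage sketch for timediff() built on the invariant above (literal
** output strings are illustrative):
**
**     SELECT timediff('2023-02-15','2023-01-31');
**         -- '+0000-00-15 00:00:00.000'
**     SELECT datetime('2023-01-31', timediff('2023-02-15','2023-01-31'));
**         -- '2023-02-15 00:00:00'
**
** The second query round-trips DATE2 back to DATE1, i.e.
** datetime(A) == datetime(B, timediff(A,B)).
*/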
PURE_DATE(time, -1, 0, 0, timeFunc ), PURE_DATE(datetime, -1, 0, 0, datetimeFunc ), PURE_DATE(strftime, -1, 0, 0, strftimeFunc ), + PURE_DATE(timediff, 2, 0, 0, timediffFunc ), DFUNCTION(current_time, 0, 0, 0, ctimeFunc ), DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc), DFUNCTION(current_date, 0, 0, 0, cdateFunc ),@@ -24886,7 +25772,7 @@ ){
/* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite ** is using a regular VFS, it is called after the corresponding ** transaction has been committed. Injecting a fault at this point - ** confuses the test scripts - the COMMIT comand returns SQLITE_NOMEM + ** confuses the test scripts - the COMMIT command returns SQLITE_NOMEM ** but the transaction is committed anyway. ** ** The core must call OsFileControl() though, not OsFileControlHint(),@@ -25507,7 +26393,7 @@ ** Like free() but works for allocations obtained from sqlite3MemMalloc()
** or sqlite3MemRealloc(). ** ** For this low-level routine, we already know that pPrior!=0 since -** cases where pPrior==0 will have been intecepted and dealt with +** cases where pPrior==0 will have been intercepted and dealt with ** by higher-level routines. */ static void sqlite3MemFree(void *pPrior){@@ -25595,7 +26481,7 @@ if( _sqliteZone_ ){
return SQLITE_OK; } len = sizeof(cpuCount); - /* One usually wants to use hw.acctivecpu for MT decisions, but not here */ + /* One usually wants to use hw.activecpu for MT decisions, but not here */ sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0); if( cpuCount>1 ){ /* defer MT decisions to system malloc */@@ -27290,9 +28176,13 @@ if( n<=mem5.szAtom*2 ){
if( n<=mem5.szAtom ) return mem5.szAtom; return mem5.szAtom*2; } - if( n>0x40000000 ) return 0; + if( n>0x10000000 ){ + if( n>0x40000000 ) return 0; + if( n>0x20000000 ) return 0x40000000; + return 0x20000000; + } for(iFullSz=mem5.szAtom*8; iFullSz<n; iFullSz *= 4); - if( (iFullSz/2)>=n ) return iFullSz/2; + if( (iFullSz/2)>=(i64)n ) return iFullSz/2; return iFullSz; }@@ -27583,7 +28473,7 @@ assert( SQLITE_MUTEX_RECURSIVE<2 );
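/*
** A worked note on the size classes above: memsys5 rounds every request up
** to szAtom*2^k.  The new special cases keep the "iFullSz *= 4" loop above
** from overflowing a signed 32-bit int for very large requests: a request
** of, say, 0x18000001 bytes now returns 0x20000000 directly instead of
** letting iFullSz quadruple past 0x40000000.
*/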
assert( SQLITE_MUTEX_FAST<2 ); assert( SQLITE_MUTEX_WARNONCONTENTION<2 ); -#if SQLITE_ENABLE_API_ARMOR +#ifdef SQLITE_ENABLE_API_ARMOR if( ((CheckMutex*)p)->iType<2 ) #endif {@@ -28058,7 +28948,7 @@ #include <pthread.h>
/* ** The sqlite3_mutex.id, sqlite3_mutex.nRef, and sqlite3_mutex.owner fields -** are necessary under two condidtions: (1) Debug builds and (2) using +** are necessary under two conditions: (1) Debug builds and (2) using ** home-grown mutexes. Encapsulate these conditions into a single #define. */ #if defined(SQLITE_DEBUG) || defined(SQLITE_HOMEGROWN_RECURSIVE_MUTEX)@@ -28255,7 +29145,7 @@ ** mutex that it allocates.
*/ static void pthreadMutexFree(sqlite3_mutex *p){ assert( p->nRef==0 ); -#if SQLITE_ENABLE_API_ARMOR +#ifdef SQLITE_ENABLE_API_ARMOR if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ) #endif {@@ -28559,7 +29449,7 @@ struct sqlite3_mutex {
CRITICAL_SECTION mutex; /* Mutex controlling the lock */ int id; /* Mutex type */ #ifdef SQLITE_DEBUG - volatile int nRef; /* Number of enterances */ + volatile int nRef; /* Number of entrances */ volatile DWORD owner; /* Thread holding this mutex */ volatile LONG trace; /* True to trace changes */ #endif@@ -28608,7 +29498,7 @@ #if defined(SQLITE_MEMORY_BARRIER)
SQLITE_MEMORY_BARRIER; #elif defined(__GNUC__) __sync_synchronize(); -#elif MSVC_VERSION>=1300 +#elif MSVC_VERSION>=1400 _ReadWriteBarrier(); #elif defined(MemoryBarrier) MemoryBarrier();@@ -29202,7 +30092,7 @@ **
** The upper bound is slightly less than 2GiB: 0x7ffffeff == 2,147,483,391
** This provides a 256-byte safety margin for defense against 32-bit
** signed integer overflow bugs when computing memory allocation sizes.
-** Parnoid applications might want to reduce the maximum allocation size
+** Paranoid applications might want to reduce the maximum allocation size
** further for an even larger safety margin. 0x3fffffff or 0x0fffffff
** or even smaller would be reasonable upper bounds on the size of memory
** allocations for most applications.@@ -29716,9 +30606,14 @@ ** sqlite3DbMalloc(). Omit leading and trailing whitespace.
*/ SQLITE_PRIVATE char *sqlite3DbSpanDup(sqlite3 *db, const char *zStart, const char *zEnd){ int n; +#ifdef SQLITE_DEBUG + /* Because of the way the parser works, the span is guaranteed to contain + ** at least one non-space character */ + for(n=0; sqlite3Isspace(zStart[n]); n++){ assert( &zStart[n]<zEnd ); } +#endif while( sqlite3Isspace(zStart[0]) ) zStart++; n = (int)(zEnd - zStart); - while( ALWAYS(n>0) && sqlite3Isspace(zStart[n-1]) ) n--; + while( sqlite3Isspace(zStart[n-1]) ) n--; return sqlite3DbStrNDup(db, zStart, n); }@@ -29814,7 +30709,7 @@ assert( sqlite3_mutex_held(db->mutex) );
if( db->mallocFailed || rc ){ return apiHandleError(db, rc); } - return rc & db->errMask; + return 0; } /************** End of malloc.c **********************************************/@@ -29926,43 +30821,6 @@ ** %S Takes a pointer to SrcItem. Shows name or database.name
** %!S Like %S but prefer the zName over the zAlias */ -/* Floating point constants used for rounding */ -static const double arRound[] = { - 5.0e-01, 5.0e-02, 5.0e-03, 5.0e-04, 5.0e-05, - 5.0e-06, 5.0e-07, 5.0e-08, 5.0e-09, 5.0e-10, -}; - -/* -** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point -** conversions will work. -*/ -#ifndef SQLITE_OMIT_FLOATING_POINT -/* -** "*val" is a double such that 0.1 <= *val < 10.0 -** Return the ascii code for the leading digit of *val, then -** multiply "*val" by 10.0 to renormalize. -** -** Example: -** input: *val = 3.14159 -** output: *val = 1.4159 function return = '3' -** -** The counter *cnt is incremented each time. After counter exceeds -** 16 (the number of significant digits in a 64-bit float) '0' is -** always returned. -*/ -static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){ - int digit; - LONGDOUBLE_TYPE d; - if( (*cnt)<=0 ) return '0'; - (*cnt)--; - digit = (int)*val; - d = digit; - digit += '0'; - *val = (*val - d)*10.0; - return (char)digit; -} -#endif /* SQLITE_OMIT_FLOATING_POINT */ - /* ** Set the StrAccum object to an error mode. */@@ -30054,18 +30912,15 @@ etByte xtype = etINVALID; /* Conversion paradigm */
u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */ char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */ sqlite_uint64 longvalue; /* Value for integer types */ - LONGDOUBLE_TYPE realvalue; /* Value for real types */ + double realvalue; /* Value for real types */ const et_info *infop; /* Pointer to the appropriate info structure */ char *zOut; /* Rendering buffer */ int nOut; /* Size of the rendering buffer */ char *zExtra = 0; /* Malloced memory used by some conversion */ -#ifndef SQLITE_OMIT_FLOATING_POINT - int exp, e2; /* exponent of real numbers */ - int nsd; /* Number of significant digits returned */ - double rounder; /* Used for rounding floating point values */ + int exp, e2; /* exponent of real numbers */ etByte flag_dp; /* True if decimal point should be shown */ etByte flag_rtz; /* True if trailing zeros should be removed */ -#endif + PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */ char buf[etBUFSIZE]; /* Conversion buffer */@@ -30340,73 +31195,66 @@ length = (int)(&zOut[nOut-1]-bufpt);
break; case etFLOAT: case etEXP: - case etGENERIC: + case etGENERIC: { + FpDecode s; + int iRound; + int j; + if( bArgList ){ realvalue = getDoubleArg(pArgList); }else{ realvalue = va_arg(ap,double); } -#ifdef SQLITE_OMIT_FLOATING_POINT - length = 0; -#else if( precision<0 ) precision = 6; /* Set default precision */ #ifdef SQLITE_FP_PRECISION_LIMIT if( precision>SQLITE_FP_PRECISION_LIMIT ){ precision = SQLITE_FP_PRECISION_LIMIT; } #endif - if( realvalue<0.0 ){ - realvalue = -realvalue; - prefix = '-'; - }else{ - prefix = flag_prefix; - } - if( xtype==etGENERIC && precision>0 ) precision--; - testcase( precision>0xfff ); - idx = precision & 0xfff; - rounder = arRound[idx%10]; - while( idx>=10 ){ rounder *= 1.0e-10; idx -= 10; } if( xtype==etFLOAT ){ - double rx = (double)realvalue; - sqlite3_uint64 u; - int ex; - memcpy(&u, &rx, sizeof(u)); - ex = -1023 + (int)((u>>52)&0x7ff); - if( precision+(ex/3) < 15 ) rounder += realvalue*3e-16; - realvalue += rounder; - } - /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */ - exp = 0; - if( sqlite3IsNaN((double)realvalue) ){ - bufpt = "NaN"; - length = 3; - break; + iRound = -precision; + }else if( xtype==etGENERIC ){ + iRound = precision; + }else{ + iRound = precision+1; } - if( realvalue>0.0 ){ - LONGDOUBLE_TYPE scale = 1.0; - while( realvalue>=1e100*scale && exp<=350 ){ scale *= 1e100;exp+=100;} - while( realvalue>=1e10*scale && exp<=350 ){ scale *= 1e10; exp+=10; } - while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; } - realvalue /= scale; - while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; } - while( realvalue<1.0 ){ realvalue *= 10.0; exp--; } - if( exp>350 ){ + sqlite3FpDecode(&s, realvalue, iRound, flag_altform2 ? 26 : 16); + if( s.isSpecial ){ + if( s.isSpecial==2 ){ + bufpt = flag_zeropad ? "null" : "NaN"; + length = sqlite3Strlen30(bufpt); + break; + }else if( flag_zeropad ){ + s.z[0] = '9'; + s.iDP = 1000; + s.n = 1; + }else{ + memcpy(buf, "-Inf", 5); bufpt = buf; - buf[0] = prefix; - memcpy(buf+(prefix!=0),"Inf",4); - length = 3+(prefix!=0); + if( s.sign=='-' ){ + /* no-op */ + }else if( flag_prefix ){ + buf[0] = flag_prefix; + }else{ + bufpt++; + } + length = sqlite3Strlen30(bufpt); break; } } - bufpt = buf; + if( s.sign=='-' ){ + prefix = '-'; + }else{ + prefix = flag_prefix; + } + + exp = s.iDP-1; + if( xtype==etGENERIC && precision>0 ) precision--; + /* ** If the field type is etGENERIC, then convert to either etEXP ** or etFLOAT, as appropriate. */ - if( xtype!=etFLOAT ){ - realvalue += rounder; - if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; } - } if( xtype==etGENERIC ){ flag_rtz = !flag_alternateform; if( exp<-4 || exp>precision ){@@ -30421,29 +31269,32 @@ }
if( xtype==etEXP ){ e2 = 0; }else{ - e2 = exp; + e2 = s.iDP - 1; } + bufpt = buf; { i64 szBufNeeded; /* Size of a temporary buffer needed */ szBufNeeded = MAX(e2,0)+(i64)precision+(i64)width+15; + if( cThousand && e2>0 ) szBufNeeded += (e2+2)/3; if( szBufNeeded > etBUFSIZE ){ bufpt = zExtra = printfTempBuf(pAccum, szBufNeeded); if( bufpt==0 ) return; } } zOut = bufpt; - nsd = 16 + flag_altform2*10; flag_dp = (precision>0 ?1:0) | flag_alternateform | flag_altform2; /* The sign in front of the number */ if( prefix ){ *(bufpt++) = prefix; } /* Digits prior to the decimal point */ + j = 0; if( e2<0 ){ *(bufpt++) = '0'; }else{ for(; e2>=0; e2--){ - *(bufpt++) = et_getdigit(&realvalue,&nsd); + *(bufpt++) = j<s.n ? s.z[j++] : '0'; + if( cThousand && (e2%3)==0 && e2>1 ) *(bufpt++) = ','; } } /* The decimal point */@@ -30452,13 +31303,12 @@ *(bufpt++) = '.';
} /* "0" digits after the decimal point but before the first ** significant digit of the number */ - for(e2++; e2<0; precision--, e2++){ - assert( precision>0 ); + for(e2++; e2<0 && precision>0; precision--, e2++){ *(bufpt++) = '0'; } /* Significant digits after the decimal point */ while( (precision--)>0 ){ - *(bufpt++) = et_getdigit(&realvalue,&nsd); + *(bufpt++) = j<s.n ? s.z[j++] : '0'; } /* Remove trailing zeros and the "." if no digits follow the "." */ if( flag_rtz && flag_dp ){@@ -30474,6 +31324,7 @@ }
} /* Add the "eNNN" suffix */ if( xtype==etEXP ){ + exp = s.iDP - 1; *(bufpt++) = aDigits[infop->charset]; if( exp<0 ){ *(bufpt++) = '-'; exp = -exp;@@ -30507,8 +31358,8 @@ i = prefix!=0;
while( nPad-- ) bufpt[i++] = '0'; length = width; } -#endif /* !defined(SQLITE_OMIT_FLOATING_POINT) */ break; + } case etSIZE: if( !bArgList ){ *(va_arg(ap,int*)) = pAccum->nChar;@@ -30557,13 +31408,26 @@ length = 4;
} } if( precision>1 ){ + i64 nPrior = 1; width -= precision-1; if( width>1 && !flag_leftjustify ){ sqlite3_str_appendchar(pAccum, width-1, ' '); width = 0; } - while( precision-- > 1 ){ - sqlite3_str_append(pAccum, buf, length); + sqlite3_str_append(pAccum, buf, length); + precision--; + while( precision > 1 ){ + i64 nCopyBytes; + if( nPrior > precision-1 ) nPrior = precision - 1; + nCopyBytes = length*nPrior; + if( nCopyBytes + pAccum->nChar >= pAccum->nAlloc ){ + sqlite3StrAccumEnlarge(pAccum, nCopyBytes); + } + if( pAccum->accError ) break; + sqlite3_str_append(pAccum, + &pAccum->zText[pAccum->nChar-nCopyBytes], nCopyBytes); + precision -= nPrior; + nPrior *= 2; } } bufpt = buf;@@ -30791,9 +31655,9 @@ **
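**
** (A worked note on the %c repetition loop above: rather than appending
** the character buffer precision-1 times, the code appends it once and
** then doubles the copied region -- 1, 2, 4, ... repetitions per
** sqlite3_str_append() call -- so a "%.1000000c" conversion needs roughly
** 20 appends instead of a million.)
**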
** Return the number of bytes of text that StrAccum is able to accept ** after the attempted enlargement. The value returned might be zero. */ -SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, int N){ +SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, i64 N){ char *zNew; - assert( p->nChar+(i64)N >= p->nAlloc ); /* Only called if really needed */ + assert( p->nChar+N >= p->nAlloc ); /* Only called if really needed */ if( p->accError ){ testcase(p->accError==SQLITE_TOOBIG); testcase(p->accError==SQLITE_NOMEM);@@ -30804,8 +31668,7 @@ sqlite3StrAccumSetError(p, SQLITE_TOOBIG);
return p->nAlloc - p->nChar - 1; }else{ char *zOld = isMalloced(p) ? p->zText : 0; - i64 szNew = p->nChar; - szNew += (sqlite3_int64)N + 1; + i64 szNew = p->nChar + N + 1; if( szNew+p->nChar<=p->mxAlloc ){ /* Force exponential buffer size growth as long as it does not overflow, ** to avoid having to call this routine too often */@@ -30835,7 +31698,8 @@ sqlite3StrAccumSetError(p, SQLITE_NOMEM);
return 0; } } - return N; + assert( N>=0 && N<=0x7fffffff ); + return (int)N; } /*@@ -31126,12 +31990,22 @@ zBuf[acc.nChar] = 0;
return zBuf; } SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ - char *z; + StrAccum acc; va_list ap; + if( n<=0 ) return zBuf; +#ifdef SQLITE_ENABLE_API_ARMOR + if( zBuf==0 || zFormat==0 ) { + (void)SQLITE_MISUSE_BKPT; + if( zBuf ) zBuf[0] = 0; + return zBuf; + } +#endif + sqlite3StrAccumInit(&acc, 0, zBuf, n, 0); va_start(ap,zFormat); - z = sqlite3_vsnprintf(n, zBuf, zFormat, ap); + sqlite3_str_vappendf(&acc, zFormat, ap); va_end(ap); - return z; + zBuf[acc.nChar] = 0; + return zBuf; } /*@@ -31207,6 +32081,75 @@ va_list ap;
va_start(ap,zFormat); sqlite3_str_vappendf(p, zFormat, ap); va_end(ap); +} + + +/***************************************************************************** +** Reference counted string storage +*****************************************************************************/ + +/* +** Increase the reference count of the string by one. +** +** The input parameter is returned. +*/ +SQLITE_PRIVATE char *sqlite3RCStrRef(char *z){ + RCStr *p = (RCStr*)z; + assert( p!=0 ); + p--; + p->nRCRef++; + return z; +} + +/* +** Decrease the reference count by one. Free the string when the +** reference count reaches zero. +*/ +SQLITE_PRIVATE void sqlite3RCStrUnref(void *z){ + RCStr *p = (RCStr*)z; + assert( p!=0 ); + p--; + assert( p->nRCRef>0 ); + if( p->nRCRef>=2 ){ + p->nRCRef--; + }else{ + sqlite3_free(p); + } +} + +/* +** Create a new string that is capable of holding N bytes of text, not counting +** the zero byte at the end. The string is uninitialized. +** +** The reference count is initially 1. Call sqlite3RCStrUnref() to free the +** newly allocated string. +** +** This routine returns 0 on an OOM. +*/ +SQLITE_PRIVATE char *sqlite3RCStrNew(u64 N){ + RCStr *p = sqlite3_malloc64( N + sizeof(*p) + 1 ); + if( p==0 ) return 0; + p->nRCRef = 1; + return (char*)&p[1]; +} + +/* +** Change the size of the string so that it is able to hold N bytes. +** The string might be reallocated, so return the new allocation. +*/ +SQLITE_PRIVATE char *sqlite3RCStrResize(char *z, u64 N){ + RCStr *p = (RCStr*)z; + RCStr *pNew; + assert( p!=0 ); + p--; + assert( p->nRCRef==1 ); + pNew = sqlite3_realloc64(p, N+sizeof(RCStr)+1); + if( pNew==0 ){ + sqlite3_free(p); + return 0; + }else{ + return (char*)&pNew[1]; + } } /************** End of printf.c **********************************************/@@ -31431,6 +32374,13 @@ }
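The RCStr routines work by allocating an RCStr header immediately before the text they hand out, so callers keep using a plain char* while sqlite3RCStrRef()/sqlite3RCStrUnref() step back one header to reach the count. The layout trick in isolation, with generic names rather than the SQLite types:

#include <stdlib.h>

typedef struct { unsigned nRef; } Hdr;

static char *rcNew(size_t n){
  Hdr *p = malloc(sizeof(Hdr) + n + 1);
  if( p==0 ) return 0;
  p->nRef = 1;
  return (char*)&p[1];      /* payload begins right after the header */
}

static void rcUnref(char *z){
  Hdr *p = ((Hdr*)z) - 1;   /* step back to the hidden header */
  if( --p->nRef==0 ) free(p);
}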
if( pItem->fg.isOn || (pItem->fg.isUsing==0 && pItem->u3.pOn!=0) ){ sqlite3_str_appendf(&x, " ON"); } + if( pItem->fg.isTabFunc ) sqlite3_str_appendf(&x, " isTabFunc"); + if( pItem->fg.isCorrelated ) sqlite3_str_appendf(&x, " isCorrelated"); + if( pItem->fg.isMaterialized ) sqlite3_str_appendf(&x, " isMaterialized"); + if( pItem->fg.viaCoroutine ) sqlite3_str_appendf(&x, " viaCoroutine"); + if( pItem->fg.notCte ) sqlite3_str_appendf(&x, " notCte"); + if( pItem->fg.isNestedFrom ) sqlite3_str_appendf(&x, " isNestedFrom"); + sqlite3StrAccumFinish(&x); sqlite3TreeViewItem(pView, zLine, i<pSrc->nSrc-1); n = 0;@@ -31618,6 +32568,7 @@ if( pWin->pFilter ){
sqlite3TreeViewItem(pView, "FILTER", 1); sqlite3TreeViewExpr(pView, pWin->pFilter, 0); sqlite3TreeViewPop(&pView); + if( pWin->eFrmType==TK_FILTER ) return; } sqlite3TreeViewPush(&pView, more); if( pWin->zName ){@@ -31627,7 +32578,7 @@ sqlite3TreeViewLine(pView, "OVER (%p)", pWin);
} if( pWin->zBase ) nElement++; if( pWin->pOrderBy ) nElement++; - if( pWin->eFrmType ) nElement++; + if( pWin->eFrmType!=0 && pWin->eFrmType!=TK_FILTER ) nElement++; if( pWin->eExclude ) nElement++; if( pWin->zBase ){ sqlite3TreeViewPush(&pView, (--nElement)>0);@@ -31640,7 +32591,7 @@ }
if( pWin->pOrderBy ){ sqlite3TreeViewExprList(pView, pWin->pOrderBy, (--nElement)>0, "ORDER-BY"); } - if( pWin->eFrmType ){ + if( pWin->eFrmType!=0 && pWin->eFrmType!=TK_FILTER ){ char zBuf[30]; const char *zFrmType = "ROWS"; if( pWin->eFrmType==TK_RANGE ) zFrmType = "RANGE";@@ -31700,7 +32651,7 @@ sqlite3TreeViewLine(pView, "nil");
sqlite3TreeViewPop(&pView); return; } - if( pExpr->flags || pExpr->affExpr || pExpr->vvaFlags ){ + if( pExpr->flags || pExpr->affExpr || pExpr->vvaFlags || pExpr->pAggInfo ){ StrAccum x; sqlite3StrAccumInit(&x, 0, zFlgs, sizeof(zFlgs), 0); sqlite3_str_appendf(&x, " fg.af=%x.%c",@@ -31716,6 +32667,9 @@ sqlite3_str_appendf(&x, " DDL");
} if( ExprHasVVAProperty(pExpr, EP_Immutable) ){ sqlite3_str_appendf(&x, " IMMUTABLE"); + } + if( pExpr->pAggInfo!=0 ){ + sqlite3_str_appendf(&x, " agg-column[%d]", pExpr->iAgg); } sqlite3StrAccumFinish(&x); }else{@@ -31846,7 +32800,8 @@ "IS-FALSE", "IS-TRUE", "IS-NOT-FALSE", "IS-NOT-TRUE"
}; assert( pExpr->op2==TK_IS || pExpr->op2==TK_ISNOT ); assert( pExpr->pRight ); - assert( sqlite3ExprSkipCollate(pExpr->pRight)->op==TK_TRUEFALSE ); + assert( sqlite3ExprSkipCollateAndLikely(pExpr->pRight)->op + == TK_TRUEFALSE ); x = (pExpr->op2==TK_ISNOT)*2 + sqlite3ExprTruthValue(pExpr->pRight); zUniOp = azOp[x]; break;@@ -31884,7 +32839,7 @@ }else{
assert( ExprUseXList(pExpr) ); pFarg = pExpr->x.pList; #ifndef SQLITE_OMIT_WINDOWFUNC - pWin = ExprHasProperty(pExpr, EP_WinFunc) ? pExpr->y.pWin : 0; + pWin = IsWindowFunc(pExpr) ? pExpr->y.pWin : 0; #else pWin = 0; #endif@@ -31910,13 +32865,23 @@ }else{
sqlite3TreeViewLine(pView, "FUNCTION %Q%s", pExpr->u.zToken, zFlgs); } if( pFarg ){ - sqlite3TreeViewExprList(pView, pFarg, pWin!=0, 0); + sqlite3TreeViewExprList(pView, pFarg, pWin!=0 || pExpr->pLeft, 0); + if( pExpr->pLeft ){ + Expr *pOB = pExpr->pLeft; + assert( pOB->op==TK_ORDER ); + assert( ExprUseXList(pOB) ); + sqlite3TreeViewExprList(pView, pOB->x.pList, pWin!=0, "ORDERBY"); + } } #ifndef SQLITE_OMIT_WINDOWFUNC if( pWin ){ sqlite3TreeViewWindow(pView, pWin, 0); } #endif + break; + } + case TK_ORDER: { + sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, "ORDERBY"); break; } #ifndef SQLITE_OMIT_SUBQUERY@@ -33505,7 +34470,7 @@
/* ** Calls to sqlite3FaultSim() are used to simulate a failure during testing, ** or to bypass normal error detection during testing in order to let -** execute proceed futher downstream. +** execution proceed further downstream. ** ** In deployment, sqlite3FaultSim() *always* return SQLITE_OK (0). The ** sqlite3FaultSim() function only returns non-zero during testing.@@ -33622,6 +34587,23 @@ ** to do based on the SQLite error code in rc.
*/ SQLITE_PRIVATE void sqlite3SystemError(sqlite3 *db, int rc){ if( rc==SQLITE_IOERR_NOMEM ) return; +#ifdef SQLITE_USE_SEH + if( rc==SQLITE_IOERR_IN_PAGE ){ + int ii; + int iErr; + sqlite3BtreeEnterAll(db); + for(ii=0; ii<db->nDb; ii++){ + if( db->aDb[ii].pBt ){ + iErr = sqlite3PagerWalSystemErrno(sqlite3BtreePager(db->aDb[ii].pBt)); + if( iErr ){ + db->iSysErrno = iErr; + } + } + } + sqlite3BtreeLeaveAll(db); + return; + } +#endif rc &= 0xff; if( rc==SQLITE_CANTOPEN || rc==SQLITE_IOERR ){ db->iSysErrno = sqlite3OsGetLastError(db->pVfs);@@ -33657,6 +34639,30 @@ }
} /* +** Check for interrupts and invoke progress callback. +*/ +SQLITE_PRIVATE void sqlite3ProgressCheck(Parse *p){ + sqlite3 *db = p->db; + if( AtomicLoad(&db->u1.isInterrupted) ){ + p->nErr++; + p->rc = SQLITE_INTERRUPT; + } +#ifndef SQLITE_OMIT_PROGRESS_CALLBACK + if( db->xProgress ){ + if( p->rc==SQLITE_INTERRUPT ){ + p->nProgressSteps = 0; + }else if( (++p->nProgressSteps)>=db->nProgressOps ){ + if( db->xProgress(db->pProgressArg) ){ + p->nErr++; + p->rc = SQLITE_INTERRUPT; + } + p->nProgressSteps = 0; + } + } +#endif +} + +/* ** Add an error message to pParse->zErrMsg and increment pParse->nErr. ** ** This function should be used to report any error that occurs while@@ -33847,43 +34853,40 @@ }
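sqlite3ProgressCheck() is how the parser now honors sqlite3_interrupt() and the progress callback during long-running work. On the application side the hook is the long-standing public API; a minimal sketch, where db and g_cancel are assumed to already exist:

static int myProgress(void *pArg){
  /* Returning nonzero makes the statement fail with SQLITE_INTERRUPT */
  return *(volatile int*)pArg;
}

/* during setup: invoke the callback roughly every 1000 VM steps */
sqlite3_progress_handler(db, 1000, myProgress, (void*)&g_cancel);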
return h; } -/* -** Compute 10 to the E-th power. Examples: E==1 results in 10. -** E==2 results in 100. E==50 results in 1.0e50. +/* Double-Double multiplication. (x[0],x[1]) *= (y,yy) ** -** This routine only works for values of E between 1 and 341. +** Reference: +** T. J. Dekker, "A Floating-Point Technique for Extending the +** Available Precision". 1971-07-26. */ -static LONGDOUBLE_TYPE sqlite3Pow10(int E){ -#if defined(_MSC_VER) - static const LONGDOUBLE_TYPE x[] = { - 1.0e+001L, - 1.0e+002L, - 1.0e+004L, - 1.0e+008L, - 1.0e+016L, - 1.0e+032L, - 1.0e+064L, - 1.0e+128L, - 1.0e+256L - }; - LONGDOUBLE_TYPE r = 1.0; - int i; - assert( E>=0 && E<=307 ); - for(i=0; E!=0; i++, E >>=1){ - if( E & 1 ) r *= x[i]; - } - return r; -#else - LONGDOUBLE_TYPE x = 10.0; - LONGDOUBLE_TYPE r = 1.0; - while(1){ - if( E & 1 ) r *= x; - E >>= 1; - if( E==0 ) break; - x *= x; - } - return r; -#endif +static void dekkerMul2(volatile double *x, double y, double yy){ + /* + ** The "volatile" keywords on parameter x[] and on local variables + ** below are needed to force intermediate results to be truncated to + ** binary64 rather than be carried around in an extended-precision + ** format. The truncation is necessary for the Dekker algorithm to + ** work. Intel x86 floating point might omit the truncation without + ** the use of volatile. + */ + volatile double tx, ty, p, q, c, cc; + double hx, hy; + u64 m; + memcpy(&m, (void*)&x[0], 8); + m &= 0xfffffffffc000000LL; + memcpy(&hx, &m, 8); + tx = x[0] - hx; + memcpy(&m, &y, 8); + m &= 0xfffffffffc000000LL; + memcpy(&hy, &m, 8); + ty = y - hy; + p = hx*hy; + q = hx*ty + tx*hy; + c = p+q; + cc = p - c + q + tx*ty; + cc = x[0]*yy + x[1]*y + cc; + x[0] = c + cc; + x[1] = c - x[0]; + x[1] += cc; } /*@@ -33924,12 +34927,11 @@ int incr;
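dekkerMul2() depends on an exact split: masking away the low 26 mantissa bits leaves a high part hx whose partial products can be combined with their rounding error recovered, and the remainder tx = x - hx is computed exactly. The split is easy to verify on its own; this standalone check uses the same mask as the code above:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void){
  double x = 3.141592653589793, hx, tx;
  uint64_t m;
  memcpy(&m, &x, 8);
  m &= 0xfffffffffc000000ULL;  /* sign, exponent, top 26 mantissa bits */
  memcpy(&hx, &m, 8);
  tx = x - hx;                 /* exact: the low-order bits of x */
  printf("hx=%.17g tx=%.17g ok=%d\n", hx, tx, hx+tx==x); /* ok=1 */
  return 0;
}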
const char *zEnd; /* sign * significand * (10 ^ (esign * exponent)) */ int sign = 1; /* sign of significand */ - i64 s = 0; /* significand */ + u64 s = 0; /* significand */ int d = 0; /* adjust exponent for shifting decimal point */ int esign = 1; /* sign of exponent */ int e = 0; /* exponent */ int eValid = 1; /* True exponent is either not used or is well-formed */ - double result; int nDigit = 0; /* Number of digits processed */ int eType = 1; /* 1: pure integer, 2+: fractional -1 or less: bad UTF16 */@@ -33969,7 +34971,7 @@ /* copy max significant digits to significand */
while( z<zEnd && sqlite3Isdigit(*z) ){ s = s*10 + (*z - '0'); z+=incr; nDigit++; - if( s>=((LARGEST_INT64-9)/10) ){ + if( s>=((LARGEST_UINT64-9)/10) ){ /* skip non-significant significand digits ** (increase exponent by d to shift decimal left) */ while( z<zEnd && sqlite3Isdigit(*z) ){ z+=incr; d++; }@@ -33984,7 +34986,7 @@ eType++;
/* copy digits from after decimal to significand ** (decrease exponent by d to shift decimal right) */ while( z<zEnd && sqlite3Isdigit(*z) ){ - if( s<((LARGEST_INT64-9)/10) ){ + if( s<((LARGEST_UINT64-9)/10) ){ s = s*10 + (*z - '0'); d--; nDigit++;@@ -34024,79 +35026,89 @@ /* skip trailing spaces */
while( z<zEnd && sqlite3Isspace(*z) ) z+=incr; do_atof_calc: + /* Zero is a special case */ + if( s==0 ){ + *pResult = sign<0 ? -0.0 : +0.0; + goto atof_return; + } + /* adjust exponent by d, and update sign */ e = (e*esign) + d; - if( e<0 ) { - esign = -1; - e *= -1; - } else { - esign = 1; + + /* Try to adjust the exponent to make it smaller */ + while( e>0 && s<(LARGEST_UINT64/10) ){ + s *= 10; + e--; + } + while( e<0 && (s%10)==0 ){ + s /= 10; + e++; } - if( s==0 ) { - /* In the IEEE 754 standard, zero is signed. */ - result = sign<0 ? -(double)0 : (double)0; - } else { - /* Attempt to reduce exponent. - ** - ** Branches that are not required for the correct answer but which only - ** help to obtain the correct answer faster are marked with special - ** comments, as a hint to the mutation tester. - */ - while( e>0 ){ /*OPTIMIZATION-IF-TRUE*/ - if( esign>0 ){ - if( s>=(LARGEST_INT64/10) ) break; /*OPTIMIZATION-IF-FALSE*/ - s *= 10; - }else{ - if( s%10!=0 ) break; /*OPTIMIZATION-IF-FALSE*/ - s /= 10; - } - e--; + if( e==0 ){ + *pResult = s; + }else if( sqlite3Config.bUseLongDouble ){ + LONGDOUBLE_TYPE r = (LONGDOUBLE_TYPE)s; + if( e>0 ){ + while( e>=100 ){ e-=100; r *= 1.0e+100L; } + while( e>=10 ){ e-=10; r *= 1.0e+10L; } + while( e>=1 ){ e-=1; r *= 1.0e+01L; } + }else{ + while( e<=-100 ){ e+=100; r *= 1.0e-100L; } + while( e<=-10 ){ e+=10; r *= 1.0e-10L; } + while( e<=-1 ){ e+=1; r *= 1.0e-01L; } } - - /* adjust the sign of significand */ - s = sign<0 ? -s : s; - - if( e==0 ){ /*OPTIMIZATION-IF-TRUE*/ - result = (double)s; - }else{ - /* attempt to handle extremely small/large numbers better */ - if( e>307 ){ /*OPTIMIZATION-IF-TRUE*/ - if( e<342 ){ /*OPTIMIZATION-IF-TRUE*/ - LONGDOUBLE_TYPE scale = sqlite3Pow10(e-308); - if( esign<0 ){ - result = s / scale; - result /= 1.0e+308; - }else{ - result = s * scale; - result *= 1.0e+308; - } - }else{ assert( e>=342 ); - if( esign<0 ){ - result = 0.0*s; - }else{ + assert( r>=0.0 ); + if( r>+1.7976931348623157081452742373e+308L ){ #ifdef INFINITY - result = INFINITY*s; + *pResult = +INFINITY; #else - result = 1e308*1e308*s; /* Infinity */ + *pResult = 1.0e308*10.0; #endif - } - } - }else{ - LONGDOUBLE_TYPE scale = sqlite3Pow10(e); - if( esign<0 ){ - result = s / scale; - }else{ - result = s * scale; - } + }else{ + *pResult = (double)r; + } + }else{ + double rr[2]; + u64 s2; + rr[0] = (double)s; + s2 = (u64)rr[0]; + rr[1] = s>=s2 ? (double)(s - s2) : -(double)(s2 - s); + if( e>0 ){ + while( e>=100 ){ + e -= 100; + dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83); + } + while( e>=10 ){ + e -= 10; + dekkerMul2(rr, 1.0e+10, 0.0); + } + while( e>=1 ){ + e -= 1; + dekkerMul2(rr, 1.0e+01, 0.0); + } + }else{ + while( e<=-100 ){ + e += 100; + dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117); + } + while( e<=-10 ){ + e += 10; + dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27); + } + while( e<=-1 ){ + e += 1; + dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18); } } + *pResult = rr[0]+rr[1]; + if( sqlite3IsNaN(*pResult) ) *pResult = 1e300*1e300; } - - /* store the result */ - *pResult = result; + if( sign<0 ) *pResult = -*pResult; + assert( !sqlite3IsNaN(*pResult) ); - /* return true if number and no extra non-whitespace chracters after */ +atof_return: + /* return true if number and no extra non-whitespace characters after */ if( z==zEnd && nDigit>0 && eValid && eType>0 ){ return eType; }else if( eType>=2 && (eType==3 || eValid) && nDigit>0 ){@@ -34113,11 +35125,14 @@ #pragma warning(default : 4756)
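Both conversion paths above apply the decimal exponent in chunks of 100, 10, and 1, so even an extreme exponent costs only a handful of multiplications, and the double-double path can pair each chunk with its precomputed error term. The chunking on its own:

#include <stdio.h>

int main(void){
  double r = 5.0;        /* stand-in significand */
  int e = 234, nMul = 0;
  while( e>=100 ){ e -= 100; r *= 1e100; nMul++; }
  while( e>=10 ){ e -= 10; r *= 1e10; nMul++; }
  while( e>=1 ){ e -= 1; r *= 10.0; nMul++; }
  printf("r=%g after %d multiplies\n", r, nMul); /* 5e+234 after 9 */
  return 0;
}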
#endif /* -** Render an signed 64-bit integer as text. Store the result in zOut[]. +** Render a signed 64-bit integer as text. Store the result in zOut[] and +** return the length of the string that was stored, in bytes. The value +** returned does not include the zero terminator at the end of the output +** string. ** ** The caller must ensure that zOut[] is at least 21 bytes in size. */ -SQLITE_PRIVATE void sqlite3Int64ToText(i64 v, char *zOut){ +SQLITE_PRIVATE int sqlite3Int64ToText(i64 v, char *zOut){ int i; u64 x; char zTemp[22];@@ -34128,12 +35143,15 @@ x = v;
} i = sizeof(zTemp)-2; zTemp[sizeof(zTemp)-1] = 0; - do{ - zTemp[i--] = (x%10) + '0'; + while( 1 /*exit-by-break*/ ){ + zTemp[i] = (x%10) + '0'; x = x/10; - }while( x ); - if( v<0 ) zTemp[i--] = '-'; - memcpy(zOut, &zTemp[i+1], sizeof(zTemp)-1-i); + if( x==0 ) break; + i--; + }; + if( v<0 ) zTemp[--i] = '-'; + memcpy(zOut, &zTemp[i], sizeof(zTemp)-i); + return sizeof(zTemp)-1-i; } /*@@ -34226,7 +35244,7 @@ if( u>LARGEST_INT64 ){
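Because sqlite3Int64ToText() now reports the rendered length, a caller can append the text without a second strlen() pass. A hypothetical caller, with zNum sized for the worst case of 20 characters plus the terminator:

char zNum[21];
int n = sqlite3Int64ToText(-9223372036854775807LL - 1, zNum);
/* zNum = "-9223372036854775808", n = 20 */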
/* This test and assignment is needed only to suppress UB warnings ** from clang and -fsanitize=undefined. This test and assignment make ** the code a little larger and slower, and no harm comes from omitting - ** them, but we must appaise the undefined-behavior pharisees. */ + ** them, but we must appease the undefined-behavior pharisees. */ *pNum = neg ? SMALLEST_INT64 : LARGEST_INT64; }else if( neg ){ *pNum = -(i64)u;@@ -34298,11 +35316,15 @@ for(k=i; sqlite3Isxdigit(z[k]); k++){
u = u*16 + sqlite3HexToInt(z[k]); } memcpy(pOut, &u, 8); - return (z[k]==0 && k-i<=16) ? 0 : 2; + if( k-i>16 ) return 2; + if( z[k]!=0 ) return 1; + return 0; }else #endif /* SQLITE_OMIT_HEX_INTEGER */ { - return sqlite3Atoi64(z, pOut, sqlite3Strlen30(z), SQLITE_UTF8); + int n = (int)(0x3fffffff&strspn(z,"+- \n\t0123456789")); + if( z[n] ) n++; + return sqlite3Atoi64(z, pOut, n, SQLITE_UTF8); } }@@ -34334,7 +35356,7 @@ ){
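The strspn() prefilter above measures the leading run of integer-looking bytes and then deliberately includes one extra byte, so a trailing garbage character still reaches sqlite3Atoi64() and makes it report a malformed value. The same measurement in isolation:

#include <stdio.h>
#include <string.h>

int main(void){
  const char *z = " +123xyz";
  int n = (int)(0x3fffffff & strspn(z, "+- \n\t0123456789"));
  if( z[n] ) n++;        /* keep one invalid byte, if any */
  printf("%d\n", n);     /* 6: the " +123" prefix plus the 'x' */
  return 0;
}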
u32 u = 0; zNum += 2; while( zNum[0]=='0' ) zNum++; - for(i=0; sqlite3Isxdigit(zNum[i]) && i<8; i++){ + for(i=0; i<8 && sqlite3Isxdigit(zNum[i]); i++){ u = u*16 + sqlite3HexToInt(zNum[i]); } if( (u&0x80000000)==0 && sqlite3Isxdigit(zNum[i])==0 ){@@ -34382,6 +35404,153 @@ return x;
} /* +** Decode a floating-point value into an approximate decimal +** representation. +** +** Round the decimal representation to n significant digits if +** n is positive. Or round to -n significant digits after the +** decimal point if n is negative. No rounding is performed if +** n is zero. +** +** The significant digits of the decimal representation are +** stored in p->z[] which is often (but not always) a pointer +** into the middle of p->zBuf[]. There are p->n significant digits. +** The p->z[] array is *not* zero-terminated. +*/ +SQLITE_PRIVATE void sqlite3FpDecode(FpDecode *p, double r, int iRound, int mxRound){ + int i; + u64 v; + int e, exp = 0; + p->isSpecial = 0; + p->z = p->zBuf; + + /* Convert negative numbers to positive. Deal with Infinity, 0.0, and + ** NaN. */ + if( r<0.0 ){ + p->sign = '-'; + r = -r; + }else if( r==0.0 ){ + p->sign = '+'; + p->n = 1; + p->iDP = 1; + p->z = "0"; + return; + }else{ + p->sign = '+'; + } + memcpy(&v,&r,8); + e = v>>52; + if( (e&0x7ff)==0x7ff ){ + p->isSpecial = 1 + (v!=0x7ff0000000000000LL); + p->n = 0; + p->iDP = 0; + return; + } + + /* Multiply r by powers of ten until it lands somewhere in between + ** 1.0e+17 and 1.0e+19. + */ + if( sqlite3Config.bUseLongDouble ){ + LONGDOUBLE_TYPE rr = r; + if( rr>=1.0e+19 ){ + while( rr>=1.0e+119L ){ exp+=100; rr *= 1.0e-100L; } + while( rr>=1.0e+29L ){ exp+=10; rr *= 1.0e-10L; } + while( rr>=1.0e+19L ){ exp++; rr *= 1.0e-1L; } + }else{ + while( rr<1.0e-97L ){ exp-=100; rr *= 1.0e+100L; } + while( rr<1.0e+07L ){ exp-=10; rr *= 1.0e+10L; } + while( rr<1.0e+17L ){ exp--; rr *= 1.0e+1L; } + } + v = (u64)rr; + }else{ + /* If high-precision floating point is not available using "long double", + ** then use Dekker-style double-double computation to increase the + ** precision. + ** + ** The error terms on constants like 1.0e+100 were computed using the + ** decimal extension, for example as follows: + ** + ** SELECT decimal_exp(decimal_sub('1.0e+100',decimal(1.0e+100))); + */ + double rr[2]; + rr[0] = r; + rr[1] = 0.0; + if( rr[0]>9.223372036854774784e+18 ){ + while( rr[0]>9.223372036854774784e+118 ){ + exp += 100; + dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117); + } + while( rr[0]>9.223372036854774784e+28 ){ + exp += 10; + dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27); + } + while( rr[0]>9.223372036854774784e+18 ){ + exp += 1; + dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18); + } + }else{ + while( rr[0]<9.223372036854774784e-83 ){ + exp -= 100; + dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83); + } + while( rr[0]<9.223372036854774784e+07 ){ + exp -= 10; + dekkerMul2(rr, 1.0e+10, 0.0); + } + while( rr[0]<9.22337203685477478e+17 ){ + exp -= 1; + dekkerMul2(rr, 1.0e+01, 0.0); + } + } + v = rr[1]<0.0 ? (u64)rr[0]-(u64)(-rr[1]) : (u64)rr[0]+(u64)rr[1]; + } + + + /* Extract significant digits.
*/ + i = sizeof(p->zBuf)-1; + assert( v>0 ); + while( v ){ p->zBuf[i--] = (v%10) + '0'; v /= 10; } + assert( i>=0 && i<sizeof(p->zBuf)-1 ); + p->n = sizeof(p->zBuf) - 1 - i; + assert( p->n>0 ); + assert( p->n<sizeof(p->zBuf) ); + p->iDP = p->n + exp; + if( iRound<0 ){ + iRound = p->iDP - iRound; + if( iRound==0 && p->zBuf[i+1]>='5' ){ + iRound = 1; + p->zBuf[i--] = '0'; + p->n++; + p->iDP++; + } + } + if( iRound>0 && (iRound<p->n || p->n>mxRound) ){ + char *z = &p->zBuf[i+1]; + if( iRound>mxRound ) iRound = mxRound; + p->n = iRound; + if( z[iRound]>='5' ){ + int j = iRound-1; + while( 1 /*exit-by-break*/ ){ + z[j]++; + if( z[j]<='9' ) break; + z[j] = '0'; + if( j==0 ){ + p->z[i--] = '1'; + p->n++; + p->iDP++; + break; + }else{ + j--; + } + } + } + } + p->z = &p->zBuf[i+1]; + assert( i+p->n < sizeof(p->zBuf) ); + while( ALWAYS(p->n>0) && p->z[p->n-1]=='0' ){ p->n--; } +} + +/* ** Try to convert z into an unsigned 32-bit integer. Return true on ** success and false if there is an error. **@@ -34644,121 +35813,32 @@ ** single-byte case. All code should use the MACRO version as
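Putting the output fields together: the decoded value is the digit string scaled so that the decimal point falls after the iDP-th digit, i.e. 0.z[0..n-1] x 10^iDP. So, for example, 12.5 should decode to z = "125", n = 3, iDP = 2, while 0.001 decodes to z = "1", n = 1, iDP = -2, which is how the %f rendering earlier in this file knows to emit two zeros after the decimal point.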
** this function assumes the single-byte case has already been handled. */ SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *p, u32 *v){ - u32 a,b; + u64 v64; + u8 n; - /* The 1-byte case. Overwhelmingly the most common. Handled inline - ** by the getVarin32() macro */ - a = *p; - /* a: p0 (unmasked) */ -#ifndef getVarint32 - if (!(a&0x80)) - { - /* Values between 0 and 127 */ - *v = a; - return 1; - } -#endif + /* Assume that the single-byte case has already been handled by + ** the getVarint32() macro */ + assert( (p[0] & 0x80)!=0 ); - /* The 2-byte case */ - p++; - b = *p; - /* b: p1 (unmasked) */ - if (!(b&0x80)) - { - /* Values between 128 and 16383 */ - a &= 0x7f; - a = a<<7; - *v = a | b; + if( (p[1] & 0x80)==0 ){ + /* This is the two-byte case */ + *v = ((p[0]&0x7f)<<7) | p[1]; return 2; } - - /* The 3-byte case */ - p++; - a = a<<14; - a |= *p; - /* a: p0<<14 | p2 (unmasked) */ - if (!(a&0x80)) - { - /* Values between 16384 and 2097151 */ - a &= (0x7f<<14)|(0x7f); - b &= 0x7f; - b = b<<7; - *v = a | b; + if( (p[2] & 0x80)==0 ){ + /* This is the three-byte case */ + *v = ((p[0]&0x7f)<<14) | ((p[1]&0x7f)<<7) | p[2]; return 3; } - - /* A 32-bit varint is used to store size information in btrees. - ** Objects are rarely larger than 2MiB limit of a 3-byte varint. - ** A 3-byte varint is sufficient, for example, to record the size - ** of a 1048569-byte BLOB or string. - ** - ** We only unroll the first 1-, 2-, and 3- byte cases. The very - ** rare larger cases can be handled by the slower 64-bit varint - ** routine. - */ -#if 1 - { - u64 v64; - u8 n; - - n = sqlite3GetVarint(p-2, &v64); - assert( n>3 && n<=9 ); - if( (v64 & SQLITE_MAX_U32)!=v64 ){ - *v = 0xffffffff; - }else{ - *v = (u32)v64; - } - return n; - } - -#else - /* For following code (kept for historical record only) shows an - ** unrolling for the 3- and 4-byte varint cases. This code is - ** slightly faster, but it is also larger and much harder to test. - */ - p++; - b = b<<14; - b |= *p; - /* b: p1<<14 | p3 (unmasked) */ - if (!(b&0x80)) - { - /* Values between 2097152 and 268435455 */ - b &= (0x7f<<14)|(0x7f); - a &= (0x7f<<14)|(0x7f); - a = a<<7; - *v = a | b; - return 4; - } - - p++; - a = a<<14; - a |= *p; - /* a: p0<<28 | p2<<14 | p4 (unmasked) */ - if (!(a&0x80)) - { - /* Values between 268435456 and 34359738367 */ - a &= SLOT_4_2_0; - b &= SLOT_4_2_0; - b = b<<7; - *v = a | b; - return 5; - } - - /* We can only reach this point when reading a corrupt database - ** file. In that case we are not in any hurry. Use the (relatively - ** slow) general-purpose sqlite3GetVarint() routine to extract the - ** value. */ - { - u64 v64; - u8 n; - - p -= 4; - n = sqlite3GetVarint(p, &v64); - assert( n>5 && n<=9 ); + /* four or more bytes */ + n = sqlite3GetVarint(p, &v64); + assert( n>3 && n<=9 ); + if( (v64 & SQLITE_MAX_U32)!=v64 ){ + *v = 0xffffffff; + }else{ *v = (u32)v64; - return n; } -#endif + return n; } /*@@ -34909,7 +35989,7 @@ }
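The retained fast paths cover the one-, two-, and three-byte encodings; anything longer is delegated to the 64-bit decoder and clamped. For reference, a standalone sketch of the same big-endian, 7-bits-per-byte scheme (high bit set means more bytes follow):

#include <stdio.h>
#include <stdint.h>

static int varint32Decode(const unsigned char *p, uint32_t *v){
  if( (p[0] & 0x80)==0 ){ *v = p[0]; return 1; }
  if( (p[1] & 0x80)==0 ){ *v = ((p[0]&0x7f)<<7) | p[1]; return 2; }
  if( (p[2] & 0x80)==0 ){
    *v = ((uint32_t)(p[0]&0x7f)<<14) | ((p[1]&0x7f)<<7) | p[2];
    return 3;
  }
  return 0;  /* longer encodings: use a full 64-bit decoder */
}

int main(void){
  unsigned char a[2] = { 0x81, 0x00 };
  uint32_t v;
  int n = varint32Decode(a, &v);
  printf("%u (%d bytes)\n", v, n);  /* 128 (2 bytes) */
  return 0;
}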
} /* -** Attempt to add, substract, or multiply the 64-bit signed value iB against +** Attempt to add, subtract, or multiply the 64-bit signed value iB against ** the other 64-bit signed integer at *pA and store the result in *pA. ** Return 0 on success. Or if the operation would have resulted in an ** overflow, leave *pA unchanged and return 1.@@ -35195,6 +36275,104 @@ }while( i<mx );
return 0; } +/* +** High-resolution hardware timer used for debugging and testing only. +*/ +#if defined(VDBE_PROFILE) \ + || defined(SQLITE_PERFORMANCE_TRACE) \ + || defined(SQLITE_ENABLE_STMT_SCANSTATUS) +/************** Include hwtime.h in the middle of util.c *********************/ +/************** Begin file hwtime.h ******************************************/ +/* +** 2008 May 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains inline asm code for retrieving "high-performance" +** counters for x86 and x86_64 class CPUs. +*/ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H + +/* +** The following routine only works on Pentium-class (or newer) processors. +** It uses the RDTSC opcode to read the cycle count value out of the +** processor and returns that value. This can be used for high-res +** profiling. +*/ +#if !defined(__STRICT_ANSI__) && \ + (defined(__GNUC__) || defined(_MSC_VER)) && \ + (defined(i386) || defined(__i386__) || defined(_M_IX86)) + + #if defined(__GNUC__) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + + #elif defined(_MSC_VER) + + __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ + __asm { + rdtsc + ret ; return value at EDX:EAX + } + } + + #endif + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long long retval; + unsigned long junk; + __asm__ __volatile__ ("\n\ + 1: mftbu %1\n\ + mftb %L0\n\ + mftbu %0\n\ + cmpw %0,%1\n\ + bne 1b" + : "=r" (retval), "=r" (junk)); + return retval; + } + +#else + + /* + ** asm() is needed for hardware timing support. Without asm(), + ** disable the sqlite3Hwtime() routine. + ** + ** sqlite3Hwtime() is only used for some obscure debugging + ** and analysis configurations, not in any deliverable, so this + ** should not be a great loss. + */ +SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } + +#endif + +#endif /* !defined(SQLITE_HWTIME_H) */ + +/************** End of hwtime.h **********************************************/ +/************** Continuing where we left off in util.c ***********************/ +#endif + /************** End of util.c ************************************************/ /************** Begin file hash.c ********************************************/ /*@@ -35296,7 +36474,7 @@ }
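sqlite3Hwtime() is just a raw cycle counter, so profiling code brackets a region and subtracts. A fragment of how a profiling build might use it (assumes the surrounding SQLite headers; deltas are only meaningful on one core at a steady clock rate):

sqlite_uint64 t0, t1;
t0 = sqlite3Hwtime();
/* ... code under measurement ... */
t1 = sqlite3Hwtime();
printf("%llu cycles\n", (unsigned long long)(t1 - t0));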
} -/* Resize the hash table so that it cantains "new_size" buckets. +/* Resize the hash table so that it contains "new_size" buckets. ** ** The hash table might fail to resize if sqlite3_malloc() fails or ** if the new size is the same as the prior size.@@ -35365,12 +36543,13 @@ elem = pH->first;
count = pH->count; } if( pHash ) *pHash = h; - while( count-- ){ + while( count ){ assert( elem!=0 ); if( sqlite3StrICmp(elem->pKey,pKey)==0 ){ return elem; } elem = elem->next; + count--; } return &nullElement; }@@ -35655,19 +36834,20 @@ /* 170 */ "VBegin" OpHelp(""),
/* 171 */ "VCreate" OpHelp(""), /* 172 */ "VDestroy" OpHelp(""), /* 173 */ "VOpen" OpHelp(""), - /* 174 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"), - /* 175 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), - /* 176 */ "VRename" OpHelp(""), - /* 177 */ "Pagecount" OpHelp(""), - /* 178 */ "MaxPgcnt" OpHelp(""), - /* 179 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"), - /* 180 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"), - /* 181 */ "Trace" OpHelp(""), - /* 182 */ "CursorHint" OpHelp(""), - /* 183 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), - /* 184 */ "Noop" OpHelp(""), - /* 185 */ "Explain" OpHelp(""), - /* 186 */ "Abortable" OpHelp(""), + /* 174 */ "VCheck" OpHelp(""), + /* 175 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"), + /* 176 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), + /* 177 */ "VRename" OpHelp(""), + /* 178 */ "Pagecount" OpHelp(""), + /* 179 */ "MaxPgcnt" OpHelp(""), + /* 180 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"), + /* 181 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"), + /* 182 */ "Trace" OpHelp(""), + /* 183 */ "CursorHint" OpHelp(""), + /* 184 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), + /* 185 */ "Noop" OpHelp(""), + /* 186 */ "Explain" OpHelp(""), + /* 187 */ "Abortable" OpHelp(""), }; return azName[i]; }@@ -35729,7 +36909,9 @@ unsigned int nJrnl; /* Space allocated for aJrnl[] */
char *aJrnl; /* Journal content */ int szPage; /* Last known page size */ sqlite3_int64 szDb; /* Database file size. -1 means unknown */ + char *aData; /* Buffer to hold page data */ }; +#define SQLITE_KVOS_SZ 133073 /* ** Methods for KVVfsFile@@ -36092,8 +37274,7 @@ }
if( j+n>nOut ) return -1; memset(&aOut[j], 0, n); j += n; - c = aIn[i]; - if( c==0 ) break; + if( c==0 || mult==1 ) break; /* progress stalled if mult==1 */ }else{ aOut[j] = c<<4; c = kvvfsHexValue[aIn[++i]];@@ -36170,6 +37351,7 @@
SQLITE_KV_LOG(("xClose %s %s\n", pFile->zClass, pFile->isJournal ? "journal" : "db")); sqlite3_free(pFile->aJrnl); + sqlite3_free(pFile->aData); return SQLITE_OK; }@@ -36218,7 +37400,7 @@ KVVfsFile *pFile = (KVVfsFile*)pProtoFile;
unsigned int pgno; int got, n; char zKey[30]; - char aData[133073]; + char *aData = pFile->aData; assert( iOfst>=0 ); assert( iAmt>=0 ); SQLITE_KV_LOG(("xRead('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst));@@ -36235,7 +37417,8 @@ }else{
pgno = 1; } sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno); - got = sqlite3KvvfsMethods.xRead(pFile->zClass, zKey, aData, sizeof(aData)-1); + got = sqlite3KvvfsMethods.xRead(pFile->zClass, zKey, + aData, SQLITE_KVOS_SZ-1); if( got<0 ){ n = 0; }else{@@ -36243,7 +37426,7 @@ aData[got] = 0;
if( iOfst+iAmt<512 ){ int k = iOfst+iAmt; aData[k*2] = 0; - n = kvvfsDecode(aData, &aData[2000], sizeof(aData)-2000); + n = kvvfsDecode(aData, &aData[2000], SQLITE_KVOS_SZ-2000); if( n>=iOfst+iAmt ){ memcpy(zBuf, &aData[2000+iOfst], iAmt); n = iAmt;@@ -36302,7 +37485,7 @@ ){
KVVfsFile *pFile = (KVVfsFile*)pProtoFile; unsigned int pgno; char zKey[30]; - char aData[131073]; + char *aData = pFile->aData; SQLITE_KV_LOG(("xWrite('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst)); assert( iAmt>=512 && iAmt<=65536 ); assert( (iAmt & (iAmt-1))==0 );@@ -36511,6 +37694,10 @@ pFile->zClass = "session";
}else{ pFile->zClass = "local"; } + pFile->aData = sqlite3_malloc64(SQLITE_KVOS_SZ); + if( pFile->aData==0 ){ + return SQLITE_NOMEM; + } pFile->aJrnl = 0; pFile->nJrnl = 0; pFile->szPage = -1;@@ -36674,7 +37861,7 @@ **
** This source file is organized into divisions where the logic for various ** subfunctions is contained within the appropriate division. PLEASE ** KEEP THE STRUCTURE OF THIS FILE INTACT. New code should be placed -** in the correct division and should be clearly labeled. +** in the correct division and should be clearly labelled. ** ** The layout of divisions is as follows: **@@ -36724,7 +37911,7 @@ # endif
#endif /* Use pread() and pwrite() if they are available */ -#if defined(__APPLE__) +#if defined(__APPLE__) || defined(__linux__) # define HAVE_PREAD 1 # define HAVE_PWRITE 1 #endif@@ -36747,7 +37934,8 @@ #include <unistd.h> /* amalgamator: keep */
/* #include <time.h> */ #include <sys/time.h> /* amalgamator: keep */ #include <errno.h> -#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 +#if (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) \ + &&am