diff --git a/.github/workflows/update-json-assets.yaml b/.github/workflows/update-json-assets.yaml index e465515..eb5a796 100644 --- a/.github/workflows/update-json-assets.yaml +++ b/.github/workflows/update-json-assets.yaml @@ -26,6 +26,8 @@ jobs: with: node-version: latest cache: yarn + - name: install jq + run: sudo apt update && sudo apt install jq -y - name: Install run: yarn install - name: Build @@ -41,7 +43,19 @@ jobs: fi done - git add assets/**/*.json + json_assets="assets/**/*.json" + for i in $json_assets; do + # verify that the file contains valid JSON + # based on: https://stackoverflow.com/a/46955018 + if jq -e . >/dev/null 2>&1 <<<"$(cat "$i")"; then + echo "successfully parsed JSON contents of '$i'" + git add "$i" + else + echo "failed to parse JSON contents of '$i' -> resetting to previous commit ..." + git restore --source main "$i" + fi + done + git status - name: creating the pull request body diff --git a/assets/details/lua_nginx_module.json b/assets/details/lua_nginx_module.json index 4c76b4d..09c03c9 100644 --- a/assets/details/lua_nginx_module.json +++ b/assets/details/lua_nginx_module.json @@ -35,14 +35,14 @@ [4,"header_filter_by_lua_block","Uses Lua code specified in { lua-script } to define an output header filter.","

Uses Lua code specified in { lua-script } to define an output header filter.

Note that the following API functions are currently disabled within this context:

Here is an example of overriding a response header (or adding one if absent) in our Lua header filter:

 location / {\n     proxy_pass http://mybackend;\n     header_filter_by_lua_block {\n         ngx.header.Foo = \"blah\"\n     }\n }

This directive was first introduced in the v0.9.17 release.

",["Uses Lua code specified in { lua-script } to define an output header filter.","Note that the following API functions are currently disabled within this context:","\nOutput API functions (e.g., ngx.say and ngx.send_headers)\nControl API functions (e.g., ngx.redirect and ngx.exec)\nSubrequest API functions (e.g., ngx.location.capture and ngx.location.capture_multi)\nCosocket API functions (e.g., ngx.socket.tcp and ngx.req.socket).\n","Here is an example of overriding a response header (or adding one if absent) in our Lua header filter:"," location / {\n proxy_pass http://mybackend;\n header_filter_by_lua_block {\n ngx.header.Foo = \"blah\"\n }\n }","This directive was first introduced in the v0.9.17 release."],"
Syntax:header_filter_by_lua_block { lua-script }
Default:
header_filter_by_lua_block {}
Context:http,server,location,location if
"], [4,"header_filter_by_lua_file","Equivalent to header_filter_by_lua_block, except that the file specified by contains the Lua code, or as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.","

Equivalent to header_filter_by_lua_block, except that the file specified by <path-to-lua-script-file> contains the Lua code, or as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.

When a relative path like foo/bar.lua is given, they will be turned into the absolute path relative to the server prefix path determined by the -p PATH command-line option while starting the Nginx server.

This directive was first introduced in the v0.2.1rc20 release.

",["Equivalent to header_filter_by_lua_block, except that the file specified by contains the Lua code, or as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.","When a relative path like foo/bar.lua is given, they will be turned into the absolute path relative to the server prefix path determined by the -p PATH command-line option while starting the Nginx server.","This directive was first introduced in the v0.2.1rc20 release."],"
Syntax:header_filter_by_lua_file
Default:
header_filter_by_lua_file .lua;
Context:http,server,location,location if
"], [4,"body_filter_by_lua","NOTE Use of this directive is discouraged following the v0.9.17 release. Use the body_filter_by_lua_block directive instead.","

NOTE Use of this directive is discouraged following the v0.9.17 release. Use the body_filter_by_lua_block directive instead.

Similar to the body_filter_by_lua_block directive, but accepts the Lua source directly in an Nginx string literal (which requires\nspecial character escaping).

For instance,

 body_filter_by_lua '\n     local data, eof = ngx.arg[1], ngx.arg[2]\n ';

This directive was first introduced in the v0.5.0rc32 release.

",["NOTE Use of this directive is discouraged following the v0.9.17 release. Use the body_filter_by_lua_block directive instead.","Similar to the body_filter_by_lua_block directive, but accepts the Lua source directly in an Nginx string literal (which requires\nspecial character escaping).","For instance,"," body_filter_by_lua '\n local data, eof = ngx.arg[1], ngx.arg[2]\n ';","This directive was first introduced in the v0.5.0rc32 release."],"
Syntax:body_filter_by_lua
Default:
body_filter_by_lua '';
Context:http,server,location,location if
"], -[4,"body_filter_by_lua_block","Uses Lua code specified in { lua-script } to define an output body filter.","

Uses Lua code specified in { lua-script } to define an output body filter.

The input data chunk is passed via ngx.arg[1] (as a Lua string value) and the \"eof\" flag indicating the end of the response body data stream is passed via ngx.arg[2] (as a Lua boolean value).

Behind the scene, the \"eof\" flag is just the last_buf (for main requests) or last_in_chain (for subrequests) flag of the Nginx chain link buffers. (Before the v0.7.14 release, the \"eof\" flag does not work at all in subrequests.)

The output data stream can be aborted immediately by running the following Lua statement:

 return ngx.ERROR

This will truncate the response body and usually result in incomplete and also invalid responses.

The Lua code can pass its own modified version of the input data chunk to the downstream Nginx output body filters by overriding ngx.arg[1] with a Lua string or a Lua table of strings. For example, to transform all the lowercase letters in the response body, we can just write:

 location / {\n     proxy_pass http://mybackend;\n     body_filter_by_lua_block {\n         ngx.arg[1] = string.upper(ngx.arg[1])\n     }\n }

When setting nil or an empty Lua string value to ngx.arg[1], no data chunk will be passed to the downstream Nginx output filters at all.

Likewise, new \"eof\" flag can also be specified by setting a boolean value to ngx.arg[2]. For example,

 location /t {\n     echo hello world;\n     echo hiya globe;\n\n     body_filter_by_lua_block {\n         local chunk = ngx.arg[1]\n         if string.match(chunk, \"hello\") then\n             ngx.arg[2] = true  -- new eof\n             return\n         end\n\n         -- just throw away any remaining chunk data\n         ngx.arg[1] = nil\n     }\n }

Then GET /t will just return the output

hello world\n

That is, when the body filter sees a chunk containing the word \"hello\", then it will set the \"eof\" flag to true immediately, resulting in truncated but still valid responses.

When the Lua code may change the length of the response body, then it is required to always clear out the Content-Length response header (if any) in a header filter to enforce streaming output, as in

 location /foo {\n     # fastcgi_pass/proxy_pass/...\n\n     header_filter_by_lua_block {\n         ngx.header.content_length = nil\n     }\n     body_filter_by_lua_block {\n         ngx.arg[1] = string.len(ngx.arg[1]) .. \"\\n\"\n     }\n }

Note that the following API functions are currently disabled within this context due to the limitations in Nginx output filter's current implementation:

Nginx output filters may be called multiple times for a single request because response body may be delivered in chunks. Thus, the Lua code specified by in this directive may also run multiple times in the lifetime of a single HTTP request.

This directive was first introduced in the v0.9.17 release.

",["Uses Lua code specified in { lua-script } to define an output body filter.","The input data chunk is passed via ngx.arg[1] (as a Lua string value) and the \"eof\" flag indicating the end of the response body data stream is passed via ngx.arg[2] (as a Lua boolean value).","Behind the scene, the \"eof\" flag is just the last_buf (for main requests) or last_in_chain (for subrequests) flag of the Nginx chain link buffers. (Before the v0.7.14 release, the \"eof\" flag does not work at all in subrequests.)","The output data stream can be aborted immediately by running the following Lua statement:"," return ngx.ERROR","This will truncate the response body and usually result in incomplete and also invalid responses.","The Lua code can pass its own modified version of the input data chunk to the downstream Nginx output body filters by overriding ngx.arg[1] with a Lua string or a Lua table of strings. For example, to transform all the lowercase letters in the response body, we can just write:"," location / {\n proxy_pass http://mybackend;\n body_filter_by_lua_block {\n ngx.arg[1] = string.upper(ngx.arg[1])\n }\n }","When setting nil or an empty Lua string value to ngx.arg[1], no data chunk will be passed to the downstream Nginx output filters at all.","Likewise, new \"eof\" flag can also be specified by setting a boolean value to ngx.arg[2]. 
For example,"," location /t {\n echo hello world;\n echo hiya globe;\n\n body_filter_by_lua_block {\n local chunk = ngx.arg[1]\n if string.match(chunk, \"hello\") then\n ngx.arg[2] = true -- new eof\n return\n end\n\n -- just throw away any remaining chunk data\n ngx.arg[1] = nil\n }\n }","Then GET /t will just return the output","hello world\n","That is, when the body filter sees a chunk containing the word \"hello\", then it will set the \"eof\" flag to true immediately, resulting in truncated but still valid responses.","When the Lua code may change the length of the response body, then it is required to always clear out the Content-Length response header (if any) in a header filter to enforce streaming output, as in"," location /foo {\n # fastcgi_pass/proxy_pass/...\n\n header_filter_by_lua_block {\n ngx.header.content_length = nil\n }\n body_filter_by_lua_block {\n ngx.arg[1] = string.len(ngx.arg[1]) .. \"\\n\"\n }\n }","Note that the following API functions are currently disabled within this context due to the limitations in Nginx output filter's current implementation:","\nOutput API functions (e.g., ngx.say and ngx.send_headers)\nControl API functions (e.g., ngx.exit and ngx.exec)\nSubrequest API functions (e.g., ngx.location.capture and ngx.location.capture_multi)\nCosocket API functions (e.g., ngx.socket.tcp and ngx.req.socket).\n","Nginx output filters may be called multiple times for a single request because response body may be delivered in chunks. Thus, the Lua code specified by in this directive may also run multiple times in the lifetime of a single HTTP request.","This directive was first introduced in the v0.9.17 release."],"
Syntax:body_filter_by_lua_block { lua-script-str }
Default:
body_filter_by_lua_block {}
Context:http,server,location,location if
"], +[4,"body_filter_by_lua_block","Uses Lua code specified in { lua-script } to define an output body filter.","

Uses Lua code specified in { lua-script } to define an output body filter.

The input data chunk is passed via ngx.arg[1] (as a Lua string value) and the \"eof\" flag indicating the end of the response body data stream is passed via ngx.arg[2] (as a Lua boolean value).

Behind the scene, the \"eof\" flag is just the last_buf (for main requests) or last_in_chain (for subrequests) flag of the Nginx chain link buffers. (Before the v0.7.14 release, the \"eof\" flag does not work at all in subrequests.)

The output data stream can be aborted immediately by running the following Lua statement:

 return ngx.ERROR

This will truncate the response body and usually result in incomplete and also invalid responses.

The Lua code can pass its own modified version of the input data chunk to the downstream Nginx output body filters by overriding ngx.arg[1] with a Lua string or a Lua table of strings. For example, to transform all the lowercase letters in the response body, we can just write:

 location / {\n     proxy_pass http://mybackend;\n     body_filter_by_lua_block {\n         ngx.arg[1] = string.upper(ngx.arg[1])\n     }\n }

When setting nil or an empty Lua string value to ngx.arg[1], no data chunk will be passed to the downstream Nginx output filters at all.

Likewise, new \"eof\" flag can also be specified by setting a boolean value to ngx.arg[2]. For example,

 location /t {\n     echo hello world;\n     echo hiya globe;\n\n     body_filter_by_lua_block {\n         local chunk = ngx.arg[1]\n         if string.match(chunk, \"hello\") then\n             ngx.arg[2] = true  -- new eof\n             return\n         end\n\n         -- just throw away any remaining chunk data\n         ngx.arg[1] = nil\n     }\n }

Then GET /t will just return the output

hello world\n

That is, when the body filter sees a chunk containing the word \"hello\", then it will set the \"eof\" flag to true immediately, resulting in truncated but still valid responses.

When the Lua code may change the length of the response body, then it is required to always clear out the Content-Length response header (if any) in a header filter to enforce streaming output, as in

 location /foo {\n     # fastcgi_pass/proxy_pass/...\n\n     header_filter_by_lua_block {\n         ngx.header.content_length = nil\n     }\n     body_filter_by_lua_block {\n         ngx.arg[1] = string.len(ngx.arg[1]) .. \"\\n\"\n     }\n }

Note that the following API functions are currently disabled within this context due to the limitations in Nginx output filter's current implementation:

Nginx output filters may be called multiple times for a single request because response body may be delivered in chunks. Thus, the Lua code specified by in this directive may also run multiple times in the lifetime of a single HTTP request.

This directive was first introduced in the v0.9.17 release.

",["Uses Lua code specified in { lua-script } to define an output body filter.","The input data chunk is passed via ngx.arg[1] (as a Lua string value) and the \"eof\" flag indicating the end of the response body data stream is passed via ngx.arg[2] (as a Lua boolean value).","Behind the scene, the \"eof\" flag is just the last_buf (for main requests) or last_in_chain (for subrequests) flag of the Nginx chain link buffers. (Before the v0.7.14 release, the \"eof\" flag does not work at all in subrequests.)","The output data stream can be aborted immediately by running the following Lua statement:"," return ngx.ERROR","This will truncate the response body and usually result in incomplete and also invalid responses.","The Lua code can pass its own modified version of the input data chunk to the downstream Nginx output body filters by overriding ngx.arg[1] with a Lua string or a Lua table of strings. For example, to transform all the lowercase letters in the response body, we can just write:"," location / {\n proxy_pass http://mybackend;\n body_filter_by_lua_block {\n ngx.arg[1] = string.upper(ngx.arg[1])\n }\n }","When setting nil or an empty Lua string value to ngx.arg[1], no data chunk will be passed to the downstream Nginx output filters at all.","Likewise, new \"eof\" flag can also be specified by setting a boolean value to ngx.arg[2]. 
For example,"," location /t {\n echo hello world;\n echo hiya globe;\n\n body_filter_by_lua_block {\n local chunk = ngx.arg[1]\n if string.match(chunk, \"hello\") then\n ngx.arg[2] = true -- new eof\n return\n end\n\n -- just throw away any remaining chunk data\n ngx.arg[1] = nil\n }\n }","Then GET /t will just return the output","hello world\n","That is, when the body filter sees a chunk containing the word \"hello\", then it will set the \"eof\" flag to true immediately, resulting in truncated but still valid responses.","When the Lua code may change the length of the response body, then it is required to always clear out the Content-Length response header (if any) in a header filter to enforce streaming output, as in"," location /foo {\n # fastcgi_pass/proxy_pass/...\n\n header_filter_by_lua_block {\n ngx.header.content_length = nil\n }\n body_filter_by_lua_block {\n ngx.arg[1] = string.len(ngx.arg[1]) .. \"\\n\"\n }\n }","Note that the following API functions are currently disabled within this context due to the limitations in Nginx output filter's current implementation:","\nOutput API functions (e.g., ngx.say and ngx.send_headers)\nControl API functions (e.g., ngx.exit and ngx.exec)\nSubrequest API functions (e.g., ngx.location.capture and ngx.location.capture_multi)\nCosocket API functions (e.g., ngx.socket.tcp and ngx.req.socket).\n","Nginx output filters may be called multiple times for a single request because response body may be delivered in chunks. Thus, the Lua code specified by in this directive may also run multiple times in the lifetime of a single HTTP request.","This directive was first introduced in the v0.9.17 release."],"
Syntax:body_filter_by_lua_block { lua-script-str }
Default:
body_filter_by_lua_block {}
Context:http,server,location,location if
"], [4,"body_filter_by_lua_file","Equivalent to body_filter_by_lua_block, except that the file specified by contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.","

Equivalent to body_filter_by_lua_block, except that the file specified by <path-to-lua-script-file> contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.

When a relative path like foo/bar.lua is given, they will be turned into the absolute path relative to the server prefix path determined by the -p PATH command-line option while starting the Nginx server.

This directive was first introduced in the v0.5.0rc32 release.

",["Equivalent to body_filter_by_lua_block, except that the file specified by contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.","When a relative path like foo/bar.lua is given, they will be turned into the absolute path relative to the server prefix path determined by the -p PATH command-line option while starting the Nginx server.","This directive was first introduced in the v0.5.0rc32 release."],"
Syntax:body_filter_by_lua_file
Default:
body_filter_by_lua_file .lua;
Context:http,server,location,location if
"], [4,"log_by_lua","NOTE Use of this directive is discouraged following the v0.9.17 release. Use the log_by_lua_block directive instead.","

NOTE Use of this directive is discouraged following the v0.9.17 release. Use the log_by_lua_block directive instead.

Similar to the log_by_lua_block directive, but accepts the Lua source directly in an Nginx string literal (which requires\nspecial character escaping).

For instance,

 log_by_lua '\n     print(\"I need no extra escaping here, for example: \\r\\nblah\")\n ';

This directive was first introduced in the v0.5.0rc31 release.

",["NOTE Use of this directive is discouraged following the v0.9.17 release. Use the log_by_lua_block directive instead.","Similar to the log_by_lua_block directive, but accepts the Lua source directly in an Nginx string literal (which requires\nspecial character escaping).","For instance,"," log_by_lua '\n print(\"I need no extra escaping here, for example: \\r\\nblah\")\n ';","This directive was first introduced in the v0.5.0rc31 release."],"
Syntax:log_by_lua
Default:
log_by_lua '';
Context:http,server,location,location if
"], [4,"log_by_lua_block","Runs the Lua source code inlined as the { lua-script } at the log request processing phase. This does not replace the current access logs, but runs before.","

Runs the Lua source code inlined as the { lua-script } at the log request processing phase. This does not replace the current access logs, but runs before.

Note that the following API functions are currently disabled within this context:

Here is an example of gathering average data for $upstream_response_time:

 lua_shared_dict log_dict 5M;\n\n server {\n     location / {\n         proxy_pass http://mybackend;\n\n         log_by_lua_block {\n             local log_dict = ngx.shared.log_dict\n             local upstream_time = tonumber(ngx.var.upstream_response_time)\n\n             local sum = log_dict:get(\"upstream_time-sum\") or 0\n             sum = sum + upstream_time\n             log_dict:set(\"upstream_time-sum\", sum)\n\n             local newval, err = log_dict:incr(\"upstream_time-nb\", 1)\n             if not newval and err == \"not found\" then\n                 log_dict:add(\"upstream_time-nb\", 0)\n                 log_dict:incr(\"upstream_time-nb\", 1)\n             end\n         }\n     }\n\n     location = /status {\n         content_by_lua_block {\n             local log_dict = ngx.shared.log_dict\n             local sum = log_dict:get(\"upstream_time-sum\")\n             local nb = log_dict:get(\"upstream_time-nb\")\n\n             if nb and sum then\n                 ngx.say(\"average upstream response time: \", sum / nb,\n                         \" (\", nb, \" reqs)\")\n             else\n                 ngx.say(\"no data yet\")\n             end\n         }\n     }\n }

This directive was first introduced in the v0.9.17 release.

",["Runs the Lua source code inlined as the { lua-script } at the log request processing phase. This does not replace the current access logs, but runs before.","Note that the following API functions are currently disabled within this context:","\nOutput API functions (e.g., ngx.say and ngx.send_headers)\nControl API functions (e.g., ngx.exit)\nSubrequest API functions (e.g., ngx.location.capture and ngx.location.capture_multi)\nCosocket API functions (e.g., ngx.socket.tcp and ngx.req.socket).\n","Here is an example of gathering average data for $upstream_response_time:"," lua_shared_dict log_dict 5M;\n\n server {\n location / {\n proxy_pass http://mybackend;\n\n log_by_lua_block {\n local log_dict = ngx.shared.log_dict\n local upstream_time = tonumber(ngx.var.upstream_response_time)\n\n local sum = log_dict:get(\"upstream_time-sum\") or 0\n sum = sum + upstream_time\n log_dict:set(\"upstream_time-sum\", sum)\n\n local newval, err = log_dict:incr(\"upstream_time-nb\", 1)\n if not newval and err == \"not found\" then\n log_dict:add(\"upstream_time-nb\", 0)\n log_dict:incr(\"upstream_time-nb\", 1)\n end\n }\n }\n\n location = /status {\n content_by_lua_block {\n local log_dict = ngx.shared.log_dict\n local sum = log_dict:get(\"upstream_time-sum\")\n local nb = log_dict:get(\"upstream_time-nb\")\n\n if nb and sum then\n ngx.say(\"average upstream response time: \", sum / nb,\n \" (\", nb, \" reqs)\")\n else\n ngx.say(\"no data yet\")\n end\n }\n }\n }","This directive was first introduced in the v0.9.17 release."],"
Syntax:log_by_lua_block { lua-script }
Default:
log_by_lua_block {}
Context:http,server,location,location if
"], [4,"log_by_lua_file","Equivalent to log_by_lua_block, except that the file specified by contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.","

Equivalent to log_by_lua_block, except that the file specified by <path-to-lua-script-file> contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.

When a relative path like foo/bar.lua is given, they will be turned into the absolute path relative to the server prefix path determined by the -p PATH command-line option while starting the Nginx server.

This directive was first introduced in the v0.5.0rc31 release.

",["Equivalent to log_by_lua_block, except that the file specified by contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.","When a relative path like foo/bar.lua is given, they will be turned into the absolute path relative to the server prefix path determined by the -p PATH command-line option while starting the Nginx server.","This directive was first introduced in the v0.5.0rc31 release."],"
Syntax:log_by_lua_file
Default:
log_by_lua_file .lua;
Context:http,server,location,location if
"], [4,"balancer_by_lua_block","This directive runs Lua code as an upstream balancer for any upstream entities defined\nby the upstream {} configuration block.","

This directive runs Lua code as an upstream balancer for any upstream entities defined\nby the upstream {} configuration block.

For instance,

 upstream foo {\n     server 127.0.0.1;\n     balancer_by_lua_block {\n         -- use Lua to do something interesting here\n         -- as a dynamic balancer\n     }\n }\n\n server {\n     location / {\n         proxy_pass http://foo;\n     }\n }

The resulting Lua load balancer can work with any existing Nginx upstream modules\nlike ngx_proxy and\nngx_fastcgi.

Also, the Lua load balancer can work with the standard upstream connection pool mechanism,\ni.e., the standard keepalive directive.\nJust ensure that the keepalive directive\nis used after this balancer_by_lua_block directive in a single upstream {} configuration block.

The Lua load balancer can totally ignore the list of servers defined in the upstream {} block\nand select peer from a completely dynamic server list (even changing per request) via the\nngx.balancer module\nfrom the lua-resty-core library.

The Lua code handler registered by this directive might get called more than once in a single\ndownstream request when the Nginx upstream mechanism retries the request on conditions\nspecified by directives like the proxy_next_upstream\ndirective.

This Lua code execution context does not support yielding, so Lua APIs that may yield\n(like cosockets and \"light threads\") are disabled in this context. One can usually work\naround this limitation by doing such operations in an earlier phase handler (like\naccess_by_lua*) and passing along the result into this context\nvia the ngx.ctx table.

This directive was first introduced in the v0.10.0 release.

",["This directive runs Lua code as an upstream balancer for any upstream entities defined\nby the upstream {} configuration block.","For instance,"," upstream foo {\n server 127.0.0.1;\n balancer_by_lua_block {\n -- use Lua to do something interesting here\n -- as a dynamic balancer\n }\n }\n\n server {\n location / {\n proxy_pass http://foo;\n }\n }","The resulting Lua load balancer can work with any existing Nginx upstream modules\nlike ngx_proxy and\nngx_fastcgi.","Also, the Lua load balancer can work with the standard upstream connection pool mechanism,\ni.e., the standard keepalive directive.\nJust ensure that the keepalive directive\nis used after this balancer_by_lua_block directive in a single upstream {} configuration block.","The Lua load balancer can totally ignore the list of servers defined in the upstream {} block\nand select peer from a completely dynamic server list (even changing per request) via the\nngx.balancer module\nfrom the lua-resty-core library.","The Lua code handler registered by this directive might get called more than once in a single\ndownstream request when the Nginx upstream mechanism retries the request on conditions\nspecified by directives like the proxy_next_upstream\ndirective.","This Lua code execution context does not support yielding, so Lua APIs that may yield\n(like cosockets and \"light threads\") are disabled in this context. One can usually work\naround this limitation by doing such operations in an earlier phase handler (like\naccess_by_lua*) and passing along the result into this context\nvia the ngx.ctx table.","This directive was first introduced in the v0.10.0 release."],"
Syntax:balancer_by_lua_block { lua-script }
Default:
balancer_by_lua_block {}
Context:upstream
"], [4,"balancer_by_lua_file","Equivalent to balancer_by_lua_block, except that the file specified by contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.","

Equivalent to balancer_by_lua_block, except that the file specified by <path-to-lua-script-file> contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.

When a relative path like foo/bar.lua is given, they will be turned into the absolute path relative to the server prefix path determined by the -p PATH command-line option while starting the Nginx server.

This directive was first introduced in the v0.10.0 release.

",["Equivalent to balancer_by_lua_block, except that the file specified by contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.","When a relative path like foo/bar.lua is given, they will be turned into the absolute path relative to the server prefix path determined by the -p PATH command-line option while starting the Nginx server.","This directive was first introduced in the v0.10.0 release."],"
Syntax:balancer_by_lua_file
Default:
balancer_by_lua_file .lua;
Context:upstream
"], -[4,"lua_need_request_body","Due to the stream processing feature of HTTP/2 or HTTP/3, this configuration could potentially block the entire request. Therefore, this configuration is effective only when HTTP/2 or HTTP/3 requests send content-length header. For requests with versions lower than HTTP/2, this configuration can still be used without any problems.","

Due to the stream processing feature of HTTP/2 or HTTP/3, this configuration could potentially block the entire request. Therefore, this configuration is effective only when HTTP/2 or HTTP/3 requests send content-length header. For requests with versions lower than HTTP/2, this configuration can still be used without any problems.

Determines whether to force the request body data to be read before running rewrite/access/content_by_lua* or not. The Nginx core does not read the client request body by default and if request body data is required, then this directive should be turned on or the ngx.req.read_body function should be called within the Lua code.

To read the request body data within the $request_body variable,\nclient_body_buffer_size must have the same value as client_max_body_size. Because when the content length exceeds client_body_buffer_size but less than client_max_body_size, Nginx will buffer the data into a temporary file on the disk, which will lead to empty value in the $request_body variable.

If the current location includes rewrite_by_lua* directives,\nthen the request body will be read just before the rewrite_by_lua* code is run (and also at the\nrewrite phase). Similarly, if only content_by_lua is specified,\nthe request body will not be read until the content handler's Lua code is\nabout to run (i.e., the request body will be read during the content phase).

It is recommended however, to use the ngx.req.read_body and ngx.req.discard_body functions for finer control over the request body reading process instead.

This also applies to access_by_lua*.

",["Due to the stream processing feature of HTTP/2 or HTTP/3, this configuration could potentially block the entire request. Therefore, this configuration is effective only when HTTP/2 or HTTP/3 requests send content-length header. For requests with versions lower than HTTP/2, this configuration can still be used without any problems.","Determines whether to force the request body data to be read before running rewrite/access/content_by_lua* or not. The Nginx core does not read the client request body by default and if request body data is required, then this directive should be turned on or the ngx.req.read_body function should be called within the Lua code.","To read the request body data within the $request_body variable,\nclient_body_buffer_size must have the same value as client_max_body_size. Because when the content length exceeds client_body_buffer_size but less than client_max_body_size, Nginx will buffer the data into a temporary file on the disk, which will lead to empty value in the $request_body variable.","If the current location includes rewrite_by_lua* directives,\nthen the request body will be read just before the rewrite_by_lua* code is run (and also at the\nrewrite phase). Similarly, if only content_by_lua is specified,\nthe request body will not be read until the content handler's Lua code is\nabout to run (i.e., the request body will be read during the content phase).","It is recommended however, to use the ngx.req.read_body and ngx.req.discard_body functions for finer control over the request body reading process instead.","This also applies to access_by_lua*."],"
Syntax:lua_need_request_body
Default:
off
Context:http,server,location,location if
"], +[4,"lua_need_request_body","Determines whether to force the request body data to be read before running rewrite/access/content_by_lua* or not. The Nginx core does not read the client request body by default and if request body data is required, then this directive should be turned on or the ngx.req.read_body function should be called within the Lua code.","

Determines whether to force the request body data to be read before running rewrite/access/content_by_lua* or not. The Nginx core does not read the client request body by default and if request body data is required, then this directive should be turned on or the ngx.req.read_body function should be called within the Lua code.

To read the request body data within the $request_body variable,\nclient_body_buffer_size must have the same value as client_max_body_size. Because when the content length exceeds client_body_buffer_size but less than client_max_body_size, Nginx will buffer the data into a temporary file on the disk, which will lead to empty value in the $request_body variable.

If the current location includes rewrite_by_lua* directives,\nthen the request body will be read just before the rewrite_by_lua* code is run (and also at the\nrewrite phase). Similarly, if only content_by_lua is specified,\nthe request body will not be read until the content handler's Lua code is\nabout to run (i.e., the request body will be read during the content phase).

It is recommended however, to use the ngx.req.read_body and ngx.req.discard_body functions for finer control over the request body reading process instead.

This also applies to access_by_lua*.

",["Determines whether to force the request body data to be read before running rewrite/access/content_by_lua* or not. The Nginx core does not read the client request body by default and if request body data is required, then this directive should be turned on or the ngx.req.read_body function should be called within the Lua code.","To read the request body data within the $request_body variable,\nclient_body_buffer_size must have the same value as client_max_body_size. Because when the content length exceeds client_body_buffer_size but less than client_max_body_size, Nginx will buffer the data into a temporary file on the disk, which will lead to empty value in the $request_body variable.","If the current location includes rewrite_by_lua* directives,\nthen the request body will be read just before the rewrite_by_lua* code is run (and also at the\nrewrite phase). Similarly, if only content_by_lua is specified,\nthe request body will not be read until the content handler's Lua code is\nabout to run (i.e., the request body will be read during the content phase).","It is recommended however, to use the ngx.req.read_body and ngx.req.discard_body functions for finer control over the request body reading process instead.","This also applies to access_by_lua*."],"
Syntax:lua_need_request_body
Default:
off
Context:http,server,location,location if
"], [4,"ssl_client_hello_by_lua_block","This directive runs user Lua code when Nginx is about to post-process the SSL client hello message for the downstream\nSSL (https) connections.","

This directive runs user Lua code when Nginx is about to post-process the SSL client hello message for the downstream\nSSL (https) connections.

It is particularly useful for dynamically setting the SSL protocols according to the SNI.

It is also useful to do some custom operations according to the per-connection information in the client hello message.

For example, one can parse custom client hello extension and do the corresponding handling in pure Lua.

This Lua handler will always run whether the SSL session is resumed (via SSL session IDs or TLS session tickets) or not.\nWhile the ssl_certificate_by_lua* Lua handler will only runs when initiating a full SSL handshake.

The ngx.ssl.clienthello Lua modules\nprovided by the lua-resty-core\nlibrary are particularly useful in this context.

Note that this handler runs in extremely early stage of SSL handshake, before the SSL client hello extensions are parsed.\nSo you can not use some Lua API like ssl.server_name() which is dependent on the later stage's processing.

Also note that only the directive in default server is valid for several virtual servers with the same IP address and port.

Below is a trivial example using the\nngx.ssl.clienthello module\nat the same time:

 server {\n     listen 443 ssl;\n     server_name   test.com;\n     ssl_certificate /path/to/cert.crt;\n     ssl_certificate_key /path/to/key.key;\n     ssl_client_hello_by_lua_block {\n         local ssl_clt = require \"ngx.ssl.clienthello\"\n         local host, err = ssl_clt.get_client_hello_server_name()\n         if host == \"test.com\" then\n             ssl_clt.set_protocols({\"TLSv1\", \"TLSv1.1\"})\n         elseif host == \"test2.com\" then\n             ssl_clt.set_protocols({\"TLSv1.2\", \"TLSv1.3\"})\n         elseif not host then\n             ngx.log(ngx.ERR, \"failed to get the SNI name: \", err)\n             ngx.exit(ngx.ERROR)\n         else\n             ngx.log(ngx.ERR, \"unknown SNI name: \", host)\n             ngx.exit(ngx.ERROR)\n         end\n     }\n     ...\n }\n server {\n     listen 443 ssl;\n     server_name   test2.com;\n     ssl_certificate /path/to/cert.crt;\n     ssl_certificate_key /path/to/key.key;\n     ...\n }

See more information in the ngx.ssl.clienthello\nLua modules' official documentation.

Uncaught Lua exceptions in the user Lua code immediately abort the current SSL session, so does the\nngx.exit call with an error code like ngx.ERROR.

This Lua code execution context does support yielding, so Lua APIs that may yield\n(like cosockets, sleeping, and \"light threads\")\nare enabled in this context

Note, you need to configure the ssl_certificate\nand ssl_certificate_key\nto avoid the following error while starting NGINX:

nginx: [emerg] no ssl configured for the server\n

This directive requires OpenSSL 1.1.1 or greater.

If you are using the official pre-built\npackages for\nOpenResty 1.21.4.1 or later, then everything should\nwork out of the box.

If you are not using the Nginx core shipped with\nOpenResty 1.21.4.1 or later, you will need to apply\npatches to the standard Nginx core:

https://openresty.org/en/nginx-ssl-patches.html

This directive was first introduced in the v0.10.21 release.

",["This directive runs user Lua code when Nginx is about to post-process the SSL client hello message for the downstream\nSSL (https) connections.","It is particularly useful for dynamically setting the SSL protocols according to the SNI.","It is also useful to do some custom operations according to the per-connection information in the client hello message.","For example, one can parse custom client hello extension and do the corresponding handling in pure Lua.","This Lua handler will always run whether the SSL session is resumed (via SSL session IDs or TLS session tickets) or not.\nWhile the ssl_certificate_by_lua* Lua handler will only runs when initiating a full SSL handshake.","The ngx.ssl.clienthello Lua modules\nprovided by the lua-resty-core\nlibrary are particularly useful in this context.","Note that this handler runs in extremely early stage of SSL handshake, before the SSL client hello extensions are parsed.\nSo you can not use some Lua API like ssl.server_name() which is dependent on the later stage's processing.","Also note that only the directive in default server is valid for several virtual servers with the same IP address and port.","Below is a trivial example using the\nngx.ssl.clienthello module\nat the same time:"," server {\n listen 443 ssl;\n server_name test.com;\n ssl_certificate /path/to/cert.crt;\n ssl_certificate_key /path/to/key.key;\n ssl_client_hello_by_lua_block {\n local ssl_clt = require \"ngx.ssl.clienthello\"\n local host, err = ssl_clt.get_client_hello_server_name()\n if host == \"test.com\" then\n ssl_clt.set_protocols({\"TLSv1\", \"TLSv1.1\"})\n elseif host == \"test2.com\" then\n ssl_clt.set_protocols({\"TLSv1.2\", \"TLSv1.3\"})\n elseif not host then\n ngx.log(ngx.ERR, \"failed to get the SNI name: \", err)\n ngx.exit(ngx.ERROR)\n else\n ngx.log(ngx.ERR, \"unknown SNI name: \", host)\n ngx.exit(ngx.ERROR)\n end\n }\n ...\n }\n server {\n listen 443 ssl;\n server_name test2.com;\n ssl_certificate /path/to/cert.crt;\n 
ssl_certificate_key /path/to/key.key;\n ...\n }","See more information in the ngx.ssl.clienthello\nLua modules' official documentation.","Uncaught Lua exceptions in the user Lua code immediately abort the current SSL session, so does the\nngx.exit call with an error code like ngx.ERROR.","This Lua code execution context does support yielding, so Lua APIs that may yield\n(like cosockets, sleeping, and \"light threads\")\nare enabled in this context","Note, you need to configure the ssl_certificate\nand ssl_certificate_key\nto avoid the following error while starting NGINX:","nginx: [emerg] no ssl configured for the server\n","This directive requires OpenSSL 1.1.1 or greater.","If you are using the official pre-built\npackages for\nOpenResty 1.21.4.1 or later, then everything should\nwork out of the box.","If you are not using the Nginx core shipped with\nOpenResty 1.21.4.1 or later, you will need to apply\npatches to the standard Nginx core:","https://openresty.org/en/nginx-ssl-patches.html","This directive was first introduced in the v0.10.21 release."],"
Syntax:ssl_client_hello_by_lua_block { lua-script }
Default:
ssl_client_hello_by_lua_block {}
Context:http,server
"], [4,"ssl_client_hello_by_lua_file","Equivalent to ssl_client_hello_by_lua_block, except that the file specified by contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.","

Equivalent to ssl_client_hello_by_lua_block, except that the file specified by <path-to-lua-script-file> contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.

When a relative path like foo/bar.lua is given, they will be turned into the absolute path relative to the server prefix path determined by the -p PATH command-line option while starting the Nginx server.

This directive was first introduced in the v0.10.21 release.

",["Equivalent to ssl_client_hello_by_lua_block, except that the file specified by contains the Lua code, or, as from the v0.5.0rc32 release, the LuaJIT bytecode to be executed.","When a relative path like foo/bar.lua is given, they will be turned into the absolute path relative to the server prefix path determined by the -p PATH command-line option while starting the Nginx server.","This directive was first introduced in the v0.10.21 release."],"
Syntax:ssl_client_hello_by_lua_file
Default:
ssl_client_hello_by_lua_file .lua;
Context:http,server
"], [4,"ssl_certificate_by_lua_block","This directive runs user Lua code when Nginx is about to start the SSL handshake for the downstream\nSSL (https) connections.","

This directive runs user Lua code when Nginx is about to start the SSL handshake for the downstream\nSSL (https) connections.

It is particularly useful for setting the SSL certificate chain and the corresponding private key on a per-request\nbasis. It is also useful to load such handshake configurations nonblockingly from the remote (for example,\nwith the cosocket API). And one can also do per-request OCSP stapling handling in pure\nLua here as well.

Another typical use case is to do SSL handshake traffic control nonblockingly in this context,\nwith the help of the lua-resty-limit-traffic#readme\nlibrary, for example.

One can also do interesting things with the SSL handshake requests from the client side, like\nrejecting old SSL clients using the SSLv3 protocol or even below selectively.

The ngx.ssl\nand ngx.ocsp Lua modules\nprovided by the lua-resty-core\nlibrary are particularly useful in this context. You can use the Lua API offered by these two Lua modules\nto manipulate the SSL certificate chain and private key for the current SSL connection\nbeing initiated.

This Lua handler does not run at all, however, when Nginx/OpenSSL successfully resumes\nthe SSL session via SSL session IDs or TLS session tickets for the current SSL connection. In\nother words, this Lua handler only runs when Nginx has to initiate a full SSL handshake.

Below is a trivial example using the\nngx.ssl module\nat the same time:

 server {\n     listen 443 ssl;\n     server_name   test.com;\n\n     ssl_certificate_by_lua_block {\n         print(\"About to initiate a new SSL handshake!\")\n     }\n\n     location / {\n         root html;\n     }\n }

See more complicated examples in the ngx.ssl\nand ngx.ocsp\nLua modules' official documentation.

Uncaught Lua exceptions in the user Lua code immediately abort the current SSL session, so does the\nngx.exit call with an error code like ngx.ERROR.

This Lua code execution context does support yielding, so Lua APIs that may yield\n(like cosockets, sleeping, and \"light threads\")\nare enabled in this context.

Note, however, you still need to configure the ssl_certificate and\nssl_certificate_key\ndirectives even though you will not use this static certificate and private key at all. This is\nbecause the NGINX core requires their appearance otherwise you are seeing the following error\nwhile starting NGINX:

nginx: [emerg] no ssl configured for the server\n

This directive requires OpenSSL 1.0.2e or greater.

If you are using the official pre-built\npackages for\nOpenResty 1.9.7.2 or later, then everything should\nwork out of the box.

If you are not using the Nginx core shipped with\nOpenResty 1.9.7.2 or later, you will need to apply\npatches to the standard Nginx core:

https://openresty.org/en/nginx-ssl-patches.html

This directive was first introduced in the v0.10.0 release.

",["This directive runs user Lua code when Nginx is about to start the SSL handshake for the downstream\nSSL (https) connections.","It is particularly useful for setting the SSL certificate chain and the corresponding private key on a per-request\nbasis. It is also useful to load such handshake configurations nonblockingly from the remote (for example,\nwith the cosocket API). And one can also do per-request OCSP stapling handling in pure\nLua here as well.","Another typical use case is to do SSL handshake traffic control nonblockingly in this context,\nwith the help of the lua-resty-limit-traffic#readme\nlibrary, for example.","One can also do interesting things with the SSL handshake requests from the client side, like\nrejecting old SSL clients using the SSLv3 protocol or even below selectively.","The ngx.ssl\nand ngx.ocsp Lua modules\nprovided by the lua-resty-core\nlibrary are particularly useful in this context. You can use the Lua API offered by these two Lua modules\nto manipulate the SSL certificate chain and private key for the current SSL connection\nbeing initiated.","This Lua handler does not run at all, however, when Nginx/OpenSSL successfully resumes\nthe SSL session via SSL session IDs or TLS session tickets for the current SSL connection. 
In\nother words, this Lua handler only runs when Nginx has to initiate a full SSL handshake.","Below is a trivial example using the\nngx.ssl module\nat the same time:"," server {\n listen 443 ssl;\n server_name test.com;\n\n ssl_certificate_by_lua_block {\n print(\"About to initiate a new SSL handshake!\")\n }\n\n location / {\n root html;\n }\n }","See more complicated examples in the ngx.ssl\nand ngx.ocsp\nLua modules' official documentation.","Uncaught Lua exceptions in the user Lua code immediately abort the current SSL session, so does the\nngx.exit call with an error code like ngx.ERROR.","This Lua code execution context does support yielding, so Lua APIs that may yield\n(like cosockets, sleeping, and \"light threads\")\nare enabled in this context.","Note, however, you still need to configure the ssl_certificate and\nssl_certificate_key\ndirectives even though you will not use this static certificate and private key at all. This is\nbecause the NGINX core requires their appearance otherwise you are seeing the following error\nwhile starting NGINX:","nginx: [emerg] no ssl configured for the server\n","This directive requires OpenSSL 1.0.2e or greater.","If you are using the official pre-built\npackages for\nOpenResty 1.9.7.2 or later, then everything should\nwork out of the box.","If you are not using the Nginx core shipped with\nOpenResty 1.9.7.2 or later, you will need to apply\npatches to the standard Nginx core:","https://openresty.org/en/nginx-ssl-patches.html","This directive was first introduced in the v0.10.0 release."],"
Syntax:ssl_certificate_by_lua_block { lua-script }
Default:
ssl_certificate_by_lua_block {}
Context:server
"], @@ -76,5 +76,5 @@ [4,"lua_max_pending_timers","Controls the maximum number of pending timers allowed.","

Controls the maximum number of pending timers allowed.

Pending timers are those timers that have not expired yet.

When exceeding this limit, the ngx.timer.at call will immediately return nil and the error string \"too many pending timers\".

This directive was first introduced in the v0.8.0 release.

",["Controls the maximum number of pending timers allowed.","Pending timers are those timers that have not expired yet.","When exceeding this limit, the ngx.timer.at call will immediately return nil and the error string \"too many pending timers\".","This directive was first introduced in the v0.8.0 release."],"
Syntax:lua_max_pending_timers
Default:
lua_max_pending_timers 1024
Context:http
"], [4,"lua_max_running_timers","Controls the maximum number of \"running timers\" allowed.","

Controls the maximum number of \"running timers\" allowed.

Running timers are those timers whose user callback functions are still running or lightthreads spawned in callback functions are still running.

When exceeding this limit, Nginx will stop running the callbacks of newly expired timers and log an error message \"N lua_max_running_timers are not enough\" where \"N\" is the current value of this directive.

This directive was first introduced in the v0.8.0 release.

",["Controls the maximum number of \"running timers\" allowed.","Running timers are those timers whose user callback functions are still running or lightthreads spawned in callback functions are still running.","When exceeding this limit, Nginx will stop running the callbacks of newly expired timers and log an error message \"N lua_max_running_timers are not enough\" where \"N\" is the current value of this directive.","This directive was first introduced in the v0.8.0 release."],"
Syntax:lua_max_running_timers
Default:
lua_max_running_timers 256
Context:http
"], [4,"lua_sa_restart","When enabled, this module will set the SA_RESTART flag on Nginx workers signal dispositions.","

When enabled, this module will set the SA_RESTART flag on Nginx workers signal dispositions.

This allows Lua I/O primitives to not be interrupted by Nginx's handling of various signals.

This directive was first introduced in the v0.10.14 release.

",["When enabled, this module will set the SA_RESTART flag on Nginx workers signal dispositions.","This allows Lua I/O primitives to not be interrupted by Nginx's handling of various signals.","This directive was first introduced in the v0.10.14 release."],"
Syntax:lua_sa_restart on|off
Default:
lua_sa_restart on
Context:http
"], -[4,"lua_worker_thread_vm_pool_size","Specifies the size limit of the Lua VM pool (default 100) that will be used in the ngx.run_worker_thread API.","

Specifies the size limit of the Lua VM pool (default 100) that will be used in the ngx.run_worker_thread API.

Also, it is not allowed to create Lua VMs that exceeds the pool size limit.

The Lua VM in the VM pool is used to execute Lua code in separate thread.

The pool is global at Nginx worker level. And it is used to reuse Lua VMs between requests.

",["Specifies the size limit of the Lua VM pool (default 100) that will be used in the ngx.run_worker_thread API.","Also, it is not allowed to create Lua VMs that exceeds the pool size limit.","The Lua VM in the VM pool is used to execute Lua code in separate thread.","The pool is global at Nginx worker level. And it is used to reuse Lua VMs between requests."],"
Syntax:lua_worker_thread_vm_pool_size
Default:
lua_worker_thread_vm_pool_size 100
Context:http
"] +[4,"lua_worker_thread_vm_pool_size","Specifies the size limit of the Lua VM pool (default 100) that will be used in the ngx.run_worker_thread API.","

Specifies the size limit of the Lua VM pool (default 100) that will be used in the ngx.run_worker_thread API.

Also, it is not allowed to create Lua VMs that exceeds the pool size limit.

The Lua VM in the VM pool is used to execute Lua code in separate thread.

The pool is global at Nginx worker level. And it is used to reuse Lua VMs between requests.

Warning: Each worker thread uses a separate Lua VM and caches the Lua VM for reuse in subsequent operations. Configuring too many worker threads can result in consuming a lot of memory.

",["Specifies the size limit of the Lua VM pool (default 100) that will be used in the ngx.run_worker_thread API.","Also, it is not allowed to create Lua VMs that exceeds the pool size limit.","The Lua VM in the VM pool is used to execute Lua code in separate thread.","The pool is global at Nginx worker level. And it is used to reuse Lua VMs between requests.","Warning: Each worker thread uses a separate Lua VM and caches the Lua VM for reuse in subsequent operations. Configuring too many worker threads can result in consuming a lot of memory."],"
Syntax:lua_worker_thread_vm_pool_size
Default:
lua_worker_thread_vm_pool_size 10
Context:http
"] ] \ No newline at end of file diff --git a/assets/details/ngx_http_fastcgi_module.json b/assets/details/ngx_http_fastcgi_module.json index 37a71ac..9c7ad4b 100644 --- a/assets/details/ngx_http_fastcgi_module.json +++ b/assets/details/ngx_http_fastcgi_module.json @@ -14,8 +14,8 @@ [4,"fastcgi_cache_max_range_offset","Sets an offset in bytes for byte-range requests. If the range is beyond the offset, the range request will be passed to the FastCGI server and the response will not be cached.","

Sets an offset in bytes for byte-range requests. If the range is beyond the offset, the range request will be passed to the FastCGI server and the response will not be cached.

",[],"
Syntax:fastcgi_cache_max_range_offset number;
Default:
Context:http, server, location

This directive appeared in version 1.11.6.

"], [4,"fastcgi_cache_methods","If the client request method is listed in this directive then the response will be cached. “`GET`” and “`HEAD`” methods are always added to the list, though it is recommended to specify them explicitly. See also the [fastcgi\\_no\\_cache](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_no_cache) directive.","

If the client request method is listed in this directive then the response will be cached. “GET” and “HEAD” methods are always added to the list, though it is recommended to specify them explicitly. See also the fastcgi_no_cache directive.

",[],"
Syntax:fastcgi_cache_methods GET | HEAD | POST ...;
Default:
fastcgi_cache_methods GET HEAD;
Context:http, server, location

This directive appeared in version 0.7.59.

"], [4,"fastcgi_cache_min_uses","Sets the `number` of requests after which the response will be cached.","

Sets the number of requests after which the response will be cached.

",[],"
Syntax:fastcgi_cache_min_uses number;
Default:
fastcgi_cache_min_uses 1;
Context:http, server, location
"], -[4,"fastcgi_cache_path","Sets the path and other parameters of a cache. Cache data are stored in files. Both the key and file name in a cache are a result of applying the MD5 function to the proxied URL. The `levels` parameter defines hierarchy levels of a cache: from 1 to 3, each level accepts values 1 or 2. For example, in the following configuration","

Sets the path and other parameters of a cache. Cache data are stored in files. Both the key and file name in a cache are a result of applying the MD5 function to the proxied URL. The levels parameter defines hierarchy levels of a cache: from 1 to 3, each level accepts values 1 or 2. For example, in the following configuration

fastcgi_cache_path /data/nginx/cache levels=1:2 keys_zone=one:10m;\n

file names in a cache will look like this:

/data/nginx/cache/c/29/b7f54b2df7773722d382f4809d65029c\n

A cached response is first written to a temporary file, and then the file is renamed. Starting from version 0.8.9, temporary files and the cache can be put on different file systems. However, be aware that in this case a file is copied across two file systems instead of the cheap renaming operation. It is thus recommended that for any given location both cache and a directory holding temporary files are put on the same file system. A directory for temporary files is set based on the use_temp_path parameter (1.7.10). If this parameter is omitted or set to the value on, the directory set by the fastcgi_temp_path directive for the given location will be used. If the value is set to off, temporary files will be put directly in the cache directory.

In addition, all active keys and information about data are stored in a shared memory zone, whose name and size are configured by the keys_zone parameter. One megabyte zone can store about 8 thousand keys.

As part of commercial subscription, the shared memory zone also stores extended cache information, thus, it is required to specify a larger zone size for the same number of keys. For example, one megabyte zone can store about 4 thousand keys.

Cached data that are not accessed during the time specified by the inactive parameter get removed from the cache regardless of their freshness. By default, inactive is set to 10 minutes.

",["As part of [commercial subscription](http://nginx.com/products/), the shared memory zone also stores extended cache [information](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_caches_), thus, it is required to specify a larger zone size for the same number of keys. For example, one megabyte zone can store about 4 thousand keys."],"
Syntax:fastcgi_cache_path path [levels=levels] [use_temp_path=on|off] keys_zone=name:size [inactive=time] [max_size=size] [min_free=size] [manager_files=number] [manager_sleep=time] [manager_threshold=time] [loader_files=number] [loader_sleep=time] [loader_threshold=time] [purger=on|off] [purger_files=number] [purger_sleep=time] [purger_threshold=time];
Default:
Context:http
"], -[4,"fastcgi_cache_purge","Defines conditions under which the request will be considered a cache purge request. If at least one value of the string parameters is not empty and is not equal to “0” then the cache entry with a corresponding [cache key](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_key) is removed. The result of successful operation is indicated by returning the 204 (No Content) response.","

Defines conditions under which the request will be considered a cache purge request. If at least one value of the string parameters is not empty and is not equal to “0” then the cache entry with a corresponding cache key is removed. The result of successful operation is indicated by returning the 204 (No Content) response.

If the cache key of a purge request ends with an asterisk (“*”), all cache entries matching the wildcard key will be removed from the cache. However, these entries will remain on the disk until they are deleted for either inactivity, or processed by the cache purger (1.7.12), or a client attempts to access them.

Example configuration:

fastcgi_cache_path /data/nginx/cache keys_zone=cache_zone:10m;\n\nmap $request_method $purge_method {\n    PURGE   1;\n    default 0;\n}\n\nserver {\n    ...\n    location / {\n        fastcgi_pass        backend;\n        fastcgi_cache       cache_zone;\n        fastcgi_cache_key   $uri;\n        fastcgi_cache_purge $purge_method;\n    }\n}\n
This functionality is available as part of our commercial subscription.
",["This functionality is available as part of our [commercial subscription](http://nginx.com/products/)."],"
Syntax:fastcgi_cache_purge string ...;
Default:
Context:http, server, location

This directive appeared in version 1.5.7.

"], +[4,"fastcgi_cache_path","Sets the path and other parameters of a cache. Cache data are stored in files. Both the key and file name in a cache are a result of applying the MD5 function to the proxied URL. The `levels` parameter defines hierarchy levels of a cache: from 1 to 3, each level accepts values 1 or 2. For example, in the following configuration","

Sets the path and other parameters of a cache. Cache data are stored in files. Both the key and file name in a cache are a result of applying the MD5 function to the proxied URL. The levels parameter defines hierarchy levels of a cache: from 1 to 3, each level accepts values 1 or 2. For example, in the following configuration

fastcgi_cache_path /data/nginx/cache levels=1:2 keys_zone=one:10m;\n

file names in a cache will look like this:

/data/nginx/cache/c/29/b7f54b2df7773722d382f4809d65029c\n

A cached response is first written to a temporary file, and then the file is renamed. Starting from version 0.8.9, temporary files and the cache can be put on different file systems. However, be aware that in this case a file is copied across two file systems instead of the cheap renaming operation. It is thus recommended that for any given location both cache and a directory holding temporary files are put on the same file system. A directory for temporary files is set based on the use_temp_path parameter (1.7.10). If this parameter is omitted or set to the value on, the directory set by the fastcgi_temp_path directive for the given location will be used. If the value is set to off, temporary files will be put directly in the cache directory.

In addition, all active keys and information about data are stored in a shared memory zone, whose name and size are configured by the keys_zone parameter. One megabyte zone can store about 8 thousand keys.

As part of commercial subscription, the shared memory zone also stores extended cache information, thus, it is required to specify a larger zone size for the same number of keys. For example, one megabyte zone can store about 4 thousand keys.

Cached data that are not accessed during the time specified by the inactive parameter get removed from the cache regardless of their freshness. By default, inactive is set to 10 minutes.

",["As part of [commercial subscription](https://www.f5.com/products), the shared memory zone also stores extended cache [information](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_caches_), thus, it is required to specify a larger zone size for the same number of keys. For example, one megabyte zone can store about 4 thousand keys."],"
Syntax:fastcgi_cache_path path [levels=levels] [use_temp_path=on|off] keys_zone=name:size [inactive=time] [max_size=size] [min_free=size] [manager_files=number] [manager_sleep=time] [manager_threshold=time] [loader_files=number] [loader_sleep=time] [loader_threshold=time] [purger=on|off] [purger_files=number] [purger_sleep=time] [purger_threshold=time];
Default:
Context:http
"], +[4,"fastcgi_cache_purge","Defines conditions under which the request will be considered a cache purge request. If at least one value of the string parameters is not empty and is not equal to “0” then the cache entry with a corresponding [cache key](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache_key) is removed. The result of successful operation is indicated by returning the 204 (No Content) response.","

Defines conditions under which the request will be considered a cache purge request. If at least one value of the string parameters is not empty and is not equal to “0” then the cache entry with a corresponding cache key is removed. The result of successful operation is indicated by returning the 204 (No Content) response.

If the cache key of a purge request ends with an asterisk (“*”), all cache entries matching the wildcard key will be removed from the cache. However, these entries will remain on the disk until they are deleted for either inactivity, or processed by the cache purger (1.7.12), or a client attempts to access them.

Example configuration:

fastcgi_cache_path /data/nginx/cache keys_zone=cache_zone:10m;\n\nmap $request_method $purge_method {\n    PURGE   1;\n    default 0;\n}\n\nserver {\n    ...\n    location / {\n        fastcgi_pass        backend;\n        fastcgi_cache       cache_zone;\n        fastcgi_cache_key   $uri;\n        fastcgi_cache_purge $purge_method;\n    }\n}\n
This functionality is available as part of our commercial subscription.
",["This functionality is available as part of our [commercial subscription](https://www.f5.com/products)."],"
Syntax:fastcgi_cache_purge string ...;
Default:
Context:http, server, location

This directive appeared in version 1.5.7.

"], [4,"fastcgi_cache_revalidate","Enables revalidation of expired cache items using conditional requests with the “If-Modified-Since” and “If-None-Match” header fields.","

Enables revalidation of expired cache items using conditional requests with the “If-Modified-Since” and “If-None-Match” header fields.

",[],"
Syntax:fastcgi_cache_revalidate on | off;
Default:
fastcgi_cache_revalidate off;
Context:http, server, location

This directive appeared in version 1.5.7.

"], [4,"fastcgi_cache_use_stale","Determines in which cases a stale cached response can be used when an error occurs during communication with the FastCGI server. The directive’s parameters match the parameters of the [fastcgi\\_next\\_upstream](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_next_upstream) directive.","

Determines in which cases a stale cached response can be used when an error occurs during communication with the FastCGI server. The directive’s parameters match the parameters of the fastcgi_next_upstream directive.

The error parameter also permits using a stale cached response if a FastCGI server to process a request cannot be selected.

",[],"
Syntax:fastcgi_cache_use_stale error | timeout | invalid_header | updating | http_500 | http_503 | http_403 | http_404 | http_429 | off ...;
Default:
fastcgi_cache_use_stale off;
Context:http, server, location
"], [4,"fastcgi_cache_valid","Sets caching time for different response codes. For example, the following directives","

Sets caching time for different response codes. For example, the following directives

fastcgi_cache_valid 200 302 10m;\nfastcgi_cache_valid 404      1m;\n

set 10 minutes of caching for responses with codes 200 and 302 and 1 minute for responses with code 404.

If only caching time is specified

fastcgi_cache_valid 5m;\n

then only 200, 301, and 302 responses are cached.

In addition, the any parameter can be specified to cache any responses:

fastcgi_cache_valid 200 302 10m;\nfastcgi_cache_valid 301      1h;\nfastcgi_cache_valid any      1m;\n

Parameters of caching can also be set directly in the response header. This has higher priority than setting of caching time using the directive.

",[],"
Syntax:fastcgi_cache_valid [code ...] time;
Default:
Context:http, server, location
"], @@ -28,7 +28,7 @@ [4,"fastcgi_index","Sets a file name that will be appended after a URI that ends with a slash, in the value of the `$fastcgi_script_name` variable. For example, with these settings","

Sets a file name that will be appended after a URI that ends with a slash, in the value of the $fastcgi_script_name variable. For example, with these settings

fastcgi_index index.php;\nfastcgi_param SCRIPT_FILENAME /home/www/scripts/php$fastcgi_script_name;\n

and the “/page.php” request, the SCRIPT_FILENAME parameter will be equal to “/home/www/scripts/php/page.php”, and with the “/” request it will be equal to “/home/www/scripts/php/index.php”.

",[],"
Syntax:fastcgi_index name;
Default:
Context:http, server, location
"], [4,"fastcgi_intercept_errors","Determines whether FastCGI server responses with codes greater than or equal to 300 should be passed to a client or be intercepted and redirected to nginx for processing with the [error\\_page](https://nginx.org/en/docs/http/ngx_http_core_module.html#error_page) directive.","

Determines whether FastCGI server responses with codes greater than or equal to 300 should be passed to a client or be intercepted and redirected to nginx for processing with the error_page directive.

",[],"
Syntax:fastcgi_intercept_errors on | off;
Default:
fastcgi_intercept_errors off;
Context:http, server, location
"], [4,"fastcgi_keep_conn","By default, a FastCGI server will close a connection right after sending the response. However, when this directive is set to the value `on`, nginx will instruct a FastCGI server to keep connections open. This is necessary, in particular, for [keepalive](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive) connections to FastCGI servers to function.","

By default, a FastCGI server will close a connection right after sending the response. However, when this directive is set to the value on, nginx will instruct a FastCGI server to keep connections open. This is necessary, in particular, for keepalive connections to FastCGI servers to function.

",[],"
Syntax:fastcgi_keep_conn on | off;
Default:
fastcgi_keep_conn off;
Context:http, server, location

This directive appeared in version 1.1.4.

"], -[4,"fastcgi_limit_rate","Limits the speed of reading the response from the FastCGI server. The `rate` is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if nginx simultaneously opens two connections to the FastCFI server, the overall rate will be twice as much as the specified limit. The limitation works only if [buffering](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_buffering) of responses from the FastCGI server is enabled.","

Limits the speed of reading the response from the FastCGI server. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if nginx simultaneously opens two connections to the FastCFI server, the overall rate will be twice as much as the specified limit. The limitation works only if buffering of responses from the FastCGI server is enabled.

",[],"
Syntax:fastcgi_limit_rate rate;
Default:
fastcgi_limit_rate 0;
Context:http, server, location

This directive appeared in version 1.7.7.

"], +[4,"fastcgi_limit_rate","Limits the speed of reading the response from the FastCGI server. The `rate` is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if nginx simultaneously opens two connections to the FastCGI server, the overall rate will be twice as much as the specified limit. The limitation works only if [buffering](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_buffering) of responses from the FastCGI server is enabled. Parameter value can contain variables (1.27.0).","

Limits the speed of reading the response from the FastCGI server. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if nginx simultaneously opens two connections to the FastCGI server, the overall rate will be twice as much as the specified limit. The limitation works only if buffering of responses from the FastCGI server is enabled. Parameter value can contain variables (1.27.0).

",[],"
Syntax:fastcgi_limit_rate rate;
Default:
fastcgi_limit_rate 0;
Context:http, server, location

This directive appeared in version 1.7.7.

"], [4,"fastcgi_max_temp_file_size","When [buffering](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_buffering) of responses from the FastCGI server is enabled, and the whole response does not fit into the buffers set by the [fastcgi\\_buffer\\_size](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_buffer_size) and [fastcgi\\_buffers](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_buffers) directives, a part of the response can be saved to a temporary file. This directive sets the maximum `size` of the temporary file. The size of data written to the temporary file at a time is set by the [fastcgi\\_temp\\_file\\_write\\_size](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_temp_file_write_size) directive.","

When buffering of responses from the FastCGI server is enabled, and the whole response does not fit into the buffers set by the fastcgi_buffer_size and fastcgi_buffers directives, a part of the response can be saved to a temporary file. This directive sets the maximum size of the temporary file. The size of data written to the temporary file at a time is set by the fastcgi_temp_file_write_size directive.

The zero value disables buffering of responses to temporary files.

This restriction does not apply to responses that will be cached or stored on disk.
",["This restriction does not apply to responses that will be [cached](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_cache) or [stored](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_store) on disk."],"
Syntax:fastcgi_max_temp_file_size size;
Default:
fastcgi_max_temp_file_size 1024m;
Context:http, server, location
"], [4,"fastcgi_next_upstream","Specifies in which cases a request should be passed to the next server:","

Specifies in which cases a request should be passed to the next server:

error
an error occurred while establishing a connection with the server, passing a request to it, or reading the response header;
timeout
a timeout has occurred while establishing a connection with the server, passing a request to it, or reading the response header;
invalid_header
a server returned an empty or invalid response;
http_500
a server returned a response with the code 500;
http_503
a server returned a response with the code 503;
http_403
a server returned a response with the code 403;
http_404
a server returned a response with the code 404;
http_429
a server returned a response with the code 429 (1.11.13);
non_idempotent
normally, requests with a non-idempotent method (POST, LOCK, PATCH) are not passed to the next server if a request has been sent to an upstream server (1.9.13); enabling this option explicitly allows retrying such requests;
off
disables passing a request to the next server.

One should bear in mind that passing a request to the next server is only possible if nothing has been sent to a client yet. That is, if an error or timeout occurs in the middle of the transferring of a response, fixing this is impossible.

The directive also defines what is considered an unsuccessful attempt of communication with a server. The cases of error, timeout and invalid_header are always considered unsuccessful attempts, even if they are not specified in the directive. The cases of http_500, http_503, and http_429 are considered unsuccessful attempts only if they are specified in the directive. The cases of http_403 and http_404 are never considered unsuccessful attempts.

Passing a request to the next server can be limited by the number of tries and by time.

",[],"
Syntax:fastcgi_next_upstream error | timeout | invalid_header | http_500 | http_503 | http_403 | http_404 | http_429 | non_idempotent | off ...;
Default:
fastcgi_next_upstream error timeout;
Context:http, server, location
"], [4,"fastcgi_next_upstream_timeout","Limits the time during which a request can be passed to the [next server](https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_next_upstream). The `0` value turns off this limitation.","

Limits the time during which a request can be passed to the next server. The 0 value turns off this limitation.

",[],"
Syntax:fastcgi_next_upstream_timeout time;
Default:
fastcgi_next_upstream_timeout 0;
Context:http, server, location

This directive appeared in version 1.7.5.

"], diff --git a/assets/details/ngx_http_js_module.json b/assets/details/ngx_http_js_module.json index ca9168e..88b75da 100644 --- a/assets/details/ngx_http_js_module.json +++ b/assets/details/ngx_http_js_module.json @@ -14,7 +14,7 @@ [4,"js_include","Specifies a file that implements location and variable handlers in njs:","

Specifies a file that implements location and variable handlers in njs:

nginx.conf:\njs_include http.js;\nlocation   /version {\n    js_content version;\n}\n\nhttp.js:\nfunction version(r) {\n    r.return(200, njs.version);\n}\n

The directive was made obsolete in version 0.4.0 and was removed in version 0.7.1. The js_import directive should be used instead.

",[],"
Syntax:js_include file;
Default:
Context:http
"], [4,"js_path","Sets an additional path for njs modules.","

Sets an additional path for njs modules.

The directive can be specified on the server and location level since 0.7.7.
",["The directive can be specified on the `server` and `location` level since [0.7.7](https://nginx.org/en/docs/njs/changes.html#njs0.7.7)."],"
Syntax:js_path path;
Default:
Context:http, server, location

This directive appeared in version 0.3.0.

"], [4,"js_periodic","Specifies a content handler to run at regular interval. The handler receives a [session object](https://nginx.org/en/docs/njs/reference.html#periodic_session) as its first argument, it also has access to global objects such as [ngx](https://nginx.org/en/docs/njs/reference.html#ngx).","

Specifies a content handler to run at regular interval. The handler receives a session object as its first argument, it also has access to global objects such as ngx.

The optional interval parameter sets the interval between two consecutive runs, by default, 5 seconds.

The optional jitter parameter sets the time within which the location content handler will be randomly delayed, by default, there is no delay.

By default, the js_handler is executed on worker process 0. The optional worker_affinity parameter allows specifying particular worker processes where the location content handler should be executed. Each worker process set is represented by a bitmask of allowed worker processes. The all mask allows the handler to be executed in all worker processes.

Example:

example.conf:\n\nlocation @periodics {\n    # to be run at 1 minute intervals in worker process 0\n    js_periodic main.handler interval=60s;\n\n    # to be run at 1 minute intervals in all worker processes\n    js_periodic main.handler interval=60s worker_affinity=all;\n\n    # to be run at 1 minute intervals in worker processes 1 and 3\n    js_periodic main.handler interval=60s worker_affinity=0101;\n\n    resolver 10.0.0.1;\n    js_fetch_trusted_certificate /path/to/ISRG_Root_X1.pem;\n}\n\nexample.js:\n\nasync function handler(s) {\n    let reply = await ngx.fetch('https://nginx.org/en/docs/njs/');\n    let body = await reply.text();\n\n    ngx.log(ngx.INFO, body);\n}\n
",[],"
Syntax:js_periodic function | module.function [interval=time] [jitter=number] [worker_affinity=mask];
Default:
Context:location

This directive appeared in version 0.8.1.

"], -[4,"js_preload_object","Preloads an [immutable object](https://nginx.org/en/docs/njs/preload_objects.html) at configure time. The `name` is used a name of the global variable though which the object is available in njs code. If the `name` is not specified, the file name will be used instead.","

Preloads an immutable object at configure time. The name is used a name of the global variable though which the object is available in njs code. If the name is not specified, the file name will be used instead.

js_preload_object map.json;\n

Here, the map is used as a name while accessing the preloaded object.

Several js_preload_object directives can be specified.

",[],"
Syntax:js_preload_object name.json | name from file.json;
Default:
Context:http, server, location

This directive appeared in version 0.7.8.

"], +[4,"js_preload_object","Preloads an [immutable object](https://nginx.org/en/docs/njs/preload_objects.html) at configure time. The `name` is used as a name of the global variable through which the object is available in njs code. If the `name` is not specified, the file name will be used instead.","

Preloads an immutable object at configure time. The name is used as a name of the global variable through which the object is available in njs code. If the name is not specified, the file name will be used instead.

js_preload_object map.json;\n

Here, the map is used as a name while accessing the preloaded object.

Several js_preload_object directives can be specified.

",[],"
Syntax:js_preload_object name.json | name from file.json;
Default:
Context:http, server, location

This directive appeared in version 0.7.8.

"], [4,"js_set","Sets an njs `function` for the specified `variable`. Since [0.4.0](https://nginx.org/en/docs/njs/changes.html#njs0.4.0), a module function can be referenced.","

Sets an njs function for the specified variable. Since 0.4.0, a module function can be referenced.

The function is called when the variable is referenced for the first time for a given request. The exact moment depends on a phase at which the variable is referenced. This can be used to perform some logic not related to variable evaluation. For example, if the variable is referenced only in the log_format directive, its handler will not be executed until the log phase. This handler can be used to do some cleanup right before the request is freed.

As the js_set handler returns its result immediately, it supports only synchronous operations. Thus, asynchronous operations such as r.subrequest() or setTimeout() are not supported.
The directive can be specified on the server and location level since 0.7.7.
",["As the `js_set` handler returns its result immediately, it supports only synchronous operations. Thus, asynchronous operations such as [r.subrequest()](https://nginx.org/en/docs/njs/reference.html#r_subrequest) or [setTimeout()](https://nginx.org/en/docs/njs/reference.html#settimeout) are not supported.","The directive can be specified on the `server` and `location` level since [0.7.7](https://nginx.org/en/docs/njs/changes.html#njs0.7.7)."],"
Syntax:js_set $variable function | module.function;
Default:
Context:http, server, location
"], [4,"js_shared_dict_zone","Sets the `name` and `size` of the shared memory zone that keeps the key-value [dictionary](https://nginx.org/en/docs/njs/reference.html#dict) shared between worker processes.","

Sets the name and size of the shared memory zone that keeps the key-value dictionary shared between worker processes.

By default the shared dictionary uses a string as a key and a value. The optional type parameter allows redefining the value type to number.

The optional timeout parameter sets the time after which all shared dictionary entries are removed from the zone.

The optional evict parameter removes the oldest key-value pair when the zone storage is exhausted.

Example:

example.conf:\n    # Creates a 1Mb dictionary with string values,\n    # removes key-value pairs after 60 seconds of inactivity:\n    js_shared_dict_zone zone=foo:1M timeout=60s;\n\n    # Creates a 512Kb dictionary with string values,\n    # forcibly removes oldest key-value pairs when the zone is exhausted:\n    js_shared_dict_zone zone=bar:512K timeout=30s evict;\n\n    # Creates a 32Kb permanent dictionary with number values:\n    js_shared_dict_zone zone=num:32k type=number;\n\nexample.js:\n    function get(r) {\n        r.return(200, ngx.shared.foo.get(r.args.key));\n    }\n\n    function set(r) {\n        r.return(200, ngx.shared.foo.set(r.args.key, r.args.value));\n    }\n\n    function del(r) {\n        r.return(200, ngx.shared.bar.delete(r.args.key));\n    }\n\n    function increment(r) {\n        r.return(200, ngx.shared.num.incr(r.args.key, 2));\n    }\n
",[],"
Syntax:js_shared_dict_zone zone=name:size [timeout=time] [type=string|number] [evict];
Default:
Context:http

This directive appeared in version 0.8.0.

"], [4,"js_var","Declares a [writable](https://nginx.org/en/docs/njs/reference.html#r_variables) variable. The value can contain text, variables, and their combination. The variable is not overwritten after a redirect unlike variables created with the [set](https://nginx.org/en/docs/http/ngx_http_rewrite_module.html#set) directive.","

Declares a writable variable. The value can contain text, variables, and their combination. The variable is not overwritten after a redirect unlike variables created with the set directive.

The directive can be specified on the server and location level since 0.7.7.
",["The directive can be specified on the `server` and `location` level since [0.7.7](https://nginx.org/en/docs/njs/changes.html#njs0.7.7)."],"
Syntax:js_var $variable [value];
Default:
Context:http, server, location

This directive appeared in version 0.5.3.

"] diff --git a/assets/details/ngx_http_limit_conn_module.json b/assets/details/ngx_http_limit_conn_module.json index e230aed..936cf0d 100644 --- a/assets/details/ngx_http_limit_conn_module.json +++ b/assets/details/ngx_http_limit_conn_module.json @@ -3,7 +3,7 @@ [4,"limit_conn_dry_run","Enables the dry run mode. In this mode, the number of connections is not limited, however, in the shared memory zone, the number of excessive connections is accounted as usual.","

Enables the dry run mode. In this mode, the number of connections is not limited, however, in the shared memory zone, the number of excessive connections is accounted as usual.

",[],"
Syntax:limit_conn_dry_run on | off;
Default:
limit_conn_dry_run off;
Context:http, server, location

This directive appeared in version 1.17.6.

"], [4,"limit_conn_log_level","Sets the desired logging level for cases when the server limits the number of connections.","

Sets the desired logging level for cases when the server limits the number of connections.

",[],"
Syntax:limit_conn_log_level info | notice | warn | error;
Default:
limit_conn_log_level error;
Context:http, server, location

This directive appeared in version 0.8.18.

"], [4,"limit_conn_status","Sets the status code to return in response to rejected requests.","

Sets the status code to return in response to rejected requests.

",[],"
Syntax:limit_conn_status code;
Default:
limit_conn_status 503;
Context:http, server, location

This directive appeared in version 1.3.15.

"], -[4,"limit_conn_zone","Sets parameters for a shared memory zone that will keep states for various keys. In particular, the state includes the current number of connections. The `key` can contain text, variables, and their combination. Requests with an empty key value are not accounted.","

Sets parameters for a shared memory zone that will keep states for various keys. In particular, the state includes the current number of connections. The key can contain text, variables, and their combination. Requests with an empty key value are not accounted.

Prior to version 1.7.6, a key could contain exactly one variable.

Usage example:

limit_conn_zone $binary_remote_addr zone=addr:10m;\n

Here, a client IP address serves as a key. Note that instead of $remote_addr, the $binary_remote_addr variable is used here. The $remote_addr variable’s size can vary from 7 to 15 bytes. The stored state occupies either 32 or 64 bytes of memory on 32-bit platforms and always 64 bytes on 64-bit platforms. The $binary_remote_addr variable’s size is always 4 bytes for IPv4 addresses or 16 bytes for IPv6 addresses. The stored state always occupies 32 or 64 bytes on 32-bit platforms and 64 bytes on 64-bit platforms. One megabyte zone can keep about 32 thousand 32-byte states or about 16 thousand 64-byte states. If the zone storage is exhausted, the server will return the error to all further requests.

Additionally, as part of our commercial subscription, the status information for each such shared memory zone can be obtained or reset with the API since 1.17.7.
",["Prior to version 1.7.6, a `key` could contain exactly one variable.","Additionally, as part of our [commercial subscription](http://nginx.com/products/), the [status information](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_limit_conns_) for each such shared memory zone can be [obtained](https://nginx.org/en/docs/http/ngx_http_api_module.html#getHttpLimitConnZone) or [reset](https://nginx.org/en/docs/http/ngx_http_api_module.html#deleteHttpLimitConnZoneStat) with the [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) since 1.17.7."],"
Syntax:limit_conn_zone key zone=name:size;
Default:
Context:http
"], +[4,"limit_conn_zone","Sets parameters for a shared memory zone that will keep states for various keys. In particular, the state includes the current number of connections. The `key` can contain text, variables, and their combination. Requests with an empty key value are not accounted.","

Sets parameters for a shared memory zone that will keep states for various keys. In particular, the state includes the current number of connections. The key can contain text, variables, and their combination. Requests with an empty key value are not accounted.

Prior to version 1.7.6, a key could contain exactly one variable.

Usage example:

limit_conn_zone $binary_remote_addr zone=addr:10m;\n

Here, a client IP address serves as a key. Note that instead of $remote_addr, the $binary_remote_addr variable is used here. The $remote_addr variable’s size can vary from 7 to 15 bytes. The stored state occupies either 32 or 64 bytes of memory on 32-bit platforms and always 64 bytes on 64-bit platforms. The $binary_remote_addr variable’s size is always 4 bytes for IPv4 addresses or 16 bytes for IPv6 addresses. The stored state always occupies 32 or 64 bytes on 32-bit platforms and 64 bytes on 64-bit platforms. One megabyte zone can keep about 32 thousand 32-byte states or about 16 thousand 64-byte states. If the zone storage is exhausted, the server will return the error to all further requests.

Additionally, as part of our commercial subscription, the status information for each such shared memory zone can be obtained or reset with the API since 1.17.7.
",["Prior to version 1.7.6, a `key` could contain exactly one variable.","Additionally, as part of our [commercial subscription](https://www.f5.com/products), the [status information](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_limit_conns_) for each such shared memory zone can be [obtained](https://nginx.org/en/docs/http/ngx_http_api_module.html#getHttpLimitConnZone) or [reset](https://nginx.org/en/docs/http/ngx_http_api_module.html#deleteHttpLimitConnZoneStat) with the [API](https://nginx.org/en/docs/http/ngx_http_api_module.html) since 1.17.7."],"
Syntax:limit_conn_zone key zone=name:size;
Default:
Context:http
"], [4,"limit_zone","This directive was made obsolete in version 1.1.8 and was removed in version 1.7.6. An equivalent [limit\\_conn\\_zone](https://nginx.org/en/docs/http/ngx_http_limit_conn_module.html#limit_conn_zone) directive with a changed syntax should be used instead:","

This directive was made obsolete in version 1.1.8 and was removed in version 1.7.6. An equivalent limit_conn_zone directive with a changed syntax should be used instead:

limit_conn_zone $variable zone=name:size;
",["`limit_conn_zone` `$variable` `zone`\\=`name`:`size`;"],"
Syntax:limit_zone name $variable size;
Default:
Context:http
"], [5,"

$limit_conn_status
keeps the result of limiting the number of connections (1.17.6): PASSED, REJECTED, or REJECTED_DRY_RUN
"] ] \ No newline at end of file diff --git a/assets/details/ngx_http_mp4_module.json b/assets/details/ngx_http_mp4_module.json index ed57cab..70e3247 100644 --- a/assets/details/ngx_http_mp4_module.json +++ b/assets/details/ngx_http_mp4_module.json @@ -2,7 +2,7 @@ [4,"mp4","Turns on module processing in a surrounding location.","

Turns on module processing in a surrounding location.

",[],"
Syntax:mp4;
Default:
Context:location
"], [4,"mp4_buffer_size","Sets the initial `size` of the buffer used for processing MP4 files.","

Sets the initial size of the buffer used for processing MP4 files.

",[],"
Syntax:mp4_buffer_size size;
Default:
mp4_buffer_size 512K;
Context:http, server, location
"], [4,"mp4_max_buffer_size","During metadata processing, a larger buffer may become necessary. Its size cannot exceed the specified `size`, or else nginx will return the 500 (Internal Server Error) server error, and log the following message:","

During metadata processing, a larger buffer may become necessary. Its size cannot exceed the specified size, or else nginx will return the 500 (Internal Server Error) server error, and log the following message:

\"/some/movie/file.mp4\" mp4 moov atom is too large:\n12583268, you may want to increase mp4_max_buffer_size\n
",[],"
Syntax:mp4_max_buffer_size size;
Default:
mp4_max_buffer_size 10M;
Context:http, server, location
"], -[4,"mp4_limit_rate","Limits the rate of response transmission to a client. The rate is limited based on the average bitrate of the MP4 file served. To calculate the rate, the bitrate is multiplied by the specified `factor`. The special value “`on`” corresponds to the factor of 1.1. The special value “`off`” disables rate limiting. The limit is set per a request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit.","

Limits the rate of response transmission to a client. The rate is limited based on the average bitrate of the MP4 file served. To calculate the rate, the bitrate is multiplied by the specified factor. The special value “on” corresponds to the factor of 1.1. The special value “off” disables rate limiting. The limit is set per a request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit.

This directive is available as part of our commercial subscription.
",["This directive is available as part of our [commercial subscription](http://nginx.com/products/)."],"
Syntax:mp4_limit_rate on | off | factor;
Default:
mp4_limit_rate off;
Context:http, server, location
"], -[4,"mp4_limit_rate_after","Sets the initial amount of media data (measured in playback time) after which the further transmission of the response to a client will be rate limited.","

Sets the initial amount of media data (measured in playback time) after which the further transmission of the response to a client will be rate limited.

This directive is available as part of our commercial subscription.
",["This directive is available as part of our [commercial subscription](http://nginx.com/products/)."],"
Syntax:mp4_limit_rate_after time;
Default:
mp4_limit_rate_after 60s;
Context:http, server, location
"], +[4,"mp4_limit_rate","Limits the rate of response transmission to a client. The rate is limited based on the average bitrate of the MP4 file served. To calculate the rate, the bitrate is multiplied by the specified `factor`. The special value “`on`” corresponds to the factor of 1.1. The special value “`off`” disables rate limiting. The limit is set per a request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit.","

Limits the rate of response transmission to a client. The rate is limited based on the average bitrate of the MP4 file served. To calculate the rate, the bitrate is multiplied by the specified factor. The special value “on” corresponds to the factor of 1.1. The special value “off” disables rate limiting. The limit is set per a request, and so if a client simultaneously opens two connections, the overall rate will be twice as much as the specified limit.

This directive is available as part of our commercial subscription.
",["This directive is available as part of our [commercial subscription](https://www.f5.com/products)."],"
Syntax:mp4_limit_rate on | off | factor;
Default:
mp4_limit_rate off;
Context:http, server, location
"], +[4,"mp4_limit_rate_after","Sets the initial amount of media data (measured in playback time) after which the further transmission of the response to a client will be rate limited.","

Sets the initial amount of media data (measured in playback time) after which the further transmission of the response to a client will be rate limited.

This directive is available as part of our commercial subscription.
",["This directive is available as part of our [commercial subscription](https://www.f5.com/products)."],"
Syntax:mp4_limit_rate_after time;
Default:
mp4_limit_rate_after 60s;
Context:http, server, location
"], [4,"mp4_start_key_frame","Forces output video to always start with a key video frame. If the `start` argument does not point to a key frame, initial frames are hidden using an mp4 edit list. Edit lists are supported by major players and browsers such as Chrome, Safari, QuickTime and ffmpeg, partially supported by Firefox.","

Forces output video to always start with a key video frame. If the start argument does not point to a key frame, initial frames are hidden using an mp4 edit list. Edit lists are supported by major players and browsers such as Chrome, Safari, QuickTime and ffmpeg, partially supported by Firefox.

",[],"
Syntax:mp4_start_key_frame on | off;
Default:
mp4_start_key_frame off;
Context:http, server, location

This directive appeared in version 1.21.4.

"] ] \ No newline at end of file diff --git a/assets/details/ngx_http_proxy_module.json b/assets/details/ngx_http_proxy_module.json index c31b55e..14ec2c7 100644 --- a/assets/details/ngx_http_proxy_module.json +++ b/assets/details/ngx_http_proxy_module.json @@ -15,8 +15,8 @@ [4,"proxy_cache_max_range_offset","Sets an offset in bytes for byte-range requests. If the range is beyond the offset, the range request will be passed to the proxied server and the response will not be cached.","

Sets an offset in bytes for byte-range requests. If the range is beyond the offset, the range request will be passed to the proxied server and the response will not be cached.

",[],"
Syntax:proxy_cache_max_range_offset number;
Default:
Context:http, server, location

This directive appeared in version 1.11.6.

"], [4,"proxy_cache_methods","If the client request method is listed in this directive then the response will be cached. “`GET`” and “`HEAD`” methods are always added to the list, though it is recommended to specify them explicitly. See also the [proxy\\_no\\_cache](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_no_cache) directive.","

If the client request method is listed in this directive then the response will be cached. “GET” and “HEAD” methods are always added to the list, though it is recommended to specify them explicitly. See also the proxy_no_cache directive.

",[],"
Syntax:proxy_cache_methods GET | HEAD | POST ...;
Default:
proxy_cache_methods GET HEAD;
Context:http, server, location

This directive appeared in version 0.7.59.

"], [4,"proxy_cache_min_uses","Sets the `number` of requests after which the response will be cached.","

Sets the number of requests after which the response will be cached.

",[],"
Syntax:proxy_cache_min_uses number;
Default:
proxy_cache_min_uses 1;
Context:http, server, location
"], -[4,"proxy_cache_path","Sets the path and other parameters of a cache. Cache data are stored in files. The file name in a cache is a result of applying the MD5 function to the [cache key](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_key). The `levels` parameter defines hierarchy levels of a cache: from 1 to 3, each level accepts values 1 or 2. For example, in the following configuration","

Sets the path and other parameters of a cache. Cache data are stored in files. The file name in a cache is a result of applying the MD5 function to the cache key. The levels parameter defines hierarchy levels of a cache: from 1 to 3, each level accepts values 1 or 2. For example, in the following configuration

proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=one:10m;\n

file names in a cache will look like this:

/data/nginx/cache/c/29/b7f54b2df7773722d382f4809d65029c\n

A cached response is first written to a temporary file, and then the file is renamed. Starting from version 0.8.9, temporary files and the cache can be put on different file systems. However, be aware that in this case a file is copied across two file systems instead of the cheap renaming operation. It is thus recommended that for any given location both cache and a directory holding temporary files are put on the same file system. The directory for temporary files is set based on the use_temp_path parameter (1.7.10). If this parameter is omitted or set to the value on, the directory set by the proxy_temp_path directive for the given location will be used. If the value is set to off, temporary files will be put directly in the cache directory.

In addition, all active keys and information about data are stored in a shared memory zone, whose name and size are configured by the keys_zone parameter. One megabyte zone can store about 8 thousand keys.

As part of commercial subscription, the shared memory zone also stores extended cache information, thus, it is required to specify a larger zone size for the same number of keys. For example, one megabyte zone can store about 4 thousand keys.

Cached data that are not accessed during the time specified by the inactive parameter get removed from the cache regardless of their freshness. By default, inactive is set to 10 minutes.

",["As part of [commercial subscription](http://nginx.com/products/), the shared memory zone also stores extended cache [information](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_caches_), thus, it is required to specify a larger zone size for the same number of keys. For example, one megabyte zone can store about 4 thousand keys."],"
Syntax:proxy_cache_path path [levels=levels] [use_temp_path=on|off] keys_zone=name:size [inactive=time] [max_size=size] [min_free=size] [manager_files=number] [manager_sleep=time] [manager_threshold=time] [loader_files=number] [loader_sleep=time] [loader_threshold=time] [purger=on|off] [purger_files=number] [purger_sleep=time] [purger_threshold=time];
Default:
Context:http
"], -[4,"proxy_cache_purge","Defines conditions under which the request will be considered a cache purge request. If at least one value of the string parameters is not empty and is not equal to “0” then the cache entry with a corresponding [cache key](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_key) is removed. The result of successful operation is indicated by returning the 204 (No Content) response.","

Defines conditions under which the request will be considered a cache purge request. If at least one value of the string parameters is not empty and is not equal to “0” then the cache entry with a corresponding cache key is removed. The result of successful operation is indicated by returning the 204 (No Content) response.

If the cache key of a purge request ends with an asterisk (“*”), all cache entries matching the wildcard key will be removed from the cache. However, these entries will remain on the disk until they are deleted for either inactivity, or processed by the cache purger (1.7.12), or a client attempts to access them.

Example configuration:

proxy_cache_path /data/nginx/cache keys_zone=cache_zone:10m;\n\nmap $request_method $purge_method {\n    PURGE   1;\n    default 0;\n}\n\nserver {\n    ...\n    location / {\n        proxy_pass http://backend;\n        proxy_cache cache_zone;\n        proxy_cache_key $uri;\n        proxy_cache_purge $purge_method;\n    }\n}\n
This functionality is available as part of our commercial subscription.
",["This functionality is available as part of our [commercial subscription](http://nginx.com/products/)."],"
Syntax:proxy_cache_purge string ...;
Default:
Context:http, server, location

This directive appeared in version 1.5.7.

"], +[4,"proxy_cache_path","Sets the path and other parameters of a cache. Cache data are stored in files. The file name in a cache is a result of applying the MD5 function to the [cache key](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_key). The `levels` parameter defines hierarchy levels of a cache: from 1 to 3, each level accepts values 1 or 2. For example, in the following configuration","

Sets the path and other parameters of a cache. Cache data are stored in files. The file name in a cache is a result of applying the MD5 function to the cache key. The levels parameter defines hierarchy levels of a cache: from 1 to 3, each level accepts values 1 or 2. For example, in the following configuration

proxy_cache_path /data/nginx/cache levels=1:2 keys_zone=one:10m;\n

file names in a cache will look like this:

/data/nginx/cache/c/29/b7f54b2df7773722d382f4809d65029c\n

A cached response is first written to a temporary file, and then the file is renamed. Starting from version 0.8.9, temporary files and the cache can be put on different file systems. However, be aware that in this case a file is copied across two file systems instead of the cheap renaming operation. It is thus recommended that for any given location both cache and a directory holding temporary files are put on the same file system. The directory for temporary files is set based on the use_temp_path parameter (1.7.10). If this parameter is omitted or set to the value on, the directory set by the proxy_temp_path directive for the given location will be used. If the value is set to off, temporary files will be put directly in the cache directory.

In addition, all active keys and information about data are stored in a shared memory zone, whose name and size are configured by the keys_zone parameter. One megabyte zone can store about 8 thousand keys.

As part of commercial subscription, the shared memory zone also stores extended cache information, thus, it is required to specify a larger zone size for the same number of keys. For example, one megabyte zone can store about 4 thousand keys.

Cached data that are not accessed during the time specified by the inactive parameter get removed from the cache regardless of their freshness. By default, inactive is set to 10 minutes.

",["As part of [commercial subscription](https://www.f5.com/products), the shared memory zone also stores extended cache [information](https://nginx.org/en/docs/http/ngx_http_api_module.html#http_caches_), thus, it is required to specify a larger zone size for the same number of keys. For example, one megabyte zone can store about 4 thousand keys."],"
Syntax:proxy_cache_path path [levels=levels] [use_temp_path=on|off] keys_zone=name:size [inactive=time] [max_size=size] [min_free=size] [manager_files=number] [manager_sleep=time] [manager_threshold=time] [loader_files=number] [loader_sleep=time] [loader_threshold=time] [purger=on|off] [purger_files=number] [purger_sleep=time] [purger_threshold=time];
Default:
Context:http
"], +[4,"proxy_cache_purge","Defines conditions under which the request will be considered a cache purge request. If at least one value of the string parameters is not empty and is not equal to “0” then the cache entry with a corresponding [cache key](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_key) is removed. The result of successful operation is indicated by returning the 204 (No Content) response.","

Defines conditions under which the request will be considered a cache purge request. If at least one value of the string parameters is not empty and is not equal to “0” then the cache entry with a corresponding cache key is removed. The result of successful operation is indicated by returning the 204 (No Content) response.

If the cache key of a purge request ends with an asterisk (“*”), all cache entries matching the wildcard key will be removed from the cache. However, these entries will remain on the disk until they are deleted for either inactivity, or processed by the cache purger (1.7.12), or a client attempts to access them.

Example configuration:

proxy_cache_path /data/nginx/cache keys_zone=cache_zone:10m;\n\nmap $request_method $purge_method {\n    PURGE   1;\n    default 0;\n}\n\nserver {\n    ...\n    location / {\n        proxy_pass http://backend;\n        proxy_cache cache_zone;\n        proxy_cache_key $uri;\n        proxy_cache_purge $purge_method;\n    }\n}\n
This functionality is available as part of our commercial subscription.
",["This functionality is available as part of our [commercial subscription](https://www.f5.com/products)."],"
Syntax:proxy_cache_purge string ...;
Default:
Context:http, server, location

This directive appeared in version 1.5.7.

"], [4,"proxy_cache_revalidate","Enables revalidation of expired cache items using conditional requests with the “If-Modified-Since” and “If-None-Match” header fields.","

Enables revalidation of expired cache items using conditional requests with the “If-Modified-Since” and “If-None-Match” header fields.

",[],"
Syntax:proxy_cache_revalidate on | off;
Default:
proxy_cache_revalidate off;
Context:http, server, location

This directive appeared in version 1.5.7.

"], [4,"proxy_cache_use_stale","Determines in which cases a stale cached response can be used during communication with the proxied server. The directive’s parameters match the parameters of the [proxy\\_next\\_upstream](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_next_upstream) directive.","

Determines in which cases a stale cached response can be used during communication with the proxied server. The directive’s parameters match the parameters of the proxy_next_upstream directive.

The error parameter also permits using a stale cached response if a proxied server to process a request cannot be selected.

",[],"
Syntax:proxy_cache_use_stale error | timeout | invalid_header | updating | http_500 | http_502 | http_503 | http_504 | http_403 | http_404 | http_429 | off ...;
Default:
proxy_cache_use_stale off;
Context:http, server, location
"], [4,"proxy_cache_valid","Sets caching time for different response codes. For example, the following directives","

Sets caching time for different response codes. For example, the following directives

proxy_cache_valid 200 302 10m;\nproxy_cache_valid 404      1m;\n

set 10 minutes of caching for responses with codes 200 and 302 and 1 minute for responses with code 404.

If only caching time is specified

proxy_cache_valid 5m;\n

then only 200, 301, and 302 responses are cached.

In addition, the any parameter can be specified to cache any responses:

proxy_cache_valid 200 302 10m;\nproxy_cache_valid 301      1h;\nproxy_cache_valid any      1m;\n

Parameters of caching can also be set directly in the response header. This has higher priority than setting of caching time using the directive.

",[],"
Syntax:proxy_cache_valid [code ...] time;
Default:
Context:http, server, location
"], @@ -32,7 +32,7 @@ [4,"proxy_ignore_client_abort","Determines whether the connection with a proxied server should be closed when a client closes the connection without waiting for a response.","

Determines whether the connection with a proxied server should be closed when a client closes the connection without waiting for a response.

",[],"
Syntax:proxy_ignore_client_abort on | off;
Default:
proxy_ignore_client_abort off;
Context:http, server, location
"], [4,"proxy_ignore_headers","Disables processing of certain response header fields from the proxied server. The following fields can be ignored: “X-Accel-Redirect”, “X-Accel-Expires”, “X-Accel-Limit-Rate” (1.1.6), “X-Accel-Buffering” (1.1.6), “X-Accel-Charset” (1.1.6), “Expires”, “Cache-Control”, “Set-Cookie” (0.8.44), and “Vary” (1.7.7).","

Disables processing of certain response header fields from the proxied server. The following fields can be ignored: “X-Accel-Redirect”, “X-Accel-Expires”, “X-Accel-Limit-Rate” (1.1.6), “X-Accel-Buffering” (1.1.6), “X-Accel-Charset” (1.1.6), “Expires”, “Cache-Control”, “Set-Cookie” (0.8.44), and “Vary” (1.7.7).

If not disabled, processing of these header fields has the following effect:

",[],"
Syntax:proxy_ignore_headers field ...;
Default:
Context:http, server, location
"], [4,"proxy_intercept_errors","Determines whether proxied responses with codes greater than or equal to 300 should be passed to a client or be intercepted and redirected to nginx for processing with the [error\\_page](https://nginx.org/en/docs/http/ngx_http_core_module.html#error_page) directive.","

Determines whether proxied responses with codes greater than or equal to 300 should be passed to a client or be intercepted and redirected to nginx for processing with the error_page directive.

",[],"
Syntax:proxy_intercept_errors on | off;
Default:
proxy_intercept_errors off;
Context:http, server, location
"], -[4,"proxy_limit_rate","Limits the speed of reading the response from the proxied server. The `rate` is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if nginx simultaneously opens two connections to the proxied server, the overall rate will be twice as much as the specified limit. The limitation works only if [buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering) of responses from the proxied server is enabled.","

Limits the speed of reading the response from the proxied server. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if nginx simultaneously opens two connections to the proxied server, the overall rate will be twice as much as the specified limit. The limitation works only if buffering of responses from the proxied server is enabled.

",[],"
Syntax:proxy_limit_rate rate;
Default:
proxy_limit_rate 0;
Context:http, server, location

This directive appeared in version 1.7.7.

"], +[4,"proxy_limit_rate","Limits the speed of reading the response from the proxied server. The `rate` is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if nginx simultaneously opens two connections to the proxied server, the overall rate will be twice as much as the specified limit. The limitation works only if [buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering) of responses from the proxied server is enabled. Parameter value can contain variables (1.27.0).","

Limits the speed of reading the response from the proxied server. The rate is specified in bytes per second. The zero value disables rate limiting. The limit is set per a request, and so if nginx simultaneously opens two connections to the proxied server, the overall rate will be twice as much as the specified limit. The limitation works only if buffering of responses from the proxied server is enabled. Parameter value can contain variables (1.27.0).

",[],"
Syntax:proxy_limit_rate rate;
Default:
proxy_limit_rate 0;
Context:http, server, location

This directive appeared in version 1.7.7.

"], [4,"proxy_max_temp_file_size","When [buffering](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering) of responses from the proxied server is enabled, and the whole response does not fit into the buffers set by the [proxy\\_buffer\\_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) and [proxy\\_buffers](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers) directives, a part of the response can be saved to a temporary file. This directive sets the maximum `size` of the temporary file. The size of data written to the temporary file at a time is set by the [proxy\\_temp\\_file\\_write\\_size](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_temp_file_write_size) directive.","

When buffering of responses from the proxied server is enabled, and the whole response does not fit into the buffers set by the proxy_buffer_size and proxy_buffers directives, a part of the response can be saved to a temporary file. This directive sets the maximum size of the temporary file. The size of data written to the temporary file at a time is set by the proxy_temp_file_write_size directive.

The zero value disables buffering of responses to temporary files.

This restriction does not apply to responses that will be cached or stored on disk.
",["This restriction does not apply to responses that will be [cached](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache) or [stored](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_store) on disk."],"
Syntax:proxy_max_temp_file_size size;
Default:
proxy_max_temp_file_size 1024m;
Context:http, server, location
"], [4,"proxy_method","Specifies the HTTP `method` to use in requests forwarded to the proxied server instead of the method from the client request. Parameter value can contain variables (1.11.6).","

Specifies the HTTP method to use in requests forwarded to the proxied server instead of the method from the client request. Parameter value can contain variables (1.11.6).

",[],"
Syntax:proxy_method method;
Default:
Context:http, server, location
"], [4,"proxy_next_upstream","Specifies in which cases a request should be passed to the next server:","

Specifies in which cases a request should be passed to the next server:

error
an error occurred while establishing a connection with the server, passing a request to it, or reading the response header;
timeout
a timeout has occurred while establishing a connection with the server, passing a request to it, or reading the response header;
invalid_header
a server returned an empty or invalid response;
http_500
a server returned a response with the code 500;
http_502
a server returned a response with the code 502;
http_503
a server returned a response with the code 503;
http_504
a server returned a response with the code 504;
http_403
a server returned a response with the code 403;
http_404
a server returned a response with the code 404;
http_429
a server returned a response with the code 429 (1.11.13);
non_idempotent
normally, requests with a non-idempotent method (POST, LOCK, PATCH) are not passed to the next server if a request has been sent to an upstream server (1.9.13); enabling this option explicitly allows retrying such requests;
off
disables passing a request to the next server.

One should bear in mind that passing a request to the next server is only possible if nothing has been sent to a client yet. That is, if an error or timeout occurs in the middle of the transferring of a response, fixing this is impossible.

The directive also defines what is considered an unsuccessful attempt of communication with a server. The cases of error, timeout and invalid_header are always considered unsuccessful attempts, even if they are not specified in the directive. The cases of http_500, http_502, http_503, http_504, and http_429 are considered unsuccessful attempts only if they are specified in the directive. The cases of http_403 and http_404 are never considered unsuccessful attempts.

Passing a request to the next server can be limited by the number of tries and by time.

",[],"
Syntax:proxy_next_upstream error | timeout | invalid_header | http_500 | http_502 | http_503 | http_504 | http_403 | http_404 | http_429 | non_idempotent | off ...;
Default:
proxy_next_upstream error timeout;
Context:http, server, location
"], diff --git a/assets/manifest/core.json b/assets/manifest/core.json index 0e2947a..29abece 100644 --- a/assets/manifest/core.json +++ b/assets/manifest/core.json @@ -1,5 +1,5 @@ [ -[1,"ngx_core_module","ngx_http_core_module","ngx_http_access_module","ngx_http_addition_module","ngx_http_api_module","ngx_http_auth_basic_module","ngx_http_auth_jwt_module","ngx_http_auth_request_module","ngx_http_autoindex_module","ngx_http_browser_module","ngx_http_charset_module","ngx_http_dav_module","ngx_http_empty_gif_module","ngx_http_f4f_module","ngx_http_fastcgi_module","ngx_http_flv_module","ngx_http_geo_module","ngx_http_geoip_module","ngx_http_grpc_module","ngx_http_gunzip_module","ngx_http_gzip_module","ngx_http_gzip_static_module","ngx_http_headers_module","ngx_http_hls_module","ngx_http_image_filter_module","ngx_http_index_module","ngx_http_internal_redirect_module","ngx_http_keyval_module","ngx_http_limit_conn_module","ngx_http_limit_req_module","ngx_http_log_module","ngx_http_map_module","ngx_http_memcached_module","ngx_http_mirror_module","ngx_http_mp4_module","ngx_http_perl_module","ngx_http_proxy_module","ngx_http_proxy_protocol_vendor_module","ngx_http_random_index_module","ngx_http_realip_module","ngx_http_referer_module","ngx_http_rewrite_module","ngx_http_scgi_module","ngx_http_secure_link_module","ngx_http_session_log_module","ngx_http_slice_module","ngx_http_split_clients_module","ngx_http_ssi_module","ngx_http_ssl_module","ngx_http_status_module","ngx_http_stub_status_module","ngx_http_sub_module","ngx_http_upstream_module","ngx_http_upstream_conf_module","ngx_http_upstream_hc_module","ngx_http_userid_module","ngx_http_uwsgi_module","ngx_http_v2_module","ngx_http_v3_module","ngx_http_xslt_module","ngx_mail_core_module","ngx_mail_auth_http_module","ngx_mail_proxy_module","ngx_mail_realip_module","ngx_mail_ssl_module","ngx_mail_imap_module","ngx_mail_pop3_module","ngx_mail_smtp_module","ngx_stream_core_module","ngx_stream_access_module","ngx_stream_geo_module"
,"ngx_stream_geoip_module","ngx_stream_keyval_module","ngx_stream_limit_conn_module","ngx_stream_log_module","ngx_stream_map_module","ngx_stream_mqtt_preread_module","ngx_stream_mqtt_filter_module","ngx_stream_proxy_module","ngx_stream_proxy_protocol_vendor_module","ngx_stream_realip_module","ngx_stream_return_module","ngx_stream_set_module","ngx_stream_split_clients_module","ngx_stream_ssl_module","ngx_stream_ssl_preread_module","ngx_stream_upstream_module","ngx_stream_upstream_hc_module","ngx_stream_zone_sync_module","ngx_google_perftools_module","ngx_otel_module"], +[1,"ngx_core_module","ngx_http_core_module","ngx_http_access_module","ngx_http_addition_module","ngx_http_api_module","ngx_http_auth_basic_module","ngx_http_auth_jwt_module","ngx_http_auth_request_module","ngx_http_autoindex_module","ngx_http_browser_module","ngx_http_charset_module","ngx_http_dav_module","ngx_http_empty_gif_module","ngx_http_f4f_module","ngx_http_fastcgi_module","ngx_http_flv_module","ngx_http_geo_module","ngx_http_geoip_module","ngx_http_grpc_module","ngx_http_gunzip_module","ngx_http_gzip_module","ngx_http_gzip_static_module","ngx_http_headers_module","ngx_http_hls_module","ngx_http_image_filter_module","ngx_http_index_module","ngx_http_internal_redirect_module","ngx_http_keyval_module","ngx_http_limit_conn_module","ngx_http_limit_req_module","ngx_http_log_module","ngx_http_map_module","ngx_http_memcached_module","ngx_http_mirror_module","ngx_http_mp4_module","ngx_http_perl_module","ngx_http_proxy_module","ngx_http_proxy_protocol_vendor_module","ngx_http_random_index_module","ngx_http_realip_module","ngx_http_referer_module","ngx_http_rewrite_module","ngx_http_scgi_module","ngx_http_secure_link_module","ngx_http_session_log_module","ngx_http_slice_module","ngx_http_split_clients_module","ngx_http_ssi_module","ngx_http_ssl_module","ngx_http_status_module","ngx_http_stub_status_module","ngx_http_sub_module","ngx_http_upstream_module","ngx_http_upstream_conf_module","ngx_http_upstream
_hc_module","ngx_http_userid_module","ngx_http_uwsgi_module","ngx_http_v2_module","ngx_http_v3_module","ngx_http_xslt_module","ngx_mail_core_module","ngx_mail_auth_http_module","ngx_mail_proxy_module","ngx_mail_realip_module","ngx_mail_ssl_module","ngx_mail_imap_module","ngx_mail_pop3_module","ngx_mail_smtp_module","ngx_stream_core_module","ngx_stream_access_module","ngx_stream_geo_module","ngx_stream_geoip_module","ngx_stream_keyval_module","ngx_stream_limit_conn_module","ngx_stream_log_module","ngx_stream_map_module","ngx_stream_mqtt_preread_module","ngx_stream_mqtt_filter_module","ngx_stream_pass_module","ngx_stream_proxy_module","ngx_stream_proxy_protocol_vendor_module","ngx_stream_realip_module","ngx_stream_return_module","ngx_stream_set_module","ngx_stream_split_clients_module","ngx_stream_ssl_module","ngx_stream_ssl_preread_module","ngx_stream_upstream_module","ngx_stream_upstream_hc_module","ngx_stream_zone_sync_module","ngx_google_perftools_module","ngx_mgmt_module","ngx_otel_module"], [2,"accept_mutex",["on | off"],"accept_mutex off;",["events"],1,null,"ngx_core_module.html#accept_mutex",{"insert":"accept_mutex ${1|on,off|};$0"}], [2,"accept_mutex_delay",["time"],"accept_mutex_delay 500ms;",["events"],1,null,"ngx_core_module.html#accept_mutex_delay",{"insert":"accept_mutex_delay ${1:time};$0"}], [2,"daemon",["on | off"],"daemon on;",["main"],1,null,"ngx_core_module.html#daemon",{"insert":"daemon ${1|on,off|};$0"}], @@ -440,533 +440,4 @@ [2,"proxy_temp_path",["path\n [level1\n [level2\n [level3]]]"],"proxy_temp_path proxy_temp;",["http","server","location"],37,null,"http/ngx_http_proxy_module.html#proxy_temp_path",{"insert":"proxy_temp_path ${1:path [level1 [level2 [level3]]]};$0"}], [3,"$proxy_host","name and port of a proxied server as specified in the\nproxy_pass directive;",37,null,"http/ngx_http_proxy_module.html#var_proxy_host",null], [3,"$proxy_port","port of a proxied server as specified in the\nproxy_pass directive, or the protocol’s default 
port;",37,null,"http/ngx_http_proxy_module.html#var_proxy_port",null], -[3,"$proxy_add_x_forwarded_for","the “X-Forwarded-For” client request header field\nwith the $remote_addr variable appended to it, separated by a comma.\nIf the “X-Forwarded-For” field is not present in the client\nrequest header, the $proxy_add_x_forwarded_for variable is equal\nto the $remote_addr variable.",37,null,"http/ngx_http_proxy_module.html#var_proxy_add_x_forwarded_for",null], -[3,"$proxy_protocol_tlv_aws_vpce_id","TLV value from the PROXY Protocol header representing the\nID\nof AWS VPC endpoint",38,null,"http/ngx_http_proxy_protocol_vendor_module.html#var_proxy_protocol_tlv_aws_vpce_id",null], -[3,"$proxy_protocol_tlv_azure_pel_id","TLV value from the PROXY Protocol header representing the\nLinkID\nof Azure private endpoint",38,null,"http/ngx_http_proxy_protocol_vendor_module.html#var_proxy_protocol_tlv_azure_pel_id",null], -[3,"$proxy_protocol_tlv_gcp_conn_id","TLV value from the PROXY Protocol header representing\nGoogle Cloud PSC\nconnection ID",38,null,"http/ngx_http_proxy_protocol_vendor_module.html#var_proxy_protocol_tlv_gcp_conn_id",null], -[2,"random_index",["on | off"],"random_index off;",["location"],39,null,"http/ngx_http_random_index_module.html#random_index",{"insert":"random_index ${1|on,off|};$0"}], -[2,"set_real_ip_from",["address |\n CIDR |\n unix:"],null,["http","server","location"],40,null,"http/ngx_http_realip_module.html#set_real_ip_from",{"insert":"set_real_ip_from ${1:address | CIDR | unix:};$0"}], -[2,"real_ip_header",["field |\n X-Real-IP |\n X-Forwarded-For |\n proxy_protocol"],"real_ip_header X-Real-IP;",["http","server","location"],40,null,"http/ngx_http_realip_module.html#real_ip_header",{"insert":"real_ip_header ${1:field | X-Real-IP | X-Forwarded-For | proxy_protocol};$0"}], -[2,"real_ip_recursive",["on | off"],"real_ip_recursive 
off;",["http","server","location"],40,"1.3.0","http/ngx_http_realip_module.html#real_ip_recursive",{"insert":"real_ip_recursive ${1|on,off|};$0"}], -[3,"$realip_remote_addr","keeps the original client address (1.9.7)",40,null,"http/ngx_http_realip_module.html#var_realip_remote_addr",null], -[3,"$realip_remote_port","keeps the original client port (1.11.0)",40,null,"http/ngx_http_realip_module.html#var_realip_remote_port",null], -[2,"referer_hash_bucket_size",["size"],"referer_hash_bucket_size 64;",["server","location"],41,"1.0.5","http/ngx_http_referer_module.html#referer_hash_bucket_size",{"insert":"referer_hash_bucket_size ${1:size};$0"}], -[2,"referer_hash_max_size",["size"],"referer_hash_max_size 2048;",["server","location"],41,"1.0.5","http/ngx_http_referer_module.html#referer_hash_max_size",{"insert":"referer_hash_max_size ${1:size};$0"}], -[2,"valid_referers",["none |\n blocked |\n server_names |\n string\n ..."],null,["server","location"],41,null,"http/ngx_http_referer_module.html#valid_referers",{"insert":"valid_referers ${1:none | blocked | server_names | string ...};$0"}], -[3,"$invalid_referer","Empty string, if the “Referer” request header field\nvalue is considered\nvalid, otherwise “1”.",41,null,"http/ngx_http_referer_module.html#var_invalid_referer",null], -[2,"break",[""],null,["server","location","if"],42,null,"http/ngx_http_rewrite_module.html#break",{"insert":"break;$0"}], -[2,"if",["(condition) { ... }"],null,["server","location"],42,null,"http/ngx_http_rewrite_module.html#if",{"insert":"if ${1:(condition) { ... 
\\}};$0"}], -[2,"return",["code [text]","code URL","URL"],null,["server","location","if"],42,null,"http/ngx_http_rewrite_module.html#return",{}], -[2,"rewrite",["regex\n replacement\n [flag]"],null,["server","location","if"],42,null,"http/ngx_http_rewrite_module.html#rewrite",{"insert":"rewrite ${1:regex replacement [flag]};$0"}], -[2,"rewrite_log",["on | off"],"rewrite_log off;",["http","server","location","if"],42,null,"http/ngx_http_rewrite_module.html#rewrite_log",{"insert":"rewrite_log ${1|on,off|};$0"}], -[2,"set",["$variable value"],null,["server","location","if"],42,null,"http/ngx_http_rewrite_module.html#set",{"insert":"set ${1:$variable value};$0"}], -[2,"uninitialized_variable_warn",["on | off"],"uninitialized_variable_warn on;",["http","server","location","if"],42,null,"http/ngx_http_rewrite_module.html#uninitialized_variable_warn",{"insert":"uninitialized_variable_warn ${1|on,off|};$0"}], -[2,"scgi_bind",["address\n [transparent] |\n off"],null,["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_bind",{"insert":"scgi_bind ${1:address [transparent] | off};$0"}], -[2,"scgi_buffer_size",["size"],"scgi_buffer_size 4k|8k;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_buffer_size",{"insert":"scgi_buffer_size ${1:size};$0"}], -[2,"scgi_buffering",["on | off"],"scgi_buffering on;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_buffering",{"insert":"scgi_buffering ${1|on,off|};$0"}], -[2,"scgi_buffers",["number size"],"scgi_buffers 8 4k|8k;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_buffers",{"insert":"scgi_buffers ${1:number} ${2:size};$0"}], -[2,"scgi_busy_buffers_size",["size"],"scgi_busy_buffers_size 8k|16k;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_busy_buffers_size",{"insert":"scgi_busy_buffers_size ${1:size};$0"}], -[2,"scgi_cache",["zone | off"],"scgi_cache 
off;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_cache",{"insert":"scgi_cache ${1:zone | off};$0"}], -[2,"scgi_cache_background_update",["on | off"],"scgi_cache_background_update off;",["http","server","location"],43,"1.11.10","http/ngx_http_scgi_module.html#scgi_cache_background_update",{"insert":"scgi_cache_background_update ${1|on,off|};$0"}], -[2,"scgi_cache_bypass",["string ..."],null,["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_cache_bypass",{"insert":"scgi_cache_bypass ${1:string ...};$0"}], -[2,"scgi_cache_key",["string"],null,["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_cache_key",{"insert":"scgi_cache_key ${1:string};$0"}], -[2,"scgi_cache_lock",["on | off"],"scgi_cache_lock off;",["http","server","location"],43,"1.1.12","http/ngx_http_scgi_module.html#scgi_cache_lock",{"insert":"scgi_cache_lock ${1|on,off|};$0"}], -[2,"scgi_cache_lock_age",["time"],"scgi_cache_lock_age 5s;",["http","server","location"],43,"1.7.8","http/ngx_http_scgi_module.html#scgi_cache_lock_age",{"insert":"scgi_cache_lock_age ${1:time};$0"}], -[2,"scgi_cache_lock_timeout",["time"],"scgi_cache_lock_timeout 5s;",["http","server","location"],43,"1.1.12","http/ngx_http_scgi_module.html#scgi_cache_lock_timeout",{"insert":"scgi_cache_lock_timeout ${1:time};$0"}], -[2,"scgi_cache_max_range_offset",["number"],null,["http","server","location"],43,"1.11.6","http/ngx_http_scgi_module.html#scgi_cache_max_range_offset",{"insert":"scgi_cache_max_range_offset ${1:number};$0"}], -[2,"scgi_cache_methods",["GET |\n HEAD |\n POST\n ..."],"scgi_cache_methods GET HEAD;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_cache_methods",{"insert":"scgi_cache_methods ${1:GET | HEAD | POST ...};$0"}], -[2,"scgi_cache_min_uses",["number"],"scgi_cache_min_uses 1;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_cache_min_uses",{"insert":"scgi_cache_min_uses ${1:number};$0"}], 
-[2,"scgi_cache_path",["path\n [levels=levels]\n [use_temp_path=on|off]\n keys_zone=name:size\n [inactive=time]\n [max_size=size]\n [min_free=size]\n [manager_files=number]\n [manager_sleep=time]\n [manager_threshold=time]\n [loader_files=number]\n [loader_sleep=time]\n [loader_threshold=time]\n [purger=on|off]\n [purger_files=number]\n [purger_sleep=time]\n [purger_threshold=time]"],null,["http"],43,null,"http/ngx_http_scgi_module.html#scgi_cache_path",{"insert":"scgi_cache_path ${1:path [levels=levels] [use_temp_path=on|off] keys_zone=name:size [inactive=time] [max_size=size] [min_free=size] [manager_files=number] [manager_sleep=time] [manager_threshold=time] [loader_files=number] [loader_sleep=time] [loader_threshold=time] [purger=on|off] [purger_files=number] [purger_sleep=time] [purger_threshold=time]};$0","args":["levels=levels","use_temp_path=on","keys_zone=name","inactive=time","max_size=size","min_free=size","manager_files=number","manager_sleep=time","manager_threshold=time","loader_files=number","loader_sleep=time","loader_threshold=time","purger=on","purger_files=number","purger_sleep=time","purger_threshold=time"]}], -[2,"scgi_cache_purge",["string ..."],null,["http","server","location"],43,"1.5.7","http/ngx_http_scgi_module.html#scgi_cache_purge",{"insert":"scgi_cache_purge ${1:string ...};$0"}], -[2,"scgi_cache_revalidate",["on | off"],"scgi_cache_revalidate off;",["http","server","location"],43,"1.5.7","http/ngx_http_scgi_module.html#scgi_cache_revalidate",{"insert":"scgi_cache_revalidate ${1|on,off|};$0"}], -[2,"scgi_cache_use_stale",["error |\n timeout |\n invalid_header |\n updating |\n http_500 |\n http_503 |\n http_403 |\n http_404 |\n http_429 |\n off\n ..."],"scgi_cache_use_stale off;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_cache_use_stale",{"insert":"scgi_cache_use_stale ${1:error | timeout | invalid_header | updating | http_500 | http_503 | http_403 | http_404 | http_429 | off ...};$0"}], 
-[2,"scgi_cache_valid",["[code ...] time"],null,["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_cache_valid",{"insert":"scgi_cache_valid ${1:[code ...] time};$0"}], -[2,"scgi_connect_timeout",["time"],"scgi_connect_timeout 60s;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_connect_timeout",{"insert":"scgi_connect_timeout ${1:time};$0"}], -[2,"scgi_force_ranges",["on | off"],"scgi_force_ranges off;",["http","server","location"],43,"1.7.7","http/ngx_http_scgi_module.html#scgi_force_ranges",{"insert":"scgi_force_ranges ${1|on,off|};$0"}], -[2,"scgi_hide_header",["field"],null,["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_hide_header",{"insert":"scgi_hide_header ${1:field};$0"}], -[2,"scgi_ignore_client_abort",["on | off"],"scgi_ignore_client_abort off;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_ignore_client_abort",{"insert":"scgi_ignore_client_abort ${1|on,off|};$0"}], -[2,"scgi_ignore_headers",["field ..."],null,["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_ignore_headers",{"insert":"scgi_ignore_headers ${1:field ...};$0"}], -[2,"scgi_intercept_errors",["on | off"],"scgi_intercept_errors off;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_intercept_errors",{"insert":"scgi_intercept_errors ${1|on,off|};$0"}], -[2,"scgi_limit_rate",["rate"],"scgi_limit_rate 0;",["http","server","location"],43,"1.7.7","http/ngx_http_scgi_module.html#scgi_limit_rate",{"insert":"scgi_limit_rate ${1:rate};$0"}], -[2,"scgi_max_temp_file_size",["size"],"scgi_max_temp_file_size 1024m;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_max_temp_file_size",{"insert":"scgi_max_temp_file_size ${1:size};$0"}], -[2,"scgi_next_upstream",["error |\n timeout |\n invalid_header |\n http_500 |\n http_503 |\n http_403 |\n http_404 |\n http_429 |\n non_idempotent |\n off\n ..."],"scgi_next_upstream error 
timeout;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_next_upstream",{"insert":"scgi_next_upstream ${1:error | timeout | invalid_header | http_500 | http_503 | http_403 | http_404 | http_429 | non_idempotent | off ...};$0"}], -[2,"scgi_next_upstream_timeout",["time"],"scgi_next_upstream_timeout 0;",["http","server","location"],43,"1.7.5","http/ngx_http_scgi_module.html#scgi_next_upstream_timeout",{"insert":"scgi_next_upstream_timeout ${1:time};$0"}], -[2,"scgi_next_upstream_tries",["number"],"scgi_next_upstream_tries 0;",["http","server","location"],43,"1.7.5","http/ngx_http_scgi_module.html#scgi_next_upstream_tries",{"insert":"scgi_next_upstream_tries ${1:number};$0"}], -[2,"scgi_no_cache",["string ..."],null,["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_no_cache",{"insert":"scgi_no_cache ${1:string ...};$0"}], -[2,"scgi_param",["parameter value\n [if_not_empty]"],null,["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_param",{"insert":"scgi_param ${1:parameter value [if_not_empty]};$0"}], -[2,"scgi_pass",["address"],null,["location","if"],43,null,"http/ngx_http_scgi_module.html#scgi_pass",{"insert":"scgi_pass ${1:address};$0"}], -[2,"scgi_pass_header",["field"],null,["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_pass_header",{"insert":"scgi_pass_header ${1:field};$0"}], -[2,"scgi_pass_request_body",["on | off"],"scgi_pass_request_body on;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_pass_request_body",{"insert":"scgi_pass_request_body ${1|on,off|};$0"}], -[2,"scgi_pass_request_headers",["on | off"],"scgi_pass_request_headers on;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_pass_request_headers",{"insert":"scgi_pass_request_headers ${1|on,off|};$0"}], -[2,"scgi_read_timeout",["time"],"scgi_read_timeout 
60s;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_read_timeout",{"insert":"scgi_read_timeout ${1:time};$0"}], -[2,"scgi_request_buffering",["on | off"],"scgi_request_buffering on;",["http","server","location"],43,"1.7.11","http/ngx_http_scgi_module.html#scgi_request_buffering",{"insert":"scgi_request_buffering ${1|on,off|};$0"}], -[2,"scgi_send_timeout",["time"],"scgi_send_timeout 60s;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_send_timeout",{"insert":"scgi_send_timeout ${1:time};$0"}], -[2,"scgi_socket_keepalive",["on | off"],"scgi_socket_keepalive off;",["http","server","location"],43,"1.15.6","http/ngx_http_scgi_module.html#scgi_socket_keepalive",{"insert":"scgi_socket_keepalive ${1|on,off|};$0"}], -[2,"scgi_store",["on |\n off |\n string"],"scgi_store off;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_store",{"insert":"scgi_store ${1:on | off | string};$0"}], -[2,"scgi_store_access",["users:permissions ..."],"scgi_store_access user:rw;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_store_access",{"insert":"scgi_store_access ${1:users:permissions ...};$0"}], -[2,"scgi_temp_file_write_size",["size"],"scgi_temp_file_write_size 8k|16k;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_temp_file_write_size",{"insert":"scgi_temp_file_write_size ${1:size};$0"}], -[2,"scgi_temp_path",["path\n [level1\n [level2\n [level3]]]"],"scgi_temp_path scgi_temp;",["http","server","location"],43,null,"http/ngx_http_scgi_module.html#scgi_temp_path",{"insert":"scgi_temp_path ${1:path [level1 [level2 [level3]]]};$0"}], -[2,"secure_link",["expression"],null,["http","server","location"],44,null,"http/ngx_http_secure_link_module.html#secure_link",{"insert":"secure_link ${1:expression};$0"}], 
-[2,"secure_link_md5",["expression"],null,["http","server","location"],44,null,"http/ngx_http_secure_link_module.html#secure_link_md5",{"insert":"secure_link_md5 ${1:expression};$0"}], -[2,"secure_link_secret",["word"],null,["location"],44,null,"http/ngx_http_secure_link_module.html#secure_link_secret",{"insert":"secure_link_secret ${1:word};$0"}], -[3,"$secure_link","The status of a link check.\nThe specific value depends on the selected operation mode.",44,null,"http/ngx_http_secure_link_module.html#var_secure_link",null], -[3,"$secure_link_expires","The lifetime of a link passed in a request;\nintended to be used only in the\nsecure_link_md5 directive.",44,null,"http/ngx_http_secure_link_module.html#var_secure_link_expires",null], -[2,"session_log",["name | off"],"session_log off;",["http","server","location"],45,null,"http/ngx_http_session_log_module.html#session_log",{"insert":"session_log ${1:name | off};$0"}], -[2,"session_log_format",["name\n string ..."],"session_log_format combined \"...\";",["http"],45,null,"http/ngx_http_session_log_module.html#session_log_format",{"insert":"session_log_format ${1:name string ...};$0"}], -[2,"session_log_zone",["path\n zone=name:size\n [format=format]\n [timeout=time]\n [id=id]\n [md5=md5]"],null,["http"],45,null,"http/ngx_http_session_log_module.html#session_log_zone",{"insert":"session_log_zone ${1:path zone=name:size [format=format] [timeout=time] [id=id] [md5=md5]};$0","args":["zone=name","format=format","timeout=time","id=id","md5=md5"]}], -[3,"$session_log_id","current session ID;",45,null,"http/ngx_http_session_log_module.html#var_session_log_id",null], -[3,"$session_log_binary_id","current session ID in binary form (16 bytes).",45,null,"http/ngx_http_session_log_module.html#var_session_log_binary_id",null], -[2,"slice",["size"],"slice 0;",["http","server","location"],46,null,"http/ngx_http_slice_module.html#slice",{"insert":"slice ${1:size};$0"}], -[3,"$slice_range","the current slice range in\nHTTP\nbyte range 
format,\nfor example, bytes=0-1048575.",46,null,"http/ngx_http_slice_module.html#var_slice_range",null], -[2,"split_clients",["string\n $variable { ... }"],null,["http"],47,null,"http/ngx_http_split_clients_module.html#split_clients",{"insert":"split_clients ${1:string $variable { ... \\}};$0"}], -[2,"ssi",["on | off"],"ssi off;",["http","server","location","if"],48,null,"http/ngx_http_ssi_module.html#ssi",{"insert":"ssi ${1|on,off|};$0"}], -[2,"ssi_last_modified",["on | off"],"ssi_last_modified off;",["http","server","location"],48,"1.5.1","http/ngx_http_ssi_module.html#ssi_last_modified",{"insert":"ssi_last_modified ${1|on,off|};$0"}], -[2,"ssi_min_file_chunk",["size"],"ssi_min_file_chunk 1k;",["http","server","location"],48,null,"http/ngx_http_ssi_module.html#ssi_min_file_chunk",{"insert":"ssi_min_file_chunk ${1:size};$0"}], -[2,"ssi_silent_errors",["on | off"],"ssi_silent_errors off;",["http","server","location"],48,null,"http/ngx_http_ssi_module.html#ssi_silent_errors",{"insert":"ssi_silent_errors ${1|on,off|};$0"}], -[2,"ssi_types",["mime-type ..."],"ssi_types text/html;",["http","server","location"],48,null,"http/ngx_http_ssi_module.html#ssi_types",{"insert":"ssi_types ${1:mime-type ...};$0"}], -[2,"ssi_value_length",["length"],"ssi_value_length 256;",["http","server","location"],48,null,"http/ngx_http_ssi_module.html#ssi_value_length",{"insert":"ssi_value_length ${1:length};$0"}], -[3,"$date_local","current time in the local time zone.\nThe format is set by the config command\nwith the timefmt parameter.",48,null,"http/ngx_http_ssi_module.html#var_date_local",null], -[3,"$date_gmt","current time in GMT.\nThe format is set by the config command\nwith the timefmt parameter.",48,null,"http/ngx_http_ssi_module.html#var_date_gmt",null], -[2,"ssl",["on | off"],"ssl off;",["http","server"],49,null,"http/ngx_http_ssl_module.html#ssl",{"insert":"ssl ${1|on,off|};$0"}], -[2,"ssl_buffer_size",["size"],"ssl_buffer_size 
16k;",["http","server"],49,"1.5.9","http/ngx_http_ssl_module.html#ssl_buffer_size",{"insert":"ssl_buffer_size ${1:size};$0"}], -[2,"ssl_certificate",["file"],null,["http","server"],49,null,"http/ngx_http_ssl_module.html#ssl_certificate",{"insert":"ssl_certificate ${1:file};$0"}], -[2,"ssl_certificate_key",["file"],null,["http","server"],49,null,"http/ngx_http_ssl_module.html#ssl_certificate_key",{"insert":"ssl_certificate_key ${1:file};$0"}], -[2,"ssl_ciphers",["ciphers"],"ssl_ciphers HIGH:!aNULL:!MD5;",["http","server"],49,null,"http/ngx_http_ssl_module.html#ssl_ciphers",{"insert":"ssl_ciphers ${1:ciphers};$0"}], -[2,"ssl_client_certificate",["file"],null,["http","server"],49,null,"http/ngx_http_ssl_module.html#ssl_client_certificate",{"insert":"ssl_client_certificate ${1:file};$0"}], -[2,"ssl_conf_command",["name value"],null,["http","server"],49,"1.19.4","http/ngx_http_ssl_module.html#ssl_conf_command",{"insert":"ssl_conf_command ${1:name} ${2:value};$0"}], -[2,"ssl_crl",["file"],null,["http","server"],49,"0.8.7","http/ngx_http_ssl_module.html#ssl_crl",{"insert":"ssl_crl ${1:file};$0"}], -[2,"ssl_dhparam",["file"],null,["http","server"],49,"0.7.2","http/ngx_http_ssl_module.html#ssl_dhparam",{"insert":"ssl_dhparam ${1:file};$0"}], -[2,"ssl_early_data",["on | off"],"ssl_early_data off;",["http","server"],49,"1.15.3","http/ngx_http_ssl_module.html#ssl_early_data",{"insert":"ssl_early_data ${1|on,off|};$0"}], -[2,"ssl_ecdh_curve",["curve"],"ssl_ecdh_curve auto;",["http","server"],49,"1.1.0","http/ngx_http_ssl_module.html#ssl_ecdh_curve",{"insert":"ssl_ecdh_curve ${1:curve};$0"}], -[2,"ssl_ocsp",["on |\n off |\n leaf"],"ssl_ocsp off;",["http","server"],49,"1.19.0","http/ngx_http_ssl_module.html#ssl_ocsp",{"insert":"ssl_ocsp ${1:on | off | leaf};$0"}], -[2,"ssl_ocsp_cache",["off |\n [shared:name:size]"],"ssl_ocsp_cache off;",["http","server"],49,"1.19.0","http/ngx_http_ssl_module.html#ssl_ocsp_cache",{"insert":"ssl_ocsp_cache ${1:off | [shared:name:size]};$0"}], 
-[2,"ssl_ocsp_responder",["url"],null,["http","server"],49,"1.19.0","http/ngx_http_ssl_module.html#ssl_ocsp_responder",{"insert":"ssl_ocsp_responder ${1:url};$0"}], -[2,"ssl_password_file",["file"],null,["http","server"],49,"1.7.3","http/ngx_http_ssl_module.html#ssl_password_file",{"insert":"ssl_password_file ${1:file};$0"}], -[2,"ssl_prefer_server_ciphers",["on | off"],"ssl_prefer_server_ciphers off;",["http","server"],49,null,"http/ngx_http_ssl_module.html#ssl_prefer_server_ciphers",{"insert":"ssl_prefer_server_ciphers ${1|on,off|};$0"}], -[2,"ssl_protocols",["[SSLv2]\n [SSLv3]\n [TLSv1]\n [TLSv1.1]\n [TLSv1.2]\n [TLSv1.3]"],"ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;",["http","server"],49,null,"http/ngx_http_ssl_module.html#ssl_protocols",{"insert":"ssl_protocols ${1:[SSLv2] [SSLv3] [TLSv1] [TLSv1.1] [TLSv1.2] [TLSv1.3]};$0"}], -[2,"ssl_reject_handshake",["on | off"],"ssl_reject_handshake off;",["http","server"],49,"1.19.4","http/ngx_http_ssl_module.html#ssl_reject_handshake",{"insert":"ssl_reject_handshake ${1|on,off|};$0"}], -[2,"ssl_session_cache",["off |\n none |\n [builtin[:size]]\n [shared:name:size]"],"ssl_session_cache none;",["http","server"],49,null,"http/ngx_http_ssl_module.html#ssl_session_cache",{"insert":"ssl_session_cache ${1:off | none | [builtin[:size]] [shared:name:size]};$0"}], -[2,"ssl_session_ticket_key",["file"],null,["http","server"],49,"1.5.7","http/ngx_http_ssl_module.html#ssl_session_ticket_key",{"insert":"ssl_session_ticket_key ${1:file};$0"}], -[2,"ssl_session_tickets",["on | off"],"ssl_session_tickets on;",["http","server"],49,"1.5.9","http/ngx_http_ssl_module.html#ssl_session_tickets",{"insert":"ssl_session_tickets ${1|on,off|};$0"}], -[2,"ssl_session_timeout",["time"],"ssl_session_timeout 5m;",["http","server"],49,null,"http/ngx_http_ssl_module.html#ssl_session_timeout",{"insert":"ssl_session_timeout ${1:time};$0"}], -[2,"ssl_stapling",["on | off"],"ssl_stapling 
off;",["http","server"],49,"1.3.7","http/ngx_http_ssl_module.html#ssl_stapling",{"insert":"ssl_stapling ${1|on,off|};$0"}], -[2,"ssl_stapling_file",["file"],null,["http","server"],49,"1.3.7","http/ngx_http_ssl_module.html#ssl_stapling_file",{"insert":"ssl_stapling_file ${1:file};$0"}], -[2,"ssl_stapling_responder",["url"],null,["http","server"],49,"1.3.7","http/ngx_http_ssl_module.html#ssl_stapling_responder",{"insert":"ssl_stapling_responder ${1:url};$0"}], -[2,"ssl_stapling_verify",["on | off"],"ssl_stapling_verify off;",["http","server"],49,"1.3.7","http/ngx_http_ssl_module.html#ssl_stapling_verify",{"insert":"ssl_stapling_verify ${1|on,off|};$0"}], -[2,"ssl_trusted_certificate",["file"],null,["http","server"],49,"1.3.7","http/ngx_http_ssl_module.html#ssl_trusted_certificate",{"insert":"ssl_trusted_certificate ${1:file};$0"}], -[2,"ssl_verify_client",["on | off |\n optional | optional_no_ca"],"ssl_verify_client off;",["http","server"],49,null,"http/ngx_http_ssl_module.html#ssl_verify_client",{"insert":"ssl_verify_client ${1:on | off | optional | optional_no_ca};$0"}], -[2,"ssl_verify_depth",["number"],"ssl_verify_depth 1;",["http","server"],49,null,"http/ngx_http_ssl_module.html#ssl_verify_depth",{"insert":"ssl_verify_depth ${1:number};$0"}], -[3,"$ssl_alpn_protocol","returns the protocol selected by ALPN during the SSL handshake,\nor an empty string otherwise (1.21.4);",49,null,"http/ngx_http_ssl_module.html#var_ssl_alpn_protocol",null], -[3,"$ssl_cipher","returns the name of the cipher used\nfor an established SSL connection;",49,null,"http/ngx_http_ssl_module.html#var_ssl_cipher",null], -[3,"$ssl_ciphers","returns the list of ciphers supported by the client (1.11.7).\nKnown ciphers are listed by names, unknown are shown in hexadecimal,\nfor example:\nAES128-SHA:AES256-SHA:0x00ff\n\n\nThe variable is fully supported only when using OpenSSL version 1.0.2 or higher.\nWith older versions, the variable is available\nonly for new sessions and lists only known 
ciphers.",49,null,"http/ngx_http_ssl_module.html#var_ssl_ciphers",null], -[3,"$ssl_client_escaped_cert","returns the client certificate in the PEM format (urlencoded)\nfor an established SSL connection (1.13.5);",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_escaped_cert",null], -[3,"$ssl_client_cert","returns the client certificate in the PEM format\nfor an established SSL connection, with each line except the first\nprepended with the tab character;\nthis is intended for the use in the\nproxy_set_header directive;\n\nThe variable is deprecated,\nthe $ssl_client_escaped_cert variable should be used instead.",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_cert",null], -[3,"$ssl_client_fingerprint","returns the SHA1 fingerprint of the client certificate\nfor an established SSL connection (1.7.1);",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_fingerprint",null], -[3,"$ssl_client_i_dn","returns the “issuer DN” string of the client certificate\nfor an established SSL connection according to\nRFC 2253 (1.11.6);",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_i_dn",null], -[3,"$ssl_client_i_dn_legacy","returns the “issuer DN” string of the client certificate\nfor an established SSL connection;\n\nPrior to version 1.11.6, the variable name was $ssl_client_i_dn.",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_i_dn_legacy",null], -[3,"$ssl_client_raw_cert","returns the client certificate in the PEM format\nfor an established SSL connection;",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_raw_cert",null], -[3,"$ssl_client_s_dn","returns the “subject DN” string of the client certificate\nfor an established SSL connection according to\nRFC 2253 (1.11.6);",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_s_dn",null], -[3,"$ssl_client_s_dn_legacy","returns the “subject DN” string of the client certificate\nfor an established SSL connection;\n\nPrior to version 1.11.6, the variable name was 
$ssl_client_s_dn.",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_s_dn_legacy",null], -[3,"$ssl_client_serial","returns the serial number of the client certificate\nfor an established SSL connection;",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_serial",null], -[3,"$ssl_client_v_end","returns the end date of the client certificate (1.11.7);",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_v_end",null], -[3,"$ssl_client_v_remain","returns the number of days\nuntil the client certificate expires (1.11.7);",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_v_remain",null], -[3,"$ssl_client_v_start","returns the start date of the client certificate (1.11.7);",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_v_start",null], -[3,"$ssl_client_verify","returns the result of client certificate verification:\n“SUCCESS”, “FAILED:reason”,\nand “NONE” if a certificate was not present;\n\nPrior to version 1.11.7, the “FAILED” result\ndid not contain the reason string.",49,null,"http/ngx_http_ssl_module.html#var_ssl_client_verify",null], -[3,"$ssl_curve","returns the negotiated curve used for\nSSL handshake key exchange process (1.21.5).\nKnown curves are listed by names, unknown are shown in hexadecimal,\nfor example:\nprime256v1\n\n\nThe variable is supported only when using OpenSSL version 3.0 or higher.\nWith older versions, the variable value will be an empty string.",49,null,"http/ngx_http_ssl_module.html#var_ssl_curve",null], -[3,"$ssl_curves","returns the list of curves supported by the client (1.11.7).\nKnown curves are listed by names, unknown are shown in hexadecimal,\nfor example:\n0x001d:prime256v1:secp521r1:secp384r1\n\n\nThe variable is supported only when using OpenSSL version 1.0.2 or higher.\nWith older versions, the variable value will be an empty string.\n\n\nThe variable is available only for new sessions.",49,null,"http/ngx_http_ssl_module.html#var_ssl_curves",null], -[3,"$ssl_early_data","returns “1” if\nTLS 1.3 early data is 
used\nand the handshake is not complete, otherwise “” (1.15.3).",49,null,"http/ngx_http_ssl_module.html#var_ssl_early_data",null], -[3,"$ssl_protocol","returns the protocol of an established SSL connection;",49,null,"http/ngx_http_ssl_module.html#var_ssl_protocol",null], -[3,"$ssl_server_name","returns the server name requested through\nSNI\n(1.7.0);",49,null,"http/ngx_http_ssl_module.html#var_ssl_server_name",null], -[3,"$ssl_session_id","returns the session identifier of an established SSL connection;",49,null,"http/ngx_http_ssl_module.html#var_ssl_session_id",null], -[3,"$ssl_session_reused","returns “r” if an SSL session was reused,\nor “.” otherwise (1.5.11).",49,null,"http/ngx_http_ssl_module.html#var_ssl_session_reused",null], -[2,"status",[""],null,["location"],50,null,"http/ngx_http_status_module.html#status",{"insert":"status;$0"}], -[2,"status_format",["json","jsonp [callback]"],"status_format json;",["http","server","location"],50,null,"http/ngx_http_status_module.html#status_format",{}], -[2,"status_zone",["zone"],null,["server"],50,null,"http/ngx_http_status_module.html#status_zone",{"insert":"status_zone ${1:zone};$0"}], -[2,"stub_status",[""],null,["server","location"],51,null,"http/ngx_http_stub_status_module.html#stub_status",{"insert":"stub_status;$0"}], -[3,"$connections_active","same as the Active connections value;",51,null,"http/ngx_http_stub_status_module.html#var_connections_active",null], -[3,"$connections_reading","same as the Reading value;",51,null,"http/ngx_http_stub_status_module.html#var_connections_reading",null], -[3,"$connections_writing","same as the Writing value;",51,null,"http/ngx_http_stub_status_module.html#var_connections_writing",null], -[3,"$connections_waiting","same as the Waiting value.",51,null,"http/ngx_http_stub_status_module.html#var_connections_waiting",null], -[2,"sub_filter",["string replacement"],null,["http","server","location"],52,null,"http/ngx_http_sub_module.html#sub_filter",{"insert":"sub_filter 
${1:string} ${2:replacement};$0"}], -[2,"sub_filter_last_modified",["on | off"],"sub_filter_last_modified off;",["http","server","location"],52,"1.5.1","http/ngx_http_sub_module.html#sub_filter_last_modified",{"insert":"sub_filter_last_modified ${1|on,off|};$0"}], -[2,"sub_filter_once",["on | off"],"sub_filter_once on;",["http","server","location"],52,null,"http/ngx_http_sub_module.html#sub_filter_once",{"insert":"sub_filter_once ${1|on,off|};$0"}], -[2,"sub_filter_types",["mime-type ..."],"sub_filter_types text/html;",["http","server","location"],52,null,"http/ngx_http_sub_module.html#sub_filter_types",{"insert":"sub_filter_types ${1:mime-type ...};$0"}], -[2,"upstream",["name { ... }"],null,["http"],53,null,"http/ngx_http_upstream_module.html#upstream",{"insert":"upstream ${1:name { ... \\}};$0"}], -[2,"server",["address [parameters]"],null,["upstream"],53,null,"http/ngx_http_upstream_module.html#server",{"insert":"server ${1:address [parameters]};$0"}], -[2,"zone",["name [size]"],null,["upstream"],53,"1.9.0","http/ngx_http_upstream_module.html#zone",{"insert":"zone ${1:name [size]};$0"}], -[2,"state",["file"],null,["upstream"],53,"1.9.7","http/ngx_http_upstream_module.html#state",{"insert":"state ${1:file};$0"}], -[2,"hash",["key [consistent]"],null,["upstream"],53,"1.7.2","http/ngx_http_upstream_module.html#hash",{"insert":"hash ${1:key [consistent]};$0"}], -[2,"ip_hash",[""],null,["upstream"],53,null,"http/ngx_http_upstream_module.html#ip_hash",{"insert":"ip_hash;$0"}], -[2,"keepalive",["connections"],null,["upstream"],53,"1.1.4","http/ngx_http_upstream_module.html#keepalive",{"insert":"keepalive ${1:connections};$0"}], -[2,"keepalive_requests",["number"],"keepalive_requests 1000;",["upstream"],53,"1.15.3","http/ngx_http_upstream_module.html#keepalive_requests",{"insert":"keepalive_requests ${1:number};$0"}], -[2,"keepalive_time",["time"],"keepalive_time 
1h;",["upstream"],53,"1.19.10","http/ngx_http_upstream_module.html#keepalive_time",{"insert":"keepalive_time ${1:time};$0"}], -[2,"keepalive_timeout",["timeout"],"keepalive_timeout 60s;",["upstream"],53,"1.15.3","http/ngx_http_upstream_module.html#keepalive_timeout",{"insert":"keepalive_timeout ${1:timeout};$0"}], -[2,"ntlm",[""],null,["upstream"],53,"1.9.2","http/ngx_http_upstream_module.html#ntlm",{"insert":"ntlm;$0"}], -[2,"least_conn",[""],null,["upstream"],53,"1.3.1","http/ngx_http_upstream_module.html#least_conn",{"insert":"least_conn;$0"}], -[2,"least_time",["header |\n last_byte\n [inflight]"],null,["upstream"],53,"1.7.10","http/ngx_http_upstream_module.html#least_time",{"insert":"least_time ${1:header | last_byte [inflight]};$0"}], -[2,"queue",["number\n[timeout=time]"],null,["upstream"],53,"1.5.12","http/ngx_http_upstream_module.html#queue",{"insert":"queue ${1:number [timeout=time]};$0","args":["timeout=time"]}], -[2,"random",["[two [method]]"],null,["upstream"],53,"1.15.1","http/ngx_http_upstream_module.html#random",{"insert":"random ${1:[two [method]]};$0"}], -[2,"resolver",["address ...\n [valid=time]\n [ipv4=on|off]\n [ipv6=on|off]\n [status_zone=zone]"],null,["upstream"],53,"1.17.5","http/ngx_http_upstream_module.html#resolver",{"insert":"resolver ${1:address ... 
[valid=time] [ipv4=on|off] [ipv6=on|off] [status_zone=zone]};$0","args":["valid=time","ipv4=on","ipv6=on","status_zone=zone"]}], -[2,"resolver_timeout",["time"],"resolver_timeout 30s;",["upstream"],53,"1.17.5","http/ngx_http_upstream_module.html#resolver_timeout",{"insert":"resolver_timeout ${1:time};$0"}], -[2,"sticky",["cookie name\n [expires=time]\n [domain=domain]\n [httponly]\n [samesite=strict|lax|none|$variable]\n [secure]\n [path=path]","route $variable ...","learn\n create=$variable\n lookup=$variable\n zone=name:size\n [timeout=time]\n [header]\n [sync]"],null,["upstream"],53,"1.5.7","http/ngx_http_upstream_module.html#sticky",{"args":["expires=time","domain=domain","samesite=strict","path=path","zone=name","timeout=time"]}], -[2,"sticky_cookie_insert",["name\n[expires=time]\n[domain=domain]\n[path=path]"],null,["upstream"],53,null,"http/ngx_http_upstream_module.html#sticky_cookie_insert",{"insert":"sticky_cookie_insert ${1:name [expires=time] [domain=domain] [path=path]};$0","args":["expires=time","domain=domain","path=path"]}], -[3,"$upstream_addr","keeps the IP address and port,\nor the path to the UNIX-domain socket of the upstream server.\nIf several servers were contacted during request processing,\ntheir addresses are separated by commas, e.g.\n“192.168.1.1:80, 192.168.1.2:80, unix:/tmp/sock”.\nIf an internal redirect from one server group to another happens,\ninitiated by\n“X-Accel-Redirect” or\nerror_page,\nthen the server addresses from different groups are separated by colons, e.g.\n“192.168.1.1:80, 192.168.1.2:80, unix:/tmp/sock : 192.168.10.1:80, 192.168.10.2:80”.\nIf a server cannot be selected,\nthe variable keeps the name of the server group.",53,null,"http/ngx_http_upstream_module.html#var_upstream_addr",null], -[3,"$upstream_bytes_received","number of bytes received from an upstream server (1.11.4).\nValues from several connections\nare separated by commas and colons like addresses in the\n$upstream_addr 
variable.",53,null,"http/ngx_http_upstream_module.html#var_upstream_bytes_received",null], -[3,"$upstream_bytes_sent","number of bytes sent to an upstream server (1.15.8).\nValues from several connections\nare separated by commas and colons like addresses in the\n$upstream_addr variable.",53,null,"http/ngx_http_upstream_module.html#var_upstream_bytes_sent",null], -[3,"$upstream_cache_status","keeps the status of accessing a response cache (0.8.3).\nThe status can be either “MISS”,\n“BYPASS”, “EXPIRED”,\n“STALE”, “UPDATING”,\n“REVALIDATED”, or “HIT”.",53,null,"http/ngx_http_upstream_module.html#var_upstream_cache_status",null], -[3,"$upstream_connect_time","keeps time spent on establishing a connection with the upstream server (1.9.1);\nthe time is kept in seconds with millisecond resolution.\nIn case of SSL, includes time spent on handshake.\nTimes of several connections\nare separated by commas and colons like addresses in the\n$upstream_addr variable.",53,null,"http/ngx_http_upstream_module.html#var_upstream_connect_time",null], -[3,"$upstream_cookie_name","cookie with the specified name sent by the upstream server\nin the “Set-Cookie” response header field (1.7.1).\nOnly the cookies from the response of the last server are saved.",53,null,"http/ngx_http_upstream_module.html#var_upstream_cookie_",null], -[3,"$upstream_header_time","keeps time\nspent on receiving the response header from the upstream server (1.7.10);\nthe time is kept in seconds with millisecond resolution.\nTimes of several responses\nare separated by commas and colons like addresses in the\n$upstream_addr variable.",53,null,"http/ngx_http_upstream_module.html#var_upstream_header_time",null], -[3,"$upstream_http_name","keep server response header fields.\nFor example, the “Server” response header field\nis available through the $upstream_http_server variable.\nThe rules of converting header field names to variable names are the same\nas for the variables that start with the\n“$http_” 
prefix.\nOnly the header fields from the response of the last server are saved.",53,null,"http/ngx_http_upstream_module.html#var_upstream_http_",null], -[3,"$upstream_queue_time","keeps time the request spent in the upstream queue\n(1.13.9);\nthe time is kept in seconds with millisecond resolution.\nTimes of several responses\nare separated by commas and colons like addresses in the\n$upstream_addr variable.",53,null,"http/ngx_http_upstream_module.html#var_upstream_queue_time",null], -[3,"$upstream_response_length","keeps the length of the response obtained from the upstream server (0.7.27);\nthe length is kept in bytes.\nLengths of several responses\nare separated by commas and colons like addresses in the\n$upstream_addr variable.",53,null,"http/ngx_http_upstream_module.html#var_upstream_response_length",null], -[3,"$upstream_response_time","keeps time spent on receiving the response from the upstream server;\nthe time is kept in seconds with millisecond resolution.\nTimes of several responses\nare separated by commas and colons like addresses in the\n$upstream_addr variable.",53,null,"http/ngx_http_upstream_module.html#var_upstream_response_time",null], -[3,"$upstream_status","keeps status code of the response obtained from the upstream server.\nStatus codes of several responses\nare separated by commas and colons like addresses in the\n$upstream_addr variable.\nIf a server cannot be selected,\nthe variable keeps the 502 (Bad Gateway) status code.",53,null,"http/ngx_http_upstream_module.html#var_upstream_status",null], -[3,"$upstream_trailer_name","keeps fields from the end of the response\nobtained from the upstream server (1.13.10).",53,null,"http/ngx_http_upstream_module.html#var_upstream_trailer_",null], -[2,"upstream_conf",[""],null,["location"],54,null,"http/ngx_http_upstream_conf_module.html#upstream_conf",{"insert":"upstream_conf;$0"}], 
-[2,"health_check",["[parameters]"],null,["location"],55,null,"http/ngx_http_upstream_hc_module.html#health_check",{"insert":"health_check ${1:[parameters]};$0"}], -[2,"match",["name { ... }"],null,["http"],55,null,"http/ngx_http_upstream_hc_module.html#match",{"insert":"match ${1:name { ... \\}};$0"}], -[2,"userid",["on |\n v1 |\n log |\n off"],"userid off;",["http","server","location"],56,null,"http/ngx_http_userid_module.html#userid",{"insert":"userid ${1:on | v1 | log | off};$0"}], -[2,"userid_domain",["name | none"],"userid_domain none;",["http","server","location"],56,null,"http/ngx_http_userid_module.html#userid_domain",{"insert":"userid_domain ${1:name | none};$0"}], -[2,"userid_expires",["time | max |\n off"],"userid_expires off;",["http","server","location"],56,null,"http/ngx_http_userid_module.html#userid_expires",{"insert":"userid_expires ${1:time | max | off};$0"}], -[2,"userid_flags",["off |\n flag ..."],"userid_flags off;",["http","server","location"],56,"1.19.3","http/ngx_http_userid_module.html#userid_flags",{"insert":"userid_flags ${1:off | flag ...};$0"}], -[2,"userid_mark",["letter | digit |\n = |\n off"],"userid_mark off;",["http","server","location"],56,null,"http/ngx_http_userid_module.html#userid_mark",{"insert":"userid_mark ${1:letter | digit | = | off};$0"}], -[2,"userid_name",["name"],"userid_name uid;",["http","server","location"],56,null,"http/ngx_http_userid_module.html#userid_name",{"insert":"userid_name ${1:name};$0"}], -[2,"userid_p3p",["string | none"],"userid_p3p none;",["http","server","location"],56,null,"http/ngx_http_userid_module.html#userid_p3p",{"insert":"userid_p3p ${1:string | none};$0"}], -[2,"userid_path",["path"],"userid_path /;",["http","server","location"],56,null,"http/ngx_http_userid_module.html#userid_path",{"insert":"userid_path ${1:path};$0"}], -[2,"userid_service",["number"],"userid_service IP address of the 
server;",["http","server","location"],56,null,"http/ngx_http_userid_module.html#userid_service",{"insert":"userid_service ${1:number};$0"}], -[3,"$uid_got","The cookie name and received client identifier.",56,null,"http/ngx_http_userid_module.html#var_uid_got",null], -[3,"$uid_reset","If the variable is set to a non-empty string that is not “0”,\nthe client identifiers are reset.\nThe special value “log” additionally leads to the output of\nmessages about the reset identifiers to the\nerror_log.",56,null,"http/ngx_http_userid_module.html#var_uid_reset",null], -[3,"$uid_set","The cookie name and sent client identifier.",56,null,"http/ngx_http_userid_module.html#var_uid_set",null], -[2,"uwsgi_bind",["address\n [transparent] |\n off"],null,["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_bind",{"insert":"uwsgi_bind ${1:address [transparent] | off};$0"}], -[2,"uwsgi_buffer_size",["size"],"uwsgi_buffer_size 4k|8k;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_buffer_size",{"insert":"uwsgi_buffer_size ${1:size};$0"}], -[2,"uwsgi_buffering",["on | off"],"uwsgi_buffering on;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_buffering",{"insert":"uwsgi_buffering ${1|on,off|};$0"}], -[2,"uwsgi_buffers",["number size"],"uwsgi_buffers 8 4k|8k;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_buffers",{"insert":"uwsgi_buffers ${1:number} ${2:size};$0"}], -[2,"uwsgi_busy_buffers_size",["size"],"uwsgi_busy_buffers_size 8k|16k;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_busy_buffers_size",{"insert":"uwsgi_busy_buffers_size ${1:size};$0"}], -[2,"uwsgi_cache",["zone | off"],"uwsgi_cache off;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_cache",{"insert":"uwsgi_cache ${1:zone | off};$0"}], -[2,"uwsgi_cache_background_update",["on | off"],"uwsgi_cache_background_update 
off;",["http","server","location"],57,"1.11.10","http/ngx_http_uwsgi_module.html#uwsgi_cache_background_update",{"insert":"uwsgi_cache_background_update ${1|on,off|};$0"}], -[2,"uwsgi_cache_bypass",["string ..."],null,["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_cache_bypass",{"insert":"uwsgi_cache_bypass ${1:string ...};$0"}], -[2,"uwsgi_cache_key",["string"],null,["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_cache_key",{"insert":"uwsgi_cache_key ${1:string};$0"}], -[2,"uwsgi_cache_lock",["on | off"],"uwsgi_cache_lock off;",["http","server","location"],57,"1.1.12","http/ngx_http_uwsgi_module.html#uwsgi_cache_lock",{"insert":"uwsgi_cache_lock ${1|on,off|};$0"}], -[2,"uwsgi_cache_lock_age",["time"],"uwsgi_cache_lock_age 5s;",["http","server","location"],57,"1.7.8","http/ngx_http_uwsgi_module.html#uwsgi_cache_lock_age",{"insert":"uwsgi_cache_lock_age ${1:time};$0"}], -[2,"uwsgi_cache_lock_timeout",["time"],"uwsgi_cache_lock_timeout 5s;",["http","server","location"],57,"1.1.12","http/ngx_http_uwsgi_module.html#uwsgi_cache_lock_timeout",{"insert":"uwsgi_cache_lock_timeout ${1:time};$0"}], -[2,"uwsgi_cache_max_range_offset",["number"],null,["http","server","location"],57,"1.11.6","http/ngx_http_uwsgi_module.html#uwsgi_cache_max_range_offset",{"insert":"uwsgi_cache_max_range_offset ${1:number};$0"}], -[2,"uwsgi_cache_methods",["GET |\n HEAD |\n POST\n ..."],"uwsgi_cache_methods GET HEAD;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_cache_methods",{"insert":"uwsgi_cache_methods ${1:GET | HEAD | POST ...};$0"}], -[2,"uwsgi_cache_min_uses",["number"],"uwsgi_cache_min_uses 1;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_cache_min_uses",{"insert":"uwsgi_cache_min_uses ${1:number};$0"}], -[2,"uwsgi_cache_path",["path\n [levels=levels]\n [use_temp_path=on|off]\n keys_zone=name:size\n [inactive=time]\n [max_size=size]\n [min_free=size]\n 
[manager_files=number]\n [manager_sleep=time]\n [manager_threshold=time]\n [loader_files=number]\n [loader_sleep=time]\n [loader_threshold=time]\n [purger=on|off]\n [purger_files=number]\n [purger_sleep=time]\n [purger_threshold=time]"],null,["http"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_cache_path",{"insert":"uwsgi_cache_path ${1:path [levels=levels] [use_temp_path=on|off] keys_zone=name:size [inactive=time] [max_size=size] [min_free=size] [manager_files=number] [manager_sleep=time] [manager_threshold=time] [loader_files=number] [loader_sleep=time] [loader_threshold=time] [purger=on|off] [purger_files=number] [purger_sleep=time] [purger_threshold=time]};$0","args":["levels=levels","use_temp_path=on","keys_zone=name","inactive=time","max_size=size","min_free=size","manager_files=number","manager_sleep=time","manager_threshold=time","loader_files=number","loader_sleep=time","loader_threshold=time","purger=on","purger_files=number","purger_sleep=time","purger_threshold=time"]}], -[2,"uwsgi_cache_purge",["string ..."],null,["http","server","location"],57,"1.5.7","http/ngx_http_uwsgi_module.html#uwsgi_cache_purge",{"insert":"uwsgi_cache_purge ${1:string ...};$0"}], -[2,"uwsgi_cache_revalidate",["on | off"],"uwsgi_cache_revalidate off;",["http","server","location"],57,"1.5.7","http/ngx_http_uwsgi_module.html#uwsgi_cache_revalidate",{"insert":"uwsgi_cache_revalidate ${1|on,off|};$0"}], -[2,"uwsgi_cache_use_stale",["error |\n timeout |\n invalid_header |\n updating |\n http_500 |\n http_503 |\n http_403 |\n http_404 |\n http_429 |\n off\n ..."],"uwsgi_cache_use_stale off;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_cache_use_stale",{"insert":"uwsgi_cache_use_stale ${1:error | timeout | invalid_header | updating | http_500 | http_503 | http_403 | http_404 | http_429 | off ...};$0"}], -[2,"uwsgi_cache_valid",["[code ...] 
time"],null,["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_cache_valid",{"insert":"uwsgi_cache_valid ${1:[code ...] time};$0"}], -[2,"uwsgi_connect_timeout",["time"],"uwsgi_connect_timeout 60s;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_connect_timeout",{"insert":"uwsgi_connect_timeout ${1:time};$0"}], -[2,"uwsgi_force_ranges",["on | off"],"uwsgi_force_ranges off;",["http","server","location"],57,"1.7.7","http/ngx_http_uwsgi_module.html#uwsgi_force_ranges",{"insert":"uwsgi_force_ranges ${1|on,off|};$0"}], -[2,"uwsgi_hide_header",["field"],null,["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_hide_header",{"insert":"uwsgi_hide_header ${1:field};$0"}], -[2,"uwsgi_ignore_client_abort",["on | off"],"uwsgi_ignore_client_abort off;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_ignore_client_abort",{"insert":"uwsgi_ignore_client_abort ${1|on,off|};$0"}], -[2,"uwsgi_ignore_headers",["field ..."],null,["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_ignore_headers",{"insert":"uwsgi_ignore_headers ${1:field ...};$0"}], -[2,"uwsgi_intercept_errors",["on | off"],"uwsgi_intercept_errors off;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_intercept_errors",{"insert":"uwsgi_intercept_errors ${1|on,off|};$0"}], -[2,"uwsgi_limit_rate",["rate"],"uwsgi_limit_rate 0;",["http","server","location"],57,"1.7.7","http/ngx_http_uwsgi_module.html#uwsgi_limit_rate",{"insert":"uwsgi_limit_rate ${1:rate};$0"}], -[2,"uwsgi_max_temp_file_size",["size"],"uwsgi_max_temp_file_size 1024m;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_max_temp_file_size",{"insert":"uwsgi_max_temp_file_size ${1:size};$0"}], -[2,"uwsgi_modifier1",["number"],"uwsgi_modifier1 0;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_modifier1",{"insert":"uwsgi_modifier1 ${1:number};$0"}], 
-[2,"uwsgi_modifier2",["number"],"uwsgi_modifier2 0;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_modifier2",{"insert":"uwsgi_modifier2 ${1:number};$0"}], -[2,"uwsgi_next_upstream",["error |\n timeout |\n invalid_header |\n http_500 |\n http_503 |\n http_403 |\n http_404 |\n http_429 |\n non_idempotent |\n off\n ..."],"uwsgi_next_upstream error timeout;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_next_upstream",{"insert":"uwsgi_next_upstream ${1:error | timeout | invalid_header | http_500 | http_503 | http_403 | http_404 | http_429 | non_idempotent | off ...};$0"}], -[2,"uwsgi_next_upstream_timeout",["time"],"uwsgi_next_upstream_timeout 0;",["http","server","location"],57,"1.7.5","http/ngx_http_uwsgi_module.html#uwsgi_next_upstream_timeout",{"insert":"uwsgi_next_upstream_timeout ${1:time};$0"}], -[2,"uwsgi_next_upstream_tries",["number"],"uwsgi_next_upstream_tries 0;",["http","server","location"],57,"1.7.5","http/ngx_http_uwsgi_module.html#uwsgi_next_upstream_tries",{"insert":"uwsgi_next_upstream_tries ${1:number};$0"}], -[2,"uwsgi_no_cache",["string ..."],null,["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_no_cache",{"insert":"uwsgi_no_cache ${1:string ...};$0"}], -[2,"uwsgi_param",["parameter value\n [if_not_empty]"],null,["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_param",{"insert":"uwsgi_param ${1:parameter value [if_not_empty]};$0"}], -[2,"uwsgi_pass",["[protocol://]address"],null,["location","if"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_pass",{"insert":"uwsgi_pass ${1:[protocol://]address};$0"}], -[2,"uwsgi_pass_header",["field"],null,["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_pass_header",{"insert":"uwsgi_pass_header ${1:field};$0"}], -[2,"uwsgi_pass_request_body",["on | off"],"uwsgi_pass_request_body 
on;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_pass_request_body",{"insert":"uwsgi_pass_request_body ${1|on,off|};$0"}], -[2,"uwsgi_pass_request_headers",["on | off"],"uwsgi_pass_request_headers on;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_pass_request_headers",{"insert":"uwsgi_pass_request_headers ${1|on,off|};$0"}], -[2,"uwsgi_read_timeout",["time"],"uwsgi_read_timeout 60s;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_read_timeout",{"insert":"uwsgi_read_timeout ${1:time};$0"}], -[2,"uwsgi_request_buffering",["on | off"],"uwsgi_request_buffering on;",["http","server","location"],57,"1.7.11","http/ngx_http_uwsgi_module.html#uwsgi_request_buffering",{"insert":"uwsgi_request_buffering ${1|on,off|};$0"}], -[2,"uwsgi_send_timeout",["time"],"uwsgi_send_timeout 60s;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_send_timeout",{"insert":"uwsgi_send_timeout ${1:time};$0"}], -[2,"uwsgi_socket_keepalive",["on | off"],"uwsgi_socket_keepalive off;",["http","server","location"],57,"1.15.6","http/ngx_http_uwsgi_module.html#uwsgi_socket_keepalive",{"insert":"uwsgi_socket_keepalive ${1|on,off|};$0"}], -[2,"uwsgi_ssl_certificate",["file"],null,["http","server","location"],57,"1.7.8","http/ngx_http_uwsgi_module.html#uwsgi_ssl_certificate",{"insert":"uwsgi_ssl_certificate ${1:file};$0"}], -[2,"uwsgi_ssl_certificate_key",["file"],null,["http","server","location"],57,"1.7.8","http/ngx_http_uwsgi_module.html#uwsgi_ssl_certificate_key",{"insert":"uwsgi_ssl_certificate_key ${1:file};$0"}], -[2,"uwsgi_ssl_ciphers",["ciphers"],"uwsgi_ssl_ciphers DEFAULT;",["http","server","location"],57,"1.5.8","http/ngx_http_uwsgi_module.html#uwsgi_ssl_ciphers",{"insert":"uwsgi_ssl_ciphers ${1:ciphers};$0"}], -[2,"uwsgi_ssl_conf_command",["name 
value"],null,["http","server","location"],57,"1.19.4","http/ngx_http_uwsgi_module.html#uwsgi_ssl_conf_command",{"insert":"uwsgi_ssl_conf_command ${1:name} ${2:value};$0"}], -[2,"uwsgi_ssl_crl",["file"],null,["http","server","location"],57,"1.7.0","http/ngx_http_uwsgi_module.html#uwsgi_ssl_crl",{"insert":"uwsgi_ssl_crl ${1:file};$0"}], -[2,"uwsgi_ssl_name",["name"],"uwsgi_ssl_name host from uwsgi_pass;",["http","server","location"],57,"1.7.0","http/ngx_http_uwsgi_module.html#uwsgi_ssl_name",{"insert":"uwsgi_ssl_name ${1:name};$0"}], -[2,"uwsgi_ssl_password_file",["file"],null,["http","server","location"],57,"1.7.8","http/ngx_http_uwsgi_module.html#uwsgi_ssl_password_file",{"insert":"uwsgi_ssl_password_file ${1:file};$0"}], -[2,"uwsgi_ssl_protocols",["[SSLv2]\n [SSLv3]\n [TLSv1]\n [TLSv1.1]\n [TLSv1.2]\n [TLSv1.3]"],"uwsgi_ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;",["http","server","location"],57,"1.5.8","http/ngx_http_uwsgi_module.html#uwsgi_ssl_protocols",{"insert":"uwsgi_ssl_protocols ${1:[SSLv2] [SSLv3] [TLSv1] [TLSv1.1] [TLSv1.2] [TLSv1.3]};$0"}], -[2,"uwsgi_ssl_server_name",["on | off"],"uwsgi_ssl_server_name off;",["http","server","location"],57,"1.7.0","http/ngx_http_uwsgi_module.html#uwsgi_ssl_server_name",{"insert":"uwsgi_ssl_server_name ${1|on,off|};$0"}], -[2,"uwsgi_ssl_session_reuse",["on | off"],"uwsgi_ssl_session_reuse on;",["http","server","location"],57,"1.5.8","http/ngx_http_uwsgi_module.html#uwsgi_ssl_session_reuse",{"insert":"uwsgi_ssl_session_reuse ${1|on,off|};$0"}], -[2,"uwsgi_ssl_trusted_certificate",["file"],null,["http","server","location"],57,"1.7.0","http/ngx_http_uwsgi_module.html#uwsgi_ssl_trusted_certificate",{"insert":"uwsgi_ssl_trusted_certificate ${1:file};$0"}], -[2,"uwsgi_ssl_verify",["on | off"],"uwsgi_ssl_verify off;",["http","server","location"],57,"1.7.0","http/ngx_http_uwsgi_module.html#uwsgi_ssl_verify",{"insert":"uwsgi_ssl_verify ${1|on,off|};$0"}], -[2,"uwsgi_ssl_verify_depth",["number"],"uwsgi_ssl_verify_depth 
1;",["http","server","location"],57,"1.7.0","http/ngx_http_uwsgi_module.html#uwsgi_ssl_verify_depth",{"insert":"uwsgi_ssl_verify_depth ${1:number};$0"}], -[2,"uwsgi_store",["on |\n off |\n string"],"uwsgi_store off;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_store",{"insert":"uwsgi_store ${1:on | off | string};$0"}], -[2,"uwsgi_store_access",["users:permissions ..."],"uwsgi_store_access user:rw;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_store_access",{"insert":"uwsgi_store_access ${1:users:permissions ...};$0"}], -[2,"uwsgi_temp_file_write_size",["size"],"uwsgi_temp_file_write_size 8k|16k;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_temp_file_write_size",{"insert":"uwsgi_temp_file_write_size ${1:size};$0"}], -[2,"uwsgi_temp_path",["path\n [level1\n [level2\n [level3]]]"],"uwsgi_temp_path uwsgi_temp;",["http","server","location"],57,null,"http/ngx_http_uwsgi_module.html#uwsgi_temp_path",{"insert":"uwsgi_temp_path ${1:path [level1 [level2 [level3]]]};$0"}], -[2,"http2",["on | off"],"http2 off;",["http","server"],58,"1.25.1","http/ngx_http_v2_module.html#http2",{"insert":"http2 ${1|on,off|};$0"}], -[2,"http2_body_preread_size",["size"],"http2_body_preread_size 64k;",["http","server"],58,"1.11.0","http/ngx_http_v2_module.html#http2_body_preread_size",{"insert":"http2_body_preread_size ${1:size};$0"}], -[2,"http2_chunk_size",["size"],"http2_chunk_size 8k;",["http","server","location"],58,null,"http/ngx_http_v2_module.html#http2_chunk_size",{"insert":"http2_chunk_size ${1:size};$0"}], -[2,"http2_idle_timeout",["time"],"http2_idle_timeout 3m;",["http","server"],58,null,"http/ngx_http_v2_module.html#http2_idle_timeout",{"insert":"http2_idle_timeout ${1:time};$0"}], -[2,"http2_max_concurrent_pushes",["number"],"http2_max_concurrent_pushes 10;",["http","server"],58,"1.13.9","http/ngx_http_v2_module.html#http2_max_concurrent_pushes",{"insert":"http2_max_concurrent_pushes 
${1:number};$0"}], -[2,"http2_max_concurrent_streams",["number"],"http2_max_concurrent_streams 128;",["http","server"],58,null,"http/ngx_http_v2_module.html#http2_max_concurrent_streams",{"insert":"http2_max_concurrent_streams ${1:number};$0"}], -[2,"http2_max_field_size",["size"],"http2_max_field_size 4k;",["http","server"],58,null,"http/ngx_http_v2_module.html#http2_max_field_size",{"insert":"http2_max_field_size ${1:size};$0"}], -[2,"http2_max_header_size",["size"],"http2_max_header_size 16k;",["http","server"],58,null,"http/ngx_http_v2_module.html#http2_max_header_size",{"insert":"http2_max_header_size ${1:size};$0"}], -[2,"http2_max_requests",["number"],"http2_max_requests 1000;",["http","server"],58,"1.11.6","http/ngx_http_v2_module.html#http2_max_requests",{"insert":"http2_max_requests ${1:number};$0"}], -[2,"http2_push",["uri | off"],"http2_push off;",["http","server","location"],58,"1.13.9","http/ngx_http_v2_module.html#http2_push",{"insert":"http2_push ${1:uri | off};$0"}], -[2,"http2_push_preload",["on | off"],"http2_push_preload off;",["http","server","location"],58,"1.13.9","http/ngx_http_v2_module.html#http2_push_preload",{"insert":"http2_push_preload ${1|on,off|};$0"}], -[2,"http2_recv_buffer_size",["size"],"http2_recv_buffer_size 256k;",["http"],58,null,"http/ngx_http_v2_module.html#http2_recv_buffer_size",{"insert":"http2_recv_buffer_size ${1:size};$0"}], -[2,"http2_recv_timeout",["time"],"http2_recv_timeout 30s;",["http","server"],58,null,"http/ngx_http_v2_module.html#http2_recv_timeout",{"insert":"http2_recv_timeout ${1:time};$0"}], -[3,"$http2","negotiated protocol identifier:\n“h2” for HTTP/2 over TLS,\n“h2c” for HTTP/2 over cleartext TCP,\nor an empty string otherwise.",58,null,"http/ngx_http_v2_module.html#var_http2",null], -[2,"http3",["on | off"],"http3 on;",["http","server"],59,null,"http/ngx_http_v3_module.html#http3",{"insert":"http3 ${1|on,off|};$0"}], -[2,"http3_hq",["on | off"],"http3_hq 
off;",["http","server"],59,null,"http/ngx_http_v3_module.html#http3_hq",{"insert":"http3_hq ${1|on,off|};$0"}], -[2,"http3_max_concurrent_streams",["number"],"http3_max_concurrent_streams 128;",["http","server"],59,null,"http/ngx_http_v3_module.html#http3_max_concurrent_streams",{"insert":"http3_max_concurrent_streams ${1:number};$0"}], -[2,"http3_stream_buffer_size",["size"],"http3_stream_buffer_size 64k;",["http","server"],59,null,"http/ngx_http_v3_module.html#http3_stream_buffer_size",{"insert":"http3_stream_buffer_size ${1:size};$0"}], -[2,"quic_active_connection_id_limit",["number"],"quic_active_connection_id_limit 2;",["http","server"],59,null,"http/ngx_http_v3_module.html#quic_active_connection_id_limit",{"insert":"quic_active_connection_id_limit ${1:number};$0"}], -[2,"quic_bpf",["on | off"],"quic_bpf off;",["main"],59,null,"http/ngx_http_v3_module.html#quic_bpf",{"insert":"quic_bpf ${1|on,off|};$0"}], -[2,"quic_gso",["on | off"],"quic_gso off;",["http","server"],59,null,"http/ngx_http_v3_module.html#quic_gso",{"insert":"quic_gso ${1|on,off|};$0"}], -[2,"quic_host_key",["file"],null,["http","server"],59,null,"http/ngx_http_v3_module.html#quic_host_key",{"insert":"quic_host_key ${1:file};$0"}], -[2,"quic_retry",["on | off"],"quic_retry off;",["http","server"],59,null,"http/ngx_http_v3_module.html#quic_retry",{"insert":"quic_retry ${1|on,off|};$0"}], -[3,"$http3","negotiated protocol identifier:\n“h3” for HTTP/3 connections,\n“hq” for hq connections,\nor an empty string otherwise.",59,null,"http/ngx_http_v3_module.html#var_http3",null], -[2,"xml_entities",["path"],null,["http","server","location"],60,null,"http/ngx_http_xslt_module.html#xml_entities",{"insert":"xml_entities ${1:path};$0"}], -[2,"xslt_last_modified",["on | off"],"xslt_last_modified off;",["http","server","location"],60,"1.5.1","http/ngx_http_xslt_module.html#xslt_last_modified",{"insert":"xslt_last_modified ${1|on,off|};$0"}], -[2,"xslt_param",["parameter 
value"],null,["http","server","location"],60,"1.1.18","http/ngx_http_xslt_module.html#xslt_param",{"insert":"xslt_param ${1:parameter} ${2:value};$0"}], -[2,"xslt_string_param",["parameter value"],null,["http","server","location"],60,"1.1.18","http/ngx_http_xslt_module.html#xslt_string_param",{"insert":"xslt_string_param ${1:parameter} ${2:value};$0"}], -[2,"xslt_stylesheet",["stylesheet\n [parameter=value ...]"],null,["location"],60,null,"http/ngx_http_xslt_module.html#xslt_stylesheet",{"insert":"xslt_stylesheet ${1:stylesheet [parameter=value ...]};$0","args":["parameter=value"]}], -[2,"xslt_types",["mime-type ..."],"xslt_types text/xml;",["http","server","location"],60,null,"http/ngx_http_xslt_module.html#xslt_types",{"insert":"xslt_types ${1:mime-type ...};$0"}], -[2,"listen",["address:port\n [ssl]\n [proxy_protocol]\n [backlog=number]\n [rcvbuf=size]\n [sndbuf=size]\n [bind]\n [ipv6only=on|off]\n [so_keepalive=on|off|[keepidle]:[keepintvl]:[keepcnt]]"],null,["server"],61,null,"mail/ngx_mail_core_module.html#listen",{"insert":"listen ${1:address:port [ssl] [proxy_protocol] [backlog=number] [rcvbuf=size] [sndbuf=size] [bind] [ipv6only=on|off] [so_keepalive=on|off|[keepidle]:[keepintvl]:[keepcnt]]};$0","args":["backlog=number","rcvbuf=size","sndbuf=size","ipv6only=on","so_keepalive=on"]}], -[2,"mail",["{ ... }"],null,["main"],61,null,"mail/ngx_mail_core_module.html#mail",{"insert":"mail ${1:{ ... 
\\}};$0"}], -[2,"max_errors",["number"],"max_errors 5;",["mail","server"],61,"1.21.0","mail/ngx_mail_core_module.html#max_errors",{"insert":"max_errors ${1:number};$0"}], -[2,"protocol",["imap |\n pop3 |\n smtp"],null,["server"],61,null,"mail/ngx_mail_core_module.html#protocol",{"insert":"protocol ${1:imap | pop3 | smtp};$0"}], -[2,"resolver",["address ...\n [valid=time]\n [ipv4=on|off]\n [ipv6=on|off]\n [status_zone=zone]","off"],"resolver off;",["mail","server"],61,null,"mail/ngx_mail_core_module.html#resolver",{"args":["valid=time","ipv4=on","ipv6=on","status_zone=zone"]}], -[2,"resolver_timeout",["time"],"resolver_timeout 30s;",["mail","server"],61,null,"mail/ngx_mail_core_module.html#resolver_timeout",{"insert":"resolver_timeout ${1:time};$0"}], -[2,"server",["{ ... }"],null,["mail"],61,null,"mail/ngx_mail_core_module.html#server",{"insert":"server ${1:{ ... \\}};$0"}], -[2,"server_name",["name"],"server_name hostname;",["mail","server"],61,null,"mail/ngx_mail_core_module.html#server_name",{"insert":"server_name ${1:name};$0"}], -[2,"timeout",["time"],"timeout 60s;",["mail","server"],61,null,"mail/ngx_mail_core_module.html#timeout",{"insert":"timeout ${1:time};$0"}], -[2,"auth_http",["URL"],null,["mail","server"],62,null,"mail/ngx_mail_auth_http_module.html#auth_http",{"insert":"auth_http ${1:URL};$0"}], -[2,"auth_http_header",["header value"],null,["mail","server"],62,null,"mail/ngx_mail_auth_http_module.html#auth_http_header",{"insert":"auth_http_header ${1:header} ${2:value};$0"}], -[2,"auth_http_pass_client_cert",["on | off"],"auth_http_pass_client_cert off;",["mail","server"],62,"1.7.11","mail/ngx_mail_auth_http_module.html#auth_http_pass_client_cert",{"insert":"auth_http_pass_client_cert ${1|on,off|};$0"}], -[2,"auth_http_timeout",["time"],"auth_http_timeout 60s;",["mail","server"],62,null,"mail/ngx_mail_auth_http_module.html#auth_http_timeout",{"insert":"auth_http_timeout ${1:time};$0"}], -[2,"proxy_buffer",["size"],"proxy_buffer 
4k|8k;",["mail","server"],63,null,"mail/ngx_mail_proxy_module.html#proxy_buffer",{"insert":"proxy_buffer ${1:size};$0"}], -[2,"proxy_pass_error_message",["on | off"],"proxy_pass_error_message off;",["mail","server"],63,null,"mail/ngx_mail_proxy_module.html#proxy_pass_error_message",{"insert":"proxy_pass_error_message ${1|on,off|};$0"}], -[2,"proxy_protocol",["on | off"],"proxy_protocol off;",["mail","server"],63,"1.19.8","mail/ngx_mail_proxy_module.html#proxy_protocol",{"insert":"proxy_protocol ${1|on,off|};$0"}], -[2,"proxy_smtp_auth",["on | off"],"proxy_smtp_auth off;",["mail","server"],63,"1.19.4","mail/ngx_mail_proxy_module.html#proxy_smtp_auth",{"insert":"proxy_smtp_auth ${1|on,off|};$0"}], -[2,"proxy_timeout",["timeout"],"proxy_timeout 24h;",["mail","server"],63,null,"mail/ngx_mail_proxy_module.html#proxy_timeout",{"insert":"proxy_timeout ${1:timeout};$0"}], -[2,"xclient",["on | off"],"xclient on;",["mail","server"],63,null,"mail/ngx_mail_proxy_module.html#xclient",{"insert":"xclient ${1|on,off|};$0"}], -[2,"set_real_ip_from",["address |\n CIDR |\n unix:"],null,["mail","server"],64,null,"mail/ngx_mail_realip_module.html#set_real_ip_from",{"insert":"set_real_ip_from ${1:address | CIDR | unix:};$0"}], -[2,"ssl",["on | off"],"ssl off;",["mail","server"],65,null,"mail/ngx_mail_ssl_module.html#ssl",{"insert":"ssl ${1|on,off|};$0"}], -[2,"ssl_certificate",["file"],null,["mail","server"],65,null,"mail/ngx_mail_ssl_module.html#ssl_certificate",{"insert":"ssl_certificate ${1:file};$0"}], -[2,"ssl_certificate_key",["file"],null,["mail","server"],65,null,"mail/ngx_mail_ssl_module.html#ssl_certificate_key",{"insert":"ssl_certificate_key ${1:file};$0"}], -[2,"ssl_ciphers",["ciphers"],"ssl_ciphers HIGH:!aNULL:!MD5;",["mail","server"],65,null,"mail/ngx_mail_ssl_module.html#ssl_ciphers",{"insert":"ssl_ciphers ${1:ciphers};$0"}], 
-[2,"ssl_client_certificate",["file"],null,["mail","server"],65,"1.7.11","mail/ngx_mail_ssl_module.html#ssl_client_certificate",{"insert":"ssl_client_certificate ${1:file};$0"}], -[2,"ssl_conf_command",["name value"],null,["mail","server"],65,"1.19.4","mail/ngx_mail_ssl_module.html#ssl_conf_command",{"insert":"ssl_conf_command ${1:name} ${2:value};$0"}], -[2,"ssl_crl",["file"],null,["mail","server"],65,"1.7.11","mail/ngx_mail_ssl_module.html#ssl_crl",{"insert":"ssl_crl ${1:file};$0"}], -[2,"ssl_dhparam",["file"],null,["mail","server"],65,"0.7.2","mail/ngx_mail_ssl_module.html#ssl_dhparam",{"insert":"ssl_dhparam ${1:file};$0"}], -[2,"ssl_ecdh_curve",["curve"],"ssl_ecdh_curve auto;",["mail","server"],65,"1.1.0","mail/ngx_mail_ssl_module.html#ssl_ecdh_curve",{"insert":"ssl_ecdh_curve ${1:curve};$0"}], -[2,"ssl_password_file",["file"],null,["mail","server"],65,"1.7.3","mail/ngx_mail_ssl_module.html#ssl_password_file",{"insert":"ssl_password_file ${1:file};$0"}], -[2,"ssl_prefer_server_ciphers",["on | off"],"ssl_prefer_server_ciphers off;",["mail","server"],65,null,"mail/ngx_mail_ssl_module.html#ssl_prefer_server_ciphers",{"insert":"ssl_prefer_server_ciphers ${1|on,off|};$0"}], -[2,"ssl_protocols",["[SSLv2]\n [SSLv3]\n [TLSv1]\n [TLSv1.1]\n [TLSv1.2]\n [TLSv1.3]"],"ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;",["mail","server"],65,null,"mail/ngx_mail_ssl_module.html#ssl_protocols",{"insert":"ssl_protocols ${1:[SSLv2] [SSLv3] [TLSv1] [TLSv1.1] [TLSv1.2] [TLSv1.3]};$0"}], -[2,"ssl_session_cache",["off |\n none |\n [builtin[:size]]\n [shared:name:size]"],"ssl_session_cache none;",["mail","server"],65,null,"mail/ngx_mail_ssl_module.html#ssl_session_cache",{"insert":"ssl_session_cache ${1:off | none | [builtin[:size]] [shared:name:size]};$0"}], -[2,"ssl_session_ticket_key",["file"],null,["mail","server"],65,"1.5.7","mail/ngx_mail_ssl_module.html#ssl_session_ticket_key",{"insert":"ssl_session_ticket_key ${1:file};$0"}], -[2,"ssl_session_tickets",["on | 
off"],"ssl_session_tickets on;",["mail","server"],65,"1.5.9","mail/ngx_mail_ssl_module.html#ssl_session_tickets",{"insert":"ssl_session_tickets ${1|on,off|};$0"}], -[2,"ssl_session_timeout",["time"],"ssl_session_timeout 5m;",["mail","server"],65,null,"mail/ngx_mail_ssl_module.html#ssl_session_timeout",{"insert":"ssl_session_timeout ${1:time};$0"}], -[2,"ssl_trusted_certificate",["file"],null,["mail","server"],65,"1.7.11","mail/ngx_mail_ssl_module.html#ssl_trusted_certificate",{"insert":"ssl_trusted_certificate ${1:file};$0"}], -[2,"ssl_verify_client",["on | off |\n optional | optional_no_ca"],"ssl_verify_client off;",["mail","server"],65,"1.7.11","mail/ngx_mail_ssl_module.html#ssl_verify_client",{"insert":"ssl_verify_client ${1:on | off | optional | optional_no_ca};$0"}], -[2,"ssl_verify_depth",["number"],"ssl_verify_depth 1;",["mail","server"],65,"1.7.11","mail/ngx_mail_ssl_module.html#ssl_verify_depth",{"insert":"ssl_verify_depth ${1:number};$0"}], -[2,"starttls",["on |\n off |\n only"],"starttls off;",["mail","server"],65,null,"mail/ngx_mail_ssl_module.html#starttls",{"insert":"starttls ${1:on | off | only};$0"}], -[2,"imap_auth",["method ..."],"imap_auth plain;",["mail","server"],66,null,"mail/ngx_mail_imap_module.html#imap_auth",{"insert":"imap_auth ${1:method ...};$0"}], -[2,"imap_capabilities",["extension ..."],"imap_capabilities IMAP4 IMAP4rev1 UIDPLUS;",["mail","server"],66,null,"mail/ngx_mail_imap_module.html#imap_capabilities",{"insert":"imap_capabilities ${1:extension ...};$0"}], -[2,"imap_client_buffer",["size"],"imap_client_buffer 4k|8k;",["mail","server"],66,null,"mail/ngx_mail_imap_module.html#imap_client_buffer",{"insert":"imap_client_buffer ${1:size};$0"}], -[2,"pop3_auth",["method ..."],"pop3_auth plain;",["mail","server"],67,null,"mail/ngx_mail_pop3_module.html#pop3_auth",{"insert":"pop3_auth ${1:method ...};$0"}], -[2,"pop3_capabilities",["extension ..."],"pop3_capabilities TOP USER 
UIDL;",["mail","server"],67,null,"mail/ngx_mail_pop3_module.html#pop3_capabilities",{"insert":"pop3_capabilities ${1:extension ...};$0"}], -[2,"smtp_auth",["method ..."],"smtp_auth plain login;",["mail","server"],68,null,"mail/ngx_mail_smtp_module.html#smtp_auth",{"insert":"smtp_auth ${1:method ...};$0"}], -[2,"smtp_capabilities",["extension ..."],null,["mail","server"],68,null,"mail/ngx_mail_smtp_module.html#smtp_capabilities",{"insert":"smtp_capabilities ${1:extension ...};$0"}], -[2,"smtp_client_buffer",["size"],"smtp_client_buffer 4k|8k;",["mail","server"],68,null,"mail/ngx_mail_smtp_module.html#smtp_client_buffer",{"insert":"smtp_client_buffer ${1:size};$0"}], -[2,"smtp_greeting_delay",["time"],"smtp_greeting_delay 0;",["mail","server"],68,null,"mail/ngx_mail_smtp_module.html#smtp_greeting_delay",{"insert":"smtp_greeting_delay ${1:time};$0"}], -[2,"listen",["address:port\n [ssl]\n [udp]\n [proxy_protocol]\n [fastopen=number]\n [backlog=number]\n [rcvbuf=size]\n [sndbuf=size]\n [bind]\n [ipv6only=on|off]\n [reuseport]\n [so_keepalive=on|off|[keepidle]:[keepintvl]:[keepcnt]]"],null,["server"],69,null,"stream/ngx_stream_core_module.html#listen",{"insert":"listen ${1:address:port [ssl] [udp] [proxy_protocol] [fastopen=number] [backlog=number] [rcvbuf=size] [sndbuf=size] [bind] [ipv6only=on|off] [reuseport] [so_keepalive=on|off|[keepidle]:[keepintvl]:[keepcnt]]};$0","args":["fastopen=number","backlog=number","rcvbuf=size","sndbuf=size","ipv6only=on","so_keepalive=on"]}], -[2,"preread_buffer_size",["size"],"preread_buffer_size 16k;",["stream","server"],69,"1.11.5","stream/ngx_stream_core_module.html#preread_buffer_size",{"insert":"preread_buffer_size ${1:size};$0"}], -[2,"preread_timeout",["timeout"],"preread_timeout 30s;",["stream","server"],69,"1.11.5","stream/ngx_stream_core_module.html#preread_timeout",{"insert":"preread_timeout ${1:timeout};$0"}], -[2,"proxy_protocol_timeout",["timeout"],"proxy_protocol_timeout 
30s;",["stream","server"],69,"1.11.4","stream/ngx_stream_core_module.html#proxy_protocol_timeout",{"insert":"proxy_protocol_timeout ${1:timeout};$0"}], -[2,"resolver",["address ...\n [valid=time]\n [ipv4=on|off]\n [ipv6=on|off]\n [status_zone=zone]"],null,["stream","server"],69,"1.11.3","stream/ngx_stream_core_module.html#resolver",{"insert":"resolver ${1:address ... [valid=time] [ipv4=on|off] [ipv6=on|off] [status_zone=zone]};$0","args":["valid=time","ipv4=on","ipv6=on","status_zone=zone"]}], -[2,"resolver_timeout",["time"],"resolver_timeout 30s;",["stream","server"],69,"1.11.3","stream/ngx_stream_core_module.html#resolver_timeout",{"insert":"resolver_timeout ${1:time};$0"}], -[2,"server",["{ ... }"],null,["stream"],69,null,"stream/ngx_stream_core_module.html#server",{"insert":"server ${1:{ ... \\}};$0"}], -[2,"stream",["{ ... }"],null,["main"],69,null,"stream/ngx_stream_core_module.html#stream",{"insert":"stream ${1:{ ... \\}};$0"}], -[2,"tcp_nodelay",["on | off"],"tcp_nodelay on;",["stream","server"],69,"1.9.4","stream/ngx_stream_core_module.html#tcp_nodelay",{"insert":"tcp_nodelay ${1|on,off|};$0"}], -[2,"variables_hash_bucket_size",["size"],"variables_hash_bucket_size 64;",["stream"],69,"1.11.2","stream/ngx_stream_core_module.html#variables_hash_bucket_size",{"insert":"variables_hash_bucket_size ${1:size};$0"}], -[2,"variables_hash_max_size",["size"],"variables_hash_max_size 1024;",["stream"],69,"1.11.2","stream/ngx_stream_core_module.html#variables_hash_max_size",{"insert":"variables_hash_max_size ${1:size};$0"}], -[3,"$binary_remote_addr","client address in a binary form, value’s length is always 4 bytes\nfor IPv4 addresses or 16 bytes for IPv6 addresses",69,null,"stream/ngx_stream_core_module.html#var_binary_remote_addr",null], -[3,"$bytes_received","number of bytes received from a client (1.11.4)",69,null,"stream/ngx_stream_core_module.html#var_bytes_received",null], -[3,"$bytes_sent","number of bytes sent to a 
client",69,null,"stream/ngx_stream_core_module.html#var_bytes_sent",null], -[3,"$connection","connection serial number",69,null,"stream/ngx_stream_core_module.html#var_connection",null], -[3,"$hostname","host name",69,null,"stream/ngx_stream_core_module.html#var_hostname",null], -[3,"$msec","current time in seconds with the milliseconds resolution",69,null,"stream/ngx_stream_core_module.html#var_msec",null], -[3,"$nginx_version","nginx version",69,null,"stream/ngx_stream_core_module.html#var_nginx_version",null], -[3,"$pid","PID of the worker process",69,null,"stream/ngx_stream_core_module.html#var_pid",null], -[3,"$protocol","protocol used to communicate with the client:\nTCP or UDP (1.11.4)",69,null,"stream/ngx_stream_core_module.html#var_protocol",null], -[3,"$proxy_protocol_addr","client address from the PROXY protocol header (1.11.4)\n\nThe PROXY protocol must be previously enabled by setting the\nproxy_protocol parameter\nin the listen directive.",69,null,"stream/ngx_stream_core_module.html#var_proxy_protocol_addr",null], -[3,"$proxy_protocol_port","client port from the PROXY protocol header (1.11.4)\n\nThe PROXY protocol must be previously enabled by setting the\nproxy_protocol parameter\nin the listen directive.",69,null,"stream/ngx_stream_core_module.html#var_proxy_protocol_port",null], -[3,"$proxy_protocol_server_addr","server address from the PROXY protocol header (1.17.6)\n\nThe PROXY protocol must be previously enabled by setting the\nproxy_protocol parameter\nin the listen directive.",69,null,"stream/ngx_stream_core_module.html#var_proxy_protocol_server_addr",null], -[3,"$proxy_protocol_server_port","server port from the PROXY protocol header (1.17.6)\n\nThe PROXY protocol must be previously enabled by setting the\nproxy_protocol parameter\nin the listen directive.",69,null,"stream/ngx_stream_core_module.html#var_proxy_protocol_server_port",null], -[3,"$proxy_protocol_tlv_name","TLV from the PROXY Protocol header (1.23.2).\nThe name can be a TLV type 
name or its numeric value.\nIn the latter case, the value is hexadecimal\nand should be prefixed with 0x:\n\n$proxy_protocol_tlv_alpn\n$proxy_protocol_tlv_0x01\n\nSSL TLVs can also be accessed by TLV type name or its numeric value,\nboth prefixed by ssl_:\n$proxy_protocol_tlv_ssl_version\n$proxy_protocol_tlv_ssl_0x21\n\n\n\nThe following TLV type names are supported:\n \n\n\nalpn (0x01) - \nupper layer protocol used over the connection\n\n\n\nauthority (0x02) - \nhost name value passed by the client\n\n\n\nunique_id (0x05) - \nunique connection id\n\n\n\nnetns (0x30) - \nname of the namespace\n\n\n\nssl (0x20) - \nbinary SSL TLV structure\n\n\n \n\n\n\nThe following SSL TLV type names are supported:\n \n\n\nssl_version (0x21) - \nSSL version used in client connection\n\n\n\nssl_cn (0x22) - \nSSL certificate Common Name\n\n\n\nssl_cipher (0x23) - \nname of the used cipher\n\n\n\nssl_sig_alg (0x24) - \nalgorithm used to sign the certificate\n\n\n\nssl_key_alg (0x25) - \npublic-key algorithm\n\n\n \n\n\n\nAlso, the following special SSL TLV type name is supported:\n \n\n\nssl_verify - \nclient SSL certificate verification result,\nzero if the client presented a certificate\nand it was successfully verified, and non-zero otherwise\n\n\n \n\n\n\nThe PROXY protocol must be previously enabled by setting the\nproxy_protocol parameter\nin the listen directive.",69,null,"stream/ngx_stream_core_module.html#var_proxy_protocol_tlv_",null], -[3,"$remote_addr","client address",69,null,"stream/ngx_stream_core_module.html#var_remote_addr",null], -[3,"$remote_port","client port",69,null,"stream/ngx_stream_core_module.html#var_remote_port",null], -[3,"$server_addr","an address of the server which accepted a connection\n\nComputing a value of this variable usually requires one system call.\nTo avoid a system call, the listen directives\nmust specify addresses and use the bind parameter.",69,null,"stream/ngx_stream_core_module.html#var_server_addr",null], -[3,"$server_port","port of 
the server which accepted a connection",69,null,"stream/ngx_stream_core_module.html#var_server_port",null], -[3,"$session_time","session duration in seconds with a milliseconds resolution\n(1.11.4);",69,null,"stream/ngx_stream_core_module.html#var_session_time",null], -[3,"$status","session status (1.11.4), can be one of the following:\n\n\n200\n\nsession completed successfully\n\n\n400\n\nclient data could not be parsed, for example,\nthe PROXY protocol header\n\n\n403\n\naccess forbidden, for example, when access is limited for\ncertain client addresses\n\n\n500\n\ninternal server error\n\n\n502\n\nbad gateway, for example,\nif an upstream server could not be selected or reached.\n\n\n503\n\nservice unavailable, for example, when access is limited by the\nnumber of connections",69,null,"stream/ngx_stream_core_module.html#var_status",null], -[3,"$time_iso8601","local time in the ISO 8601 standard format",69,null,"stream/ngx_stream_core_module.html#var_time_iso8601",null], -[3,"$time_local","local time in the Common Log Format",69,null,"stream/ngx_stream_core_module.html#var_time_local",null], -[2,"allow",["address |\n CIDR |\n unix: |\n all"],null,["stream","server"],70,null,"stream/ngx_stream_access_module.html#allow",{"insert":"allow ${1:address | CIDR | unix: | all};$0"}], -[2,"deny",["address |\n CIDR |\n unix: |\n all"],null,["stream","server"],70,null,"stream/ngx_stream_access_module.html#deny",{"insert":"deny ${1:address | CIDR | unix: | all};$0"}], -[2,"geo",["[$address] $variable { ... }"],null,["stream"],71,null,"stream/ngx_stream_geo_module.html#geo",{"insert":"geo ${1:[$address] $variable { ... 
\\}};$0"}], -[2,"geoip_country",["file"],null,["stream"],72,null,"stream/ngx_stream_geoip_module.html#geoip_country",{"insert":"geoip_country ${1:file};$0"}], -[2,"geoip_city",["file"],null,["stream"],72,null,"stream/ngx_stream_geoip_module.html#geoip_city",{"insert":"geoip_city ${1:file};$0"}], -[2,"geoip_org",["file"],null,["stream"],72,null,"stream/ngx_stream_geoip_module.html#geoip_org",{"insert":"geoip_org ${1:file};$0"}], -[2,"keyval",["key\n $variable\n zone=name"],null,["stream"],73,null,"stream/ngx_stream_keyval_module.html#keyval",{"insert":"keyval ${1:key $variable zone=name};$0","args":["zone=name"]}], -[2,"keyval_zone",["zone=name:size\n [state=file]\n [timeout=time]\n [type=string|ip|prefix]\n [sync]"],null,["stream"],73,null,"stream/ngx_stream_keyval_module.html#keyval_zone",{"insert":"keyval_zone ${1:zone=name:size [state=file] [timeout=time] [type=string|ip|prefix] [sync]};$0","args":["state=file","timeout=time","type=string"]}], -[2,"limit_conn",["zone number"],null,["stream","server"],74,null,"stream/ngx_stream_limit_conn_module.html#limit_conn",{"insert":"limit_conn ${1:zone} ${2:number};$0"}], -[2,"limit_conn_dry_run",["on | off"],"limit_conn_dry_run off;",["stream","server"],74,"1.17.6","stream/ngx_stream_limit_conn_module.html#limit_conn_dry_run",{"insert":"limit_conn_dry_run ${1|on,off|};$0"}], -[2,"limit_conn_log_level",["info |\nnotice |\nwarn |\nerror"],"limit_conn_log_level error;",["stream","server"],74,null,"stream/ngx_stream_limit_conn_module.html#limit_conn_log_level",{"insert":"limit_conn_log_level ${1:info | notice | warn | error};$0"}], -[2,"limit_conn_zone",["key\n zone=name:size"],null,["stream"],74,null,"stream/ngx_stream_limit_conn_module.html#limit_conn_zone",{"insert":"limit_conn_zone ${1:key zone=name:size};$0","args":["zone=name"]}], -[3,"$limit_conn_status","keeps the result of limiting the number of connections (1.17.6):\nPASSED,\nREJECTED, 
or\nREJECTED_DRY_RUN",74,null,"stream/ngx_stream_limit_conn_module.html#var_limit_conn_status",null], -[2,"access_log",["path\n format\n [buffer=size]\n [gzip[=level]]\n [flush=time]\n [if=condition]","off"],"access_log off;",["stream","server"],75,null,"stream/ngx_stream_log_module.html#access_log",{"args":["buffer=size","?gzip=level","flush=time","if=condition"]}], -[2,"log_format",["name\n [escape=default|json|none]\n string ..."],null,["stream"],75,null,"stream/ngx_stream_log_module.html#log_format",{"insert":"log_format ${1:name [escape=default|json|none] string ...};$0","args":["escape=default"]}], -[2,"open_log_file_cache",["max=N\n[inactive=time]\n[min_uses=N]\n[valid=time]","off"],"open_log_file_cache off;",["stream","server"],75,null,"stream/ngx_stream_log_module.html#open_log_file_cache",{"args":["inactive=time","min_uses=N","valid=time"]}], -[2,"map",["string\n $variable { ... }"],null,["stream"],76,null,"stream/ngx_stream_map_module.html#map",{"insert":"map ${1:string $variable { ... 
\\}};$0"}], -[2,"map_hash_bucket_size",["size"],"map_hash_bucket_size 32|64|128;",["stream"],76,null,"stream/ngx_stream_map_module.html#map_hash_bucket_size",{"insert":"map_hash_bucket_size ${1:size};$0"}], -[2,"map_hash_max_size",["size"],"map_hash_max_size 2048;",["stream"],76,null,"stream/ngx_stream_map_module.html#map_hash_max_size",{"insert":"map_hash_max_size ${1:size};$0"}], -[2,"mqtt_preread",["on | off"],"mqtt_preread off;",["stream","server"],77,null,"stream/ngx_stream_mqtt_preread_module.html#mqtt_preread",{"insert":"mqtt_preread ${1|on,off|};$0"}], -[3,"$mqtt_preread_clientid","the clientid value from the CONNECT message",77,null,"stream/ngx_stream_mqtt_preread_module.html#var_mqtt_preread_clientid",null], -[3,"$mqtt_preread_username","the username value from the CONNECT message",77,null,"stream/ngx_stream_mqtt_preread_module.html#var_mqtt_preread_username",null], -[2,"mqtt",["on | off"],"mqtt off;",["stream","server"],78,null,"stream/ngx_stream_mqtt_filter_module.html#mqtt",{"insert":"mqtt ${1|on,off|};$0"}], -[2,"mqtt_buffers",["number size"],"mqtt_buffers 100 1k;",["stream","server"],78,"1.25.1","stream/ngx_stream_mqtt_filter_module.html#mqtt_buffers",{"insert":"mqtt_buffers ${1:number} ${2:size};$0"}], -[2,"mqtt_rewrite_buffer_size",["size"],"mqtt_rewrite_buffer_size 4k|8k;",["server"],78,null,"stream/ngx_stream_mqtt_filter_module.html#mqtt_rewrite_buffer_size",{"insert":"mqtt_rewrite_buffer_size ${1:size};$0"}], -[2,"mqtt_set_connect",["field value"],null,["server"],78,null,"stream/ngx_stream_mqtt_filter_module.html#mqtt_set_connect",{"insert":"mqtt_set_connect ${1:field} ${2:value};$0"}], -[2,"proxy_bind",["address\n [transparent] |\n off"],null,["stream","server"],79,"1.9.2","stream/ngx_stream_proxy_module.html#proxy_bind",{"insert":"proxy_bind ${1:address [transparent] | off};$0"}], -[2,"proxy_buffer_size",["size"],"proxy_buffer_size 
16k;",["stream","server"],79,"1.9.4","stream/ngx_stream_proxy_module.html#proxy_buffer_size",{"insert":"proxy_buffer_size ${1:size};$0"}], -[2,"proxy_connect_timeout",["time"],"proxy_connect_timeout 60s;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_connect_timeout",{"insert":"proxy_connect_timeout ${1:time};$0"}], -[2,"proxy_download_rate",["rate"],"proxy_download_rate 0;",["stream","server"],79,"1.9.3","stream/ngx_stream_proxy_module.html#proxy_download_rate",{"insert":"proxy_download_rate ${1:rate};$0"}], -[2,"proxy_half_close",["on | off"],"proxy_half_close off;",["stream","server"],79,"1.21.4","stream/ngx_stream_proxy_module.html#proxy_half_close",{"insert":"proxy_half_close ${1|on,off|};$0"}], -[2,"proxy_next_upstream",["on | off"],"proxy_next_upstream on;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_next_upstream",{"insert":"proxy_next_upstream ${1|on,off|};$0"}], -[2,"proxy_next_upstream_timeout",["time"],"proxy_next_upstream_timeout 0;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_next_upstream_timeout",{"insert":"proxy_next_upstream_timeout ${1:time};$0"}], -[2,"proxy_next_upstream_tries",["number"],"proxy_next_upstream_tries 0;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_next_upstream_tries",{"insert":"proxy_next_upstream_tries ${1:number};$0"}], -[2,"proxy_pass",["address"],null,["server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_pass",{"insert":"proxy_pass ${1:address};$0"}], -[2,"proxy_protocol",["on | off"],"proxy_protocol off;",["stream","server"],79,"1.9.2","stream/ngx_stream_proxy_module.html#proxy_protocol",{"insert":"proxy_protocol ${1|on,off|};$0"}], -[2,"proxy_requests",["number"],"proxy_requests 0;",["stream","server"],79,"1.15.7","stream/ngx_stream_proxy_module.html#proxy_requests",{"insert":"proxy_requests ${1:number};$0"}], 
-[2,"proxy_responses",["number"],null,["stream","server"],79,"1.9.13","stream/ngx_stream_proxy_module.html#proxy_responses",{"insert":"proxy_responses ${1:number};$0"}], -[2,"proxy_session_drop",["on | off"],"proxy_session_drop off;",["stream","server"],79,"1.15.8","stream/ngx_stream_proxy_module.html#proxy_session_drop",{"insert":"proxy_session_drop ${1|on,off|};$0"}], -[2,"proxy_socket_keepalive",["on | off"],"proxy_socket_keepalive off;",["stream","server"],79,"1.15.6","stream/ngx_stream_proxy_module.html#proxy_socket_keepalive",{"insert":"proxy_socket_keepalive ${1|on,off|};$0"}], -[2,"proxy_ssl",["on | off"],"proxy_ssl off;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl",{"insert":"proxy_ssl ${1|on,off|};$0"}], -[2,"proxy_ssl_certificate",["file"],null,["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_certificate",{"insert":"proxy_ssl_certificate ${1:file};$0"}], -[2,"proxy_ssl_certificate_key",["file"],null,["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_certificate_key",{"insert":"proxy_ssl_certificate_key ${1:file};$0"}], -[2,"proxy_ssl_ciphers",["ciphers"],"proxy_ssl_ciphers DEFAULT;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_ciphers",{"insert":"proxy_ssl_ciphers ${1:ciphers};$0"}], -[2,"proxy_ssl_conf_command",["name value"],null,["stream","server"],79,"1.19.4","stream/ngx_stream_proxy_module.html#proxy_ssl_conf_command",{"insert":"proxy_ssl_conf_command ${1:name} ${2:value};$0"}], -[2,"proxy_ssl_crl",["file"],null,["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_crl",{"insert":"proxy_ssl_crl ${1:file};$0"}], -[2,"proxy_ssl_name",["name"],"proxy_ssl_name host from proxy_pass;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_name",{"insert":"proxy_ssl_name ${1:name};$0"}], 
-[2,"proxy_ssl_password_file",["file"],null,["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_password_file",{"insert":"proxy_ssl_password_file ${1:file};$0"}], -[2,"proxy_ssl_protocols",["[SSLv2]\n [SSLv3]\n [TLSv1]\n [TLSv1.1]\n [TLSv1.2]\n [TLSv1.3]"],"proxy_ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_protocols",{"insert":"proxy_ssl_protocols ${1:[SSLv2] [SSLv3] [TLSv1] [TLSv1.1] [TLSv1.2] [TLSv1.3]};$0"}], -[2,"proxy_ssl_server_name",["on | off"],"proxy_ssl_server_name off;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_server_name",{"insert":"proxy_ssl_server_name ${1|on,off|};$0"}], -[2,"proxy_ssl_session_reuse",["on | off"],"proxy_ssl_session_reuse on;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_session_reuse",{"insert":"proxy_ssl_session_reuse ${1|on,off|};$0"}], -[2,"proxy_ssl_trusted_certificate",["file"],null,["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_trusted_certificate",{"insert":"proxy_ssl_trusted_certificate ${1:file};$0"}], -[2,"proxy_ssl_verify",["on | off"],"proxy_ssl_verify off;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_verify",{"insert":"proxy_ssl_verify ${1|on,off|};$0"}], -[2,"proxy_ssl_verify_depth",["number"],"proxy_ssl_verify_depth 1;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_ssl_verify_depth",{"insert":"proxy_ssl_verify_depth ${1:number};$0"}], -[2,"proxy_timeout",["timeout"],"proxy_timeout 10m;",["stream","server"],79,null,"stream/ngx_stream_proxy_module.html#proxy_timeout",{"insert":"proxy_timeout ${1:timeout};$0"}], -[2,"proxy_upload_rate",["rate"],"proxy_upload_rate 0;",["stream","server"],79,"1.9.3","stream/ngx_stream_proxy_module.html#proxy_upload_rate",{"insert":"proxy_upload_rate ${1:rate};$0"}], -[3,"$proxy_protocol_tlv_aws_vpce_id","TLV value from the PROXY Protocol header 
representing the\nID\nof AWS VPC endpoint",80,null,"stream/ngx_stream_proxy_protocol_vendor_module.html#var_proxy_protocol_tlv_aws_vpce_id",null], -[3,"$proxy_protocol_tlv_azure_pel_id","TLV value from the PROXY Protocol header representing the\nLinkID\nof Azure private endpoint",80,null,"stream/ngx_stream_proxy_protocol_vendor_module.html#var_proxy_protocol_tlv_azure_pel_id",null], -[3,"$proxy_protocol_tlv_gcp_conn_id","TLV value from the PROXY Protocol header representing\nGoogle Cloud PSC\nconnection ID",80,null,"stream/ngx_stream_proxy_protocol_vendor_module.html#var_proxy_protocol_tlv_gcp_conn_id",null], -[2,"set_real_ip_from",["address |\n CIDR |\n unix:"],null,["stream","server"],81,null,"stream/ngx_stream_realip_module.html#set_real_ip_from",{"insert":"set_real_ip_from ${1:address | CIDR | unix:};$0"}], -[3,"$realip_remote_addr","keeps the original client address",81,null,"stream/ngx_stream_realip_module.html#var_realip_remote_addr",null], -[3,"$realip_remote_port","keeps the original client port",81,null,"stream/ngx_stream_realip_module.html#var_realip_remote_port",null], -[2,"return",["value"],null,["server"],82,null,"stream/ngx_stream_return_module.html#return",{"insert":"return ${1:value};$0"}], -[2,"set",["$variable value"],null,["server"],83,null,"stream/ngx_stream_set_module.html#set",{"insert":"set ${1:$variable value};$0"}], -[2,"split_clients",["string\n $variable { ... }"],null,["stream"],84,null,"stream/ngx_stream_split_clients_module.html#split_clients",{"insert":"split_clients ${1:string $variable { ... 
\\}};$0"}], -[2,"ssl_alpn",["protocol ..."],null,["stream","server"],85,"1.21.4","stream/ngx_stream_ssl_module.html#ssl_alpn",{"insert":"ssl_alpn ${1:protocol ...};$0"}], -[2,"ssl_certificate",["file"],null,["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_certificate",{"insert":"ssl_certificate ${1:file};$0"}], -[2,"ssl_certificate_key",["file"],null,["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_certificate_key",{"insert":"ssl_certificate_key ${1:file};$0"}], -[2,"ssl_ciphers",["ciphers"],"ssl_ciphers HIGH:!aNULL:!MD5;",["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_ciphers",{"insert":"ssl_ciphers ${1:ciphers};$0"}], -[2,"ssl_client_certificate",["file"],null,["stream","server"],85,"1.11.8","stream/ngx_stream_ssl_module.html#ssl_client_certificate",{"insert":"ssl_client_certificate ${1:file};$0"}], -[2,"ssl_conf_command",["name value"],null,["stream","server"],85,"1.19.4","stream/ngx_stream_ssl_module.html#ssl_conf_command",{"insert":"ssl_conf_command ${1:name} ${2:value};$0"}], -[2,"ssl_crl",["file"],null,["stream","server"],85,"1.11.8","stream/ngx_stream_ssl_module.html#ssl_crl",{"insert":"ssl_crl ${1:file};$0"}], -[2,"ssl_dhparam",["file"],null,["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_dhparam",{"insert":"ssl_dhparam ${1:file};$0"}], -[2,"ssl_ecdh_curve",["curve"],"ssl_ecdh_curve auto;",["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_ecdh_curve",{"insert":"ssl_ecdh_curve ${1:curve};$0"}], -[2,"ssl_handshake_timeout",["time"],"ssl_handshake_timeout 60s;",["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_handshake_timeout",{"insert":"ssl_handshake_timeout ${1:time};$0"}], -[2,"ssl_password_file",["file"],null,["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_password_file",{"insert":"ssl_password_file ${1:file};$0"}], -[2,"ssl_prefer_server_ciphers",["on | off"],"ssl_prefer_server_ciphers 
off;",["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_prefer_server_ciphers",{"insert":"ssl_prefer_server_ciphers ${1|on,off|};$0"}], -[2,"ssl_protocols",["[SSLv2]\n [SSLv3]\n [TLSv1]\n [TLSv1.1]\n [TLSv1.2]\n [TLSv1.3]"],"ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;",["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_protocols",{"insert":"ssl_protocols ${1:[SSLv2] [SSLv3] [TLSv1] [TLSv1.1] [TLSv1.2] [TLSv1.3]};$0"}], -[2,"ssl_session_cache",["off |\n none |\n [builtin[:size]]\n [shared:name:size]"],"ssl_session_cache none;",["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_session_cache",{"insert":"ssl_session_cache ${1:off | none | [builtin[:size]] [shared:name:size]};$0"}], -[2,"ssl_session_ticket_key",["file"],null,["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_session_ticket_key",{"insert":"ssl_session_ticket_key ${1:file};$0"}], -[2,"ssl_session_tickets",["on | off"],"ssl_session_tickets on;",["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_session_tickets",{"insert":"ssl_session_tickets ${1|on,off|};$0"}], -[2,"ssl_session_timeout",["time"],"ssl_session_timeout 5m;",["stream","server"],85,null,"stream/ngx_stream_ssl_module.html#ssl_session_timeout",{"insert":"ssl_session_timeout ${1:time};$0"}], -[2,"ssl_trusted_certificate",["file"],null,["stream","server"],85,"1.11.8","stream/ngx_stream_ssl_module.html#ssl_trusted_certificate",{"insert":"ssl_trusted_certificate ${1:file};$0"}], -[2,"ssl_verify_client",["on | off |\n optional | optional_no_ca"],"ssl_verify_client off;",["stream","server"],85,"1.11.8","stream/ngx_stream_ssl_module.html#ssl_verify_client",{"insert":"ssl_verify_client ${1:on | off | optional | optional_no_ca};$0"}], -[2,"ssl_verify_depth",["number"],"ssl_verify_depth 1;",["stream","server"],85,"1.11.8","stream/ngx_stream_ssl_module.html#ssl_verify_depth",{"insert":"ssl_verify_depth ${1:number};$0"}], -[3,"$ssl_alpn_protocol","returns the protocol selected 
by ALPN during the SSL handshake,\nor an empty string otherwise (1.21.4);",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_alpn_protocol",null], -[3,"$ssl_cipher","returns the name of the cipher used\nfor an established SSL connection;",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_cipher",null], -[3,"$ssl_ciphers","returns the list of ciphers supported by the client (1.11.7).\nKnown ciphers are listed by names, unknown are shown in hexadecimal,\nfor example:\nAES128-SHA:AES256-SHA:0x00ff\n\n\nThe variable is fully supported only when using OpenSSL version 1.0.2 or higher.\nWith older versions, the variable is available\nonly for new sessions and lists only known ciphers.",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_ciphers",null], -[3,"$ssl_client_cert","returns the client certificate in the PEM format\nfor an established SSL connection, with each line except the first\nprepended with the tab character (1.11.8);",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_client_cert",null], -[3,"$ssl_client_fingerprint","returns the SHA1 fingerprint of the client certificate\nfor an established SSL connection (1.11.8);",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_client_fingerprint",null], -[3,"$ssl_client_i_dn","returns the “issuer DN” string of the client certificate\nfor an established SSL connection according to\nRFC 2253 (1.11.8);",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_client_i_dn",null], -[3,"$ssl_client_raw_cert","returns the client certificate in the PEM format\nfor an established SSL connection (1.11.8);",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_client_raw_cert",null], -[3,"$ssl_client_s_dn","returns the “subject DN” string of the client certificate\nfor an established SSL connection according to\nRFC 2253 (1.11.8);",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_client_s_dn",null], -[3,"$ssl_client_serial","returns the serial number of the client certificate\nfor an established SSL connection 
(1.11.8);",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_client_serial",null], -[3,"$ssl_client_v_end","returns the end date of the client certificate (1.11.8);",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_client_v_end",null], -[3,"$ssl_client_v_remain","returns the number of days\nuntil the client certificate expires (1.11.8);",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_client_v_remain",null], -[3,"$ssl_client_v_start","returns the start date of the client certificate (1.11.8);",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_client_v_start",null], -[3,"$ssl_client_verify","returns the result of client certificate verification (1.11.8):\n“SUCCESS”, “FAILED:reason”,\nand “NONE” if a certificate was not present;",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_client_verify",null], -[3,"$ssl_curve","returns the negotiated curve used for\nSSL handshake key exchange process (1.21.5).\nKnown curves are listed by names, unknown are shown in hexadecimal,\nfor example:\nprime256v1\n\n\nThe variable is supported only when using OpenSSL version 3.0 or higher.\nWith older versions, the variable value will be an empty string.",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_curve",null], -[3,"$ssl_curves","returns the list of curves supported by the client (1.11.7).\nKnown curves are listed by names, unknown are shown in hexadecimal,\nfor example:\n0x001d:prime256v1:secp521r1:secp384r1\n\n\nThe variable is supported only when using OpenSSL version 1.0.2 or higher.\nWith older versions, the variable value will be an empty string.\n\n\nThe variable is available only for new sessions.",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_curves",null], -[3,"$ssl_protocol","returns the protocol of an established SSL connection;",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_protocol",null], -[3,"$ssl_server_name","returns the server name requested through\nSNI;",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_server_name",null], 
-[3,"$ssl_session_id","returns the session identifier of an established SSL connection;",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_session_id",null], -[3,"$ssl_session_reused","returns “r” if an SSL session was reused,\nor “.” otherwise.",85,null,"stream/ngx_stream_ssl_module.html#var_ssl_session_reused",null], -[2,"ssl_preread",["on | off"],"ssl_preread off;",["stream","server"],86,null,"stream/ngx_stream_ssl_preread_module.html#ssl_preread",{"insert":"ssl_preread ${1|on,off|};$0"}], -[3,"$ssl_preread_protocol","the highest SSL protocol version supported by the client (1.15.2)",86,null,"stream/ngx_stream_ssl_preread_module.html#var_ssl_preread_protocol",null], -[3,"$ssl_preread_server_name","server name requested through SNI",86,null,"stream/ngx_stream_ssl_preread_module.html#var_ssl_preread_server_name",null], -[3,"$ssl_preread_alpn_protocols","list of protocols advertised by the client through ALPN (1.13.10).\nThe values are separated by commas.",86,null,"stream/ngx_stream_ssl_preread_module.html#var_ssl_preread_alpn_protocols",null], -[2,"upstream",["name { ... }"],null,["stream"],87,null,"stream/ngx_stream_upstream_module.html#upstream",{"insert":"upstream ${1:name { ... 
\\}};$0"}], -[2,"server",["address [parameters]"],null,["upstream"],87,null,"stream/ngx_stream_upstream_module.html#server",{"insert":"server ${1:address [parameters]};$0"}], -[2,"zone",["name [size]"],null,["upstream"],87,null,"stream/ngx_stream_upstream_module.html#zone",{"insert":"zone ${1:name [size]};$0"}], -[2,"state",["file"],null,["upstream"],87,"1.9.7","stream/ngx_stream_upstream_module.html#state",{"insert":"state ${1:file};$0"}], -[2,"hash",["key [consistent]"],null,["upstream"],87,null,"stream/ngx_stream_upstream_module.html#hash",{"insert":"hash ${1:key [consistent]};$0"}], -[2,"least_conn",[""],null,["upstream"],87,null,"stream/ngx_stream_upstream_module.html#least_conn",{"insert":"least_conn;$0"}], -[2,"least_time",["connect |\n first_byte |\n last_byte\n [inflight]"],null,["upstream"],87,null,"stream/ngx_stream_upstream_module.html#least_time",{"insert":"least_time ${1:connect | first_byte | last_byte [inflight]};$0"}], -[2,"random",["[two [method]]"],null,["upstream"],87,"1.15.1","stream/ngx_stream_upstream_module.html#random",{"insert":"random ${1:[two [method]]};$0"}], -[2,"resolver",["address ...\n [valid=time]\n [ipv4=on|off]\n [ipv6=on|off]\n [status_zone=zone]"],null,["upstream"],87,"1.17.5","stream/ngx_stream_upstream_module.html#resolver",{"insert":"resolver ${1:address ... 
[valid=time] [ipv4=on|off] [ipv6=on|off] [status_zone=zone]};$0","args":["valid=time","ipv4=on","ipv6=on","status_zone=zone"]}], -[2,"resolver_timeout",["time"],"resolver_timeout 30s;",["upstream"],87,"1.17.5","stream/ngx_stream_upstream_module.html#resolver_timeout",{"insert":"resolver_timeout ${1:time};$0"}], -[3,"$upstream_addr","keeps the IP address and port,\nor the path to the UNIX-domain socket of the upstream server (1.11.4).\nIf several servers were contacted during proxying,\ntheir addresses are separated by commas, e.g.\n“192.168.1.1:12345, 192.168.1.2:12345, unix:/tmp/sock”.\nIf a server cannot be selected,\nthe variable keeps the name of the server group.",87,null,"stream/ngx_stream_upstream_module.html#var_upstream_addr",null], -[3,"$upstream_bytes_received","number of bytes received from an upstream server (1.11.4).\nValues from several connections\nare separated by commas like addresses in the\n$upstream_addr variable.",87,null,"stream/ngx_stream_upstream_module.html#var_upstream_bytes_received",null], -[3,"$upstream_bytes_sent","number of bytes sent to an upstream server (1.11.4).\nValues from several connections\nare separated by commas like addresses in the\n$upstream_addr variable.",87,null,"stream/ngx_stream_upstream_module.html#var_upstream_bytes_sent",null], -[3,"$upstream_connect_time","time to connect to the upstream server (1.11.4);\nthe time is kept in seconds with millisecond resolution.\nTimes of several connections\nare separated by commas like addresses in the\n$upstream_addr variable.",87,null,"stream/ngx_stream_upstream_module.html#var_upstream_connect_time",null], -[3,"$upstream_first_byte_time","time to receive the first byte of data (1.11.4);\nthe time is kept in seconds with millisecond resolution.\nTimes of several connections\nare separated by commas like addresses in the\n$upstream_addr variable.",87,null,"stream/ngx_stream_upstream_module.html#var_upstream_first_byte_time",null], -[3,"$upstream_session_time","session 
duration in seconds with millisecond resolution (1.11.4).\nTimes of several connections\nare separated by commas like addresses in the\n$upstream_addr variable.",87,null,"stream/ngx_stream_upstream_module.html#var_upstream_session_time",null], -[2,"health_check",["[parameters]"],null,["server"],88,null,"stream/ngx_stream_upstream_hc_module.html#health_check",{"insert":"health_check ${1:[parameters]};$0"}], -[2,"health_check_timeout",["timeout"],"health_check_timeout 5s;",["stream","server"],88,null,"stream/ngx_stream_upstream_hc_module.html#health_check_timeout",{"insert":"health_check_timeout ${1:timeout};$0"}], -[2,"match",["name { ... }"],null,["stream"],88,null,"stream/ngx_stream_upstream_hc_module.html#match",{"insert":"match ${1:name { ... \\}};$0"}], -[2,"zone_sync",[""],null,["server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync",{"insert":"zone_sync;$0"}], -[2,"zone_sync_buffers",["number size"],"zone_sync_buffers 8 4k|8k;",["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_buffers",{"insert":"zone_sync_buffers ${1:number} ${2:size};$0"}], -[2,"zone_sync_connect_retry_interval",["time"],"zone_sync_connect_retry_interval 1s;",["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_connect_retry_interval",{"insert":"zone_sync_connect_retry_interval ${1:time};$0"}], -[2,"zone_sync_connect_timeout",["time"],"zone_sync_connect_timeout 5s;",["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_connect_timeout",{"insert":"zone_sync_connect_timeout ${1:time};$0"}], -[2,"zone_sync_interval",["time"],"zone_sync_interval 1s;",["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_interval",{"insert":"zone_sync_interval ${1:time};$0"}], -[2,"zone_sync_recv_buffer_size",["size"],"zone_sync_recv_buffer_size 4k|8k;",["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_recv_buffer_size",{"insert":"zone_sync_recv_buffer_size 
${1:size};$0"}], -[2,"zone_sync_server",["address [resolve]"],null,["server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_server",{"insert":"zone_sync_server ${1:address [resolve]};$0"}], -[2,"zone_sync_ssl",["on | off"],"zone_sync_ssl off;",["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_ssl",{"insert":"zone_sync_ssl ${1|on,off|};$0"}], -[2,"zone_sync_ssl_certificate",["file"],null,["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_certificate",{"insert":"zone_sync_ssl_certificate ${1:file};$0"}], -[2,"zone_sync_ssl_certificate_key",["file"],null,["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_certificate_key",{"insert":"zone_sync_ssl_certificate_key ${1:file};$0"}], -[2,"zone_sync_ssl_ciphers",["ciphers"],"zone_sync_ssl_ciphers DEFAULT;",["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_ciphers",{"insert":"zone_sync_ssl_ciphers ${1:ciphers};$0"}], -[2,"zone_sync_ssl_conf_command",["name value"],null,["stream","server"],89,"1.19.4","stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_conf_command",{"insert":"zone_sync_ssl_conf_command ${1:name} ${2:value};$0"}], -[2,"zone_sync_ssl_crl",["file"],null,["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_crl",{"insert":"zone_sync_ssl_crl ${1:file};$0"}], -[2,"zone_sync_ssl_name",["name"],"zone_sync_ssl_name host from zone_sync_server;",["stream","server"],89,"1.15.7","stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_name",{"insert":"zone_sync_ssl_name ${1:name};$0"}], -[2,"zone_sync_ssl_password_file",["file"],null,["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_password_file",{"insert":"zone_sync_ssl_password_file ${1:file};$0"}], -[2,"zone_sync_ssl_protocols",["[SSLv2]\n [SSLv3]\n [TLSv1]\n [TLSv1.1]\n [TLSv1.2]\n [TLSv1.3]"],"zone_sync_ssl_protocols TLSv1 TLSv1.1 
TLSv1.2;",["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_protocols",{"insert":"zone_sync_ssl_protocols ${1:[SSLv2] [SSLv3] [TLSv1] [TLSv1.1] [TLSv1.2] [TLSv1.3]};$0"}], -[2,"zone_sync_ssl_server_name",["on | off"],"zone_sync_ssl_server_name off;",["stream","server"],89,"1.15.7","stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_server_name",{"insert":"zone_sync_ssl_server_name ${1|on,off|};$0"}], -[2,"zone_sync_ssl_trusted_certificate",["file"],null,["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_trusted_certificate",{"insert":"zone_sync_ssl_trusted_certificate ${1:file};$0"}], -[2,"zone_sync_ssl_verify",["on | off"],"zone_sync_ssl_verify off;",["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_verify",{"insert":"zone_sync_ssl_verify ${1|on,off|};$0"}], -[2,"zone_sync_ssl_verify_depth",["number"],"zone_sync_ssl_verify_depth 1;",["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_ssl_verify_depth",{"insert":"zone_sync_ssl_verify_depth ${1:number};$0"}], -[2,"zone_sync_timeout",["timeout"],"zone_sync_timeout 5s;",["stream","server"],89,null,"stream/ngx_stream_zone_sync_module.html#zone_sync_timeout",{"insert":"zone_sync_timeout ${1:timeout};$0"}], -[2,"google_perftools_profiles",["file"],null,["main"],90,null,"ngx_google_perftools_module.html#google_perftools_profiles",{"insert":"google_perftools_profiles ${1:file};$0"}], -[2,"otel_exporter",["{ ... }"],null,["http"],91,null,"ngx_otel_module.html#otel_exporter",{"insert":"otel_exporter ${1:{ ... 
\\}};$0"}], -[2,"otel_service_name",["name"],"otel_service_name unknown_service:nginx;",["http"],91,null,"ngx_otel_module.html#otel_service_name",{"insert":"otel_service_name ${1:name};$0"}], -[2,"otel_trace",["on |\n off |\n $variable"],"otel_trace off;",["http","server","location"],91,null,"ngx_otel_module.html#otel_trace",{"insert":"otel_trace ${1:on | off | $variable};$0"}], -[2,"otel_trace_context",["extract |\n inject |\n propagate |\n ignore"],"otel_trace_context ignore;",["http","server","location"],91,null,"ngx_otel_module.html#otel_trace_context",{"insert":"otel_trace_context ${1:extract | inject | propagate | ignore};$0"}], -[2,"otel_span_name",["name"],null,["http","server","location"],91,null,"ngx_otel_module.html#otel_span_name",{"insert":"otel_span_name ${1:name};$0"}], -[2,"otel_span_attr",["name value"],null,["http","server","location"],91,null,"ngx_otel_module.html#otel_span_attr",{"insert":"otel_span_attr ${1:name} ${2:value};$0"}], -[3,"$otel_trace_id","the identifier of the trace the current span belongs to,\nfor example, 56552bc4daa3bf39c08362527e1dd6c4",91,null,"ngx_otel_module.html#var_otel_trace_id",null], -[3,"$otel_span_id","the identifier of the current span,\nfor example, 4c0b8531ec38ca59",91,null,"ngx_otel_module.html#var_otel_span_id",null], -[3,"$otel_parent_id","the identifier of the parent span,\nfor example, dc94d281b0f884ea",91,null,"ngx_otel_module.html#var_otel_parent_id",null], -[3,"$otel_parent_sampled","the “sampled” flag of the parent span,\ncan be “1” or “0”",91,null,"ngx_otel_module.html#var_otel_parent_sampled",null] -] \ No newline at end of file +[3,"$proxy_add_x_forwarded_for","the “X-Forwarded-For” client request header field\nwith the $remote_addr variable appended to it, separated by a comma.\nIf the “X-Forwarded-For” field is not present in the client\nrequest header, the $proxy_add_x_forwarded_for variable is equal\nto the $remote_addr 
variable.",37,null,"http/ngx_http_proxy_module.html#var_proxy_add_x_forwarded_for",null] \ No newline at end of file diff --git a/assets/manifest/http_headers.de.json b/assets/manifest/http_headers.de.json index 63ae518..e25df8d 100644 --- a/assets/manifest/http_headers.de.json +++ b/assets/manifest/http_headers.de.json @@ -1,5 +1,5 @@ [ -[6,"Accept","Welche Inhaltstypen der Client verarbeiten kann. Ist es dem Server nicht möglich, einen Inhaltstyp bereitzustellen, der vom Client akzeptiert wird, kann er entweder den HTTP-Statuscode `[406 Not acceptable](https://de.wikipedia.org/wiki/HTTP-Statuscode#4xx_%E2%80%93_Client-Fehler \"HTTP-Statuscode\")` senden oder einen beliebigen Inhaltstyp zum Kodieren der angeforderten Informationen verwenden.[\\[10\\]](https://de.wikipedia.org/wiki/Liste_der_HTTP-Headerfelder#cite_note-10) Fehlt das Accept-Feld, so bedeutet dies, dass der Client alle Inhaltstypen akzeptiert. Kann der Server in diesem Beispiel den Inhalt der angeforderten Ressource sowohl als HTML als auch als Bild im GIF-Format an den Client senden, führt der Accept-Header der Anfrage dazu, dass als Inhaltstyp der Antwort HTML gewählt wird."], +[6,"Accept","Welche Inhaltstypen der Client verarbeiten kann. Ist es dem Server nicht möglich, einen Inhaltstyp bereitzustellen, der vom Client akzeptiert wird, kann er entweder den HTTP-Statuscode `406 Not acceptable` senden oder einen beliebigen Inhaltstyp zum Kodieren der angeforderten Informationen verwenden.[\\[10\\]](https://de.wikipedia.org/wiki/Liste_der_HTTP-Headerfelder#cite_note-10) Fehlt das Accept-Feld, so bedeutet dies, dass der Client alle Inhaltstypen akzeptiert. 
Kann der Server in diesem Beispiel den Inhalt der angeforderten Ressource sowohl als HTML als auch als Bild im GIF-Format an den Client senden, führt der Accept-Header der Anfrage dazu, dass als Inhaltstyp der Antwort HTML gewählt wird."], [6,"Accept-Charset","Welche [Zeichensätze](https://de.wikipedia.org/wiki/Zeichensatz \"Zeichensatz\") der Client anzeigen kann und somit empfangen möchte. Die passende Datei wird über Content Negotiation (z. B. bei Apache mod\\_negotiation) herausgesucht."], [6,"Accept-Encoding","Welche komprimierten Formate der Client unterstützt. Über Content Negotiation wird eine passend komprimierte Datei ausgeliefert."], [6,"Accept-Language","Welche Sprachen der Client akzeptiert. Falls der Server passend eingerichtet ist und die Sprachversionen vorhanden sind, wird über Content Negotiation die passende Datei ausgeliefert."], diff --git a/assets/manifest/http_headers.en.json b/assets/manifest/http_headers.en.json index 6a923b5..f2ceff7 100644 --- a/assets/manifest/http_headers.en.json +++ b/assets/manifest/http_headers.en.json @@ -14,12 +14,12 @@ [6,"Content-Length","The length of the request body in [octets](https://en.wikipedia.org/wiki/Octet_(computing) \"Octet (computing)\") (8-bit bytes).",["Content-Length: 348"],"Permanent"], [6,"Content-MD5","A [Base64](https://en.wikipedia.org/wiki/Base64 \"Base64\")\\-encoded binary [MD5](https://en.wikipedia.org/wiki/MD5 \"MD5\") sum of the content of the request body.",["Content-MD5: Q2hlY2sgSW50ZWdyaXR5IQ=="],"Obsolete"], [6,"Content-Type","The [Media type](https://en.wikipedia.org/wiki/Media_type \"Media type\") of the body of the request (used with POST and PUT requests).",["Content-Type: application/x-www-form-urlencoded"],"Permanent"], -[6,"Cookie","An [HTTP cookie](https://en.wikipedia.org/wiki/HTTP_cookie \"HTTP cookie\") previously sent by the server with `[Set-Cookie](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#innerlink_set-cookie)` (below).",["Cookie: $Version=1; 
Skin=new;"],"Permanent"], +[6,"Cookie","An [HTTP cookie](https://en.wikipedia.org/wiki/HTTP_cookie \"HTTP cookie\") previously sent by the server with `Set-Cookie` (below).",["Cookie: $Version=1; Skin=new;"],"Permanent"], [6,"Date","The date and time at which the message was originated (in \"HTTP-date\" format as defined by [RFC 9110: HTTP Semantics, section 5.6.7 \"Date/Time Formats\"](https://tools.ietf.org/html/rfc9110#section-5.6.7 \"rfc:9110\")).",["Date: Tue, 15 Nov 1994 08:12:31 GMT"],"Permanent"], [6,"Expect","Indicates that particular server behaviors are required by the client.",["Expect: 100-continue"],"Permanent"], [6,"Forwarded","Disclose original information of a client connecting to a web server through an HTTP proxy.[\\[16\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-16)",["Forwarded: for=192.0.2.60;proto=http;by=203.0.113.43","Forwarded: for=192.0.2.43, for=198.51.100.17"],"Permanent"], [6,"From","The email address of the user making the request.",["From: user@example.com"],"Permanent"], -[6,"Host","The domain name of the server (for [virtual hosting](https://en.wikipedia.org/wiki/Virtual_hosting \"Virtual hosting\")), and the [TCP port](https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers \"List of TCP and UDP port numbers\") number on which the server is listening. 
The [port](https://en.wikipedia.org/wiki/Port_(computer_networking) \"Port (computer networking)\") number may be omitted if the port is the standard port for the service requested.\n\nMandatory since HTTP/1.1.[\\[17\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-17) If the request is generated directly in HTTP/2, it should not be used.[\\[18\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-rfc9113_Request_Pseudo_Header_Fields-18)",["Host: en.wikipedia.org:8080","Host: en.wikipedia.org"]], +[6,"Host","The domain name of the server (for [virtual hosting](https://en.wikipedia.org/wiki/Virtual_hosting \"Virtual hosting\")), and the [TCP port](https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers \"List of TCP and UDP port numbers\") number on which the server is listening. The [port](https://en.wikipedia.org/wiki/Port_(computer_networking) \"Port (computer networking)\") number may be omitted if the port is the standard port for the service requested.\n\nMandatory since HTTP/1.1.[\\[17\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-17) If the request is generated directly in HTTP/2, it should not be used.[\\[18\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-rfc9113_Request_Pseudo_Header_Fields-18)",["Host: en.wikipedia.org:8080","Host: en.wikipedia.org"],"Permanent"], [6,"HTTP2-Settings","A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one `HTTP2-Settings` header field. 
The `HTTP2-Settings` header field is a connection-specific header field that includes parameters that govern the HTTP/2 connection, provided in anticipation of the server accepting the request to upgrade.[\\[19\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-19)[\\[20\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-20)",["HTTP2-Settings: token64"],"Obsolete"], [6,"If-Match","Only perform the action if the client supplied entity matches the same entity on the server. This is mainly for methods like PUT to only update a resource if it has not been modified since the user last updated it.",["If-Match: \"737060cd8c284d8af7ad3082f209582d\""],"Permanent"], [6,"If-Modified-Since","Allows a _304 Not Modified_ to be returned if content is unchanged.",["If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT"],"Permanent"], @@ -47,7 +47,7 @@ [6,"X-Forwarded-Host","A [_de facto_ standard](https://en.wikipedia.org/wiki/De_facto_standard \"De facto standard\") for identifying the original host requested by the client in the `Host` HTTP request header, since the host name and/or port of the reverse proxy (load balancer) may differ from the origin server handling the request. Superseded by _Forwarded_ header.",["X-Forwarded-Host: en.wikipedia.org:8080","X-Forwarded-Host: en.wikipedia.org"]], [6,"X-Forwarded-Proto","A [_de facto_ standard](https://en.wikipedia.org/wiki/De_facto_standard \"De facto standard\") for identifying the originating protocol of an HTTP request, since a reverse proxy (or a load balancer) may communicate with a web server using HTTP even if the request to the reverse proxy is HTTPS. An alternative form of the header (X-ProxyUser-Ip) is used by Google clients talking to Google servers. 
Superseded by _Forwarded_ header.",["X-Forwarded-Proto: https"]], [6,"Front-End-Https","Non-standard header field used by Microsoft applications and load-balancers",["Front-End-Https: on"]], -[6,"X-Http-Method-Override","Requests a web application to override the method specified in the request (typically POST) with the method given in the header field (typically PUT or DELETE). This can be used when a user agent or firewall prevents PUT or DELETE methods from being sent directly (note that this is either a bug in the software component, which ought to be fixed, or an intentional configuration, in which case bypassing it may be the wrong thing to do).",["X-HTTP-Method-Override: DELETE"]], +[6,"X-Http-Method-Override","Requests a web application to override the method specified in the request (typically POST) with the method given in the header field (typically PUT or DELETE). This can be used when a user agent or firewall prevents PUT or DELETE methods from being sent directly (this is either a bug in the software component, which ought to be fixed, or an intentional configuration, in which case bypassing it may be the wrong thing to do).",["X-HTTP-Method-Override: DELETE"]], [6,"X-ATT-DeviceId","Allows easier parsing of the MakeModel/Firmware that is usually found in the User-Agent String of AT&T Devices",["X-Att-Deviceid: GT-P7320/P7320XXLPG"]], [6,"X-Wap-Profile","Links to an XML file on the Internet with a full description and details about the device currently connecting. In the example to the right is an XML file for an AT&T Samsung Galaxy S2.",["x-wap-profile: http://wap.samsungmobile.com/uaprof/SGH-I777.xml"]], [6,"Proxy-Connection","Implemented as a misunderstanding of the HTTP specifications. Common because of mistakes in implementations of early HTTP versions. 
Has exactly the same functionality as standard Connection field.\n\nMust not be used with HTTP/2.[\\[14\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-rfc9113_connection-14)",["Proxy-Connection: keep-alive"]], @@ -73,12 +73,12 @@ [7,"Content-Length","The length of the request body in [octets](https://en.wikipedia.org/wiki/Octet_(computing) \"Octet (computing)\") (8-bit bytes).",["Content-Length: 348"],"Permanent"], [7,"Content-MD5","A [Base64](https://en.wikipedia.org/wiki/Base64 \"Base64\")\\-encoded binary [MD5](https://en.wikipedia.org/wiki/MD5 \"MD5\") sum of the content of the request body.",["Content-MD5: Q2hlY2sgSW50ZWdyaXR5IQ=="],"Obsolete"], [7,"Content-Type","The [Media type](https://en.wikipedia.org/wiki/Media_type \"Media type\") of the body of the request (used with POST and PUT requests).",["Content-Type: application/x-www-form-urlencoded"],"Permanent"], -[7,"Cookie","An [HTTP cookie](https://en.wikipedia.org/wiki/HTTP_cookie \"HTTP cookie\") previously sent by the server with `[Set-Cookie](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#innerlink_set-cookie)` (below).",["Cookie: $Version=1; Skin=new;"],"Permanent"], +[7,"Cookie","An [HTTP cookie](https://en.wikipedia.org/wiki/HTTP_cookie \"HTTP cookie\") previously sent by the server with `Set-Cookie` (below).",["Cookie: $Version=1; Skin=new;"],"Permanent"], [7,"Date","The date and time at which the message was originated (in \"HTTP-date\" format as defined by [RFC 9110: HTTP Semantics, section 5.6.7 \"Date/Time Formats\"](https://tools.ietf.org/html/rfc9110#section-5.6.7 \"rfc:9110\")).",["Date: Tue, 15 Nov 1994 08:12:31 GMT"],"Permanent"], [7,"Expect","Indicates that particular server behaviors are required by the client.",["Expect: 100-continue"],"Permanent"], [7,"Forwarded","Disclose original information of a client connecting to a web server through an HTTP proxy.[\\[16\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-16)",["Forwarded: 
for=192.0.2.60;proto=http;by=203.0.113.43","Forwarded: for=192.0.2.43, for=198.51.100.17"],"Permanent"], [7,"From","The email address of the user making the request.",["From: user@example.com"],"Permanent"], -[7,"Host","The domain name of the server (for [virtual hosting](https://en.wikipedia.org/wiki/Virtual_hosting \"Virtual hosting\")), and the [TCP port](https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers \"List of TCP and UDP port numbers\") number on which the server is listening. The [port](https://en.wikipedia.org/wiki/Port_(computer_networking) \"Port (computer networking)\") number may be omitted if the port is the standard port for the service requested.\n\nMandatory since HTTP/1.1.[\\[17\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-17) If the request is generated directly in HTTP/2, it should not be used.[\\[18\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-rfc9113_Request_Pseudo_Header_Fields-18)",["Host: en.wikipedia.org:8080","Host: en.wikipedia.org"]], +[7,"Host","The domain name of the server (for [virtual hosting](https://en.wikipedia.org/wiki/Virtual_hosting \"Virtual hosting\")), and the [TCP port](https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers \"List of TCP and UDP port numbers\") number on which the server is listening. 
The [port](https://en.wikipedia.org/wiki/Port_(computer_networking) \"Port (computer networking)\") number may be omitted if the port is the standard port for the service requested.\n\nMandatory since HTTP/1.1.[\\[17\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-17) If the request is generated directly in HTTP/2, it should not be used.[\\[18\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-rfc9113_Request_Pseudo_Header_Fields-18)",["Host: en.wikipedia.org:8080","Host: en.wikipedia.org"],"Permanent"], [7,"HTTP2-Settings","A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one `HTTP2-Settings` header field. The `HTTP2-Settings` header field is a connection-specific header field that includes parameters that govern the HTTP/2 connection, provided in anticipation of the server accepting the request to upgrade.[\\[19\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-19)[\\[20\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-20)",["HTTP2-Settings: token64"],"Obsolete"], [7,"If-Match","Only perform the action if the client supplied entity matches the same entity on the server. This is mainly for methods like PUT to only update a resource if it has not been modified since the user last updated it.",["If-Match: \"737060cd8c284d8af7ad3082f209582d\""],"Permanent"], [7,"If-Modified-Since","Allows a _304 Not Modified_ to be returned if content is unchanged.",["If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT"],"Permanent"], @@ -106,7 +106,7 @@ [7,"X-Forwarded-Host","A [_de facto_ standard](https://en.wikipedia.org/wiki/De_facto_standard \"De facto standard\") for identifying the original host requested by the client in the `Host` HTTP request header, since the host name and/or port of the reverse proxy (load balancer) may differ from the origin server handling the request. 
Superseded by _Forwarded_ header.",["X-Forwarded-Host: en.wikipedia.org:8080","X-Forwarded-Host: en.wikipedia.org"]], [7,"X-Forwarded-Proto","A [_de facto_ standard](https://en.wikipedia.org/wiki/De_facto_standard \"De facto standard\") for identifying the originating protocol of an HTTP request, since a reverse proxy (or a load balancer) may communicate with a web server using HTTP even if the request to the reverse proxy is HTTPS. An alternative form of the header (X-ProxyUser-Ip) is used by Google clients talking to Google servers. Superseded by _Forwarded_ header.",["X-Forwarded-Proto: https"]], [7,"Front-End-Https","Non-standard header field used by Microsoft applications and load-balancers",["Front-End-Https: on"]], -[7,"X-Http-Method-Override","Requests a web application to override the method specified in the request (typically POST) with the method given in the header field (typically PUT or DELETE). This can be used when a user agent or firewall prevents PUT or DELETE methods from being sent directly (note that this is either a bug in the software component, which ought to be fixed, or an intentional configuration, in which case bypassing it may be the wrong thing to do).",["X-HTTP-Method-Override: DELETE"]], +[7,"X-Http-Method-Override","Requests a web application to override the method specified in the request (typically POST) with the method given in the header field (typically PUT or DELETE). 
This can be used when a user agent or firewall prevents PUT or DELETE methods from being sent directly (this is either a bug in the software component, which ought to be fixed, or an intentional configuration, in which case bypassing it may be the wrong thing to do).",["X-HTTP-Method-Override: DELETE"]], [7,"X-ATT-DeviceId","Allows easier parsing of the MakeModel/Firmware that is usually found in the User-Agent String of AT&T Devices",["X-Att-Deviceid: GT-P7320/P7320XXLPG"]], [7,"X-Wap-Profile","Links to an XML file on the Internet with a full description and details about the device currently connecting. In the example to the right is an XML file for an AT&T Samsung Galaxy S2.",["x-wap-profile: http://wap.samsungmobile.com/uaprof/SGH-I777.xml"]], [7,"Proxy-Connection","Implemented as a misunderstanding of the HTTP specifications. Common because of mistakes in implementations of early HTTP versions. Has exactly the same functionality as standard Connection field.\n\nMust not be used with HTTP/2.[\\[14\\]](https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#cite_note-rfc9113_connection-14)",["Proxy-Connection: keep-alive"]], diff --git a/assets/manifest/http_headers.es.json b/assets/manifest/http_headers.es.json index 6720c2d..6e2b84f 100644 --- a/assets/manifest/http_headers.es.json +++ b/assets/manifest/http_headers.es.json @@ -1,27 +1,27 @@ [ -[6,"Accept","Content-Types (tipos de contenido) que se aceptan."], -[6,"Accept-Charset","Conjunto de caracteres que se aceptan."], -[6,"Accept-Encoding","Lista de codificaciones que se aceptan."], +[6,"Accept","Content-Types ([tipos de contenido](https://es.wikipedia.org/wiki/Tipo_de_contenido \"Tipo de contenido\")) que se aceptan. 
Véase [Negociación de contenido](https://es.wikipedia.org/wiki/Negociaci%C3%B3n_de_contenido \"Negociación de contenido\")."], +[6,"Accept-Charset","[Conjunto de caracteres](https://es.wikipedia.org/wiki/Conjunto_de_caracteres \"Conjunto de caracteres\") que se aceptan."], +[6,"Accept-Encoding","Lista de codificaciones que se aceptan. Véase [Compresión HTTP](https://es.wikipedia.org/wiki/Compresi%C3%B3n_HTTP \"Compresión HTTP\")."], [6,"Accept-Language","Idiomas que se aceptan."], [6,"Accept-Datetime","Versión de la hora y fecha que se aceptan."], -[6,"Authorization","Credenciales de autorización."], -[6,"Caché-Control","Se controla las políticas de caché."], +[6,"Authorization","Credenciales de autorización para la [autenticación HTML](https://es.wikipedia.org/wiki/Autenticaci%C3%B3n_de_acceso_b%C3%A1sica \"Autenticación de acceso básica\")."], +[6,"Cache-Control","Se controla las políticas de [caché](https://es.wikipedia.org/wiki/Cach%C3%A9_web \"Caché web\")."], [6,"Connection","Se controla el tipo de conexión."], -[6,"Cookie","Una cookie enviada previamente por el servidor usando Set-Cookie"], +[6,"Cookie","Una [cookie](https://es.wikipedia.org/wiki/Cookie_(inform%C3%A1tica) \"Cookie (informática)\") enviada previamente por el servidor usando Set-Cookie"], [6,"Content-Length","El tamaño del contenido de la petición en bytes"], -[6,"Content-MD5","Un checksum en MD5 sobre el contenido"], -[6,"Content-Type","El tipo de contenido de la petición en POST o PUT"], +[6,"Content-MD5","Un [checksum](https://es.wikipedia.org/wiki/Checksum \"Checksum\") en [MD5](https://es.wikipedia.org/wiki/MD5 \"MD5\") sobre el contenido"], +[6,"Content-Type","El tipo de contenido de la petición en [POST](https://es.wikipedia.org/w/index.php?title=M%C3%A9todo_POST&action=edit&redlink=1 \"Método POST (aún no redactado)\") o [PUT](https://es.wikipedia.org/w/index.php?title=M%C3%A9todo_PUT&action=edit&redlink=1 \"Método PUT (aún no redactado)\")"], [6,"Date","La fecha y la hora de la 
petición"], -[6,"Forwarded","Indica la información original del cliente en caso de conexión por proxy."], -[6,"From","La dirección de correo electrónico de la petición."], -[6,"Host","El nombre de dominio o dirección IP (puede incluir número de puerto). El uso de la cabecera es obligatorio a partir de HTTP 1.1"], +[6,"Forwarded","Indica la información original del cliente en caso de conexión por [proxy](https://es.wikipedia.org/wiki/Servidor_proxy \"Servidor proxy\")."], +[6,"From","La dirección de [correo electrónico](https://es.wikipedia.org/wiki/Correo_electr%C3%B3nico \"Correo electrónico\") de la petición."], +[6,"Host","El nombre de [dominio](https://es.wikipedia.org/wiki/Dominio_de_internet \"Dominio de internet\") o dirección IP (puede incluir [número de puerto](https://es.wikipedia.org/wiki/Puerto_de_red \"Puerto de red\")). El uso de la cabecera es obligatorio a partir de HTTP 1.1"], [6,"Max-Forwards","Limita el número de veces que un mensaje viaja a través de los proxies."], [6,"Origin","Inicia una petición para servidores con respuesta a Access-Control-Allow-Origin."], [6,"Pragma","Implementa cabeceras en donde múltiples efectos se aplica a todo."], [6,"Proxy-Authorization","Credenciales de autorización para conectarse a un proxy."], [6,"Range","Pide sólo una parte del contenido"], [6,"Referer","Indica la dirección URL de donde proviene, en otras palabras, es la dirección web del botón Atrás."], -[6,"User-Agent","Contiene la información de la petición, como el navegador, el sistema operativo, etc."], +[6,"User-Agent","Contiene la información del [agente de usuario](https://es.wikipedia.org/wiki/Agente_de_usuario \"Agente de usuario\"), como el navegador, el sistema operativo, etc."], [6,"Upgrade","Pide al servidor que se actualice la versión de HTTP para funcionar."], [6,"Warning","Una advertencia general sobre problemas de la entidad."] ] \ No newline at end of file diff --git a/assets/manifest/http_headers.pt.json 
b/assets/manifest/http_headers.pt.json new file mode 100644 index 0000000..f88fd8f --- /dev/null +++ b/assets/manifest/http_headers.pt.json @@ -0,0 +1,47 @@ +[ +[6,"Access-Control-Allow-Origin","Especifica quais sites podem participar de [compartilhamento de recursos de origem cruzada](https://pt.wikipedia.org/w/index.php?title=Compartilhamento_de_recursos_de_origem_cruzada&action=edit&redlink=1 \"Compartilhamento de recursos de origem cruzada (página não existe)\")"], +[6,"Accept-Ranges","Quais tipos de arranjo de conteúdo parcial este servidor suporta"], +[6,"Age","A idade que o objeto possui em um [cache de proxy](https://pt.wikipedia.org/w/index.php?title=Cache_de_proxy&action=edit&redlink=1 \"Cache de proxy (página não existe)\") em segundos"], +[6,"Allow","Ações válidas para um recurso especificado. Para ser utilizado por um _405 Método não permitido_"], +[6,"Cache-Control","Diz todos os mecanismos de cache do servidor para o cliente se eles podem armazenar em cache esse objeto. É medido em segundos"], +[6,"Connection","Opções desejadas para a conexão."], +[6,"Content-Encoding","O tipo de codificação usada nos dados. [HTTP compression](https://pt.wikipedia.org/w/index.php?title=HTTP_compression&action=edit&redlink=1 \"HTTP compression (página não existe)\")."], +[6,"Content-Language","A linguagem em que o conteúdo está"], +[6,"Content-Length","O comprimento do corpo da resposta em octetos (8-bit bytes)"], +[6,"Content-Location","Um local alternativo para os dados retornados"], +[6,"Content-MD5","A [Base64](https://pt.wikipedia.org/wiki/Base64 \"Base64\")\\-encoded binary [MD5](https://pt.wikipedia.org/wiki/MD5 \"MD5\") sum of the content of the response"], +[6,"Content-Disposition","An opportunity to raise a \"File Download\" dialogue box for a known MIME type with binary format or suggest a filename for dynamic content. 
Quotes are necessary with special characters."], +[6,"Content-Range","Where in a full body message this partial message belongs"], +[6,"Content-Type","O [tipo MIME](https://pt.wikipedia.org/wiki/Tipo_MIME \"Tipo MIME\") deste conteúdo"], +[6,"Date","A data e horário de envio da mensagem (no formato \"HTTP-date\" como definido pelo [RFC 7231](https://tools.ietf.org/html/rfc7231))"], +[6,"ETag","An identifier for a specific version of a resource, often a [message digest](https://pt.wikipedia.org/w/index.php?title=Message_digest&action=edit&redlink=1 \"Message digest (página não existe)\")"], +[6,"Expires","Gives the date/time after which the response is considered stale"], +[6,"Last-Modified","The last modified date for the requested object (in \"HTTP-date\" format as defined by [RFC 7231](https://tools.ietf.org/html/rfc7231))"], +[6,"Link","Used to express a typed relationship with another resource, where the relation type is defined by [RFC 5988](https://tools.ietf.org/html/rfc5988)"], +[6,"Location","Used in [redirection](https://pt.wikipedia.org/w/index.php?title=URL_redirection&action=edit&redlink=1 \"URL redirection (página não existe)\"), or when a new resource has been created."], +[6,"P3P","This field is supposed to set [P3P](https://pt.wikipedia.org/w/index.php?title=P3P&action=edit&redlink=1 \"P3P (página não existe)\") policy, in the form of `P3P:CP=\"your_compact_policy\"`. 
However, P3P did not take off,[\\[10\\]](https://pt.wikipedia.org/wiki/Lista_de_campos_de_cabe%C3%A7alho_HTTP#cite_note-10) most browsers have never fully implemented it, a lot of websites set this field with fake policy text, that was enough to fool browsers the existence of P3P policy and grant permissions for [third party cookies](https://pt.wikipedia.org/wiki/HTTP_cookie#Third-party_cookie \"HTTP cookie\")."], +[6,"Pragma","Implementation-specific fields that may have various effects anywhere along the request-response chain."], +[6,"Proxy-Authenticate","Request authentication to access the proxy."], +[6,"Refresh","Used in redirection, or when a new resource has been created. This refresh redirects after 5 seconds."], +[6,"Retry-After","If an entity is temporarily unavailable, this instructs the client to try again later. Value could be a specified period of time (in seconds) or a HTTP-date.[\\[11\\]](https://pt.wikipedia.org/wiki/Lista_de_campos_de_cabe%C3%A7alho_HTTP#cite_note-11)"], +[6,"Server","A name for the server"], +[6,"Set-Cookie","An [HTTP cookie](https://pt.wikipedia.org/wiki/HTTP_cookie \"HTTP cookie\")"], +[6,"Status","[CGI](https://pt.wikipedia.org/wiki/Common_Gateway_Interface \"Common Gateway Interface\") header field specifying the [status](https://pt.wikipedia.org/w/index.php?title=HTTP_status&action=edit&redlink=1 \"HTTP status (página não existe)\") of the HTTP response. 
Normal HTTP responses use a separate \"Status-Line\" instead, defined by [RFC 7230](https://tools.ietf.org/html/rfc7230).[\\[12\\]](https://pt.wikipedia.org/wiki/Lista_de_campos_de_cabe%C3%A7alho_HTTP#cite_note-12)"], +[6,"Strict-Transport-Security","A HSTS Policy informing the HTTP client how long to cache the HTTPS only policy and whether this applies to subdomains."], +[6,"Trailer","The Trailer general field value indicates that the given set of header fields is present in the trailer of a message encoded with [chunked transfer coding](https://pt.wikipedia.org/w/index.php?title=Chunked_transfer_coding&action=edit&redlink=1 \"Chunked transfer coding (página não existe)\")."], +[6,"Transfer-Encoding","The form of encoding used to safely transfer the entity to the user. [Currently defined methods](http://www.iana.org/assignments/http-parameters) are: [chunked](https://pt.wikipedia.org/w/index.php?title=Chunked_transfer_encoding&action=edit&redlink=1 \"Chunked transfer encoding (página não existe)\"), compress, deflate, gzip, identity."], +[6,"Upgrade","Ask the client to upgrade to another protocol."], +[6,"Vary","Tells downstream proxies how to match future request headers to decide whether the cached response can be used rather than requesting a fresh one from the origin server."], +[6,"Via","Informs the client of proxies through which the response was sent."], +[6,"Warning","A general warning about possible problems with the entity body."], +[6,"WWW-Authenticate","Indicates the authentication scheme that should be used to access the requested entity."], +[6,"X-Frame-Options","[Clickjacking](https://pt.wikipedia.org/wiki/Clickjacking \"Clickjacking\") protection: deny - no rendering within a frame, sameorigin - no rendering if origin mismatch, allow-from - allow from specified location, allowall - non-standard, allow from any location[\\[14\\]](https://pt.wikipedia.org/wiki/Lista_de_campos_de_cabe%C3%A7alho_HTTP#cite_note-14)"], 
+[6,"Public-Key-Pins","[Man-in-the-middle attack](https://pt.wikipedia.org/wiki/Man-in-the-middle_attack \"Man-in-the-middle attack\") mitigation, announces hash of website's authentic [TLS](https://pt.wikipedia.org/wiki/Transport_Layer_Security \"Transport Layer Security\") certificate"], +[6,"X-XSS-Protection","[Cross-site scripting](https://pt.wikipedia.org/wiki/Cross-site_scripting \"Cross-site scripting\") (XSS) filter"], +[6,"Content-Security-Policy","[Content Security Policy](https://pt.wikipedia.org/w/index.php?title=Content_Security_Policy&action=edit&redlink=1 \"Content Security Policy (página não existe)\") definition."], +[6,"X-Content-Security-Policy",-1], +[6,"X-WebKit-CSP",-1], +[6,"X-Content-Type-Options","The only defined value, \"nosniff\", prevents [Internet Explorer](https://pt.wikipedia.org/wiki/Internet_Explorer \"Internet Explorer\") from MIME-sniffing a response away from the declared content-type. This also applies to [Google Chrome](https://pt.wikipedia.org/wiki/Google_Chrome \"Google Chrome\"), when downloading extensions.[\\[20\\]](https://pt.wikipedia.org/wiki/Lista_de_campos_de_cabe%C3%A7alho_HTTP#cite_note-20)"], +[6,"X-Powered-By","specifies the technology (e.g. ASP.NET, PHP, JBoss) supporting the web application (version details are often in `X-Runtime`, `X-Version`, or `X-AspNet-Version`)"], +[6,"X-UA-Compatible","Recommends the preferred rendering engine (often a backward-compatibility mode) to use to display the content. 
Also used to activate [Chrome Frame](https://pt.wikipedia.org/wiki/Chrome_Frame \"Chrome Frame\") in Internet Explorer."] +] \ No newline at end of file diff --git a/assets/manifest/js.json b/assets/manifest/js.json index 76478ae..1ddda02 100644 --- a/assets/manifest/js.json +++ b/assets/manifest/js.json @@ -18,24 +18,4 @@ [2,"js_preload_object",["name.json |\nname from file.json"],null,["http","server","location"],1,"0.7.8","http/ngx_http_js_module.html#js_preload_object",{"insert":"js_preload_object ${1:name.json | name from file.json};$0"}], [2,"js_set",["$variable function |\nmodule.function"],null,["http","server","location"],1,null,"http/ngx_http_js_module.html#js_set",{"insert":"js_set ${1:$variable function | module.function};$0"}], [2,"js_shared_dict_zone",["zone=name:size\n [timeout=time]\n [type=string|number]\n [evict]"],null,["http"],1,"0.8.0","http/ngx_http_js_module.html#js_shared_dict_zone",{"insert":"js_shared_dict_zone ${1:zone=name:size [timeout=time] [type=string|number] [evict]};$0","args":["timeout=time","type=string"]}], -[2,"js_var",["$variable [value]"],null,["http","server","location"],1,"0.5.3","http/ngx_http_js_module.html#js_var",{"insert":"js_var ${1:$variable [value]};$0"}], -[2,"js_access",["function | module.function"],null,["stream","server"],2,null,"stream/ngx_stream_js_module.html#js_access",{"insert":"js_access ${1:function | module.function};$0"}], -[2,"js_fetch_buffer_size",["size"],"js_fetch_buffer_size 16k;",["stream","server"],2,"0.7.4","stream/ngx_stream_js_module.html#js_fetch_buffer_size",{"insert":"js_fetch_buffer_size ${1:size};$0"}], -[2,"js_fetch_ciphers",["ciphers"],"js_fetch_ciphers HIGH:!aNULL:!MD5;",["stream","server"],2,"0.7.0","stream/ngx_stream_js_module.html#js_fetch_ciphers",{"insert":"js_fetch_ciphers ${1:ciphers};$0"}], -[2,"js_fetch_max_response_buffer_size",["size"],"js_fetch_max_response_buffer_size 
1m;",["stream","server"],2,"0.7.4","stream/ngx_stream_js_module.html#js_fetch_max_response_buffer_size",{"insert":"js_fetch_max_response_buffer_size ${1:size};$0"}], -[2,"js_fetch_protocols",["[TLSv1]\n [TLSv1.1]\n [TLSv1.2]\n [TLSv1.3]"],"js_fetch_protocols TLSv1 TLSv1.1 TLSv1.2;",["stream","server"],2,"0.7.0","stream/ngx_stream_js_module.html#js_fetch_protocols",{"insert":"js_fetch_protocols ${1:[TLSv1] [TLSv1.1] [TLSv1.2] [TLSv1.3]};$0"}], -[2,"js_fetch_timeout",["time"],"js_fetch_timeout 60s;",["stream","server"],2,"0.7.4","stream/ngx_stream_js_module.html#js_fetch_timeout",{"insert":"js_fetch_timeout ${1:time};$0"}], -[2,"js_fetch_trusted_certificate",["file"],null,["stream","server"],2,"0.7.0","stream/ngx_stream_js_module.html#js_fetch_trusted_certificate",{"insert":"js_fetch_trusted_certificate ${1:file};$0"}], -[2,"js_fetch_verify",["on | off"],"js_fetch_verify on;",["stream","server"],2,"0.7.4","stream/ngx_stream_js_module.html#js_fetch_verify",{"insert":"js_fetch_verify ${1|on,off|};$0"}], -[2,"js_fetch_verify_depth",["number"],"js_fetch_verify_depth 100;",["stream","server"],2,"0.7.0","stream/ngx_stream_js_module.html#js_fetch_verify_depth",{"insert":"js_fetch_verify_depth ${1:number};$0"}], -[2,"js_filter",["function | module.function"],null,["stream","server"],2,null,"stream/ngx_stream_js_module.html#js_filter",{"insert":"js_filter ${1:function | module.function};$0"}], -[2,"js_import",["module.js |\nexport_name from module.js"],null,["stream","server"],2,"0.4.0","stream/ngx_stream_js_module.html#js_import",{"insert":"js_import ${1:module.js | export_name from module.js};$0"}], -[2,"js_include",["file"],null,["stream"],2,null,"stream/ngx_stream_js_module.html#js_include",{"insert":"js_include ${1:file};$0"}], -[2,"js_path",["path"],null,["stream","server"],2,"0.3.0","stream/ngx_stream_js_module.html#js_path",{"insert":"js_path ${1:path};$0"}], -[2,"js_periodic",["function |\n module.function\n [interval=time]\n [jitter=number]\n 
[worker_affinity=mask]"],null,["server"],2,"0.8.1","stream/ngx_stream_js_module.html#js_periodic",{"insert":"js_periodic ${1:function | module.function [interval=time] [jitter=number] [worker_affinity=mask]};$0","args":["interval=time","jitter=number","worker_affinity=mask"]}], -[2,"js_preload_object",["name.json |\nname from file.json"],null,["stream","server"],2,"0.7.8","stream/ngx_stream_js_module.html#js_preload_object",{"insert":"js_preload_object ${1:name.json | name from file.json};$0"}], -[2,"js_preread",["function | module.function"],null,["stream","server"],2,null,"stream/ngx_stream_js_module.html#js_preread",{"insert":"js_preread ${1:function | module.function};$0"}], -[2,"js_set",["$variable function |\nmodule.function"],null,["stream","server"],2,null,"stream/ngx_stream_js_module.html#js_set",{"insert":"js_set ${1:$variable function | module.function};$0"}], -[2,"js_shared_dict_zone",["zone=name:size\n [timeout=time]\n [type=string|number]\n [evict]"],null,["stream"],2,"0.8.0","stream/ngx_stream_js_module.html#js_shared_dict_zone",{"insert":"js_shared_dict_zone ${1:zone=name:size [timeout=time] [type=string|number] [evict]};$0","args":["timeout=time","type=string"]}], -[2,"js_var",["$variable [value]"],null,["stream","server"],2,"0.5.3","stream/ngx_stream_js_module.html#js_var",{"insert":"js_var ${1:$variable [value]};$0"}] -] \ No newline at end of file +[2,"js_var",["$variable [value]"],null,["http","server","location"],1,"0.5.3","http/ngx_http_js_module.html#js_var",{"insert":"js_var ${1:$variable [value]};$0"}] \ No newline at end of file diff --git a/assets/manifest/lua.json b/assets/manifest/lua.json index 3baf816..8e829f3 100644 --- a/assets/manifest/lua.json +++ b/assets/manifest/lua.json @@ -77,5 +77,5 @@ [2,"lua_max_pending_timers",["lua_max_pending_timers "],"lua_max_pending_timers 1024",["http"],1,"0.8.0","https://github.com/openresty/lua-nginx-module/#lua_max_pending_timers",{}], [2,"lua_max_running_timers",["lua_max_running_timers 
"],"lua_max_running_timers 256",["http"],1,"0.8.0","https://github.com/openresty/lua-nginx-module/#lua_max_running_timers",{}], [2,"lua_sa_restart",["lua_sa_restart on|off"],"lua_sa_restart on",["http"],1,"0.10.14","https://github.com/openresty/lua-nginx-module/#lua_sa_restart",{}], -[2,"lua_worker_thread_vm_pool_size",["lua_worker_thread_vm_pool_size "],"lua_worker_thread_vm_pool_size 100",["http"],1,null,"https://github.com/openresty/lua-nginx-module/#lua_worker_thread_vm_pool_size",{}] +[2,"lua_worker_thread_vm_pool_size",["lua_worker_thread_vm_pool_size "],"lua_worker_thread_vm_pool_size 10",["http"],1,null,"https://github.com/openresty/lua-nginx-module/#lua_worker_thread_vm_pool_size",{}] ] \ No newline at end of file diff --git a/assets/mediatypes/application.json b/assets/mediatypes/application.json index 64d04b7..bb6d058 100644 --- a/assets/mediatypes/application.json +++ b/assets/mediatypes/application.json @@ -5,6 +5,7 @@ ["3gppHalForms+json"], ["3gpp-ims+xml"], ["A2L"], +["ace-groupcomm+cbor"], ["ace+cbor"], ["ace+json"], ["activemessage"], @@ -25,6 +26,8 @@ ["alto-networkmap+json"], ["alto-propmap+json"], ["alto-propmapparams+json"], +["alto-tips+json"], +["alto-tipsparams+json"], ["alto-updatestreamcontrol+json"], ["alto-updatestreamparams+json"], ["AML"], @@ -96,6 +99,7 @@ ["csvm+json"], ["cwl"], ["cwl+json"], +["cwl+yaml"], ["cwt"], ["cybercash"], ["dash+xml"], @@ -167,6 +171,10 @@ ["geoxacml+xml"], ["gltf-buffer"], ["gml+xml"], +["gnap-binding-jws"], +["gnap-binding-jwsd"], +["gnap-binding-rotation-jws"], +["gnap-binding-rotation-jwsd"], ["gzip"], ["H224"], ["held+xml"], @@ -333,6 +341,9 @@ ["prs.hpub+zip"], ["prs.implied-document+xml"], ["prs.implied-executable"], +["prs.implied-object+json"], +["prs.implied-object+json-seq"], +["prs.implied-object+yaml"], ["prs.implied-structure"], ["prs.nprend"], ["prs.plucker"], @@ -363,6 +374,7 @@ ["rpki-manifest"], ["rpki-publication"], ["rpki-roa"], +["rpki-signed-tal"], ["rpki-updown"], ["rtf"], 
["rtploopback"], @@ -422,6 +434,7 @@ ["sru+xml"], ["ssml+xml"], ["stix+json"], +["stratum"], ["swid+cbor"], ["swid+xml"], ["tamp-apex-update"], @@ -465,6 +478,8 @@ ["vnd.1000minds.decision-model+xml"], ["vnd.1ob"], ["vnd.3gpp.5gnas"], +["vnd.3gpp.5gsa2x"], +["vnd.3gpp.5gsa2x-local-service-information"], ["vnd.3gpp.access-transfer-events+xml"], ["vnd.3gpp.bsf+xml"], ["vnd.3gpp.crs+xml"], @@ -510,6 +525,7 @@ ["vnd.3gpp.pic-bw-large"], ["vnd.3gpp.pic-bw-small"], ["vnd.3gpp.pic-bw-var"], +["vnd.3gpp.pinapp-info+xml"], ["vnd.3gpp-prose-pc3a+xml"], ["vnd.3gpp-prose-pc3ach+xml"], ["vnd.3gpp-prose-pc3ch+xml"], @@ -579,6 +595,7 @@ ["vnd.antix.game-component"], ["vnd.apache.arrow.file"], ["vnd.apache.arrow.stream"], +["vnd.apache.parquet"], ["vnd.apache.thrift.binary"], ["vnd.apache.thrift.compact"], ["vnd.apache.thrift.json"], @@ -621,6 +638,7 @@ ["vnd.businessobjects"], ["vnd.byu.uapi+json"], ["vnd.bzip3"], +["vnd.c3voc.schedule+xml"], ["vnd.cab-jscript"], ["vnd.canon-cpdl"], ["vnd.canon-lips"], @@ -756,6 +774,7 @@ ["vnd.epson.salt"], ["vnd.epson.ssf"], ["vnd.ericsson.quickcall"], +["vnd.erofs"], ["vnd.espass-espass+zip"], ["vnd.eszigno3+xml"], ["vnd.etsi.aoc+xml"], @@ -827,6 +846,7 @@ ["vnd.futoin+cbor"], ["vnd.futoin+json"], ["vnd.fuzzysheet"], +["vnd.ga4gh.passport+jwt"], ["vnd.genomatix.tuxedo"], ["vnd.genozip"], ["vnd.gentics.grd+json"], @@ -854,7 +874,7 @@ ["vnd.gnu.taler.merchant+json"], ["vnd.google-earth.kml+xml"], ["vnd.google-earth.kmz"], -["vnd.gov.sk.e-form+xml"], +["vnd.gov.sk.e-form+xml","vnd.gov.sk.e-form+xml (OBSOLETED by request)"], ["vnd.gov.sk.e-form+zip"], ["vnd.gov.sk.xmldatacontainer+xml"], ["vnd.gpxsee.map+xml"], @@ -971,6 +991,7 @@ ["vnd.las.las+json"], ["vnd.las.las+xml"], ["vnd.laszip"], +["vnd.ldev.productlicensing"], ["vnd.leap+json"], ["vnd.liberty-request+xml"], ["vnd.llamagraphics.life-balance.desktop"], @@ -1072,6 +1093,7 @@ ["vnd.ms-xpsdocument"], ["vnd.msa-disk-image"], ["vnd.mseq"], +["vnd.msgpack"], ["vnd.msign"], 
["vnd.multiad.creator"], ["vnd.multiad.creator.cif"], @@ -1499,6 +1521,7 @@ ["vnd.wv.ssp+xml"], ["vnd.xacml+json"], ["vnd.xara"], +["vnd.xecrets-encrypted"], ["vnd.xfdl"], ["vnd.xfdl.webform"], ["vnd.xmi+xml"], @@ -1569,8 +1592,8 @@ ["yang-data+xml"], ["yang-patch+json"], ["yang-patch+xml"], +["yang-sid+json"], ["yin+xml"], ["zip"], ["zlib"], -["zstd"] -] \ No newline at end of file +["zstd"] \ No newline at end of file diff --git a/assets/mediatypes/audio.json b/assets/mediatypes/audio.json index 61679f5..6a79b34 100644 --- a/assets/mediatypes/audio.json +++ b/assets/mediatypes/audio.json @@ -43,6 +43,7 @@ ["EVRCWB1"], ["EVS"], ["example"], +["flac"], ["flexfec"], ["fwdred"], ["G711-0"], @@ -75,6 +76,7 @@ ["MELP1200"], ["MELP2400"], ["mhas"], +["midi-clip"], ["mobile-xmf"], ["MPA"], ["mp4"], @@ -157,5 +159,4 @@ ["vnd.sealedmedia.softseal.mpeg"], ["vnd.vmx.cvsd"], ["vorbis"], -["vorbis-config"] -] \ No newline at end of file +["vorbis-config"] \ No newline at end of file diff --git a/assets/mediatypes/font.json b/assets/mediatypes/font.json index dc9dc35..31640c8 100644 --- a/assets/mediatypes/font.json +++ b/assets/mediatypes/font.json @@ -4,5 +4,4 @@ ["sfnt"], ["ttf"], ["woff"], -["woff2"] -] \ No newline at end of file +["woff2"] \ No newline at end of file diff --git a/assets/mediatypes/image.json b/assets/mediatypes/image.json index 987a2c4..1eba587 100644 --- a/assets/mediatypes/image.json +++ b/assets/mediatypes/image.json @@ -25,6 +25,7 @@ ["jphc"], ["jpm"], ["jpx"], +["jxl"], ["jxr"], ["jxrA"], ["jxrS"], @@ -76,5 +77,4 @@ ["webp"], ["wmf"], ["emf","x-emf - DEPRECATED in favor of image/emf"], -["wmf","x-wmf - DEPRECATED in favor of image/wmf"] -] \ No newline at end of file +["wmf","x-wmf - DEPRECATED in favor of image/wmf"] \ No newline at end of file diff --git a/assets/mediatypes/message.json b/assets/mediatypes/message.json index 4bcd61c..dd8b39c 100644 --- a/assets/mediatypes/message.json +++ b/assets/mediatypes/message.json @@ -20,5 +20,4 @@ 
["sipfrag"], ["tracking-status"], ["vnd.si.simp","vnd.si.simp (OBSOLETED by request)"], -["vnd.wfa.wsc"] -] \ No newline at end of file +["vnd.wfa.wsc"] \ No newline at end of file diff --git a/assets/mediatypes/model.json b/assets/mediatypes/model.json index 4c6b125..908335d 100644 --- a/assets/mediatypes/model.json +++ b/assets/mediatypes/model.json @@ -37,5 +37,4 @@ ["vnd.vtu"], ["x3d-vrml"], ["x3d+fastinfoset"], -["x3d+xml"] -] \ No newline at end of file +["x3d+xml"] \ No newline at end of file diff --git a/assets/mediatypes/multipart.json b/assets/mediatypes/multipart.json index f5c54d2..1ab0651 100644 --- a/assets/mediatypes/multipart.json +++ b/assets/mediatypes/multipart.json @@ -11,5 +11,4 @@ ["signed"], ["vnd.bint.med-plus"], ["voice-message"], -["x-mixed-replace"] -] \ No newline at end of file +["x-mixed-replace"] \ No newline at end of file diff --git a/assets/mediatypes/text.json b/assets/mediatypes/text.json index 083c606..2e29ef8 100644 --- a/assets/mediatypes/text.json +++ b/assets/mediatypes/text.json @@ -89,5 +89,4 @@ ["vtt"], ["wgsl"], ["xml"], -["xml-external-parsed-entity"] -] \ No newline at end of file +["xml-external-parsed-entity"] \ No newline at end of file diff --git a/assets/mediatypes/video.json b/assets/mediatypes/video.json index e1e4fa9..fc4dbd8 100644 --- a/assets/mediatypes/video.json +++ b/assets/mediatypes/video.json @@ -9,6 +9,7 @@ ["CelB"], ["DV"], ["encaprtp"], +["evc"], ["example"], ["FFV1"], ["flexfec"], @@ -88,5 +89,4 @@ ["vnd.youtube.yt"], ["vnd.vivo"], ["VP8"], -["VP9"] -] \ No newline at end of file +["VP9"] \ No newline at end of file diff --git a/assets/snippets/lua.json b/assets/snippets/lua.json index 29e37a2..d13bcab 100644 --- a/assets/snippets/lua.json +++ b/assets/snippets/lua.json @@ -1,1602 +1,1607 @@ { - "ngx.arg": { - "description": "When this is used in the context of the set_by_lua* directives, this table is read-only and holds the input arguments to the config directives: value = ngx.arg[n]Here is an 
example location /foo {\n set $a 32;\n set $b 56;\n\n set_by_lua $sum\n 'return tonumber(ngx.arg[1]) + tonumber(ngx.arg[2])'\n $a $b;\n\n echo $sum;\n }that writes out 88, the sum of 32 and 56.When this table is used in the context of body_filter_by_lua*, the first element holds the input data chunk to the output filter code and the second element holds the boolean flag for the \"eof\" flag indicating the end of the whole output data stream.The data chunk and \"eof\" flag passed to the downstream Nginx output filters can also be overridden by assigning values directly to the corresponding table elements. When setting nil or an empty Lua string value to ngx.arg[1], no data chunk will be passed to the downstream Nginx output filters at all.", - "prefix": "ngx.arg", - "body": "local val = ngx.arg[index]" - }, - "ngx.var.VARIABLE": { - "description": "Read and write Nginx variable values. value = ngx.var.some_nginx_variable_name\n ngx.var.some_nginx_variable_name = valueNote that only already defined Nginx variables can be written to.\nFor example: location /foo {\n set $my_var ''; # this line is required to create $my_var at config time\n content_by_lua_block {\n ngx.var.my_var = 123\n ...\n }\n }That is, Nginx variables cannot be created on-the-fly. Here is a list of pre-defined\nNginx variables.Some special Nginx variables like $args and $limit_rate can be assigned a value,\nmany others are not, like $query_string, $arg_PARAMETER, and $http_NAME.Nginx regex group capturing variables $1, $2, $3, and etc, can be read by this\ninterface as well, by writing ngx.var[1], ngx.var[2], ngx.var[3], and etc.Setting ngx.var.Foo to a nil value will unset the $Foo Nginx variable. ngx.var.args = nilCAUTION When reading from an Nginx variable, Nginx will allocate memory in the per-request memory pool which is freed only at request termination. 
So when you need to read from an Nginx variable repeatedly in your Lua code, cache the Nginx variable value to your own Lua variable, for example, local val = ngx.var.some_var\n --- use the val repeatedly laterto prevent (temporary) memory leaking within the current request's lifetime. Another way of caching the result is to use the ngx.ctx table.Undefined Nginx variables are evaluated to nil while uninitialized (but defined) Nginx variables are evaluated to an empty Lua string.This API requires a relatively expensive metamethod call and it is recommended to avoid using it on hot code paths.", - "prefix": "ngx.var.VARIABLE", - "body": "ngx.var.VAR_NAME" - }, - "print": { - "description": "Writes argument values into the Nginx error.log file with the ngx.NOTICE log level.It is equivalent to ngx.log(ngx.NOTICE, ...)Lua nil arguments are accepted and result in literal \"nil\" strings while Lua booleans result in literal \"true\" or \"false\" strings. And the ngx.null constant will yield the \"null\" string output.There is a hard coded 2048 byte limitation on error message lengths in the Nginx core. This limit includes trailing newlines and leading time stamps. If the message size exceeds this limit, Nginx will truncate the message text accordingly. 
This limit can be manually modified by editing the NGX_MAX_ERROR_STR macro definition in the src/core/ngx_log.h file in the Nginx source tree.", - "prefix": "print", - "body": "print(...)" - }, - "ngx.ctx": { - "description": "This table can be used to store per-request Lua context data and has a life time identical to the current request (as with the Nginx variables).Consider the following example, location /test {\n rewrite_by_lua_block {\n ngx.ctx.foo = 76\n }\n access_by_lua_block {\n ngx.ctx.foo = ngx.ctx.foo + 3\n }\n content_by_lua_block {\n ngx.say(ngx.ctx.foo)\n }\n }Then GET /test will yield the output 79That is, the ngx.ctx.foo entry persists across the rewrite, access, and content phases of a request.Every request, including subrequests, has its own copy of the table. For example: location /sub {\n content_by_lua_block {\n ngx.say(\"sub pre: \", ngx.ctx.blah)\n ngx.ctx.blah = 32\n ngx.say(\"sub post: \", ngx.ctx.blah)\n }\n }\n\n location /main {\n content_by_lua_block {\n ngx.ctx.blah = 73\n ngx.say(\"main pre: \", ngx.ctx.blah)\n local res = ngx.location.capture(\"/sub\")\n ngx.print(res.body)\n ngx.say(\"main post: \", ngx.ctx.blah)\n }\n }Then GET /main will give the output main pre: 73\n sub pre: nil\n sub post: 32\n main post: 73Here, modification of the ngx.ctx.blah entry in the subrequest does not affect the one in the parent request. This is because they have two separate versions of ngx.ctx.blah.Internal redirects (triggered by nginx configuration directives like error_page, try_files, index and etc) will destroy the original request ngx.ctx data (if any) and the new request will have an empty ngx.ctx table. 
For instance, location /new {\n content_by_lua_block {\n ngx.say(ngx.ctx.foo)\n }\n }\n\n location /orig {\n content_by_lua_block {\n ngx.ctx.foo = \"hello\"\n ngx.exec(\"/new\")\n }\n }Then GET /orig will give nilrather than the original \"hello\" value.Because HTTP request is created after SSL handshake, the ngx.ctx created\nin ssl_certificate_by_lua*, ssl_session_store_by_lua*, ssl_session_fetch_by_lua* and ssl_client_hello_by_lua*\nis not available in the following phases like rewrite_by_lua*.Since v0.10.18, the ngx.ctx created during a SSL handshake\nwill be inherited by the requests which share the same TCP connection established by the handshake.\nNote that overwrite values in ngx.ctx in the http request phases (like rewrite_by_lua*) will only take affect in the current http request.Arbitrary data values, including Lua closures and nested tables, can be inserted into this \"magic\" table. It also allows the registration of custom meta methods.Overriding ngx.ctx with a new Lua table is also supported, for example, ngx.ctx = { foo = 32, bar = 54 }When being used in the context of init_worker_by_lua*, this table just has the same lifetime of the current Lua handler.The ngx.ctx lookup requires relatively expensive metamethod calls and it is much slower than explicitly passing per-request data along by your own function arguments. So do not abuse this API for saving your own function arguments because it usually has quite some performance impact.Because of the metamethod magic, never \"local\" the ngx.ctx table outside your Lua function scope on the Lua module level due to worker-level data sharing. 
For example, the following is bad: -- mymodule.lua\n local _M = {}\n\n -- the following line is bad since ngx.ctx is a per-request\n -- data while this ctx variable is on the Lua module level\n -- and thus is per-nginx-worker.\n local ctx = ngx.ctx\n\n function _M.main()\n ctx.foo = \"bar\"\n end\n\n return _MUse the following instead: -- mymodule.lua\n local _M = {}\n\n function _M.main(ctx)\n ctx.foo = \"bar\"\n end\n\n return _MThat is, let the caller pass the ctx table explicitly via a function argument.", - "prefix": "ngx.ctx", - "body": "ngx.ctx" - }, - "ngx.location.capture": { - "description": "Issues a synchronous but still non-blocking Nginx Subrequest using uri.Nginx's subrequests provide a powerful way to make non-blocking internal requests to other locations configured with disk file directory or any other Nginx C modules like ngx_proxy, ngx_fastcgi, ngx_memc,\nngx_postgres, ngx_drizzle, and even ngx_lua itself and etc etc etc.Also note that subrequests just mimic the HTTP interface but there is no extra HTTP/TCP traffic nor IPC involved. Everything works internally, efficiently, on the C level.Subrequests are completely different from HTTP 301/302 redirection (via ngx.redirect) and internal redirection (via ngx.exec).You should always read the request body (by either calling ngx.req.read_body or configuring lua_need_request_body on) before initiating a subrequest.This API function (as well as ngx.location.capture_multi) always buffers the whole response body of the subrequest in memory. Thus, you should use cosockets\nand streaming processing instead if you have to handle large subrequest responses.Here is a basic example: res = ngx.location.capture(uri)Returns a Lua table with 4 slots: res.status, res.header, res.body, and res.truncated.res.status holds the response status code for the subrequest response.res.header holds all the response headers of the\nsubrequest and it is a normal Lua table. 
For multi-value response headers,\nthe value is a Lua (array) table that holds all the values in the order that\nthey appear. For instance, if the subrequest response headers contain the following\nlines: Set-Cookie: a=3\n Set-Cookie: foo=bar\n Set-Cookie: baz=blahThen res.header[\"Set-Cookie\"] will be evaluated to the table value\n{\"a=3\", \"foo=bar\", \"baz=blah\"}.res.body holds the subrequest's response body data, which might be truncated. You always need to check the res.truncated boolean flag to see if res.body contains truncated data. The data truncation here can only be caused by those unrecoverable errors in your subrequests like the cases that the remote end aborts the connection prematurely in the middle of the response body data stream or a read timeout happens when your subrequest is receiving the response body data from the remote.URI query strings can be concatenated to URI itself, for instance, res = ngx.location.capture('/foo/bar?a=3&b=4')Named locations like @foo are not allowed due to a limitation in\nthe Nginx core. Use normal locations combined with the internal directive to\nprepare internal-only locations.An optional option table can be fed as the second\nargument, which supports the options:\nmethod\nspecify the subrequest's request method, which only accepts constants like ngx.HTTP_POST.\nbody\nspecify the subrequest's request body (string value only).\nargs\nspecify the subrequest's URI query arguments (both string value and Lua tables are accepted)\nctx\nspecify a Lua table to be the ngx.ctx table for the subrequest. It can be the current request's ngx.ctx table, which effectively makes the parent and its subrequest to share exactly the same context table. This option was first introduced in the v0.3.1rc25 release.\nvars\ntake a Lua table which holds the values to set the specified Nginx variables in the subrequest as this option's value. 
This option was first introduced in the v0.3.1rc31 release.\ncopy_all_vars\nspecify whether to copy over all the Nginx variable values of the current request to the subrequest in question. modifications of the Nginx variables in the subrequest will not affect the current (parent) request. This option was first introduced in the v0.3.1rc31 release.\nshare_all_vars\nspecify whether to share all the Nginx variables of the subrequest with the current (parent) request. modifications of the Nginx variables in the subrequest will affect the current (parent) request. Enabling this option may lead to hard-to-debug issues due to bad side-effects and is considered bad and harmful. Only enable this option when you completely know what you are doing.\nalways_forward_body\nwhen set to true, the current (parent) request's request body will always be forwarded to the subrequest being created if the body option is not specified. The request body read by either ngx.req.read_body() or lua_need_request_body on will be directly forwarded to the subrequest without copying the whole request body data when creating the subrequest (no matter the request body data is buffered in memory buffers or temporary files). By default, this option is false and when the body option is not specified, the request body of the current (parent) request is only forwarded when the subrequest takes the PUT or POST request method.\nIssuing a POST subrequest, for example, can be done as follows res = ngx.location.capture(\n '/foo/bar',\n { method = ngx.HTTP_POST, body = 'hello, world' }\n )See HTTP method constants methods other than POST.\nThe method option is ngx.HTTP_GET by default.The args option can specify extra URI arguments, for instance, ngx.location.capture('/foo?a=1',\n { args = { b = 3, c = ':' } }\n )is equivalent to ngx.location.capture('/foo?a=1&b=3&c=%3a')that is, this method will escape argument keys and values according to URI rules and\nconcatenate them together into a complete query string. 
The format for the Lua table passed as the args argument is identical to the format used in the ngx.encode_args method.The args option can also take plain query strings: ngx.location.capture('/foo?a=1',\n { args = 'b=3&c=%3a' }\n )This is functionally identical to the previous examples.The share_all_vars option controls whether to share Nginx variables among the current request and its subrequests.\nIf this option is set to true, then the current request and associated subrequests will share the same Nginx variable scope. Hence, changes to Nginx variables made by a subrequest will affect the current request.Care should be taken in using this option as variable scope sharing can have unexpected side effects. The args, vars, or copy_all_vars options are generally preferable instead.This option is set to false by default location /other {\n set $dog \"$dog world\";\n echo \"$uri dog: $dog\";\n }\n\n location /lua {\n set $dog 'hello';\n content_by_lua_block {\n res = ngx.location.capture(\"/other\",\n { share_all_vars = true })\n\n ngx.print(res.body)\n ngx.say(ngx.var.uri, \": \", ngx.var.dog)\n }\n }Accessing location /lua gives/other dog: hello world\n/lua: hello world\nThe copy_all_vars option provides a copy of the parent request's Nginx variables to subrequests when such subrequests are issued. Changes made to these variables by such subrequests will not affect the parent request or any other subrequests sharing the parent request's variables. 
location /other {\n set $dog \"$dog world\";\n echo \"$uri dog: $dog\";\n }\n\n location /lua {\n set $dog 'hello';\n content_by_lua_block {\n res = ngx.location.capture(\"/other\",\n { copy_all_vars = true })\n\n ngx.print(res.body)\n ngx.say(ngx.var.uri, \": \", ngx.var.dog)\n }\n }Request GET /lua will give the output/other dog: hello world\n/lua: hello\nNote that if both share_all_vars and copy_all_vars are set to true, then share_all_vars takes precedence.In addition to the two settings above, it is possible to specify\nvalues for variables in the subrequest using the vars option. These\nvariables are set after the sharing or copying of variables has been\nevaluated, and provides a more efficient method of passing specific\nvalues to a subrequest over encoding them as URL arguments and\nunescaping them in the Nginx config file. location /other {\n content_by_lua_block {\n ngx.say(\"dog = \", ngx.var.dog)\n ngx.say(\"cat = \", ngx.var.cat)\n }\n }\n\n location /lua {\n set $dog '';\n set $cat '';\n content_by_lua_block {\n res = ngx.location.capture(\"/other\",\n { vars = { dog = \"hello\", cat = 32 }})\n\n ngx.print(res.body)\n }\n }Accessing /lua will yield the outputdog = hello\ncat = 32\nThe ctx option can be used to specify a custom Lua table to serve as the ngx.ctx table for the subrequest. 
location /sub {\n content_by_lua_block {\n ngx.ctx.foo = \"bar\";\n }\n }\n location /lua {\n content_by_lua_block {\n local ctx = {}\n res = ngx.location.capture(\"/sub\", { ctx = ctx })\n\n ngx.say(ctx.foo)\n ngx.say(ngx.ctx.foo)\n }\n }Then request GET /lua givesbar\nnil\nIt is also possible to use this ctx option to share the same ngx.ctx table between the current (parent) request and the subrequest: location /sub {\n content_by_lua_block {\n ngx.ctx.foo = \"bar\"\n }\n }\n location /lua {\n content_by_lua_block {\n res = ngx.location.capture(\"/sub\", { ctx = ngx.ctx })\n ngx.say(ngx.ctx.foo)\n }\n }Request GET /lua yields the outputbar\nNote that subrequests issued by ngx.location.capture inherit all the\nrequest headers of the current request by default and that this may have unexpected side effects on the\nsubrequest responses. For example, when using the standard ngx_proxy module to serve\nsubrequests, an \"Accept-Encoding: gzip\" header in the main request may result\nin gzipped responses that cannot be handled properly in Lua code. Original request headers should be ignored by setting\nproxy_pass_request_headers to off in subrequest locations.When the body option is not specified and the always_forward_body option is false (the default value), the POST and PUT subrequests will inherit the request bodies of the parent request (if any).There is a hard-coded upper limit on the number of subrequests possible for every main request. In older versions of Nginx, the limit was 50 concurrent subrequests and in more recent versions, Nginx 1.9.5 onwards, the same limit is changed to limit the depth of recursive subrequests. 
When this limit is exceeded, the following error message is added to the error.log file:[error] 13983#0: *1 subrequests cycle while processing \"/uri\"\nThe limit can be manually modified if required by editing the definition of the NGX_HTTP_MAX_SUBREQUESTS macro in the nginx/src/http/ngx_http_request.h file in the Nginx source tree.Please also refer to restrictions on capturing locations configured by subrequest directives of other modules.", - "prefix": "ngx.location.capture", - "body": "local res = ngx.location.capture(${1:uri}, ${2:options?})" - }, - "ngx.location.capture_multi": { - "description": "Just like ngx.location.capture, but supports multiple subrequests running in parallel.This function issues several parallel subrequests specified by the input table and returns their results in the same order. For example, res1, res2, res3 = ngx.location.capture_multi{\n { \"/foo\", { args = \"a=3&b=4\" } },\n { \"/bar\" },\n { \"/baz\", { method = ngx.HTTP_POST, body = \"hello\" } },\n }\n\n if res1.status == ngx.HTTP_OK then\n ...\n end\n\n if res2.body == \"BLAH\" then\n ...\n endThis function will not return until all the subrequests terminate.\nThe total latency is the longest latency of the individual subrequests rather than the sum.Lua tables can be used for both requests and responses when the number of subrequests to be issued is not known in advance: -- construct the requests table\n local reqs = {}\n table.insert(reqs, { \"/mysql\" })\n table.insert(reqs, { \"/postgres\" })\n table.insert(reqs, { \"/redis\" })\n table.insert(reqs, { \"/memcached\" })\n\n -- issue all the requests at once and wait until they all return\n local resps = {\n ngx.location.capture_multi(reqs)\n }\n\n -- loop over the responses table\n for i, resp in ipairs(resps) do\n -- process the response table \"resp\"\n endThe ngx.location.capture function is just a special form\nof this function. 
Logically speaking, the ngx.location.capture can be implemented like this ngx.location.capture =\n function (uri, args)\n return ngx.location.capture_multi({ {uri, args} })\n endPlease also refer to restrictions on capturing locations configured by subrequest directives of other modules.", - "prefix": "ngx.location.capture_multi", - "body": "local res1, res2, ... = ngx.location.capture_multi({ {uri, options?}, {uri, options?}, ... })" - }, - "ngx.status": { - "description": "Read and write the current request's response status. This should be called\nbefore sending out the response headers. ngx.status = ngx.HTTP_CREATED\n status = ngx.statusSetting ngx.status after the response header is sent out has no effect but leaving an error message in your Nginx's error log file:attempt to set ngx.status after sending out response headers\n", - "prefix": "ngx.status", - "body": "ngx.status" - }, - "ngx.header.HEADER": { - "description": "Set, add to, or clear the current request's HEADER response header that is to be sent.Underscores (_) in the header names will be replaced by hyphens (-) by default. This transformation can be turned off via the lua_transform_underscores_in_response_headers directive.The header names are matched case-insensitively. -- equivalent to ngx.header[\"Content-Type\"] = 'text/plain'\n ngx.header.content_type = 'text/plain'\n\n ngx.header[\"X-My-Header\"] = 'blah blah'Multi-value headers can be set this way: ngx.header['Set-Cookie'] = {'a=32; path=/', 'b=4; path=/'}will yield Set-Cookie: a=32; path=/\n Set-Cookie: b=4; path=/in the response headers.Only Lua tables are accepted (Only the last element in the table will take effect for standard headers such as Content-Type that only accept a single value). 
ngx.header.content_type = {'a', 'b'}is equivalent to ngx.header.content_type = 'b'Setting a slot to nil effectively removes it from the response headers: ngx.header[\"X-My-Header\"] = nilThe same applies to assigning an empty table: ngx.header[\"X-My-Header\"] = {}Setting ngx.header.HEADER after sending out response headers (either explicitly with ngx.send_headers or implicitly with ngx.print and similar) will log an error message.Reading ngx.header.HEADER will return the value of the response header named HEADER.Underscores (_) in the header names will also be replaced by dashes (-) and the header names will be matched case-insensitively. If the response header is not present at all, nil will be returned.This is particularly useful in the context of header_filter_by_lua*, for example, location /test {\n set $footer '';\n\n proxy_pass http://some-backend;\n\n header_filter_by_lua_block {\n if ngx.header[\"X-My-Header\"] == \"blah\" then\n ngx.var.footer = \"some value\"\n end\n }\n\n echo_after_body $footer;\n }For multi-value headers, all of the values of header will be collected in order and returned as a Lua table. For example, response headersFoo: bar\nFoo: baz\nwill result in {\"bar\", \"baz\"}to be returned when reading ngx.header.Foo.Note that ngx.header is not a normal Lua table and as such, it is not possible to iterate through it using the Lua ipairs function.Note: this function throws a Lua error if HEADER or\nVALUE contain unsafe characters (control characters).For reading request headers, use the ngx.req.get_headers function instead.", - "prefix": "ngx.header.HEADER", - "body": "local value = ngx.header.HEADER" - }, - "ngx.resp.get_headers": { - "description": "Returns a Lua table holding all the current response headers for the current request. 
local h, err = ngx.resp.get_headers()\n\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current response here\n end\n\n for k, v in pairs(h) do\n ...\n endThis function has the same signature as ngx.req.get_headers except getting response headers instead of request headers.Note that a maximum of 100 response headers are parsed by default (including those with the same name) and that additional response headers are silently discarded to guard against potential denial of service attacks. Since v0.10.13, when the limit is exceeded, it will return a second value which is the string \"truncated\".This API was first introduced in the v0.9.5 release.", - "prefix": "ngx.resp.get_headers", - "body": "local headers, err = ngx.resp.get_headers(${1:max_headers?}, ${2:raw?})" - }, - "ngx.req.is_internal": { - "description": "Returns a boolean indicating whether the current request is an \"internal request\", i.e.,\na request initiated from inside the current Nginx server instead of from the client side.Subrequests are all internal requests and so are requests after internal redirects.This API was first introduced in the v0.9.20 release.", - "prefix": "ngx.req.is_internal", - "body": "local is_internal = ngx.req.is_internal()" - }, - "ngx.req.start_time": { - "description": "Returns a floating-point number representing the timestamp (including milliseconds as the decimal part) when the current request was created.The following example emulates the $request_time variable value (provided by ngx_http_log_module) in pure Lua: local request_time = ngx.now() - ngx.req.start_time()This function was first introduced in the v0.7.7 release.See also ngx.now and ngx.update_time.", - "prefix": "ngx.req.start_time", - "body": "local secs = ngx.req.start_time()" - }, - "ngx.req.http_version": { - "description": "Returns the HTTP version number for the current request as a Lua number.Current possible values are 2.0, 1.0, 1.1, and 0.9. 
Returns nil for unrecognized values.This method was first introduced in the v0.7.17 release.", - "prefix": "ngx.req.http_version", - "body": "local num = ngx.req.http_version()" - }, - "ngx.req.raw_header": { - "description": "Returns the original raw HTTP protocol header received by the Nginx server.By default, the request line and trailing CR LF terminator will also be included. For example, ngx.print(ngx.req.raw_header())gives something like this:GET /t HTTP/1.1\nHost: localhost\nConnection: close\nFoo: bar\nYou can specify the optional\nno_request_line argument as a true value to exclude the request line from the result. For example, ngx.print(ngx.req.raw_header(true))outputs something like this:Host: localhost\nConnection: close\nFoo: bar\nThis method was first introduced in the v0.7.17 release.This method does not work in HTTP/2 requests yet.", - "prefix": "ngx.req.raw_header", - "body": "local str = ngx.req.raw_header(${1:no_request_line?})" - }, - "ngx.req.get_method": { - "description": "Retrieves the current request's request method name. Strings like \"GET\" and \"POST\" are returned instead of numerical method constants.If the current request is an Nginx subrequest, then the subrequest's method name will be returned.This method was first introduced in the v0.5.6 release.See also ngx.req.set_method.", - "prefix": "ngx.req.get_method", - "body": "local method_name = ngx.req.get_method()" - }, - "ngx.req.set_method": { - "description": "Overrides the current request's request method with the method_id argument. 
Currently only numerical method constants are supported, like ngx.HTTP_POST and ngx.HTTP_GET.If the current request is an Nginx subrequest, then the subrequest's method will be overridden.This method was first introduced in the v0.5.6 release.See also ngx.req.get_method.", - "prefix": "ngx.req.set_method", - "body": "ngx.req.set_method(${1:method_id})" - }, - "ngx.req.set_uri": { - "description": "Rewrite the current request's (parsed) URI by the uri argument. The uri argument must be a Lua string and cannot be of zero length, or a Lua exception will be thrown.The optional boolean jump argument can trigger location rematch (or location jump) as ngx_http_rewrite_module's rewrite directive, that is, when jump is true (default to false), this function will never return and it will tell Nginx to try re-searching locations with the new URI value at the later post-rewrite phase and jumping to the new location.Location jump will not be triggered otherwise, and only the current request's URI will be modified, which is also the default behavior. This function will return but with no returned values when the jump argument is false or absent altogether.For example, the following Nginx config snippet rewrite ^ /foo last;can be coded in Lua like this: ngx.req.set_uri(\"/foo\", true)Similarly, Nginx config rewrite ^ /foo break;can be coded in Lua as ngx.req.set_uri(\"/foo\", false)or equivalently, ngx.req.set_uri(\"/foo\")The jump argument can only be set to true in rewrite_by_lua*. 
Use of jump in other contexts is prohibited and will throw out a Lua exception.A more sophisticated example involving regex substitutions is as follows location /test {\n rewrite_by_lua_block {\n local uri = ngx.re.sub(ngx.var.uri, \"^/test/(.*)\", \"/$1\", \"o\")\n ngx.req.set_uri(uri)\n }\n proxy_pass http://my_backend;\n }which is functionally equivalent to location /test {\n rewrite ^/test/(.*) /$1 break;\n proxy_pass http://my_backend;\n }Note: this function throws a Lua error if the uri argument\ncontains unsafe characters (control characters).Note that it is not possible to use this interface to rewrite URI arguments and that ngx.req.set_uri_args should be used for this instead. For instance, Nginx config rewrite ^ /foo?a=3? last;can be coded as ngx.req.set_uri_args(\"a=3\")\n ngx.req.set_uri(\"/foo\", true)or ngx.req.set_uri_args({a = 3})\n ngx.req.set_uri(\"/foo\", true)Starting from 0.10.16 of this module, this function accepts an\noptional boolean binary argument to allow arbitrary binary URI\ndata. By default, this binary argument is false and this function\nwill throw out a Lua error such as the one below when the uri\nargument contains any control characters (ASCII Code 0 ~ 0x08, 0x0A ~ 0x1F and 0x7F).[error] 23430#23430: *1 lua entry thread aborted: runtime error:\ncontent_by_lua(nginx.conf:44):3: ngx.req.set_uri unsafe byte \"0x00\"\nin \"\\x00foo\" (maybe you want to set the 'binary' argument?)\nThis interface was first introduced in the v0.3.1rc14 release.", - "prefix": "ngx.req.set_uri", - "body": "ngx.req.set_uri(${1:uri}, ${2:jump?}, ${3:binary?})" - }, - "ngx.req.set_uri_args": { - "description": "Rewrite the current request's URI query arguments by the args argument. 
The args argument can be either a Lua string, as in ngx.req.set_uri_args(\"a=3&b=hello%20world\")or a Lua table holding the query arguments' key-value pairs, as in ngx.req.set_uri_args({ a = 3, b = \"hello world\" })In the former case, i.e., when the whole query-string is provided directly,\nthe input Lua string should already be well-formed with the URI encoding.\nFor security considerations, this method will automatically escape any control and\nwhitespace characters (ASCII code 0x00 ~ 0x20 and 0x7F) in the Lua string.In the latter case, this method will escape argument keys and values according to the URI escaping rule.Multi-value arguments are also supported: ngx.req.set_uri_args({ a = 3, b = {5, 6} })which will result in a query string like a=3&b=5&b=6 or b=5&b=6&a=3.Note that when using Lua table as the arg argument, the order of the arguments in the result query string which change from time to time. If you would like to get an ordered result, you need to use Lua string as the arg argument.This interface was first introduced in the v0.3.1rc13 release.See also ngx.req.set_uri.", - "prefix": "ngx.req.set_uri_args", - "body": "ngx.req.set_uri_args(${1:args})" - }, - "ngx.req.get_uri_args": { - "description": "Returns a Lua table holding all the current request URL query arguments. An optional tab argument\ncan be used to reuse the table returned by this method. 
location = /test {\n content_by_lua_block {\n local args, err = ngx.req.get_uri_args()\n\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n end\n\n for key, val in pairs(args) do\n if type(val) == \"table\" then\n ngx.say(key, \": \", table.concat(val, \", \"))\n else\n ngx.say(key, \": \", val)\n end\n end\n }\n }Then GET /test?foo=bar&bar=baz&bar=blah will yield the response body foo: bar\n bar: baz, blahMultiple occurrences of an argument key will result in a table value holding all the values for that key in order.Keys and values are unescaped according to URI escaping rules. In the settings above, GET /test?a%20b=1%61+2 will yield: a b: 1a 2Arguments without the = parts are treated as boolean arguments. GET /test?foo&bar will yield: foo: true\n bar: trueThat is, they will take Lua boolean values true. However, they are different from arguments taking empty string values. GET /test?foo=&bar= will give something like foo:\n bar:Empty key arguments are discarded. GET /test?=hello&=world will yield an empty output for instance.Updating query arguments via the Nginx variable $args (or ngx.var.args in Lua) at runtime is also supported: ngx.var.args = \"a=3&b=42\"\n local args, err = ngx.req.get_uri_args()Here the args table will always look like {a = 3, b = 42}regardless of the actual request query string.Note that a maximum of 100 request arguments are parsed by default (including those with the same name) and that additional request arguments are silently discarded to guard against potential denial of service attacks. 
Since v0.10.13, when the limit is exceeded, it will return a second value which is the string \"truncated\".However, the optional max_args function argument can be used to override this limit: local args, err = ngx.req.get_uri_args(10)\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n endThis argument can be set to zero to remove the limit and to process all request arguments received: local args, err = ngx.req.get_uri_args(0)Removing the max_args cap is strongly discouraged.", - "prefix": "ngx.req.get_uri_args", - "body": "local args, err = ngx.req.get_uri_args(${1:max_args?}, ${2:tab?})" - }, - "ngx.req.get_post_args": { - "description": "Returns a Lua table holding all the current request POST query arguments (of the MIME type application/x-www-form-urlencoded). Call ngx.req.read_body to read the request body first or turn on the lua_need_request_body directive to avoid errors. location = /test {\n content_by_lua_block {\n ngx.req.read_body()\n local args, err = ngx.req.get_post_args()\n\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n end\n\n if not args then\n ngx.say(\"failed to get post args: \", err)\n return\n end\n for key, val in pairs(args) do\n if type(val) == \"table\" then\n ngx.say(key, \": \", table.concat(val, \", \"))\n else\n ngx.say(key, \": \", val)\n end\n end\n }\n }Then # Post request with the body 'foo=bar&bar=baz&bar=blah'\n $ curl --data 'foo=bar&bar=baz&bar=blah' localhost/testwill yield the response body like foo: bar\n bar: baz, blahMultiple occurrences of an argument key will result in a table value holding all of the values for that key in order.Keys and values will be unescaped according to URI escaping rules.With the settings above, # POST request with body 'a%20b=1%61+2'\n $ curl -d 'a%20b=1%61+2' localhost/testwill yield: a b: 1a 2Arguments without the = parts are treated as boolean arguments. 
POST /test with the request body foo&bar will yield: foo: true\n bar: trueThat is, they will take Lua boolean values true. However, they are different from arguments taking empty string values. POST /test with request body foo=&bar= will return something like foo:\n bar:Empty key arguments are discarded. POST /test with body =hello&=world will yield empty outputs for instance.Note that a maximum of 100 request arguments are parsed by default (including those with the same name) and that additional request arguments are silently discarded to guard against potential denial of service attacks. Since v0.10.13, when the limit is exceeded, it will return a second value which is the string \"truncated\".However, the optional max_args function argument can be used to override this limit: local args, err = ngx.req.get_post_args(10)\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n endThis argument can be set to zero to remove the limit and to process all request arguments received: local args, err = ngx.req.get_post_args(0)Removing the max_args cap is strongly discouraged.", - "prefix": "ngx.req.get_post_args", - "body": "local args, err = ngx.req.get_post_args(${1:max_args?})" - }, - "ngx.req.get_headers": { - "description": "Returns a Lua table holding all the current request headers. 
local h, err = ngx.req.get_headers()\n\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n end\n\n for k, v in pairs(h) do\n ...\n endTo read an individual header: ngx.say(\"Host: \", ngx.req.get_headers()[\"Host\"])Note that the ngx.var.HEADER API call, which uses core $http_HEADER variables, may be more preferable for reading individual request headers.For multiple instances of request headers such as: Foo: foo\n Foo: bar\n Foo: bazthe value of ngx.req.get_headers()[\"Foo\"] will be a Lua (array) table such as: {\"foo\", \"bar\", \"baz\"}Note that a maximum of 100 request headers are parsed by default (including those with the same name) and that additional request headers are silently discarded to guard against potential denial of service attacks. Since v0.10.13, when the limit is exceeded, it will return a second value which is the string \"truncated\".However, the optional max_headers function argument can be used to override this limit: local headers, err = ngx.req.get_headers(10)\n\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n endThis argument can be set to zero to remove the limit and to process all request headers received: local headers, err = ngx.req.get_headers(0)Removing the max_headers cap is strongly discouraged.Since the 0.6.9 release, all the header names in the Lua table returned are converted to the pure lower-case form by default, unless the raw argument is set to true (default to false).Also, by default, an __index metamethod is added to the resulting Lua table and will normalize the keys to a pure lowercase form with all underscores converted to dashes in case of a lookup miss. 
For example, if a request header My-Foo-Header is present, then the following invocations will all pick up the value of this header correctly: ngx.say(headers.my_foo_header)\n ngx.say(headers[\"My-Foo-Header\"])\n ngx.say(headers[\"my-foo-header\"])The __index metamethod will not be added when the raw argument is set to true.", - "prefix": "ngx.req.get_headers", - "body": "local headers, err = ngx.req.get_headers(${1:max_headers?}, ${2:raw?})" - }, - "ngx.req.set_header": { - "description": "Set the current request's request header named header_name to value header_value, overriding any existing ones.The input Lua string header_name and header_value should already be well-formed with the URI encoding.\nFor security considerations, this method will automatically escape \" \", \"\"\", \"(\", \")\", \",\", \"/\", \":\", \";\", \"?\",\n\"<\", \"=\", \">\", \"?\", \"@\", \"[\", \"]\", \"\", \"{\", \"}\", 0x00-0x1F, 0x7F-0xFF in header_name and automatically escape\n\"0x00-0x08, 0x0A-0x0F, 0x7F in header_value.By default, all the subrequests subsequently initiated by ngx.location.capture and ngx.location.capture_multi will inherit the new header.It is not a Lua's equivalent of nginx proxy_set_header directive (same is true about ngx.req.clear_header). proxy_set_header only affects the upstream request while ngx.req.set_header change the incoming request. Record the http headers in the access log file will show the difference. But you still can use it as an alternative of nginx proxy_set_header directive as long as you know the difference.Here is an example of setting the Content-Type header: ngx.req.set_header(\"Content-Type\", \"text/css\")The header_value can take an array list of values,\nfor example, ngx.req.set_header(\"Foo\", {\"a\", \"abc\"})will produce two new request headers: Foo: a\n Foo: abcand old Foo headers will be overridden if there is any.When the header_value argument is nil, the request header will be removed. 
So ngx.req.set_header(\"X-Foo\", nil)is equivalent to ngx.req.clear_header(\"X-Foo\")Note: this function throws a Lua error if header_name or\nheader_value contain unsafe characters (control characters).", - "prefix": "ngx.req.set_header", - "body": "ngx.req.set_header(${1:header_name}, ${2:header_value})" - }, - "ngx.req.clear_header": { - "description": "Clears the current request's request header named header_name. None of the current request's existing subrequests will be affected but subsequently initiated subrequests will inherit the change by default.", - "prefix": "ngx.req.clear_header", - "body": "ngx.req.clear_header(${1:header_name})" - }, - "ngx.req.read_body": { - "description": "Reads the client request body synchronously without blocking the Nginx event loop. ngx.req.read_body()\n local args = ngx.req.get_post_args()Due to the stream processing feature of HTTP/2 or HTTP/3, this api could potentially block the entire request. Therefore, this api is effective only when HTTP/2 or HTTP/3 requests send content-length header. For requests with versions lower than HTTP/2, this api can still be used without any problems.If the request body is already read previously by turning on lua_need_request_body or by using other modules, then this function does not run and returns immediately.If the request body has already been explicitly discarded, either by the ngx.req.discard_body function or other modules, this function does not run and returns immediately.In case of errors, such as connection errors while reading the data, this method will throw out a Lua exception or terminate the current request with a 500 status code immediately.The request body data read using this function can be retrieved later via ngx.req.get_body_data or, alternatively, the temporary file name for the body data cached to disk using ngx.req.get_body_file. 
This depends on\nwhether the current request body is already larger than the client_body_buffer_size,\nand whether client_body_in_file_only has been switched on.\nIn cases where current request may have a request body and the request body data is not required, The ngx.req.discard_body function must be used to explicitly discard the request body to avoid breaking things under HTTP 1.1 keepalive or HTTP 1.1 pipelining.This function was first introduced in the v0.3.1rc17 release.", - "prefix": "ngx.req.read_body", - "body": "ngx.req.read_body()" - }, - "ngx.req.discard_body": { - "description": "Explicitly discard the request body, i.e., read the data on the connection and throw it away immediately (without using the request body by any means).This function is an asynchronous call and returns immediately.If the request body has already been read, this function does nothing and returns immediately.This function was first introduced in the v0.3.1rc17 release.See also ngx.req.read_body.", - "prefix": "ngx.req.discard_body", - "body": "ngx.req.discard_body()" - }, - "ngx.req.get_body_data": { - "description": "Retrieves in-memory request body data. It returns a Lua string rather than a Lua table holding all the parsed query arguments. Use the ngx.req.get_post_args function instead if a Lua table is required.This function returns nil if\nthe request body has not been read,\nthe request body has been read into disk temporary files,\nor the request body has zero size.\nIf the request body has not been read yet, call ngx.req.read_body first (or turn on lua_need_request_body to force this module to read the request body. 
This is not recommended however).If the request body has been read into disk files, try calling the ngx.req.get_body_file function instead.To force in-memory request bodies, try setting client_body_buffer_size to the same size value in client_max_body_size.Note that calling this function instead of using ngx.var.request_body or ngx.var.echo_request_body is more efficient because it can save one dynamic memory allocation and one data copy.This function was first introduced in the v0.3.1rc17 release.See also ngx.req.get_body_file.", - "prefix": "ngx.req.get_body_data", - "body": "local data = ngx.req.get_body_data()" - }, - "ngx.req.get_body_file": { - "description": "Retrieves the file name for the in-file request body data. Returns nil if the request body has not been read or has been read into memory.The returned file is read only and is usually cleaned up by Nginx's memory pool. It should not be manually modified, renamed, or removed in Lua code.If the request body has not been read yet, call ngx.req.read_body first (or turn on lua_need_request_body to force this module to read the request body. This is not recommended however).If the request body has been read into memory, try calling the ngx.req.get_body_data function instead.To force in-file request bodies, try turning on client_body_in_file_only.This function was first introduced in the v0.3.1rc17 release.See also ngx.req.get_body_data.", - "prefix": "ngx.req.get_body_file", - "body": "local file_name = ngx.req.get_body_file()" - }, - "ngx.req.set_body_data": { - "description": "Set the current request's request body using the in-memory data specified by the data argument.If the request body has not been read yet, call ngx.req.read_body first (or turn on lua_need_request_body to force this module to read the request body. This is not recommended however). 
Additionally, the request body must not have been previously discarded by ngx.req.discard_body.Whether the previous request body has been read into memory or buffered into a disk file, it will be freed or the disk file will be cleaned up immediately, respectively.This function was first introduced in the v0.3.1rc18 release.See also ngx.req.set_body_file.", - "prefix": "ngx.req.set_body_data", - "body": "ngx.req.set_body_data(${1:data})" - }, - "ngx.req.set_body_file": { - "description": "Set the current request's request body using the in-file data specified by the file_name argument.If the request body has not been read yet, call ngx.req.read_body first (or turn on lua_need_request_body to force this module to read the request body. This is not recommended however). Additionally, the request body must not have been previously discarded by ngx.req.discard_body.If the optional auto_clean argument is given a true value, then this file will be removed at request completion or the next time this function or ngx.req.set_body_data are called in the same request. 
The auto_clean is default to false.Please ensure that the file specified by the file_name argument exists and is readable by an Nginx worker process by setting its permission properly to avoid Lua exception errors.Whether the previous request body has been read into memory or buffered into a disk file, it will be freed or the disk file will be cleaned up immediately, respectively.This function was first introduced in the v0.3.1rc18 release.See also ngx.req.set_body_data.", - "prefix": "ngx.req.set_body_file", - "body": "ngx.req.set_body_file(${1:file_name}, ${2:auto_clean?})" - }, - "ngx.req.init_body": { - "description": "Creates a new blank request body for the current request and initializes the buffer for later request body data writing via the ngx.req.append_body and ngx.req.finish_body APIs.If the buffer_size argument is specified, then its value will be used for the size of the memory buffer for body writing with ngx.req.append_body. If the argument is omitted, then the value specified by the standard client_body_buffer_size directive will be used instead.When the data can no longer be hold in the memory buffer for the request body, then the data will be flushed onto a temporary file just like the standard request body reader in the Nginx core.It is important to always call the ngx.req.finish_body after all the data has been appended onto the current request body. 
Also, when this function is used together with ngx.req.socket, it is required to call ngx.req.socket before this function, or you will get the \"request body already exists\" error message.The usage of this function is often like this: ngx.req.init_body(128 * 1024) -- buffer is 128KB\n for chunk in next_data_chunk() do\n ngx.req.append_body(chunk) -- each chunk can be 4KB\n end\n ngx.req.finish_body()This function can be used with ngx.req.append_body, ngx.req.finish_body, and ngx.req.socket to implement efficient input filters in pure Lua (in the context of rewrite_by_lua* or access_by_lua*), which can be used with other Nginx content handler or upstream modules like ngx_http_proxy_module and ngx_http_fastcgi_module.This function was first introduced in the v0.5.11 release.", - "prefix": "ngx.req.init_body", - "body": "ngx.req.init_body(${1:buffer_size?})" - }, - "ngx.req.append_body": { - "description": "Append new data chunk specified by the data_chunk argument onto the existing request body created by the ngx.req.init_body call.When the data can no longer be hold in the memory buffer for the request body, then the data will be flushed onto a temporary file just like the standard request body reader in the Nginx core.It is important to always call the ngx.req.finish_body after all the data has been appended onto the current request body.This function can be used with ngx.req.init_body, ngx.req.finish_body, and ngx.req.socket to implement efficient input filters in pure Lua (in the context of rewrite_by_lua* or access_by_lua*), which can be used with other Nginx content handler or upstream modules like ngx_http_proxy_module and ngx_http_fastcgi_module.This function was first introduced in the v0.5.11 release.See also ngx.req.init_body.", - "prefix": "ngx.req.append_body", - "body": "ngx.req.append_body(${1:data_chunk})" - }, - "ngx.req.finish_body": { - "description": "Completes the construction process of the new request body created by the ngx.req.init_body and 
ngx.req.append_body calls.This function can be used with ngx.req.init_body, ngx.req.append_body, and ngx.req.socket to implement efficient input filters in pure Lua (in the context of rewrite_by_lua* or access_by_lua*), which can be used with other Nginx content handler or upstream modules like ngx_http_proxy_module and ngx_http_fastcgi_module.This function was first introduced in the v0.5.11 release.See also ngx.req.init_body.", - "prefix": "ngx.req.finish_body", - "body": "ngx.req.finish_body()" - }, - "ngx.req.socket": { - "description": "Returns a read-only cosocket object that wraps the downstream connection. Only receive, receiveany and receiveuntil methods are supported on this object.In case of error, nil will be returned as well as a string describing the error.The socket object returned by this method is usually used to read the current request's body in a streaming fashion. Do not turn on the lua_need_request_body directive, and do not mix this call with ngx.req.read_body and ngx.req.discard_body.If any request body data has been pre-read into the Nginx core request header buffer, the resulting cosocket object will take care of this to avoid potential data loss resulting from such pre-reading.\nChunked request bodies are not yet supported in this API.Since the v0.9.0 release, this function accepts an optional boolean raw argument. When this argument is true, this function returns a full-duplex cosocket object wrapping around the raw downstream connection socket, upon which you can call the receive, receiveany, receiveuntil, and send methods.When the raw argument is true, it is required that no pending data from any previous ngx.say, ngx.print, or ngx.send_headers calls exists. So if you have these downstream output calls previously, you should call ngx.flush(true) before calling ngx.req.socket(true) to ensure that there is no pending output data. 
If the request body has not been read yet, then this \"raw socket\" can also be used to read the request body.You can use the \"raw request socket\" returned by ngx.req.socket(true) to implement fancy protocols like WebSocket, or just emit your own raw HTTP response header or body data. You can refer to the lua-resty-websocket library for a real world example.This function was first introduced in the v0.5.0rc1 release.", - "prefix": "ngx.req.socket", - "body": "local tcpsock, err = ngx.req.socket(${1:raw})" - }, - "ngx.exec": { - "description": "Does an internal redirect to uri with args and is similar to the echo_exec directive of the echo-nginx-module. ngx.exec('/some-location')\n ngx.exec('/some-location', 'a=3&b=5&c=6')\n ngx.exec('/some-location?a=3&b=5', 'c=6')The optional second args can be used to specify extra URI query arguments, for example: ngx.exec(\"/foo\", \"a=3&b=hello%20world\")Alternatively, a Lua table can be passed for the args argument for ngx_lua to carry out URI escaping and string concatenation. 
ngx.exec(\"/foo\", { a = 3, b = \"hello world\" })The result is exactly the same as the previous example.The format for the Lua table passed as the args argument is identical to the format used in the ngx.encode_args method.Named locations are also supported but the second args argument will be ignored if present and the querystring for the new target is inherited from the referring location (if any).GET /foo/file.php?a=hello will return \"hello\" and not \"goodbye\" in the example below location /foo {\n content_by_lua_block {\n ngx.exec(\"@bar\", \"a=goodbye\")\n }\n }\n\n location @bar {\n content_by_lua_block {\n local args = ngx.req.get_uri_args()\n for key, val in pairs(args) do\n if key == \"a\" then\n ngx.say(val)\n end\n end\n }\n }Note that the ngx.exec method is different from ngx.redirect in that\nit is purely an internal redirect and that no new external HTTP traffic is involved.Also note that this method call terminates the processing of the current request and that it must be called before ngx.send_headers or explicit response body\noutputs by either ngx.print or ngx.say.It is recommended that a coding style that combines this method call with the return statement, i.e., return ngx.exec(...) be adopted when this method call is used in contexts other than header_filter_by_lua* to reinforce the fact that the request processing is being terminated.", - "prefix": "ngx.exec", - "body": "ngx.exec(${1:uri}, ${2:args?})" - }, - "ngx.redirect": { - "description": "Issue an HTTP 301 or 302 redirection to uri.Note: this function throws a Lua error if the uri argument\ncontains unsafe characters (control characters).The optional status parameter specifies the HTTP status code to be used. 
The following status codes are supported right now:\n301\n302 (default)\n303\n307\n308\nIt is 302 (ngx.HTTP_MOVED_TEMPORARILY) by default.Here is an example assuming the current server name is localhost and that it is listening on port 1984: return ngx.redirect(\"/foo\")which is equivalent to return ngx.redirect(\"/foo\", ngx.HTTP_MOVED_TEMPORARILY)Redirecting arbitrary external URLs is also supported, for example: return ngx.redirect(\"http://www.google.com\")We can also use the numerical code directly as the second status argument: return ngx.redirect(\"/foo\", 301)This method is similar to the rewrite directive with the redirect modifier in the standard\nngx_http_rewrite_module, for example, this nginx.conf snippet rewrite ^ /foo? redirect; # nginx configis equivalent to the following Lua code return ngx.redirect('/foo') -- Lua codewhile rewrite ^ /foo? permanent; # nginx configis equivalent to return ngx.redirect('/foo', ngx.HTTP_MOVED_PERMANENTLY) -- Lua codeURI arguments can be specified as well, for example: return ngx.redirect('/foo?a=3&b=4')Note that this method call terminates the processing of the current request and that it must be called before ngx.send_headers or explicit response body\noutputs by either ngx.print or ngx.say.It is recommended that a coding style that combines this method call with the return statement, i.e., return ngx.redirect(...) 
be adopted when this method call is used in contexts other than header_filter_by_lua* to reinforce the fact that the request processing is being terminated.", - "prefix": "ngx.redirect", - "body": "ngx.redirect(${1:uri}, ${2:status?})" - }, - "ngx.send_headers": { - "description": "Explicitly send out the response headers.Since v0.8.3 this function returns 1 on success, or returns nil and a string describing the error otherwise.Note that there is normally no need to manually send out response headers as ngx_lua will automatically send headers out\nbefore content is output with ngx.say or ngx.print or when content_by_lua* exits normally.", - "prefix": "ngx.send_headers", - "body": "local ok, err = ngx.send_headers()" - }, - "ngx.headers_sent": { - "description": "Returns true if the response headers have been sent (by ngx_lua), and false otherwise.This API was first introduced in ngx_lua v0.3.1rc6.", - "prefix": "ngx.headers_sent", - "body": "local value = ngx.headers_sent" - }, - "ngx.print": { - "description": "Emits arguments concatenated to the HTTP client (as response body). If response headers have not been sent, this function will send headers out first and then output body data.Since v0.8.3 this function returns 1 on success, or returns nil and a string describing the error otherwise.Lua nil values will output \"nil\" strings and Lua boolean values will output \"true\" and \"false\" literal strings respectively.Nested arrays of strings are permitted and the elements in the arrays will be sent one by one: local table = {\n \"hello, \",\n {\"world: \", true, \" or \", false,\n {\": \", nil}}\n }\n ngx.print(table)will yield the output hello, world: true or false: nilNon-array table arguments will cause a Lua exception to be thrown.The ngx.null constant will yield the \"null\" string output.This is an asynchronous call and will return immediately without waiting for all the data to be written into the system send buffer. 
To run in synchronous mode, call ngx.flush(true) after calling ngx.print. This can be particularly useful for streaming output. See ngx.flush for more details.Please note that both ngx.print and ngx.say will always invoke the whole Nginx output body filter chain, which is an expensive operation. So be careful when calling either of these two in a tight loop; buffer the data yourself in Lua and save the calls.", - "prefix": "ngx.print", - "body": "local ok, err = ngx.print(...)" - }, - "ngx.say": { - "description": "Just as ngx.print but also emit a trailing newline.", - "prefix": "ngx.say", - "body": "local ok, err = ngx.say(...)" - }, - "ngx.log": { - "description": "Log arguments concatenated to error.log with the given logging level.Lua nil arguments are accepted and result in literal \"nil\" string while Lua booleans result in literal \"true\" or \"false\" string outputs. And the ngx.null constant will yield the \"null\" string output.The log_level argument can take constants like ngx.ERR and ngx.WARN. Check out Nginx log level constants for details.There is a hard coded 2048 byte limitation on error message lengths in the Nginx core. This limit includes trailing newlines and leading time stamps. If the message size exceeds this limit, Nginx will truncate the message text accordingly. This limit can be manually modified by editing the NGX_MAX_ERROR_STR macro definition in the src/core/ngx_log.h file in the Nginx source tree.", - "prefix": "ngx.log", - "body": "ngx.log(${1:log_level}, ...)" - }, - "ngx.flush": { - "description": "Flushes response output to the client.ngx.flush accepts an optional boolean wait argument (Default: false) first introduced in the v0.3.1rc34 release. When called with the default argument, it issues an asynchronous call (Returns immediately without waiting for output data to be written into the system send buffer). 
Calling the function with the wait argument set to true switches to synchronous mode.In synchronous mode, the function will not return until all output data has been written into the system send buffer or until the send_timeout setting has expired. Note that using the Lua coroutine mechanism means that this function does not block the Nginx event loop even in the synchronous mode.When ngx.flush(true) is called immediately after ngx.print or ngx.say, it causes the latter functions to run in synchronous mode. This can be particularly useful for streaming output.Note that ngx.flush is not functional when in the HTTP 1.0 output buffering mode. See HTTP 1.0 support.Since v0.8.3 this function returns 1 on success, or returns nil and a string describing the error otherwise.", - "prefix": "ngx.flush", - "body": "local ok, err = ngx.flush(${1:wait?})" - }, - "ngx.exit": { - "description": "When status >= 200 (i.e., ngx.HTTP_OK and above), it will interrupt the execution of the current request and return status code to Nginx.When status == 0 (i.e., ngx.OK), it will only quit the current phase handler (or the content handler if the content_by_lua* directive is used) and continue to run later phases (if any) for the current request.The status argument can be ngx.OK, ngx.ERROR, ngx.HTTP_NOT_FOUND,\nngx.HTTP_MOVED_TEMPORARILY, or other HTTP status constants.To return an error page with custom contents, use code snippets like this: ngx.status = ngx.HTTP_GONE\n ngx.say(\"This is our own content\")\n -- to cause quit the whole request rather than the current phase handler\n ngx.exit(ngx.HTTP_OK)The effect in action: $ curl -i http://localhost/test\n HTTP/1.1 410 Gone\n Server: nginx/1.0.6\n Date: Thu, 15 Sep 2011 00:51:48 GMT\n Content-Type: text/plain\n Transfer-Encoding: chunked\n Connection: keep-alive\n\n This is our own contentNumber literals can be used directly as the argument, for instance, ngx.exit(501)Note that while this method accepts all HTTP status constants as input, 
it only accepts ngx.OK and ngx.ERROR of the core constants.Also note that this method call terminates the processing of the current request and that it is recommended that a coding style that combines this method call with the return statement, i.e., return ngx.exit(...) be used to reinforce the fact that the request processing is being terminated.When being used in the contexts of header_filter_by_lua*, balancer_by_lua*, and\nssl_session_store_by_lua*, ngx.exit() is\nan asynchronous operation and will return immediately. This behavior may change in future and it is recommended that users always use return in combination as suggested above.", - "prefix": "ngx.exit", - "body": "ngx.exit(${1:status})" - }, - "ngx.eof": { - "description": "Explicitly specify the end of the response output stream. In the case of HTTP 1.1 chunked encoded output, it will just trigger the Nginx core to send out the \"last chunk\".When you disable the HTTP 1.1 keep-alive feature for your downstream connections, you can rely on well written HTTP clients to close the connection actively for you when you call this method. This trick can be used do back-ground jobs without letting the HTTP clients to wait on the connection, as in the following example: location = /async {\n keepalive_timeout 0;\n content_by_lua_block {\n ngx.say(\"got the task!\")\n ngx.eof() -- well written HTTP clients will close the connection at this point\n -- access MySQL, PostgreSQL, Redis, Memcached, and etc here...\n }\n }But if you create subrequests to access other locations configured by Nginx upstream modules, then you should configure those upstream modules to ignore client connection abortions if they are not by default. 
For example, by default the standard ngx_http_proxy_module will terminate both the subrequest and the main request as soon as the client closes the connection, so it is important to turn on the proxy_ignore_client_abort directive in your location block configured by ngx_http_proxy_module: proxy_ignore_client_abort on;A better way to do background jobs is to use the ngx.timer.at API.Since v0.8.3 this function returns 1 on success, or returns nil and a string describing the error otherwise.", - "prefix": "ngx.eof", - "body": "local ok, err = ngx.eof()" - }, - "ngx.sleep": { - "description": "Sleeps for the specified seconds without blocking. One can specify time resolution up to 0.001 seconds (i.e., one millisecond).Behind the scene, this method makes use of the Nginx timers.Since the 0.7.20 release, The 0 time argument can also be specified.This method was introduced in the 0.5.0rc30 release.", - "prefix": "ngx.sleep", - "body": "ngx.sleep(${1:seconds})" - }, - "ngx.escape_uri": { - "description": "Since v0.10.16, this function accepts an optional type argument.\nIt accepts the following values (defaults to 2):\n0: escapes str as a full URI. And the characters\n (space), #, %,\n?, 0x00 ~ 0x1F, 0x7F ~ 0xFF will be escaped.\n2: escape str as a URI component. All characters except\nalphabetic characters, digits, -, ., _,\n~ will be encoded as %XX.\n", - "prefix": "ngx.escape_uri", - "body": "local newstr = ngx.escape_uri(${1:str}, ${2:type?})" - }, - "ngx.unescape_uri": { - "description": "Unescape str as an escaped URI component.For example, ngx.say(ngx.unescape_uri(\"b%20r56+7\"))gives the outputb r56 7\nInvalid escaping sequences are handled in a conventional way: %s are left unchanged. 
Also, characters that should not appear in escaped string are simply left unchanged.For example, ngx.say(ngx.unescape_uri(\"try %search%%20%again%\"))gives the outputtry %search% %again%\n(Note that %20 following % got unescaped, even it can be considered a part of invalid sequence.)", - "prefix": "ngx.unescape_uri", - "body": "local newstr = ngx.unescape_uri(${1:str})" - }, - "ngx.encode_args": { - "description": "Encode the Lua table to a query args string according to the URI encoded rules.For example, ngx.encode_args({foo = 3, [\"b r\"] = \"hello world\"})yieldsfoo=3&b%20r=hello%20world\nThe table keys must be Lua strings.Multi-value query args are also supported. Just use a Lua table for the argument's value, for example: ngx.encode_args({baz = {32, \"hello\"}})givesbaz=32&baz=hello\nIf the value table is empty and the effect is equivalent to the nil value.Boolean argument values are also supported, for instance, ngx.encode_args({a = true, b = 1})yieldsa&b=1\nIf the argument value is false, then the effect is equivalent to the nil value.This method was first introduced in the v0.3.1rc27 release.", - "prefix": "ngx.encode_args", - "body": "local str = ngx.encode_args(${1:table})" - }, - "ngx.decode_args": { - "description": "Decodes a URI encoded query-string into a Lua table. This is the inverse function of ngx.encode_args.The optional max_args argument can be used to specify the maximum number of arguments parsed from the str argument. By default, a maximum of 100 request arguments are parsed (including those with the same name) and that additional URI arguments are silently discarded to guard against potential denial of service attacks. 
Since v0.10.13, when the limit is exceeded, it will return a second value which is the string \"truncated\".This argument can be set to zero to remove the limit and to process all request arguments received: local args = ngx.decode_args(str, 0)Removing the max_args cap is strongly discouraged.This method was introduced in the v0.5.0rc29.", - "prefix": "ngx.decode_args", - "body": "local table, err = ngx.decode_args(${1:str}, ${2:max_args?})" - }, - "ngx.encode_base64": { - "description": "Encodes str to a base64 digest.Since the 0.9.16 release, an optional boolean-typed no_padding argument can be specified to control whether the base64 padding should be appended to the resulting digest (default to false, i.e., with padding enabled).", - "prefix": "ngx.encode_base64", - "body": "local newstr = ngx.encode_base64(${1:str}, ${2:no_padding?})" - }, - "ngx.decode_base64": { - "description": "Decodes the str argument as a base64 digest to the raw form. Returns nil if str is not well formed.", - "prefix": "ngx.decode_base64", - "body": "local newstr = ngx.decode_base64(${1:str})" - }, - "ngx.crc32_short": { - "description": "Calculates the CRC-32 (Cyclic Redundancy Code) digest for the str argument.This method performs better on relatively short str inputs (i.e., less than 30 ~ 60 bytes), as compared to ngx.crc32_long. The result is exactly the same as ngx.crc32_long.Behind the scene, it is just a thin wrapper around the ngx_crc32_short function defined in the Nginx core.This API was first introduced in the v0.3.1rc8 release.", - "prefix": "ngx.crc32_short", - "body": "local intval = ngx.crc32_short(${1:str})" - }, - "ngx.crc32_long": { - "description": "Calculates the CRC-32 (Cyclic Redundancy Code) digest for the str argument.This method performs better on relatively long str inputs (i.e., longer than 30 ~ 60 bytes), as compared to ngx.crc32_short. 
The result is exactly the same as ngx.crc32_short.Behind the scene, it is just a thin wrapper around the ngx_crc32_long function defined in the Nginx core.This API was first introduced in the v0.3.1rc8 release.", - "prefix": "ngx.crc32_long", - "body": "local intval = ngx.crc32_long(${1:str})" - }, - "ngx.hmac_sha1": { - "description": "Computes the HMAC-SHA1 digest of the argument str and turns the result using the secret key .The raw binary form of the HMAC-SHA1 digest will be generated, use ngx.encode_base64, for example, to encode the result to a textual representation if desired.For example, local key = \"thisisverysecretstuff\"\n local src = \"some string we want to sign\"\n local digest = ngx.hmac_sha1(key, src)\n ngx.say(ngx.encode_base64(digest))yields the outputR/pvxzHC4NLtj7S+kXFg/NePTmk=\nThis API requires the OpenSSL library enabled in the Nginx build (usually by passing the --with-http_ssl_module option to the ./configure script).This function was first introduced in the v0.3.1rc29 release.", - "prefix": "ngx.hmac_sha1", - "body": "local digest = ngx.hmac_sha1(${1:secret_key}, ${2:str})" - }, - "ngx.md5": { - "description": "Returns the hexadecimal representation of the MD5 digest of the str argument.For example, location = /md5 {\n content_by_lua_block {\n ngx.say(ngx.md5(\"hello\"))\n }\n }yields the output5d41402abc4b2a76b9719d911017c592\nSee ngx.md5_bin if the raw binary MD5 digest is required.", - "prefix": "ngx.md5", - "body": "local digest = ngx.md5(${1:str})" - }, - "ngx.md5_bin": { - "description": "Returns the binary form of the MD5 digest of the str argument.See ngx.md5 if the hexadecimal form of the MD5 digest is required.", - "prefix": "ngx.md5_bin", - "body": "local digest = ngx.md5_bin(${1:str})" - }, - "ngx.sha1_bin": { - "description": "Returns the binary form of the SHA-1 digest of the str argument.This function requires SHA-1 support in the Nginx build. 
(This usually just means OpenSSL should be installed while building Nginx).This function was first introduced in the v0.5.0rc6.", - "prefix": "ngx.sha1_bin", - "body": "local digest = ngx.sha1_bin(${1:str})" - }, - "ngx.quote_sql_str": { - "description": "Returns a quoted SQL string literal according to the MySQL quoting rules.", - "prefix": "ngx.quote_sql_str", - "body": "local quoted_value = ngx.quote_sql_str(${1:raw_value})" - }, - "ngx.today": { - "description": "Returns current date (in the format yyyy-mm-dd) from the Nginx cached time (no syscall involved unlike Lua's date library).This is the local time.", - "prefix": "ngx.today", - "body": "local str = ngx.today()" - }, - "ngx.time": { - "description": "Returns the elapsed seconds from the epoch for the current time stamp from the Nginx cached time (no syscall involved unlike Lua's date library).Updates of the Nginx time cache can be forced by calling ngx.update_time first.", - "prefix": "ngx.time", - "body": "local secs = ngx.time()" - }, - "ngx.now": { - "description": "Returns a floating-point number for the elapsed time in seconds (including milliseconds as the decimal part) from the epoch for the current time stamp from the Nginx cached time (no syscall involved unlike Lua's date library).You can forcibly update the Nginx time cache by calling ngx.update_time first.This API was first introduced in v0.3.1rc32.", - "prefix": "ngx.now", - "body": "local secs = ngx.now()" - }, - "ngx.update_time": { - "description": "Forcibly updates the Nginx current time cache. 
This call involves a syscall and thus has some overhead, so do not abuse it.This API was first introduced in v0.3.1rc32.", - "prefix": "ngx.update_time", - "body": "ngx.update_time()" - }, - "ngx.localtime": { - "description": "Returns the current time stamp (in the format yyyy-mm-dd hh:mm:ss) of the Nginx cached time (no syscall involved unlike Lua's os.date function).This is the local time.", - "prefix": "ngx.localtime", - "body": "local str = ngx.localtime()" - }, - "ngx.utctime": { - "description": "Returns the current time stamp (in the format yyyy-mm-dd hh:mm:ss) of the Nginx cached time (no syscall involved unlike Lua's os.date function).This is the UTC time.", - "prefix": "ngx.utctime", - "body": "local str = ngx.utctime()" - }, - "ngx.cookie_time": { - "description": "Returns a formatted string can be used as the cookie expiration time. The parameter sec is the time stamp in seconds (like those returned from ngx.time). ngx.say(ngx.cookie_time(1290079655))\n -- yields \"Thu, 18-Nov-10 11:27:35 GMT\"", - "prefix": "ngx.cookie_time", - "body": "local str = ngx.cookie_time(${1:sec})" - }, - "ngx.http_time": { - "description": "Returns a formated string can be used as the http header time (for example, being used in Last-Modified header). The parameter sec is the time stamp in seconds (like those returned from ngx.time). ngx.say(ngx.http_time(1290079655))\n -- yields \"Thu, 18 Nov 2010 11:27:35 GMT\"", - "prefix": "ngx.http_time", - "body": "local str = ngx.http_time(${1:sec})" - }, - "ngx.parse_http_time": { - "description": "Parse the http time string (as returned by ngx.http_time) into seconds. Returns the seconds or nil if the input string is in bad forms. 
local time = ngx.parse_http_time(\"Thu, 18 Nov 2010 11:27:35 GMT\")\n if time == nil then\n ...\n end", - "prefix": "ngx.parse_http_time", - "body": "local sec = ngx.parse_http_time(${1:str})" - }, - "ngx.is_subrequest": { - "description": "Returns true if the current request is an Nginx subrequest, or false otherwise.", - "prefix": "ngx.is_subrequest", - "body": "local value = ngx.is_subrequest" - }, - "ngx.re.match": { - "description": "Matches the subject string using the Perl compatible regular expression regex with the optional options.Only the first occurrence of the match is returned, or nil if no match is found. In case of errors, like seeing a bad regular expression or exceeding the PCRE stack limit, nil and a string describing the error will be returned.When a match is found, a Lua table captures is returned, where captures[0] holds the whole substring being matched, and captures[1] holds the first parenthesized sub-pattern's capturing, captures[2] the second, and so on. local m, err = ngx.re.match(\"hello, 1234\", \"[0-9]+\")\n if m then\n -- m[0] == \"1234\"\n\n else\n if err then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n ngx.say(\"match not found\")\n end local m, err = ngx.re.match(\"hello, 1234\", \"([0-9])[0-9]+\")\n -- m[0] == \"1234\"\n -- m[1] == \"1\"Named captures are also supported since the v0.7.14 release\nand are returned in the same Lua table as key-value pairs as the numbered captures. local m, err = ngx.re.match(\"hello, 1234\", \"([0-9])(?[0-9]+)\")\n -- m[0] == \"1234\"\n -- m[1] == \"1\"\n -- m[2] == \"234\"\n -- m[\"remaining\"] == \"234\"Unmatched subpatterns will have false values in their captures table fields. local m, err = ngx.re.match(\"hello, world\", \"(world)|(hello)|(?howdy)\")\n -- m[0] == \"hello\"\n -- m[1] == false\n -- m[2] == \"hello\"\n -- m[3] == false\n -- m[\"named\"] == falseSpecify options to control how the match operation will be performed. 
The following option characters are supported:a anchored mode (only match from the beginning)\n\nd enable the DFA mode (or the longest token match semantics).\n this requires PCRE 6.0+ or else a Lua exception will be thrown.\n first introduced in ngx_lua v0.3.1rc30.\n\nD enable duplicate named pattern support. This allows named\n subpattern names to be repeated, returning the captures in\n an array-like Lua table. for example,\n local m = ngx.re.match(\"hello, world\",\n \"(?<named>\\w+), (?<named>\\w+)\",\n \"D\")\n -- m[\"named\"] == {\"hello\", \"world\"}\n this option was first introduced in the v0.7.14 release.\n this option requires at least PCRE 8.12.\n\ni case insensitive mode (similar to Perl's /i modifier)\n\nj enable PCRE JIT compilation, this requires PCRE 8.21+ which\n must be built with the --enable-jit option. for optimum performance,\n this option should always be used together with the 'o' option.\n first introduced in ngx_lua v0.3.1rc30.\n\nJ enable the PCRE Javascript compatible mode. this option was\n first introduced in the v0.7.14 release. this option requires\n at least PCRE 8.12.\n\nm multi-line mode (similar to Perl's /m modifier)\n\no compile-once mode (similar to Perl's /o modifier),\n to enable the worker-process-level compiled-regex cache\n\ns single-line mode (similar to Perl's /s modifier)\n\nu UTF-8 mode. this requires PCRE to be built with\n the --enable-utf8 option or else a Lua exception will be thrown.\n\nU similar to \"u\" but disables PCRE's UTF-8 validity check on\n the subject string. 
first introduced in ngx_lua v0.8.1.\n\nx extended mode (similar to Perl's /x modifier)\nThese options can be combined: local m, err = ngx.re.match(\"hello, world\", \"HEL LO\", \"ix\")\n -- m[0] == \"hello\" local m, err = ngx.re.match(\"hello, 美好生活\", \"HELLO, (.{2})\", \"iu\")\n -- m[0] == \"hello, 美好\"\n -- m[1] == \"美好\"The o option is useful for performance tuning, because the regex pattern in question will only be compiled once, cached in the worker-process level, and shared among all requests in the current Nginx worker process. The upper limit of the regex cache can be tuned via the lua_regex_cache_max_entries directive.The optional fourth argument, ctx, can be a Lua table holding an optional pos field. When the pos field in the ctx table argument is specified, ngx.re.match will start matching from that offset (starting from 1). Regardless of the presence of the pos field in the ctx table, ngx.re.match will always set this pos field to the position after the substring matched by the whole pattern in case of a successful match. When match fails, the ctx table will be left intact. local ctx = {}\n local m, err = ngx.re.match(\"1234, hello\", \"[0-9]+\", \"\", ctx)\n -- m[0] = \"1234\"\n -- ctx.pos == 5 local ctx = { pos = 2 }\n local m, err = ngx.re.match(\"1234, hello\", \"[0-9]+\", \"\", ctx)\n -- m[0] = \"234\"\n -- ctx.pos == 5The ctx table argument combined with the a regex modifier can be used to construct a lexer atop ngx.re.match.Note that, the options argument is not optional when the ctx argument is specified and that the empty Lua string (\"\") must be used as placeholder for options if no meaningful regex options are required.This method requires the PCRE library enabled in Nginx (Known Issue With Special Escaping Sequences).To confirm that PCRE JIT is enabled, activate the Nginx debug log by adding the --with-debug option to Nginx or OpenResty's ./configure script. Then, enable the \"debug\" error log level in error_log directive. 
The following message will be generated if PCRE JIT is enabled:pcre JIT compiling result: 1\nStarting from the 0.9.4 release, this function also accepts a 5th argument, res_table, for letting the caller supply the Lua table used to hold all the capturing results. Starting from 0.9.6, it is the caller's responsibility to ensure this table is empty. This is very useful for recycling Lua tables and saving GC and table allocation overhead.This feature was introduced in the v0.2.1rc11 release.", - "prefix": "ngx.re.match", - "body": "local captures, err = ngx.re.match(${1:subject}, ${2:regex}, ${3:options?}, ${4:ctx?}, ${5:res_table?})" - }, - "ngx.re.find": { - "description": "Similar to ngx.re.match but only returns the beginning index (from) and end index (to) of the matched substring. The returned indexes are 1-based and can be fed directly into the string.sub API function to obtain the matched substring.In case of errors (like bad regexes or any PCRE runtime errors), this API function returns two nil values followed by a string describing the error.If no match is found, this function just returns a nil value.Below is an example: local s = \"hello, 1234\"\n local from, to, err = ngx.re.find(s, \"([0-9]+)\", \"jo\")\n if from then\n ngx.say(\"from: \", from)\n ngx.say(\"to: \", to)\n ngx.say(\"matched: \", string.sub(s, from, to))\n else\n if err then\n ngx.say(\"error: \", err)\n return\n end\n ngx.say(\"not matched!\")\n endThis example produces the outputfrom: 8\nto: 11\nmatched: 1234\nBecause this API function does not create new Lua strings nor new Lua tables, it is much faster than ngx.re.match. It should be used wherever possible.Since the 0.9.3 release, an optional 5th argument, nth, is supported to specify which (submatch) capture's indexes to return. 
When nth is 0 (which is the default), the indexes for the whole matched substring is returned; when nth is 1, then the 1st submatch capture's indexes are returned; when nth is 2, then the 2nd submatch capture is returned, and so on. When the specified submatch does not have a match, then two nil values will be returned. Below is an example for this: local str = \"hello, 1234\"\n local from, to = ngx.re.find(str, \"([0-9])([0-9]+)\", \"jo\", nil, 2)\n if from then\n ngx.say(\"matched 2nd submatch: \", string.sub(str, from, to)) -- yields \"234\"\n endThis API function was first introduced in the v0.9.2 release.", - "prefix": "ngx.re.find", - "body": "local from, to, err = ngx.re.find(${1:subject}, ${2:regex}, ${3:options?}, ${4:ctx?}, ${5:nth?})" - }, - "ngx.re.gmatch": { - "description": "Similar to ngx.re.match, but returns a Lua iterator instead, so as to let the user programmer iterate all the matches over the string argument with the PCRE regex.In case of errors, like seeing an ill-formed regular expression, nil and a string describing the error will be returned.Here is a small example to demonstrate its basic usage: local iterator, err = ngx.re.gmatch(\"hello, world!\", \"([a-z]+)\", \"i\")\n if not iterator then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n local m\n m, err = iterator() -- m[0] == m[1] == \"hello\"\n if err then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n m, err = iterator() -- m[0] == m[1] == \"world\"\n if err then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n m, err = iterator() -- m == nil\n if err then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n endMore often we just put it into a Lua loop: local it, err = ngx.re.gmatch(\"hello, world!\", \"([a-z]+)\", \"i\")\n if not it then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n while true do\n local m, err = it()\n if err then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n if not m then\n -- no match found (any more)\n break\n end\n\n -- 
found a match\n ngx.say(m[0])\n ngx.say(m[1])\n endThe optional options argument takes exactly the same semantics as the ngx.re.match method.The current implementation requires that the iterator returned should only be used in a single request. That is, one should not assign it to a variable belonging to persistent namespace like a Lua package.This method requires the PCRE library enabled in Nginx (Known Issue With Special Escaping Sequences).This feature was first introduced in the v0.2.1rc12 release.", - "prefix": "ngx.re.gmatch", - "body": "local iterator, err = ngx.re.gmatch(${1:subject}, ${2:regex}, ${3:options?})" - }, - "ngx.re.sub": { - "description": "Substitutes the first match of the Perl compatible regular expression regex on the subject argument string with the string or function argument replace. The optional options argument has exactly the same meaning as in ngx.re.match.This method returns the resulting new string as well as the number of successful substitutions. In case of failures, like syntax errors in the regular expressions or the string argument, it will return nil and a string describing the error.When the replace is a string, then it is treated as a special template for string replacement. 
For example, local newstr, n, err = ngx.re.sub(\"hello, 1234\", \"([0-9])[0-9]\", \"[$0][$1]\")\n if not newstr then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n -- newstr == \"hello, [12][1]34\"\n -- n == 1where $0 referring to the whole substring matched by the pattern and $1 referring to the first parenthesized capturing substring.Curly braces can also be used to disambiguate variable names from the background string literals: local newstr, n, err = ngx.re.sub(\"hello, 1234\", \"[0-9]\", \"${0}00\")\n -- newstr == \"hello, 100234\"\n -- n == 1Literal dollar sign characters ($) in the replace string argument can be escaped by another dollar sign, for instance, local newstr, n, err = ngx.re.sub(\"hello, 1234\", \"[0-9]\", \"$$\")\n -- newstr == \"hello, $234\"\n -- n == 1Do not use backslashes to escape dollar signs; it will not work as expected.When the replace argument is of type \"function\", then it will be invoked with the \"match table\" as the argument to generate the replace string literal for substitution. The \"match table\" fed into the replace function is exactly the same as the return value of ngx.re.match. Here is an example: local func = function (m)\n return \"[\" .. m[0] .. \"][\" .. m[1] .. 
\"]\"\n end\n\n local newstr, n, err = ngx.re.sub(\"hello, 1234\", \"( [0-9] ) [0-9]\", func, \"x\")\n -- newstr == \"hello, [12][1]34\"\n -- n == 1The dollar sign characters in the return value of the replace function argument are not special at all.This method requires the PCRE library enabled in Nginx (Known Issue With Special Escaping Sequences).This feature was first introduced in the v0.2.1rc13 release.", - "prefix": "ngx.re.sub", - "body": "local newstr, n, err = ngx.re.sub(${1:subject}, ${2:regex}, ${3:replace}, ${4:options?})" - }, - "ngx.re.gsub": { - "description": "Just like ngx.re.sub, but does global substitution.Here are some examples: local newstr, n, err = ngx.re.gsub(\"hello, world\", \"([a-z])[a-z]+\", \"[$0,$1]\", \"i\")\n if not newstr then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n -- newstr == \"[hello,h], [world,w]\"\n -- n == 2 local func = function (m)\n return \"[\" .. m[0] .. \",\" .. m[1] .. \"]\"\n end\n local newstr, n, err = ngx.re.gsub(\"hello, world\", \"([a-z])[a-z]+\", func, \"i\")\n -- newstr == \"[hello,h], [world,w]\"\n -- n == 2This method requires the PCRE library enabled in Nginx (Known Issue With Special Escaping Sequences).This feature was first introduced in the v0.2.1rc15 release.", - "prefix": "ngx.re.gsub", - "body": "local newstr, n, err = ngx.re.gsub(${1:subject}, ${2:regex}, ${3:replace}, ${4:options?})" - }, - "ngx.shared.DICT": { - "description": "Fetching the shm-based Lua dictionary object for the shared memory zone named DICT defined by the lua_shared_dict directive.Shared memory zones are always shared by all the Nginx worker processes in the current Nginx server instance.The resulting object dict has the following methods:\nget\nget_stale\nset\nsafe_set\nadd\nsafe_add\nreplace\ndelete\nincr\nlpush\nrpush\nlpop\nrpop\nllen\nttl\nexpire\nflush_all\nflush_expired\nget_keys\ncapacity\nfree_space\nAll these methods are atomic operations, that is, safe from concurrent accesses from multiple Nginx 
worker processes for the same lua_shared_dict zone.Here is an example: http {\n lua_shared_dict dogs 10m;\n server {\n location /set {\n content_by_lua_block {\n local dogs = ngx.shared.dogs\n dogs:set(\"Jim\", 8)\n ngx.say(\"STORED\")\n }\n }\n location /get {\n content_by_lua_block {\n local dogs = ngx.shared.dogs\n ngx.say(dogs:get(\"Jim\"))\n }\n }\n }\n }Let us test it: $ curl localhost/set\n STORED\n\n $ curl localhost/get\n 8\n\n $ curl localhost/get\n 8The number 8 will be consistently output when accessing /get regardless of how many Nginx workers there are because the dogs dictionary resides in the shared memory and visible to all of the worker processes.The shared dictionary will retain its contents through a server config reload (either by sending the HUP signal to the Nginx process or by using the -s reload command-line option).The contents in the dictionary storage will be lost, however, when the Nginx server quits.This feature was first introduced in the v0.3.1rc22 release.", - "prefix": "ngx.shared.DICT", - "body": "local dict = ngx.shared[name_var]" - }, - "ngx.shared.DICT.get": { - "description": "Retrieving the value in the dictionary ngx.shared.DICT for the key key. 
If the key does not exist or has expired, then nil will be returned.In case of errors, nil and a string describing the error will be returned.The value returned will have the original data type when they were inserted into the dictionary, for example, Lua booleans, numbers, or strings.The first argument to this method must be the dictionary object itself, for example, local cats = ngx.shared.cats\n local value, flags = cats.get(cats, \"Marry\")or use Lua's syntactic sugar for method calls: local cats = ngx.shared.cats\n local value, flags = cats:get(\"Marry\")These two forms are fundamentally equivalent.If the user flags is 0 (the default), then no flags value will be returned.This feature was first introduced in the v0.3.1rc22 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.get", - "body": "local value, flags = ngx.shared.DICT:get(${1:key})" - }, - "ngx.shared.DICT.get_stale": { - "description": "Similar to the get method but returns the value even if the key has already expired.Returns a 3rd value, stale, indicating whether the key has expired or not.Note that the value of an expired key is not guaranteed to be available so one should never rely on the availability of expired items.This method was first introduced in the 0.8.6 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.get_stale", - "body": "local value, flags, stale = ngx.shared.DICT:get_stale(${1:key})" - }, - "ngx.shared.DICT.set": { - "description": "Unconditionally sets a key-value pair into the shm-based dictionary ngx.shared.DICT. Returns three values:\nsuccess: boolean value to indicate whether the key-value pair is stored or not.\nerr: textual error message, can be \"no memory\".\nforcible: a boolean value to indicate whether other valid items have been removed forcibly when out of storage in the shared memory zone.\nThe value argument inserted can be Lua booleans, numbers, strings, or nil. 
Their value type will also be stored into the dictionary and the same data type can be retrieved later via the get method.The optional exptime argument specifies expiration time (in seconds) for the inserted key-value pair. The time resolution is 0.001 seconds. If the exptime takes the value 0 (which is the default), then the item will never expire.The optional flags argument specifies a user flags value associated with the entry to be stored. It can also be retrieved later with the value. The user flags is stored as an unsigned 32-bit integer internally. Defaults to 0. The user flags argument was first introduced in the v0.5.0rc2 release.When it fails to allocate memory for the current key-value item, then set will try removing existing items in the storage according to the Least-Recently Used (LRU) algorithm. Note that, LRU takes priority over expiration time here. If up to tens of existing items have been removed and the storage left is still insufficient (either due to the total capacity limit specified by lua_shared_dict or memory segmentation), then the err return value will be no memory and success will be false.If the sizes of items in the dictionary are not multiples or even powers of a certain value (like 2), it is easier to encounter no memory error because of memory fragmentation. It is recommended to use different dictionaries for different sizes of items.When you encounter no memory error, you can also evict more least-recently-used items by retrying this method call more times to make room for the current item.If this method succeeds in storing the current item by forcibly removing other not-yet-expired items in the dictionary via LRU, the forcible return value will be true. 
If it stores the item without forcibly removing other valid items, then the return value forcible will be false.The first argument to this method must be the dictionary object itself, for example, local cats = ngx.shared.cats\n local succ, err, forcible = cats.set(cats, \"Marry\", \"it is a nice cat!\")or use Lua's syntactic sugar for method calls: local cats = ngx.shared.cats\n local succ, err, forcible = cats:set(\"Marry\", \"it is a nice cat!\")These two forms are fundamentally equivalent.This feature was first introduced in the v0.3.1rc22 release.Please note that while internally the key-value pair is set atomically, the atomicity does not go across the method call boundary.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.set", - "body": "local success, err, forcible = ngx.shared.DICT:set(${1:key}, ${2:value}, ${3:exptime?}, ${4:flags?})" - }, - "ngx.shared.DICT.safe_set": { - "description": "Similar to the set method, but never overrides the (least recently used) unexpired items in the store when running out of storage in the shared memory zone. 
In this case, it will immediately return nil and the string \"no memory\".This feature was first introduced in the v0.7.18 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.safe_set", - "body": "local ok, err = ngx.shared.DICT:safe_set(${1:key}, ${2:value}, ${3:exptime?}, ${4:flags?})" - }, - "ngx.shared.DICT.add": { - "description": "Just like the set method, but only stores the key-value pair into the dictionary ngx.shared.DICT if the key does not exist.If the key argument already exists in the dictionary (and not expired for sure), the success return value will be false and the err return value will be \"exists\".This feature was first introduced in the v0.3.1rc22 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.add", - "body": "local success, err, forcible = ngx.shared.DICT:add(${1:key}, ${2:value}, ${3:exptime?}, ${4:flags?})" - }, - "ngx.shared.DICT.safe_add": { - "description": "Similar to the add method, but never overrides the (least recently used) unexpired items in the store when running out of storage in the shared memory zone. 
In this case, it will immediately return nil and the string \"no memory\".This feature was first introduced in the v0.7.18 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.safe_add", - "body": "local ok, err = ngx.shared.DICT:safe_add(${1:key}, ${2:value}, ${3:exptime?}, ${4:flags?})" - }, - "ngx.shared.DICT.replace": { - "description": "Just like the set method, but only stores the key-value pair into the dictionary ngx.shared.DICT if the key does exist.If the key argument does not exist in the dictionary (or expired already), the success return value will be false and the err return value will be \"not found\".This feature was first introduced in the v0.3.1rc22 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.replace", - "body": "local success, err, forcible = ngx.shared.DICT:replace(${1:key}, ${2:value}, ${3:exptime?}, ${4:flags?})" - }, - "ngx.shared.DICT.delete": { - "description": "Unconditionally removes the key-value pair from the shm-based dictionary ngx.shared.DICT.It is equivalent to ngx.shared.DICT:set(key, nil).This feature was first introduced in the v0.3.1rc22 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.delete", - "body": "ngx.shared.DICT:delete(${1:key})" - }, - "ngx.shared.DICT.incr": { - "description": "optional requirement: resty.core.shdict or resty.coreIncrements the (numerical) value for key in the shm-based dictionary ngx.shared.DICT by the step value value. 
Returns the new resulting number if the operation is successfully completed or nil and an error message otherwise.When the key does not exist or has already expired in the shared dictionary,\nif the init argument is not specified or takes the value nil, this method will return nil and the error string \"not found\", or\nif the init argument takes a number value, this method will create a new key with the value init + value.\nLike the add method, it also overrides the (least recently used) unexpired items in the store when running out of storage in the shared memory zone.The optional init_ttl argument specifies expiration time (in seconds) of the value when it is initialized via the init argument. The time resolution is 0.001 seconds. If init_ttl takes the value 0 (which is the default), then the item will never expire. This argument cannot be provided without providing the init argument as well, and has no effect if the value already exists (e.g., if it was previously inserted via set or the likes).Note: Usage of the init_ttl argument requires the resty.core.shdict or resty.core modules from the lua-resty-core library. Example: require \"resty.core\"\n\n local cats = ngx.shared.cats\n local newval, err = cats:incr(\"black_cats\", 1, 0, 0.1)\n\n print(newval) -- 1\n\n ngx.sleep(0.2)\n\n local val, err = cats:get(\"black_cats\")\n print(val) -- nilThe forcible return value will always be nil when the init argument is not specified.If this method succeeds in storing the current item by forcibly removing other not-yet-expired items in the dictionary via LRU, the forcible return value will be true. 
If it stores the item without forcibly removing other valid items, then the return value forcible will be false.If the original value is not a valid Lua number in the dictionary, it will return nil and \"not a number\".The value argument and init argument can be any valid Lua numbers, like negative numbers or floating-point numbers.This method was first introduced in the v0.3.1rc22 release.The optional init parameter was first added in the v0.10.6 release.The optional init_ttl parameter was introduced in the v0.10.12rc2 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.incr", - "body": "local newval, err, forcible? = ngx.shared.DICT:incr(${1:key}, ${2:value}, ${3:init?}, ${4:init_ttl?})" - }, - "ngx.shared.DICT.lpush": { - "description": "Inserts the specified (numerical or string) value at the head of the list named key in the shm-based dictionary ngx.shared.DICT. Returns the number of elements in the list after the push operation.If key does not exist, it is created as an empty list before performing the push operation. When the key already takes a value that is not a list, it will return nil and \"value not a list\".It never overrides the (least recently used) unexpired items in the store when running out of storage in the shared memory zone. 
In this case, it will immediately return nil and the string \"no memory\".This feature was first introduced in the v0.10.6 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.lpush", - "body": "local length, err = ngx.shared.DICT:lpush(${1:key}, ${2:value})" - }, - "ngx.shared.DICT.rpush": { - "description": "Similar to the lpush method, but inserts the specified (numerical or string) value at the tail of the list named key.This feature was first introduced in the v0.10.6 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.rpush", - "body": "local length, err = ngx.shared.DICT:rpush(${1:key}, ${2:value})" - }, - "ngx.shared.DICT.lpop": { - "description": "Removes and returns the first element of the list named key in the shm-based dictionary ngx.shared.DICT.If key does not exist, it will return nil. When the key already takes a value that is not a list, it will return nil and \"value not a list\".This feature was first introduced in the v0.10.6 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.lpop", - "body": "local val, err = ngx.shared.DICT:lpop(${1:key})" - }, - "ngx.shared.DICT.rpop": { - "description": "Removes and returns the last element of the list named key in the shm-based dictionary ngx.shared.DICT.If key does not exist, it will return nil. When the key already takes a value that is not a list, it will return nil and \"value not a list\".This feature was first introduced in the v0.10.6 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.rpop", - "body": "local val, err = ngx.shared.DICT:rpop(${1:key})" - }, - "ngx.shared.DICT.llen": { - "description": "Returns the number of elements in the list named key in the shm-based dictionary ngx.shared.DICT.If key does not exist, it is interpreted as an empty list and 0 is returned. 
When the key already takes a value that is not a list, it will return nil and \"value not a list\".This feature was first introduced in the v0.10.6 release.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.llen", - "body": "local len, err = ngx.shared.DICT:llen(${1:key})" - }, - "ngx.shared.DICT.ttl": { - "description": "requires: resty.core.shdict or resty.coreRetrieves the remaining TTL (time-to-live in seconds) of a key-value pair in the shm-based dictionary ngx.shared.DICT. Returns the TTL as a number if the operation is successfully completed or nil and an error message otherwise.If the key does not exist (or has already expired), this method will return nil and the error string \"not found\".The TTL is originally determined by the exptime argument of the set, add, replace (and the likes) methods. It has a time resolution of 0.001 seconds. A value of 0 means that the item will never expire.Example: require \"resty.core\"\n\n local cats = ngx.shared.cats\n local succ, err = cats:set(\"Marry\", \"a nice cat\", 0.5)\n\n ngx.sleep(0.2)\n\n local ttl, err = cats:ttl(\"Marry\")\n ngx.say(ttl) -- 0.3This feature was first introduced in the v0.10.11 release.Note: This method requires the resty.core.shdict or resty.core modules from the lua-resty-core library.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.ttl", - "body": "local ttl, err = ngx.shared.DICT:ttl(${1:key})" - }, - "ngx.shared.DICT.expire": { - "description": "requires: resty.core.shdict or resty.coreUpdates the exptime (in second) of a key-value pair in the shm-based dictionary ngx.shared.DICT. Returns a boolean indicating success if the operation completes or nil and an error message otherwise.If the key does not exist, this method will return nil and the error string \"not found\".The exptime argument has a resolution of 0.001 seconds. 
If exptime is 0, then the item will never expire.Example: require \"resty.core\"\n\n local cats = ngx.shared.cats\n local succ, err = cats:set(\"Marry\", \"a nice cat\", 0.1)\n\n succ, err = cats:expire(\"Marry\", 0.5)\n\n ngx.sleep(0.2)\n\n local val, err = cats:get(\"Marry\")\n ngx.say(val) -- \"a nice cat\"This feature was first introduced in the v0.10.11 release.Note: This method requires the resty.core.shdict or resty.core modules from the lua-resty-core library.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.expire", - "body": "local success, err = ngx.shared.DICT:expire(${1:key}, ${2:exptime})" - }, - "ngx.shared.DICT.flush_all": { - "description": "Flushes out all the items in the dictionary. This method does not actually free up all the memory blocks in the dictionary but just marks all the existing items as expired.This feature was first introduced in the v0.5.0rc17 release.See also ngx.shared.DICT.flush_expired and ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.flush_all", - "body": "ngx.shared.DICT:flush_all()" - }, - "ngx.shared.DICT.flush_expired": { - "description": "Flushes out the expired items in the dictionary, up to the maximal number specified by the optional max_count argument. When the max_count argument is given 0 or not given at all, then it means unlimited. Returns the number of items that have actually been flushed.Unlike the flush_all method, this method actually frees up the memory used by the expired items.This feature was first introduced in the v0.6.3 release.See also ngx.shared.DICT.flush_all and ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.flush_expired", - "body": "local flushed = ngx.shared.DICT:flush_expired(${1:max_count?})" - }, - "ngx.shared.DICT.get_keys": { - "description": "Fetch a list of the keys from the dictionary, up to max_count.By default, only the first 1024 keys (if any) are returned. 
When the max_count argument is given the value 0, then all the keys will be returned even if there are more than 1024 keys in the dictionary.CAUTION Avoid calling this method on dictionaries with a very large number of keys as it may lock the dictionary for significant amount of time and block Nginx worker processes trying to access the dictionary.This feature was first introduced in the v0.7.3 release.", - "prefix": "ngx.shared.DICT.get_keys", - "body": "local keys = ngx.shared.DICT:get_keys(${1:max_count?})" - }, - "ngx.shared.DICT.capacity": { - "description": "requires: resty.core.shdict or resty.coreRetrieves the capacity in bytes for the shm-based dictionary ngx.shared.DICT declared with\nthe lua_shared_dict directive.Example: require \"resty.core.shdict\"\n\n local cats = ngx.shared.cats\n local capacity_bytes = cats:capacity()This feature was first introduced in the v0.10.11 release.Note: This method requires the resty.core.shdict or resty.core modules from the lua-resty-core library.This feature requires at least Nginx core version 0.7.3.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.capacity", - "body": "local capacity_bytes = ngx.shared.DICT:capacity()" - }, - "ngx.shared.DICT.free_space": { - "description": "requires: resty.core.shdict or resty.coreRetrieves the free page size in bytes for the shm-based dictionary ngx.shared.DICT.Note: The memory for ngx.shared.DICT is allocated via the Nginx slab allocator which has each slot for\ndata size ranges like ~8, 9~16, 17~32, ..., 1025~2048, 2048~ bytes. 
And pages are assigned to a slot if there\nis no room in already assigned pages for the slot.So even if the return value of the free_space method is zero, there may be room in already assigned pages, so\nyou may successfully set a new key value pair to the shared dict without getting true for forcible or\nnon nil err from the ngx.shared.DICT.set.On the other hand, if already assigned pages for a slot are full and a new key value pair is added to the\nslot and there is no free page, you may get true for forcible or non nil err from the\nngx.shared.DICT.set method.Example: require \"resty.core.shdict\"\n\n local cats = ngx.shared.cats\n local free_page_bytes = cats:free_space()This feature was first introduced in the v0.10.11 release.Note: This method requires the resty.core.shdict or resty.core modules from the lua-resty-core library.This feature requires at least Nginx core version 1.11.7.See also ngx.shared.DICT.", - "prefix": "ngx.shared.DICT.free_space", - "body": "local free_page_bytes = ngx.shared.DICT:free_space()" - }, - "ngx.socket.udp": { - "description": "Creates and returns a UDP or datagram-oriented unix domain socket object (also known as one type of the \"cosocket\" objects). The following methods are supported on this object:\nsetpeername\nsend\nreceive\nclose\nsettimeout\nIt is intended to be compatible with the UDP API of the LuaSocket library but is 100% nonblocking out of the box.This feature was first introduced in the v0.5.7 release.See also ngx.socket.tcp.", - "prefix": "ngx.socket.udp", - "body": "local udpsock = ngx.socket.udp()" - }, - "udpsock:setpeername": { - "description": "Attempts to connect a UDP socket object to a remote server or to a datagram unix domain socket file. 
Because the datagram protocol is actually connection-less, this method does not really establish a \"connection\", but only just set the name of the remote peer for subsequent read/write operations.Both IP addresses and domain names can be specified as the host argument. In case of domain names, this method will use Nginx core's dynamic resolver to parse the domain name without blocking and it is required to configure the resolver directive in the nginx.conf file like this: resolver 8.8.8.8; # use Google's public DNS nameserverIf the nameserver returns multiple IP addresses for the host name, this method will pick up one randomly.In case of error, the method returns nil followed by a string describing the error. In case of success, the method returns 1.Here is an example for connecting to a UDP (memcached) server: location /test {\n resolver 8.8.8.8;\n\n content_by_lua_block {\n local sock = ngx.socket.udp()\n local ok, err = sock:setpeername(\"my.memcached.server.domain\", 11211)\n if not ok then\n ngx.say(\"failed to connect to memcached: \", err)\n return\n end\n ngx.say(\"successfully connected to memcached!\")\n sock:close()\n }\n }Since the v0.7.18 release, connecting to a datagram unix domain socket file is also possible on Linux: local sock = ngx.socket.udp()\n local ok, err = sock:setpeername(\"unix:/tmp/some-datagram-service.sock\")\n if not ok then\n ngx.say(\"failed to connect to the datagram unix domain socket: \", err)\n return\n end\n\n -- do something after connect\n -- such as sock:send or sock:receiveassuming the datagram service is listening on the unix domain socket file /tmp/some-datagram-service.sock and the client socket will use the \"autobind\" feature on Linux.Calling this method on an already connected socket object will cause the original connection to be closed first.This method was first introduced in the v0.5.7 release.", - "prefix": "udpsock:setpeername", - "body": "local ok, err = 
udpsock:setpeername(\"unix:/path/to/unix-domain.socket\")" - }, - "udpsock:send": { - "description": "Sends data on the current UDP or datagram unix domain socket object.In case of success, it returns 1. Otherwise, it returns nil and a string describing the error.The input argument data can either be a Lua string or a (nested) Lua table holding string fragments. In case of table arguments, this method will copy all the string elements piece by piece to the underlying Nginx socket send buffers, which is usually optimal than doing string concatenation operations on the Lua land.This feature was first introduced in the v0.5.7 release.", - "prefix": "udpsock:send", - "body": "local ok, err = udpsock:send(${1:data})" - }, - "udpsock:receive": { - "description": "Receives data from the UDP or datagram unix domain socket object with an optional receive buffer size argument, size.This method is a synchronous operation and is 100% nonblocking.In case of success, it returns the data received; in case of error, it returns nil with a string describing the error.If the size argument is specified, then this method will use this size as the receive buffer size. But when this size is greater than 8192, then 8192 will be used instead.If no argument is specified, then the maximal buffer size, 8192 is assumed.Timeout for the reading operation is controlled by the lua_socket_read_timeout config directive and the settimeout method. And the latter takes priority. 
For example: sock:settimeout(1000) -- one second timeout\n local data, err = sock:receive()\n if not data then\n ngx.say(\"failed to read a packet: \", err)\n return\n end\n ngx.say(\"successfully read a packet: \", data)It is important here to call the settimeout method before calling this method.This feature was first introduced in the v0.5.7 release.", - "prefix": "udpsock:receive", - "body": "local data, err = udpsock:receive(${1:size?})" - }, - "udpsock:close": { - "description": "Closes the current UDP or datagram unix domain socket. It returns the 1 in case of success and returns nil with a string describing the error otherwise.Socket objects that have not invoked this method (and associated connections) will be closed when the socket object is released by the Lua GC (Garbage Collector) or the current client HTTP request finishes processing.This feature was first introduced in the v0.5.7 release.", - "prefix": "udpsock:close", - "body": "local ok, err = udpsock:close()" - }, - "udpsock:settimeout": { - "description": "Set the timeout value in milliseconds for subsequent socket operations (like receive).Settings done by this method takes priority over those config directives, like lua_socket_read_timeout.This feature was first introduced in the v0.5.7 release.", - "prefix": "udpsock:settimeout", - "body": "udpsock:settimeout(${1:time})" - }, - "ngx.socket.stream": { - "description": "Just an alias to ngx.socket.tcp. If the stream-typed cosocket may also connect to a unix domain\nsocket, then this API name is preferred.This API function was first added to the v0.10.1 release.", - "prefix": "ngx.socket.stream", - "body": "ngx.socket.stream" - }, - "ngx.socket.tcp": { - "description": "Creates and returns a TCP or stream-oriented unix domain socket object (also known as one type of the \"cosocket\" objects). 
The following methods are supported on this object:\nbind\nconnect\nsetclientcert\nsslhandshake\nsend\nreceive\nclose\nsettimeout\nsettimeouts\nsetoption\nreceiveany\nreceiveuntil\nsetkeepalive\ngetreusedtimes\nIt is intended to be compatible with the TCP API of the LuaSocket library but is 100% nonblocking out of the box. Also, we introduce some new APIs to provide more functionalities.The cosocket object created by this API function has exactly the same lifetime as the Lua handler creating it. So never pass the cosocket object to any other Lua handler (including ngx.timer callback functions) and never share the cosocket object between different Nginx requests.For every cosocket object's underlying connection, if you do not\nexplicitly close it (via close) or put it back to the connection\npool (via setkeepalive), then it is automatically closed when one of\nthe following two events happens:\nthe current request handler completes, or\nthe Lua cosocket object value gets collected by the Lua GC.\nFatal errors in cosocket operations always automatically close the current\nconnection (note that, read timeout error is the only error that is\nnot fatal), and if you call close on a closed connection, you will get\nthe \"closed\" error.Starting from the 0.9.9 release, the cosocket object here is full-duplex, that is, a reader \"light thread\" and a writer \"light thread\" can operate on a single cosocket object simultaneously (both \"light threads\" must belong to the same Lua handler though, see reasons above). 
But you cannot have two \"light threads\" both reading (or writing or connecting) the same cosocket, otherwise you might get an error like \"socket busy reading\" when calling the methods of the cosocket object.This feature was first introduced in the v0.5.0rc1 release.See also ngx.socket.udp.", - "prefix": "ngx.socket.tcp", - "body": "local tcpsock = ngx.socket.tcp()" - }, - "tcpsock:bind": { - "description": "Just like the standard proxy_bind directive, this api makes the outgoing connection to a upstream server originate from the specified local IP address.Only IP addresses can be specified as the address argument.Here is an example for connecting to a TCP server from the specified local IP address: location /test {\n content_by_lua_block {\n local sock = ngx.socket.tcp()\n -- assume \"192.168.1.10\" is the local ip address\n local ok, err = sock:bind(\"192.168.1.10\")\n if not ok then\n ngx.say(\"failed to bind\")\n return\n end\n local ok, err = sock:connect(\"192.168.1.67\", 80)\n if not ok then\n ngx.say(\"failed to connect server: \", err)\n return\n end\n ngx.say(\"successfully connected!\")\n sock:close()\n }\n }", - "prefix": "tcpsock:bind", - "body": "local ok, err = tcpsock:bind(${1:address})" - }, - "tcpsock:connect": { - "description": "Attempts to connect a TCP socket object to a remote server or to a stream unix domain socket file without blocking.Before actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method (or the ngx.socket.connect function).Both IP addresses and domain names can be specified as the host argument. 
In case of domain names, this method will use Nginx core's dynamic resolver to parse the domain name without blocking and it is required to configure the resolver directive in the nginx.conf file like this: resolver 8.8.8.8; # use Google's public DNS nameserverIf the nameserver returns multiple IP addresses for the host name, this method will pick up one randomly.In case of error, the method returns nil followed by a string describing the error. In case of success, the method returns 1.Here is an example for connecting to a TCP server: location /test {\n resolver 8.8.8.8;\n\n content_by_lua_block {\n local sock = ngx.socket.tcp()\n local ok, err = sock:connect(\"www.google.com\", 80)\n if not ok then\n ngx.say(\"failed to connect to google: \", err)\n return\n end\n ngx.say(\"successfully connected to google!\")\n sock:close()\n }\n }Connecting to a Unix Domain Socket file is also possible: local sock = ngx.socket.tcp()\n local ok, err = sock:connect(\"unix:/tmp/memcached.sock\")\n if not ok then\n ngx.say(\"failed to connect to the memcached unix domain socket: \", err)\n return\n end\n\n -- do something after connect\n -- such as sock:send or sock:receiveassuming memcached (or something else) is listening on the unix domain socket file /tmp/memcached.sock.Timeout for the connecting operation is controlled by the lua_socket_connect_timeout config directive and the settimeout method. And the latter takes priority. For example: local sock = ngx.socket.tcp()\n sock:settimeout(1000) -- one second timeout\n local ok, err = sock:connect(host, port)It is important here to call the settimeout method before calling this method.Calling this method on an already connected socket object will cause the original connection to be closed first.An optional Lua table can be specified as the last argument to this method to specify various connect options:\n\npool\nspecify a custom name for the connection pool being used. 
If omitted, then the connection pool name will be generated from the string template \":\" or \"\".\n\n\npool_size\nspecify the size of the connection pool. If omitted and no\nbacklog option was provided, no pool will be created. If omitted\nbut backlog was provided, the pool will be created with a default\nsize equal to the value of the lua_socket_pool_size\ndirective.\nThe connection pool holds up to pool_size alive connections\nready to be reused by subsequent calls to connect, but\nnote that there is no upper limit to the total number of opened connections\noutside of the pool. If you need to restrict the total number of opened\nconnections, specify the backlog option.\nWhen the connection pool would exceed its size limit, the least recently used\n(kept-alive) connection already in the pool will be closed to make room for\nthe current connection.\nNote that the cosocket connection pool is per Nginx worker process rather\nthan per Nginx server instance, so the size limit specified here also applies\nto every single Nginx worker process. Also note that the size of the connection\npool cannot be changed once it has been created.\nThis option was first introduced in the v0.10.14 release.\n\n\nbacklog\nif specified, this module will limit the total number of opened connections\nfor this pool. No more connections than pool_size can be opened\nfor this pool at any time. 
If the connection pool is full, subsequent\nconnect operations will be queued into a queue equal to this option's\nvalue (the \"backlog\" queue).\nIf the number of queued connect operations is equal to backlog,\nsubsequent connect operations will fail and return nil plus the\nerror string \"too many waiting connect operations\".\nThe queued connect operations will be resumed once the number of connections\nin the pool is less than pool_size.\nThe queued connect operation will abort once they have been queued for more\nthan connect_timeout, controlled by\nsettimeouts, and will return nil plus\nthe error string \"timeout\".\nThis option was first introduced in the v0.10.14 release.\n\nThe support for the options table argument was first introduced in the v0.5.7 release.This method was first introduced in the v0.5.0rc1 release.", - "prefix": "tcpsock:connect", - "body": "local ok, err = tcpsock:connect(\"unix:/path/to/unix-domain.socket\", options_table?)" - }, - "tcpsock:setclientcert": { - "description": "Set client certificate chain and corresponding private key to the TCP socket object.\nThe certificate chain and private key provided will be used later by the tcpsock:sslhandshake method.\ncert specify a client certificate chain cdata object that will be used while handshaking with\nremote server. These objects can be created using ngx.ssl.parse_pem_cert\nfunction provided by lua-resty-core. Note that specifying the cert option requires\ncorresponding pkey be provided too. 
See below.\npkey specify a private key corresponds to the cert option above.\nThese objects can be created using ngx.ssl.parse_pem_priv_key\nfunction provided by lua-resty-core.\nIf both of cert and pkey are nil, this method will clear any existing client certificate and private key\nthat was previously set on the cosocket object.This method was first introduced in the v0.10.22 release.", - "prefix": "tcpsock:setclientcert", - "body": "local ok, err = tcpsock:setclientcert(${1:cert}, ${2:pkey})" - }, - "tcpsock:sslhandshake": { - "description": "Does SSL/TLS handshake on the currently established connection.The optional reused_session argument can take a former SSL\nsession userdata returned by a previous sslhandshake\ncall for exactly the same target. For short-lived connections, reusing SSL\nsessions can usually speed up the handshake by one order by magnitude but it\nis not so useful if the connection pool is enabled. This argument defaults to\nnil. If this argument takes the boolean false value, no SSL session\nuserdata would return by this call and only a Lua boolean will be returned as\nthe first return value; otherwise the current SSL session will\nalways be returned as the first argument in case of successes.The optional server_name argument is used to specify the server\nname for the new TLS extension Server Name Indication (SNI). Use of SNI can\nmake different servers share the same IP address on the server side. Also,\nwhen SSL verification is enabled, this server_name argument is\nalso used to validate the server name specified in the server certificate sent from\nthe remote.The optional ssl_verify argument takes a Lua boolean value to\ncontrol whether to perform SSL verification. 
When set to true, the server\ncertificate will be verified according to the CA certificates specified by\nthe lua_ssl_trusted_certificate directive.\nYou may also need to adjust the lua_ssl_verify_depth\ndirective to control how deep we should follow along the certificate chain.\nAlso, when the ssl_verify argument is true and the\nserver_name argument is also specified, the latter will be used\nto validate the server name in the server certificate.The optional send_status_req argument takes a boolean that controls whether to send\nthe OCSP status request in the SSL handshake request (which is for requesting OCSP stapling).For connections that have already done SSL/TLS handshake, this method returns\nimmediately.This method was first introduced in the v0.9.11 release.", - "prefix": "tcpsock:sslhandshake", - "body": "local session, err = tcpsock:sslhandshake(${1:reused_session?}, ${2:server_name?}, ${3:ssl_verify?}, ${4:send_status_req?})" - }, - "tcpsock:send": { - "description": "Sends data without blocking on the current TCP or Unix Domain Socket connection.This method is a synchronous operation that will not return until all the data has been flushed into the system socket send buffer or an error occurs.In case of success, it returns the total number of bytes that have been sent. Otherwise, it returns nil and a string describing the error.The input argument data can either be a Lua string or a (nested) Lua table holding string fragments. In case of table arguments, this method will copy all the string elements piece by piece to the underlying Nginx socket send buffers, which is usually optimal than doing string concatenation operations on the Lua land.Timeout for the sending operation is controlled by the lua_socket_send_timeout config directive and the settimeout method. And the latter takes priority. 
For example: sock:settimeout(1000) -- one second timeout\n local bytes, err = sock:send(request)It is important here to call the settimeout method before calling this method.In case of any connection errors, this method always automatically closes the current connection.This feature was first introduced in the v0.5.0rc1 release.", - "prefix": "tcpsock:send", - "body": "local bytes, err = tcpsock:send(${1:data})" - }, - "tcpsock:receive": { - "description": "Receives data from the connected socket according to the reading pattern or size.This method is a synchronous operation just like the send method and is 100% nonblocking.In case of success, it returns the data received; in case of error, it returns nil with a string describing the error and the partial data received so far.If a number-like argument is specified (including strings that look like numbers), then it is interpreted as a size. This method will not return until it reads exactly this size of data or an error occurs.If a non-number-like string argument is specified, then it is interpreted as a \"pattern\". The following patterns are supported:\n'*a': reads from the socket until the connection is closed. No end-of-line translation is performed;\n'*l': reads a line of text from the socket. The line is terminated by a Line Feed (LF) character (ASCII 10), optionally preceded by a Carriage Return (CR) character (ASCII 13). The CR and LF characters are not included in the returned line. In fact, all CR characters are ignored by the pattern.\nIf no argument is specified, then it is assumed to be the pattern '*l', that is, the line reading pattern.Timeout for the reading operation is controlled by the lua_socket_read_timeout config directive and the settimeout method. And the latter takes priority. 
For example: sock:settimeout(1000) -- one second timeout\n local line, err, partial = sock:receive()\n if not line then\n ngx.say(\"failed to read a line: \", err)\n return\n end\n ngx.say(\"successfully read a line: \", line)It is important here to call the settimeout method before calling this method.Since the v0.8.8 release, this method no longer automatically closes the current connection when the read timeout error happens. For other connection errors, this method always automatically closes the connection.This feature was first introduced in the v0.5.0rc1 release.", - "prefix": "tcpsock:receive", - "body": "local data, err, partial = tcpsock:receive(${1:pattern?})" - }, - "tcpsock:receiveany": { - "description": "Returns any data received by the connected socket, at most max bytes.This method is a synchronous operation just like the send method and is 100% nonblocking.In case of success, it returns the data received; in case of error, it returns nil with a string describing the error.If the received data is more than this size, this method will return with exactly this size of data.\nThe remaining data in the underlying receive buffer could be returned in the next reading operation.Timeout for the reading operation is controlled by the lua_socket_read_timeout config directive and the settimeouts method. And the latter takes priority. For example: sock:settimeouts(1000, 1000, 1000) -- one second timeout for connect/read/write\n local data, err = sock:receiveany(10 * 1024) -- read any data, at most 10K\n if not data then\n ngx.say(\"failed to read any data: \", err)\n return\n end\n ngx.say(\"successfully read: \", data)This method doesn't automatically close the current connection when the read timeout error occurs. 
For other connection errors, this method always automatically closes the connection.This feature was first introduced in the v0.10.14 release.", - "prefix": "tcpsock:receiveany", - "body": "local data, err = tcpsock:receiveany(${1:max})" - }, - "tcpsock:receiveuntil": { - "description": "This method returns an iterator Lua function that can be called to read the data stream until it sees the specified pattern or an error occurs.Here is an example for using this method to read a data stream with the boundary sequence --abcedhb: local reader = sock:receiveuntil(\"\\r\\n--abcedhb\")\n local data, err, partial = reader()\n if not data then\n ngx.say(\"failed to read the data stream: \", err)\n end\n ngx.say(\"read the data stream: \", data)When called without any argument, the iterator function returns the received data right before the specified pattern string in the incoming data stream. So for the example above, if the incoming data stream is 'hello, world! -agentzh\\r\\n--abcedhb blah blah', then the string 'hello, world! -agentzh' will be returned.In case of error, the iterator function will return nil along with a string describing the error and the partial data bytes that have been read so far.The iterator function can be called multiple times and can be mixed safely with other cosocket method calls or other iterator function calls.The iterator function behaves differently (i.e., like a real iterator) when it is called with a size argument. That is, it will read that size of data on each invocation and will return nil at the last invocation (either sees the boundary pattern or meets an error). For the last successful invocation of the iterator function, the err return value will be nil too. The iterator function will be reset after the last successful invocation that returns nil data and nil error. 
Consider the following example: local reader = sock:receiveuntil(\"\\r\\n--abcedhb\")\n\n while true do\n local data, err, partial = reader(4)\n if not data then\n if err then\n ngx.say(\"failed to read the data stream: \", err)\n break\n end\n\n ngx.say(\"read done\")\n break\n end\n ngx.say(\"read chunk: [\", data, \"]\")\n endThen for the incoming data stream 'hello, world! -agentzh\\r\\n--abcedhb blah blah', we shall get the following output from the sample code above:read chunk: [hell]\nread chunk: [o, w]\nread chunk: [orld]\nread chunk: [! -a]\nread chunk: [gent]\nread chunk: [zh]\nread done\nNote that, the actual data returned might be a little longer than the size limit specified by the size argument when the boundary pattern has ambiguity for streaming parsing. Near the boundary of the data stream, the data string actually returned could also be shorter than the size limit.Timeout for the iterator function's reading operation is controlled by the lua_socket_read_timeout config directive and the settimeout method. And the latter takes priority. For example: local readline = sock:receiveuntil(\"\\r\\n\")\n\n sock:settimeout(1000) -- one second timeout\n line, err, partial = readline()\n if not line then\n ngx.say(\"failed to read a line: \", err)\n return\n end\n ngx.say(\"successfully read a line: \", line)It is important here to call the settimeout method before calling the iterator function (note that the receiveuntil call is irrelevant here).As from the v0.5.1 release, this method also takes an optional options table argument to control the behavior. The following options are supported:\ninclusive\nThe inclusive takes a boolean value to control whether to include the pattern string in the returned data string. Default to false. 
For example, local reader = tcpsock:receiveuntil(\"_END_\", { inclusive = true })\n local data = reader()\n ngx.say(data)Then for the input data stream \"hello world _END_ blah blah blah\", then the example above will output hello world _END_, including the pattern string _END_ itself.Since the v0.8.8 release, this method no longer automatically closes the current connection when the read timeout error happens. For other connection errors, this method always automatically closes the connection.This method was first introduced in the v0.5.0rc1 release.", - "prefix": "tcpsock:receiveuntil", - "body": "local iterator = tcpsock:receiveuntil(${1:pattern}, ${2:options?})" - }, - "tcpsock:close": { - "description": "Closes the current TCP or stream unix domain socket. It returns the 1 in case of success and returns nil with a string describing the error otherwise.Note that there is no need to call this method on socket objects that have invoked the setkeepalive method because the socket object is already closed (and the current connection is saved into the built-in connection pool).Socket objects that have not invoked this method (and associated connections) will be closed when the socket object is released by the Lua GC (Garbage Collector) or the current client HTTP request finishes processing.This feature was first introduced in the v0.5.0rc1 release.", - "prefix": "tcpsock:close", - "body": "local ok, err = tcpsock:close()" - }, - "tcpsock:settimeout": { - "description": "Set the timeout value in milliseconds for subsequent socket operations (connect, receive, and iterators returned from receiveuntil).Settings done by this method take priority over those specified via config directives (i.e. 
lua_socket_connect_timeout, lua_socket_send_timeout, and lua_socket_read_timeout).Note that this method does not affect the lua_socket_keepalive_timeout setting; the timeout argument to the setkeepalive method should be used for this purpose instead.This feature was first introduced in the v0.5.0rc1 release.", - "prefix": "tcpsock:settimeout", - "body": "tcpsock:settimeout(${1:time})" - }, - "tcpsock:settimeouts": { - "description": "Respectively sets the connect, send, and read timeout thresholds (in milliseconds) for subsequent socket\noperations (connect, send, receive, and iterators returned from receiveuntil).Settings done by this method take priority over those specified via config directives (i.e. lua_socket_connect_timeout, lua_socket_send_timeout, and lua_socket_read_timeout).It is recommended to use settimeouts instead of settimeout.Note that this method does not affect the lua_socket_keepalive_timeout setting; the timeout argument to the setkeepalive method should be used for this purpose instead.This feature was first introduced in the v0.10.7 release.", - "prefix": "tcpsock:settimeouts", - "body": "tcpsock:settimeouts(${1:connect_timeout}, ${2:send_timeout}, ${3:read_timeout})" - }, - "tcpsock:setoption": { - "description": "This function is added for LuaSocket API compatibility, its functionality is implemented v0.10.18.This feature was first introduced in the v0.5.0rc1 release.In case of success, it returns true. Otherwise, it returns nil and a string describing the error.The option is a string with the option name, and the value depends on the option being set:\n\nkeepalive\nSetting this option to true enables sending of keep-alive messages on\nconnection-oriented sockets. 
Make sure the connect function\nhad been called before, for example,\nlocal ok, err = tcpsock:setoption(\"keepalive\", true)\nif not ok then\n ngx.say(\"setoption keepalive failed: \", err)\nend\n\n\nreuseaddr\nEnabling this option indicates that the rules used in validating addresses\nsupplied in a call to bind should allow reuse of local addresses. Make sure\nthe connect function had been called before, for example,\nlocal ok, err = tcpsock:setoption(\"reuseaddr\", 0)\nif not ok then\n ngx.say(\"setoption reuseaddr failed: \", err)\nend\n\n\ntcp-nodelay\nSetting this option to true disables the Nagle's algorithm for the connection.\nMake sure the connect function had been called before, for example,\nlocal ok, err = tcpsock:setoption(\"tcp-nodelay\", true)\nif not ok then\n ngx.say(\"setoption tcp-nodelay failed: \", err)\nend\n\n\nsndbuf\nSets the maximum socket send buffer in bytes. The kernel doubles this value\n(to allow space for bookkeeping overhead) when it is set using setsockopt().\nMake sure the connect function had been called before, for example,\nlocal ok, err = tcpsock:setoption(\"sndbuf\", 1024 * 10)\nif not ok then\n ngx.say(\"setoption sndbuf failed: \", err)\nend\n\n\nrcvbuf\nSets the maximum socket receive buffer in bytes. The kernel doubles this value\n(to allow space for bookkeeping overhead) when it is set using setsockopt. Make\nsure the connect function had been called before, for example,\nlocal ok, err = tcpsock:setoption(\"rcvbuf\", 1024 * 10)\nif not ok then\n ngx.say(\"setoption rcvbuf failed: \", err)\nend\n\nNOTE: Once the option is set, it will become effective until the connection is closed. 
If you know the connection is from the connection pool and all the in-pool connections already have called the setoption() method with the desired socket option state, then you can just skip calling setoption() again to avoid the overhead of repeated calls, for example, local count, err = tcpsock:getreusedtimes()\n if not count then\n ngx.say(\"getreusedtimes failed: \", err)\n return\n end\n\n if count == 0 then\n local ok, err = tcpsock:setoption(\"rcvbuf\", 1024 * 10)\n if not ok then\n ngx.say(\"setoption rcvbuf failed: \", err)\n return\n end\n endThese options described above are supported in v0.10.18, and more options will be implemented in future.", - "prefix": "tcpsock:setoption", - "body": "local ok, err = tcpsock:setoption(${1:option}, ${2:value?})" - }, - "tcpsock:setkeepalive": { - "description": "Puts the current socket's connection immediately into the cosocket built-in connection pool and keep it alive until other connect method calls request it or the associated maximal idle timeout is expired.The first optional argument, timeout, can be used to specify the maximal idle timeout (in milliseconds) for the current connection. If omitted, the default setting in the lua_socket_keepalive_timeout config directive will be used. 
If the 0 value is given, then the timeout interval is unlimited.The second optional argument size is considered deprecated since\nthe v0.10.14 release of this module, in favor of the\npool_size option of the connect method.\nSince the v0.10.14 release, this option will only take effect if\nthe call to connect did not already create a connection\npool.\nWhen this option takes effect (no connection pool was previously created by\nconnect), it will specify the size of the connection pool,\nand create it.\nIf omitted (and no pool was previously created), the default size is the value\nof the lua_socket_pool_size directive.\nThe connection pool holds up to size alive connections ready to be\nreused by subsequent calls to connect, but note that there\nis no upper limit to the total number of opened connections outside of the\npool.\nWhen the connection pool would exceed its size limit, the least recently used\n(kept-alive) connection already in the pool will be closed to make room for\nthe current connection.\nNote that the cosocket connection pool is per Nginx worker process rather\nthan per Nginx server instance, so the size limit specified here also applies\nto every single Nginx worker process. 
Also note that the size of the connection\npool cannot be changed once it has been created.\nIf you need to restrict the total number of opened connections, specify both\nthe pool_size and backlog option in the call to\nconnect.In case of success, this method returns 1; otherwise, it returns nil and a string describing the error.When the system receive buffer for the current connection has unread data, then this method will return the \"connection in dubious state\" error message (as the second return value) because the previous session has unread data left behind for the next session and the connection is not safe to be reused.This method also makes the current cosocket object enter the \"closed\" state, so there is no need to manually call the close method on it afterwards.This feature was first introduced in the v0.5.0rc1 release.", - "prefix": "tcpsock:setkeepalive", - "body": "local ok, err = tcpsock:setkeepalive(${1:timeout?}, ${2:size?})" - }, - "tcpsock:getreusedtimes": { - "description": "This method returns the (successfully) reused times for the current connection. In case of error, it returns nil and a string describing the error.If the current connection does not come from the built-in connection pool, then this method always returns 0, that is, the connection has never been reused (yet). If the connection comes from the connection pool, then the return value is always non-zero. So this method can also be used to determine if the current connection comes from the pool.This feature was first introduced in the v0.5.0rc1 release.", - "prefix": "tcpsock:getreusedtimes", - "body": "local count, err = tcpsock:getreusedtimes()" - }, - "ngx.socket.connect": { - "description": "This function is a shortcut for combining ngx.socket.tcp() and the connect() method call in a single operation. 
It is actually implemented like this: local sock = ngx.socket.tcp()\n local ok, err = sock:connect(...)\n if not ok then\n return nil, err\n end\n return sockThere is no way to use the settimeout method to specify connecting timeout for this method and the lua_socket_connect_timeout directive must be set at configure time instead.This feature was first introduced in the v0.5.0rc1 release.", - "prefix": "ngx.socket.connect", - "body": "local tcpsock, err = ngx.socket.connect(\"unix:/path/to/unix-domain.socket\")" - }, - "ngx.get_phase": { - "description": "Retrieves the current running phase name. Possible return values are\ninit\nfor the context of init_by_lua*.\ninit_worker\nfor the context of init_worker_by_lua*.\nssl_cert\nfor the context of ssl_certificate_by_lua*.\nssl_session_fetch\nfor the context of ssl_session_fetch_by_lua*.\nssl_session_store\nfor the context of ssl_session_store_by_lua*.\nssl_client_hello\nfor the context of ssl_client_hello_by_lua*.\nset\nfor the context of set_by_lua*.\nrewrite\nfor the context of rewrite_by_lua*.\nbalancer\nfor the context of balancer_by_lua*.\naccess\nfor the context of access_by_lua*.\ncontent\nfor the context of content_by_lua*.\nheader_filter\nfor the context of header_filter_by_lua*.\nbody_filter\nfor the context of body_filter_by_lua*.\nlog\nfor the context of log_by_lua*.\ntimer\nfor the context of user callback functions for ngx.timer.*.\nexit_worker\nfor the context of exit_worker_by_lua*.\nThis API was first introduced in the v0.5.10 release.", - "prefix": "ngx.get_phase", - "body": "local str = ngx.get_phase()" - }, - "ngx.thread.spawn": { - "description": "Spawns a new user \"light thread\" with the Lua function func as well as those optional arguments arg1, arg2, and etc. 
Returns a Lua thread (or Lua coroutine) object represents this \"light thread\".\"Light threads\" are just a special kind of Lua coroutines that are scheduled by the ngx_lua module.Before ngx.thread.spawn returns, the func will be called with those optional arguments until it returns, aborts with an error, or gets yielded due to I/O operations via the Nginx API for Lua (like tcpsock:receive).After ngx.thread.spawn returns, the newly-created \"light thread\" will keep running asynchronously usually at various I/O events.All the Lua code chunks running by rewrite_by_lua, access_by_lua, and content_by_lua are in a boilerplate \"light thread\" created automatically by ngx_lua. Such boilerplate \"light thread\" are also called \"entry threads\".By default, the corresponding Nginx handler (e.g., rewrite_by_lua handler) will not terminate until\nboth the \"entry thread\" and all the user \"light threads\" terminates,\na \"light thread\" (either the \"entry thread\" or a user \"light thread\") aborts by calling ngx.exit, ngx.exec, ngx.redirect, or ngx.req.set_uri(uri, true), or\nthe \"entry thread\" terminates with a Lua error.\nWhen the user \"light thread\" terminates with a Lua error, however, it will not abort other running \"light threads\" like the \"entry thread\" does.Due to the limitation in the Nginx subrequest model, it is not allowed to abort a running Nginx subrequest in general. So it is also prohibited to abort a running \"light thread\" that is pending on one ore more Nginx subrequests. You must call ngx.thread.wait to wait for those \"light thread\" to terminate before quitting the \"world\". A notable exception here is that you can abort pending subrequests by calling ngx.exit with and only with the status code ngx.ERROR (-1), 408, 444, or 499.The \"light threads\" are not scheduled in a pre-emptive way. In other words, no time-slicing is performed automatically. 
A \"light thread\" will keep running exclusively on the CPU until\na (nonblocking) I/O operation cannot be completed in a single run,\nit calls coroutine.yield to actively give up execution, or\nit is aborted by a Lua error or an invocation of ngx.exit, ngx.exec, ngx.redirect, or ngx.req.set_uri(uri, true).\nFor the first two cases, the \"light thread\" will usually be resumed later by the ngx_lua scheduler unless a \"stop-the-world\" event happens.User \"light threads\" can create \"light threads\" themselves. And normal user coroutines created by coroutine.create can also create \"light threads\". The coroutine (be it a normal Lua coroutine or a \"light thread\") that directly spawns the \"light thread\" is called the \"parent coroutine\" for the \"light thread\" newly spawned.The \"parent coroutine\" can call ngx.thread.wait to wait on the termination of its child \"light thread\".You can call coroutine.status() and coroutine.yield() on the \"light thread\" coroutines.The status of the \"light thread\" coroutine can be \"zombie\" if\nthe current \"light thread\" already terminates (either successfully or with an error),\nits parent coroutine is still alive, and\nits parent coroutine is not waiting on it with ngx.thread.wait.\nThe following example demonstrates the use of coroutine.yield() in the \"light thread\" coroutines\nto do manual time-slicing: local yield = coroutine.yield\n\n function f()\n local self = coroutine.running()\n ngx.say(\"f 1\")\n yield(self)\n ngx.say(\"f 2\")\n yield(self)\n ngx.say(\"f 3\")\n end\n\n local self = coroutine.running()\n ngx.say(\"0\")\n yield(self)\n\n ngx.say(\"1\")\n ngx.thread.spawn(f)\n\n ngx.say(\"2\")\n yield(self)\n\n ngx.say(\"3\")\n yield(self)\n\n ngx.say(\"4\")Then it will generate the output0\n1\nf 1\n2\nf 2\n3\nf 3\n4\n\"Light threads\" are mostly useful for making concurrent upstream requests in a single Nginx request handler, much like a generalized version of ngx.location.capture_multi that can work with all 
the Nginx API for Lua. The following example demonstrates parallel requests to MySQL, Memcached, and upstream HTTP services in a single Lua handler, and outputting the results in the order that they actually return (similar to Facebook's BigPipe model): -- query mysql, memcached, and a remote http service at the same time,\n -- output the results in the order that they\n -- actually return the results.\n\n local mysql = require \"resty.mysql\"\n local memcached = require \"resty.memcached\"\n\n local function query_mysql()\n local db = mysql:new()\n db:connect{\n host = \"127.0.0.1\",\n port = 3306,\n database = \"test\",\n user = \"monty\",\n password = \"mypass\"\n }\n local res, err, errno, sqlstate =\n db:query(\"select * from cats order by id asc\")\n db:set_keepalive(0, 100)\n ngx.say(\"mysql done: \", cjson.encode(res))\n end\n\n local function query_memcached()\n local memc = memcached:new()\n memc:connect(\"127.0.0.1\", 11211)\n local res, err = memc:get(\"some_key\")\n ngx.say(\"memcached done: \", res)\n end\n\n local function query_http()\n local res = ngx.location.capture(\"/my-http-proxy\")\n ngx.say(\"http done: \", res.body)\n end\n\n ngx.thread.spawn(query_mysql) -- create thread 1\n ngx.thread.spawn(query_memcached) -- create thread 2\n ngx.thread.spawn(query_http) -- create thread 3This API was first enabled in the v0.7.0 release.", - "prefix": "ngx.thread.spawn", - "body": "local co = ngx.thread.spawn(${1:func}, ${2:arg1}, ${3:arg2}, ...)" - }, - "ngx.thread.wait": { - "description": "Waits on one or more child \"light threads\" and returns the results of the first \"light thread\" that terminates (either successfully or with an error).The arguments thread1, thread2, and etc are the Lua thread objects returned by earlier calls of ngx.thread.spawn.The return values have exactly the same meaning as coroutine.resume, that is, the first value returned is a boolean value indicating whether the \"light thread\" terminates successfully or not, and 
subsequent values returned are the return values of the user Lua function that was used to spawn the \"light thread\" (in case of success) or the error object (in case of failure).Only the direct \"parent coroutine\" can wait on its child \"light thread\", otherwise a Lua exception will be raised.The following example demonstrates the use of ngx.thread.wait and ngx.location.capture to emulate ngx.location.capture_multi: local capture = ngx.location.capture\n local spawn = ngx.thread.spawn\n local wait = ngx.thread.wait\n local say = ngx.say\n\n local function fetch(uri)\n return capture(uri)\n end\n\n local threads = {\n spawn(fetch, \"/foo\"),\n spawn(fetch, \"/bar\"),\n spawn(fetch, \"/baz\")\n }\n\n for i = 1, #threads do\n local ok, res = wait(threads[i])\n if not ok then\n say(i, \": failed to run: \", res)\n else\n say(i, \": status: \", res.status)\n say(i, \": body: \", res.body)\n end\n endHere it essentially implements the \"wait all\" model.And below is an example demonstrating the \"wait any\" model: function f()\n ngx.sleep(0.2)\n ngx.say(\"f: hello\")\n return \"f done\"\n end\n\n function g()\n ngx.sleep(0.1)\n ngx.say(\"g: hello\")\n return \"g done\"\n end\n\n local tf, err = ngx.thread.spawn(f)\n if not tf then\n ngx.say(\"failed to spawn thread f: \", err)\n return\n end\n\n ngx.say(\"f thread created: \", coroutine.status(tf))\n\n local tg, err = ngx.thread.spawn(g)\n if not tg then\n ngx.say(\"failed to spawn thread g: \", err)\n return\n end\n\n ngx.say(\"g thread created: \", coroutine.status(tg))\n\n ok, res = ngx.thread.wait(tf, tg)\n if not ok then\n ngx.say(\"failed to wait: \", res)\n return\n end\n\n ngx.say(\"res: \", res)\n\n -- stop the \"world\", aborting other running threads\n ngx.exit(ngx.OK)And it will generate the following output:f thread created: running\ng thread created: running\ng: hello\nres: g done\nThis API was first enabled in the v0.7.0 release.", - "prefix": "ngx.thread.wait", - "body": "local ok, res1, res2, ... 
= ngx.thread.wait(${1:thread1}, ${2:thread2}, ...)" - }, - "ngx.thread.kill": { - "description": "Kills a running \"light thread\" created by ngx.thread.spawn. Returns a true value when successful or nil and a string describing the error otherwise.According to the current implementation, only the parent coroutine (or \"light thread\") can kill a thread. Also, a running \"light thread\" with pending Nginx subrequests (initiated by ngx.location.capture for example) cannot be killed due to a limitation in the Nginx core.This API was first enabled in the v0.9.9 release.", - "prefix": "ngx.thread.kill", - "body": "local ok, err = ngx.thread.kill(${1:thread})" - }, - "ngx.on_abort": { - "description": "Registers a user Lua function as the callback which gets called automatically when the client closes the (downstream) connection prematurely.Returns 1 if the callback is registered successfully or returns nil and a string describing the error otherwise.All the Nginx API for Lua can be used in the callback function because the function is run in a special \"light thread\", just as those \"light threads\" created by ngx.thread.spawn.The callback function can decide what to do with the client abortion event all by itself. For example, it can simply ignore the event by doing nothing and the current Lua request handler will continue executing without interruptions. 
And the callback function can also decide to terminate everything by calling ngx.exit, for example, local function my_cleanup()\n -- custom cleanup work goes here, like cancelling a pending DB transaction\n\n -- now abort all the \"light threads\" running in the current request handler\n ngx.exit(499)\n end\n\n local ok, err = ngx.on_abort(my_cleanup)\n if not ok then\n ngx.log(ngx.ERR, \"failed to register the on_abort callback: \", err)\n ngx.exit(500)\n endWhen lua_check_client_abort is set to off (which is the default), then this function call will always return the error message \"lua_check_client_abort is off\".According to the current implementation, this function can only be called once in a single request handler; subsequent calls will return the error message \"duplicate call\".This API was first introduced in the v0.7.4 release.See also lua_check_client_abort.", - "prefix": "ngx.on_abort", - "body": "local ok, err = ngx.on_abort(${1:callback})" - }, - "ngx.timer.at": { - "description": "Creates an Nginx timer with a user callback function as well as optional user arguments.The first argument, delay, specifies the delay for the timer,\nin seconds. One can specify fractional seconds like 0.001 to mean 1\nmillisecond here. 0 delay can also be specified, in which case the\ntimer will immediately expire when the current handler yields\nexecution.The second argument, callback, can\nbe any Lua function, which will be invoked later in a background\n\"light thread\" after the delay specified. 
The user callback will be\ncalled automatically by the Nginx core with the arguments premature,\nuser_arg1, user_arg2, and etc, where the premature\nargument takes a boolean value indicating whether it is a premature timer\nexpiration or not(for the 0 delay timer it is always false), and user_arg1, user_arg2, and etc, are\nthose (extra) user arguments specified when calling ngx.timer.at\nas the remaining arguments.Premature timer expiration happens when the Nginx worker process is\ntrying to shut down, as in an Nginx configuration reload triggered by\nthe HUP signal or in an Nginx server shutdown. When the Nginx worker\nis trying to shut down, one can no longer call ngx.timer.at to\ncreate new timers with nonzero delays and in that case ngx.timer.at will return a \"conditional false\" value and\na string describing the error, that is, \"process exiting\".Starting from the v0.9.3 release, it is allowed to create zero-delay timers even when the Nginx worker process starts shutting down.When a timer expires, the user Lua code in the timer callback is\nrunning in a \"light thread\" detached completely from the original\nrequest creating the timer. 
So objects with the same lifetime as the\nrequest creating them, like cosockets, cannot be shared between the\noriginal request and the timer user callback function.Here is a simple example: location / {\n ...\n log_by_lua_block {\n local function push_data(premature, uri, args, status)\n -- push the data uri, args, and status to the remote\n -- via ngx.socket.tcp or ngx.socket.udp\n -- (one may want to buffer the data in Lua a bit to\n -- save I/O operations)\n end\n local ok, err = ngx.timer.at(0, push_data,\n ngx.var.uri, ngx.var.args, ngx.header.status)\n if not ok then\n ngx.log(ngx.ERR, \"failed to create timer: \", err)\n return\n end\n\n -- other job in log_by_lua_block\n }\n }One can also create infinite re-occurring timers, for instance, a timer getting triggered every 5 seconds, by calling ngx.timer.at recursively in the timer callback function. Here is such an example, local delay = 5\n local handler\n handler = function (premature)\n -- do some routine job in Lua just like a cron job\n if premature then\n return\n end\n local ok, err = ngx.timer.at(delay, handler)\n if not ok then\n ngx.log(ngx.ERR, \"failed to create the timer: \", err)\n return\n end\n\n -- do something in timer\n end\n\n local ok, err = ngx.timer.at(delay, handler)\n if not ok then\n ngx.log(ngx.ERR, \"failed to create the timer: \", err)\n return\n end\n\n -- do other jobsIt is recommended, however, to use the ngx.timer.every API function\ninstead for creating recurring timers since it is more robust.Because timer callbacks run in the background and their running time\nwill not add to any client request's response time, they can easily\naccumulate in the server and exhaust system resources due to either\nLua programming mistakes or just too much client traffic. To prevent\nextreme consequences like crashing the Nginx server, there are\nbuilt-in limitations on both the number of \"pending timers\" and the\nnumber of \"running timers\" in an Nginx worker process. 
The \"pending\ntimers\" here mean timers that have not yet been expired and \"running\ntimers\" are those whose user callbacks are currently running.The maximal number of pending timers allowed in an Nginx\nworker is controlled by the lua_max_pending_timers\ndirective. The maximal number of running timers is controlled by the\nlua_max_running_timers directive.According to the current implementation, each \"running timer\" will\ntake one (fake) connection record from the global connection record\nlist configured by the standard worker_connections directive in\nnginx.conf. So ensure that the\nworker_connections directive is set to\na large enough value that takes into account both the real connections\nand fake connections required by timer callbacks (as limited by the\nlua_max_running_timers directive).A lot of the Lua APIs for Nginx are enabled in the context of the timer\ncallbacks, like stream/datagram cosockets (ngx.socket.tcp and ngx.socket.udp), shared\nmemory dictionaries (ngx.shared.DICT), user coroutines (coroutine.*),\nuser \"light threads\" (ngx.thread.*), ngx.exit, ngx.now/ngx.time,\nngx.md5/ngx.sha1_bin, are all allowed. But the subrequest API (like\nngx.location.capture), the ngx.req.* API, the downstream output API\n(like ngx.say, ngx.print, and ngx.flush) are explicitly disabled in\nthis context.You must notice that each timer will be based on a fake request (this fake request is also based on a fake connection). Because Nginx's memory release is based on the connection closure, if you run a lot of APIs that apply for memory resources in a timer, such as tcpsock:connect, will cause the accumulation of memory resources. So it is recommended to create a new timer after running several times to release memory resources.You can pass most of the standard Lua values (nils, booleans, numbers, strings, tables, closures, file handles, and etc) into the timer callback, either explicitly as user arguments or implicitly as upvalues for the callback closure. 
There are several exceptions, however: you cannot pass any thread objects returned by coroutine.create and ngx.thread.spawn or any cosocket objects returned by ngx.socket.tcp, ngx.socket.udp, and ngx.req.socket because these objects' lifetime is bound to the request context creating them while the timer callback is detached from the creating request's context (by design) and runs in its own (fake) request context. If you try to share the thread or cosocket objects across the boundary of the creating request, then you will get the \"no co ctx found\" error (for threads) or \"bad request\" (for cosockets). It is fine, however, to create all these objects inside your timer callback.Please note that the timer Lua handler has its own copy of the ngx.ctx magic\ntable. It won't share the same ngx.ctx with the Lua handler creating the timer.\nIf you need to pass data from the timer creator to the timer handler, please\nuse the extra parameters of ngx.timer.at().This API was first introduced in the v0.8.0 release.", - "prefix": "ngx.timer.at", - "body": "local hdl, err = ngx.timer.at(${1:delay}, ${2:callback}, ${3:user_arg1}, ${4:user_arg2}, ...)" - }, - "ngx.timer.every": { - "description": "Similar to the ngx.timer.at API function, but\ndelay cannot be zero,\ntimer will be created every delay seconds until the current Nginx worker process starts exiting.\nLike ngx.timer.at, the callback argument will be called\nautomatically with the arguments premature, user_arg1, user_arg2, etc.When success, returns a \"conditional true\" value (but not a true). 
Otherwise, returns a \"conditional false\" value and a string describing the error.This API also respect the lua_max_pending_timers and lua_max_running_timers.This API was first introduced in the v0.10.9 release.", - "prefix": "ngx.timer.every", - "body": "local hdl, err = ngx.timer.every(${1:delay}, ${2:callback}, ${3:user_arg1}, ${4:user_arg2}, ...)" - }, - "ngx.timer.running_count": { - "description": "Returns the number of timers currently running.This directive was first introduced in the v0.9.20 release.", - "prefix": "ngx.timer.running_count", - "body": "local count = ngx.timer.running_count()" - }, - "ngx.timer.pending_count": { - "description": "Returns the number of pending timers.This directive was first introduced in the v0.9.20 release.", - "prefix": "ngx.timer.pending_count", - "body": "local count = ngx.timer.pending_count()" - }, - "ngx.config.subsystem": { - "description": "This string field indicates the Nginx subsystem the current Lua environment is based on. For this module, this field always takes the string value \"http\". 
For\nngx_stream_lua_module, however, this field takes the value \"stream\".This field was first introduced in the 0.10.1.", - "prefix": "ngx.config.subsystem", - "body": "local subsystem = ngx.config.subsystem" - }, - "ngx.config.debug": { - "description": "This boolean field indicates whether the current Nginx is a debug build, i.e., being built by the ./configure option --with-debug.This field was first introduced in the 0.8.7.", - "prefix": "ngx.config.debug", - "body": "local debug = ngx.config.debug" - }, - "ngx.config.prefix": { - "description": "Returns the Nginx server \"prefix\" path, as determined by the -p command-line option when running the Nginx executable, or the path specified by the --prefix command-line option when building Nginx with the ./configure script.This function was first introduced in the 0.9.2.", - "prefix": "ngx.config.prefix", - "body": "local prefix = ngx.config.prefix()" - }, - "ngx.config.nginx_version": { - "description": "This field take an integral value indicating the version number of the current Nginx core being used. For example, the version number 1.4.3 results in the Lua number 1004003.This API was first introduced in the 0.9.3 release.", - "prefix": "ngx.config.nginx_version", - "body": "local ver = ngx.config.nginx_version" - }, - "ngx.config.nginx_configure": { - "description": "This function returns a string for the Nginx ./configure command's arguments string.This API was first introduced in the 0.9.5 release.", - "prefix": "ngx.config.nginx_configure", - "body": "local str = ngx.config.nginx_configure()" - }, - "ngx.config.ngx_lua_version": { - "description": "This field take an integral value indicating the version number of the current ngx_lua module being used. 
For example, the version number 0.9.3 results in the Lua number 9003.This API was first introduced in the 0.9.3 release.", - "prefix": "ngx.config.ngx_lua_version", - "body": "local ver = ngx.config.ngx_lua_version" - }, - "ngx.worker.exiting": { - "description": "This function returns a boolean value indicating whether the current Nginx worker process already starts exiting. Nginx worker process exiting happens on Nginx server quit or configuration reload (aka HUP reload).This API was first introduced in the 0.9.3 release.", - "prefix": "ngx.worker.exiting", - "body": "local exiting = ngx.worker.exiting()" - }, - "ngx.worker.pid": { - "description": "This function returns a Lua number for the process ID (PID) of the current Nginx worker process. This API is more efficient than ngx.var.pid and can be used in contexts where the ngx.var.VARIABLE API cannot be used (like init_worker_by_lua).This API was first introduced in the 0.9.5 release.", - "prefix": "ngx.worker.pid", - "body": "local pid = ngx.worker.pid()" - }, - "ngx.worker.pids": { - "description": "This function returns a Lua table for all Nginx worker process IDs (PIDs). Nginx uses channel to send the current worker PID to another worker in the worker process start or restart. So this API can get all current worker PIDs. 
Windows does not have this API.This API was first introduced in the 0.10.23 release.", - "prefix": "ngx.worker.pids", - "body": "local pids = ngx.worker.pids()" - }, - "ngx.worker.count": { - "description": "Returns the total number of the Nginx worker processes (i.e., the value configured\nby the worker_processes\ndirective in nginx.conf).This API was first introduced in the 0.9.20 release.", - "prefix": "ngx.worker.count", - "body": "local count = ngx.worker.count()" - }, - "ngx.worker.id": { - "description": "Returns the ordinal number of the current Nginx worker processes (starting from number 0).So if the total number of workers is N, then this method may return a number between 0\nand N - 1 (inclusive).This function returns meaningful values only for Nginx 1.9.1+. With earlier versions of Nginx, it\nalways returns nil.See also ngx.worker.count.This API was first introduced in the 0.9.20 release.", - "prefix": "ngx.worker.id", - "body": "local id = ngx.worker.id()" - }, - "ngx.semaphore": { - "description": "This is a Lua module that implements a classic-style semaphore API for efficient synchronizations among\ndifferent \"light threads\". 
Sharing the same semaphore among different \"light threads\" created in different (request)\ncontexts are also supported as long as the \"light threads\" reside in the same Nginx worker process\nand the lua_code_cache directive is turned on (which is the default).This Lua module does not ship with this ngx_lua module itself rather it is shipped with\nthe\nlua-resty-core library.Please refer to the documentation\nfor this ngx.semaphore Lua module in lua-resty-core\nfor more details.This feature requires at least ngx_lua v0.10.0.", - "prefix": "ngx.semaphore", - "body": "local semaphore = require \"ngx.semaphore\"" - }, - "ngx.balancer": { - "description": "This is a Lua module that provides a Lua API to allow defining completely dynamic load balancers\nin pure Lua.This Lua module does not ship with this ngx_lua module itself rather it is shipped with\nthe\nlua-resty-core library.Please refer to the documentation\nfor this ngx.balancer Lua module in lua-resty-core\nfor more details.This feature requires at least ngx_lua v0.10.0.", - "prefix": "ngx.balancer", - "body": "local balancer = require \"ngx.balancer\"" - }, - "ngx.ssl": { - "description": "This Lua module provides API functions to control the SSL handshake process in contexts like\nssl_certificate_by_lua*.This Lua module does not ship with this ngx_lua module itself rather it is shipped with\nthe\nlua-resty-core library.Please refer to the documentation\nfor this ngx.ssl Lua module for more details.This feature requires at least ngx_lua v0.10.0.", - "prefix": "ngx.ssl", - "body": "local ssl = require \"ngx.ssl\"" - }, - "ngx.ocsp": { - "description": "This Lua module provides API to perform OCSP queries, OCSP response validations, and\nOCSP stapling planting.Usually, this module is used together with the ngx.ssl\nmodule in the\ncontext of ssl_certificate_by_lua*.This Lua module does not ship with this ngx_lua module itself rather it is shipped with\nthe\nlua-resty-core library.Please refer to the 
documentation\nfor this ngx.ocsp Lua module for more details.This feature requires at least ngx_lua v0.10.0.", - "prefix": "ngx.ocsp", - "body": "local ocsp = require \"ngx.ocsp\"" - }, - "ndk.set_var.DIRECTIVE": { - "description": "This mechanism allows calling other Nginx C modules' directives that are implemented by Nginx Devel Kit (NDK)'s set_var submodule's ndk_set_var_value.For example, the following set-misc-nginx-module directives can be invoked this way:\nset_quote_sql_str\nset_quote_pgsql_str\nset_quote_json_str\nset_unescape_uri\nset_escape_uri\nset_encode_base32\nset_decode_base32\nset_encode_base64\nset_decode_base64\nset_encode_hex\nset_decode_hex\nset_sha1\nset_md5\nFor instance, local res = ndk.set_var.set_escape_uri('a/b')\n -- now res == 'a%2fb'Similarly, the following directives provided by encrypted-session-nginx-module can be invoked from within Lua too:\nset_encrypt_session\nset_decrypt_session\nThis feature requires the ngx_devel_kit module.", - "prefix": "ndk.set_var.DIRECTIVE", - "body": "local res = ndk.set_var.DIRECTIVE_NAME" - }, - "coroutine.create": { - "description": "Creates a user Lua coroutines with a Lua function, and returns a coroutine object.Similar to the standard Lua coroutine.create API, but works in the context of the Lua coroutines created by ngx_lua.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first introduced in the v0.6.0 release.", - "prefix": "coroutine.create", - "body": "local co = coroutine.create(${1:f})" - }, - "coroutine.resume": { - "description": "Resumes the execution of a user Lua coroutine object previously yielded or just created.Similar to the standard Lua coroutine.resume API, but works in the context of the Lua coroutines created by ngx_lua.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first introduced in the v0.6.0 release.", - "prefix": "coroutine.resume", - "body": "local ok, ... 
= coroutine.resume(${1:co}, ...)" - }, - "coroutine.yield": { - "description": "Yields the execution of the current user Lua coroutine.Similar to the standard Lua coroutine.yield API, but works in the context of the Lua coroutines created by ngx_lua.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first introduced in the v0.6.0 release.", - "prefix": "coroutine.yield", - "body": "local ... = coroutine.yield(...)" - }, - "coroutine.wrap": { - "description": "Similar to the standard Lua coroutine.wrap API, but works in the context of the Lua coroutines created by ngx_lua.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first introduced in the v0.6.0 release.", - "prefix": "coroutine.wrap", - "body": "local co = coroutine.wrap(${1:f})" - }, - "coroutine.running": { - "description": "Identical to the standard Lua coroutine.running API.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first enabled in the v0.6.0 release.", - "prefix": "coroutine.running", - "body": "local co = coroutine.running()" - }, - "coroutine.status": { - "description": "Identical to the standard Lua coroutine.status API.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first enabled in the v0.6.0 release.", - "prefix": "coroutine.status", - "body": "local status = coroutine.status(${1:co})" - }, - "ngx.run_worker_thread": { - "description": "This API is still experimental and may change in the future without notice.This API is available only for Linux.Wrap the nginx worker thread to execute lua function. 
The caller coroutine would yield until the function returns.Only the following ngx_lua APIs could be used in function_name function of the module module:\n\nngx.encode_base64\n\n\nngx.decode_base64\n\n\nngx.hmac_sha1\n\n\nngx.encode_args\n\n\nngx.decode_args\n\n\nngx.quote_sql_str\n\n\nngx.crc32_short\n\n\nngx.crc32_long\n\n\nngx.hmac_sha1\n\n\nngx.md5_bin\n\n\nngx.md5\n\n\nngx.config.subsystem\n\n\nngx.config.debug\n\n\nngx.config.prefix\n\n\nngx.config.nginx_version\n\n\nngx.config.nginx_configure\n\n\nngx.config.ngx_lua_version\n\n\nngx.shared.DICT\n\nThe first argument threadpool specifies the Nginx thread pool name defined by thread_pool.The second argument module_name specifies the lua module name to execute in the worker thread, which would return a lua table. The module must be inside the package path, e.g. lua_package_path '/opt/openresty/?.lua;;';The third argument func_name specifies the function field in the module table as the second argument.The type of args must be one of type below:\nboolean\nnumber\nstring\nnil\ntable (the table may be recursive, and contains members of types above.)\nThe ok is in boolean type, which indicate the C land error (failed to get thread from thread pool, pcall the module function failed, .etc). If ok is false, the res1 is the error string.The return values (res1, ...) are returned by invocation of the module function. Normally, the res1 should be in boolean type, so that the caller could inspect the error.This API is useful when you need to execute the below types of tasks:\nCPU bound task, e.g. do md5 calculation\nFile I/O task\nCall os.execute() or blocking C API via ffi\nCall external Lua library not based on cosocket or nginx\nExample1: do md5 calculation. 
location /calc_md5 {\n default_type 'text/plain';\n\n content_by_lua_block {\n local ok, md5_or_err = ngx.run_worker_thread(\"testpool\", \"md5\", \"md5\")\n ngx.say(ok, \" : \", md5_or_err)\n }\n }md5.lualocal function md5()\n return ngx.md5(\"hello\")\nend\n\nreturn { md5=md5, }Example2: write logs into the log file. location /write_log_file {\n default_type 'text/plain';\n\n content_by_lua_block {\n local ok, err = ngx.run_worker_thread(\"testpool\", \"write_log_file\", \"log\", ngx.var.arg_str)\n if not ok then\n ngx.say(ok, \" : \", err)\n return\n end\n ngx.say(ok)\n }\n }write_log_file.lua local function log(str)\n local file, err = io.open(\"/tmp/tmp.log\", \"a\")\n if not file then\n return false, err\n end\n file:write(str)\n file:flush()\n file:close()\n return true\n end\n return {log=log}", - "prefix": "ngx.run_worker_thread", - "body": "local ok, res1, res2, ... = ngx.run_worker_thread(${1:threadpool}, ${2:module_name}, ${3:func_name}, ${4:arg1}, ${5:arg2}, ...)" - }, - "memcached.new": { - "description": "Creates a memcached object. In case of failures, returns nil and a string describing the error.It accepts an optional opts table argument. The following options are supported:\n\nkey_transform\nan array table containing two functions for escaping and unescaping the\nmemcached keys, respectively. 
By default,\nthe memcached keys will be escaped and unescaped as URI components, that is\n\n memached:new{\n key_transform = { ngx.escape_uri, ngx.unescape_uri }\n }", - "prefix": "memcached.new", - "body": "local memc, err = memcached:new($opts?)" - }, - "memcached.connect": { - "description": "local ok, err = memc:connect(\"unix:/path/to/unix.sock\")\nAttempts to connect to the remote host and port that the memcached server is listening to or a local unix domain socket file listened by the memcached server.Before actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method.", - "prefix": "memcached.connect", - "body": "local ok, err = memc:connect($host, $port)" - }, - "memcached.sslhandshake": { - "description": "Does SSL/TLS handshake on the currently established connection. See the\ntcpsock.sslhandshake\nAPI from OpenResty for more details.", - "prefix": "memcached.sslhandshake", - "body": "local session, err = memc:sslhandshake($reused_session?, $server_name?, $ssl_verify?, $send_status_req?)" - }, - "memcached.set": { - "description": "Inserts an entry into memcached unconditionally. If the key already exists, overrides it.The value argument could also be a Lua table holding multiple Lua\nstrings that are supposed to be concatenated as a whole\n(without any delimiters). For example, memc:set(\"dog\", {\"a \", {\"kind of\"}, \" animal\"})is functionally equivalent to memc:set(\"dog\", \"a kind of animal\")The exptime parameter is optional and defaults to 0 (meaning never expires). 
The expiration time is in seconds.The flags parameter is optional and defaults to 0.", - "prefix": "memcached.set", - "body": "local ok, err = memc:set($key, $value, $exptime, $flags)" - }, - "memcached.set_timeout": { - "description": "Sets the timeout (in ms) protection for subsequent operations, including the connect method.Returns 1 when successful and nil plus a string describing the error otherwise.", - "prefix": "memcached.set_timeout", - "body": "local ok, err = memc:set_timeout($timeout)" - }, - "memcached.set_timeouts": { - "description": "Sets the timeouts (in ms) for connect, send and read operations respectively.Returns 1 when successful and nil plus a string describing the error otherwise.set_keepalivelocal ok, err = memc:set_keepalive($max_idle_timeout, $pool_size)\nPuts the current memcached connection immediately into the ngx_lua cosocket connection pool.You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process.In case of success, returns 1. In case of errors, returns nil with a string describing the error.Only call this method in the place you would have called the close method instead. Calling this method will immediately turn the current memcached object into the closed state. Any subsequent operations other than connect() on the current object will return the closed error.", - "prefix": "memcached.set_timeouts", - "body": "local ok, err = memc:set_timeouts($connect_timeout, $send_timeout, $read_timeout)" - }, - "memcached.set_keepalive": { - "description": "Puts the current memcached connection immediately into the ngx_lua cosocket connection pool.You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process.In case of success, returns 1. In case of errors, returns nil with a string describing the error.Only call this method in the place you would have called the close method instead. 
Calling this method will immediately turn the current memcached object into the closed state. Any subsequent operations other than connect() on the current object will return the closed error.", - "prefix": "memcached.set_keepalive", - "body": "local ok, err = memc:set_keepalive($max_idle_timeout, $pool_size)" - }, - "memcached.get_reused_times": { - "description": "This method returns the (successfully) reused times for the current connection. In case of error, it returns nil and a string describing the error.If the current connection does not come from the built-in connection pool, then this method always returns 0, that is, the connection has never been reused (yet). If the connection comes from the connection pool, then the return value is always non-zero. So this method can also be used to determine if the current connection comes from the pool.", - "prefix": "memcached.get_reused_times", - "body": "local times, err = memc:get_reused_times()" - }, - "memcached.close": { - "description": "Closes the current memcached connection and returns the status.In case of success, returns 1. In case of errors, returns nil with a string describing the error.", - "prefix": "memcached.close", - "body": "local ok, err = memc:close()" - }, - "memcached.add": { - "description": "Inserts an entry into memcached if and only if the key does not exist.The value argument could also be a Lua table holding multiple Lua\nstrings that are supposed to be concatenated as a whole\n(without any delimiters). For example, memc:add(\"dog\", {\"a \", {\"kind of\"}, \" animal\"})is functionally equivalent to memc:add(\"dog\", \"a kind of animal\")The exptime parameter is optional and defaults to 0 (meaning never expires). The expiration time is in seconds.The flags parameter is optional, defaults to 0.In case of success, returns 1. 
In case of errors, returns nil with a string describing the error.", - "prefix": "memcached.add", - "body": "local ok, err = memc:add($key, $value, $exptime, $flags)" - }, - "memcached.replace": { - "description": "Inserts an entry into memcached if and only if the key does exist.The value argument could also be a Lua table holding multiple Lua\nstrings that are supposed to be concatenated as a whole\n(without any delimiters). For example, memc:replace(\"dog\", {\"a \", {\"kind of\"}, \" animal\"})is functionally equivalent to memc:replace(\"dog\", \"a kind of animal\")The exptime parameter is optional and defaults to 0 (meaning never expires). The expiration time is in seconds.The flags parameter is optional, defaults to 0.In case of success, returns 1. In case of errors, returns nil with a string describing the error.", - "prefix": "memcached.replace", - "body": "local ok, err = memc:replace($key, $value, $exptime, $flags)" - }, - "memcached.append": { - "description": "Appends the value to an entry with the same key that already exists in memcached.The value argument could also be a Lua table holding multiple Lua\nstrings that are supposed to be concatenated as a whole\n(without any delimiters). For example, memc:append(\"dog\", {\"a \", {\"kind of\"}, \" animal\"})is functionally equivalent to memc:append(\"dog\", \"a kind of animal\")The exptime parameter is optional and defaults to 0 (meaning never expires). The expiration time is in seconds.The flags parameter is optional, defaults to 0.In case of success, returns 1. 
In case of errors, returns nil with a string describing the error.", - "prefix": "memcached.append", - "body": "local ok, err = memc:append($key, $value, $exptime, $flags)" - }, - "memcached.prepend": { - "description": "Prepends the value to an entry with the same key that already exists in memcached.The value argument could also be a Lua table holding multiple Lua\nstrings that are supposed to be concatenated as a whole\n(without any delimiters). For example, memc:prepend(\"dog\", {\"a \", {\"kind of\"}, \" animal\"})is functionally equivalent to memc:prepend(\"dog\", \"a kind of animal\")The exptime parameter is optional and defaults to 0 (meaning never expires). The expiration time is in seconds.The flags parameter is optional and defaults to 0.In case of success, returns 1. In case of errors, returns nil with a string describing the error.", - "prefix": "memcached.prepend", - "body": "local ok, err = memc:prepend($key, $value, $exptime, $flags)" - }, - "memcached.get": { - "description": "Get a single entry or multiple entries in the memcached server via a single key or a table of keys.Let us first discuss the case When the key is a single string.The key's value and associated flags value will be returned if the entry is found and no error happens.In case of errors, nil values will be turned for value and flags and a 3rd (string) value will also be returned for describing the error.If the entry is not found, then three nil values will be returned.Then let us discuss the case when the a Lua table of multiple keys are provided.In this case, a Lua table holding the key-result pairs will be always returned in case of success. Each value corresponding each key in the table is also a table holding two values, the key's value and the key's flags. 
If a key does not exist, then there is no responding entries in the results table.In case of errors, nil will be returned, and the second return value will be a string describing the error.", - "prefix": "memcached.get", - "body": "local value, flags, err = memc:get($key)" - }, - "memcached.gets": { - "description": "local results, err = memc:gets($keys)\nJust like the get method, but will also return the CAS unique value associated with the entry in addition to the key's value and flags.This method is usually used together with the cas method.", - "prefix": "memcached.gets", - "body": "local value, flags, cas_unique, err = memc:gets($key)" - }, - "memcached.cas": { - "description": "Just like the set method but does a check and set operation, which means \"store this data but\nonly if no one else has updated since I last fetched it.\"The cas_unique argument can be obtained from the gets method.", - "prefix": "memcached.cas", - "body": "local ok, err = memc:cas($key, $value, $cas_unique, $exptime?, $flags?)" - }, - "memcached.touch": { - "description": "Update the expiration time of an existing key.Returns 1 for success or nil with a string describing the error otherwise.This method was first introduced in the v0.11 release.", - "prefix": "memcached.touch", - "body": "local ok, err = memc:touch($key, $exptime)" - }, - "memcached.flush_all": { - "description": "Flushes (or invalidates) all the existing entries in the memcached server immediately (by default) or after the expiration\nspecified by the time argument (in seconds).In case of success, returns 1. In case of errors, returns nil with a string describing the error.", - "prefix": "memcached.flush_all", - "body": "local ok, err = memc:flush_all($time?)" - }, - "memcached.delete": { - "description": "Deletes the key from memcached immediately.The key to be deleted must already exist in memcached.In case of success, returns 1. 
In case of errors, returns nil with a string describing the error.", - "prefix": "memcached.delete", - "body": "local ok, err = memc:delete($key)" - }, - "memcached.incr": { - "description": "Increments the value of the specified key by the integer value specified in the delta argument.Returns the new value after incrementation in success, and nil with a string describing the error in case of failures.", - "prefix": "memcached.incr", - "body": "local new_value, err = memc:incr($key, $delta)" - }, - "memcached.decr": { - "description": "Decrements the value of the specified key by the integer value specified in the delta argument.Returns the new value after decrementation in success, and nil with a string describing the error in case of failures.", - "prefix": "memcached.decr", - "body": "local new_value, err = memc:decr($key, $value)" - }, - "memcached.stats": { - "description": "Returns memcached server statistics information with an optional args argument.In case of success, this method returns a lua table holding all of the lines of the output; in case of failures, it returns nil with a string describing the error.If the args argument is omitted, general server statistics is returned. Possible args argument values are items, sizes, slabs, among others.", - "prefix": "memcached.stats", - "body": "local lines, err = memc:stats($args?)" - }, - "memcached.version": { - "description": "Returns the server version number, like 1.2.8.In case of error, it returns nil with a string describing the error.", - "prefix": "memcached.version", - "body": "local version, err = memc:version($args?)" - }, - "memcached.quit": { - "description": "Tells the server to close the current memcached connection.Returns 1 in case of success and nil other wise. 
In case of failures, another string value will also be returned to describe the error.Generally you can just directly call the close method to achieve the same effect.", - "prefix": "memcached.quit", - "body": "local ok, err = memc:quit()" - }, - "memcached.verbosity": { - "description": "Sets the verbosity level used by the memcached server. The level argument should be given integers only.Returns 1 in case of success and nil other wise. In case of failures, another string value will also be returned to describe the error.", - "prefix": "memcached.verbosity", - "body": "local ok, err = memc:verbosity($level)" - }, - "memcached.init_pipeline": { - "description": "Enable the Memcache pipelining mode. All subsequent calls to Memcache command methods will automatically get buffer and will send to the server in one run when the commit_pipeline method is called or get cancelled by calling the cancel_pipeline method.The optional params n is buffer tables size. default value 4", - "prefix": "memcached.init_pipeline", - "body": "local err = memc:init_pipeline($n?)" - }, - "memcached.commit_pipeline": { - "description": "Quits the pipelining mode by committing all the cached Memcache queries to the remote server in a single run. All the replies for these queries will be collected automatically and are returned as if a big multi-bulk reply at the highest level.This method success return a lua table. failed return a lua string describing the error upon failures.", - "prefix": "memcached.commit_pipeline", - "body": "local results, err = memc:commit_pipeline()" - }, - "memcached.cancel_pipeline": { - "description": "Quits the pipelining mode by discarding all existing buffer Memcache commands since the last call to the init_pipeline method.the method no return. always succeeds.", - "prefix": "memcached.cancel_pipeline", - "body": "memc:cancel_pipeline()" - }, - "mysql.new": { - "description": "Creates a MySQL connection object. 
In case of failures, returns nil and a string describing the error.", - "prefix": "mysql.new", - "body": "local db, err = mysql:new()" - }, - "mysql.connect": { - "description": "Attempts to connect to the remote MySQL server.The options argument is a Lua table holding the following keys:\n\nhost\nthe host name for the MySQL server.\n\n\nport\nthe port that the MySQL server is listening on. Default to 3306.\n\n\npath\nthe path of the unix socket file listened by the MySQL server.\n\n\ndatabase\nthe MySQL database name.\n\n\nuser\nMySQL account name for login.\n\n\npassword\nMySQL account password for login (in clear text).\n\n\ncharset\nthe character set used on the MySQL connection, which can be different from the default charset setting.\nThe following values are accepted: big5, dec8, cp850, hp8, koi8r, latin1, latin2,\nswe7, ascii, ujis, sjis, hebrew, tis620, euckr, koi8u, gb2312, greek,\ncp1250, gbk, latin5, armscii8, utf8, ucs2, cp866, keybcs2, macce,\nmacroman, cp852, latin7, utf8mb4, cp1251, utf16, utf16le, cp1256,\ncp1257, utf32, binary, geostd8, cp932, eucjpms, gb18030.\n\n\nmax_packet_size\nthe upper limit for the reply packets sent from the MySQL server (default to 1MB).\n\n\nssl\nIf set to true, then uses SSL to connect to MySQL (default to false). If the MySQL\nserver does not have SSL support\n(or just disabled), the error string \"ssl disabled on server\" will be returned.\n\n\nssl_verify\nIf set to true, then verifies the validity of the server SSL certificate (default to false).\nNote that you need to configure the lua_ssl_trusted_certificate\nto specify the CA (or server) certificate used by your MySQL server. You may also\nneed to configure lua_ssl_verify_depth\naccordingly.\n\n\npool\nthe name for the MySQL connection pool. if omitted, an ambiguous pool name will be generated automatically with the string template user:database:host:port or user:database:path. 
(this option was first introduced in v0.08.)\n\n\npool_size\nSpecifies the size of the connection pool. If omitted and no backlog option was provided, no pool will be created. If omitted but backlog was provided, the pool will be created with a default size equal to the value of the lua_socket_pool_size directive. The connection pool holds up to pool_size alive connections ready to be reused by subsequent calls to connect, but note that there is no upper limit to the total number of opened connections outside of the pool. If you need to restrict the total number of opened connections, specify the backlog option. When the connection pool would exceed its size limit, the least recently used (kept-alive) connection already in the pool will be closed to make room for the current connection. Note that the cosocket connection pool is per Nginx worker process rather than per Nginx server instance, so the size limit specified here also applies to every single Nginx worker process. Also note that the size of the connection pool cannot be changed once it has been created. Note that at least ngx_lua 0.10.14 is required to use this options.\n\n\nbacklog\nIf specified, this module will limit the total number of opened connections for this pool. No more connections than pool_size can be opened for this pool at any time. If the connection pool is full, subsequent connect operations will be queued into a queue equal to this option's value (the \"backlog\" queue). If the number of queued connect operations is equal to backlog, subsequent connect operations will fail and return nil plus the error string \"too many waiting connect operations\". The queued connect operations will be resumed once the number of connections in the pool is less than pool_size. The queued connect operation will abort once they have been queued for more than connect_timeout, controlled by set_timeout, and will return nil plus the error string \"timeout\". 
Note that at least ngx_lua 0.10.14 is required to use this options.\n\n\ncompact_arrays\nwhen this option is set to true, then the query and read_result methods will return the array-of-arrays structure for the resultset, rather than the default array-of-hashes structure.\n\nBefore actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method.", - "prefix": "mysql.connect", - "body": "local ok, err, errcode, sqlstate = db:connect($options)" - }, - "mysql.set_timeout": { - "description": "Sets the timeout (in ms) protection for subsequent operations, including the connect method.", - "prefix": "mysql.set_timeout", - "body": "db:set_timeout($time)" - }, - "mysql.set_keepalive": { - "description": "Puts the current MySQL connection immediately into the ngx_lua cosocket connection pool.You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process.In case of success, returns 1. In case of errors, returns nil with a string describing the error.Only call this method in the place you would have called the close method instead. Calling this method will immediately turn the current resty.mysql object into the closed state. Any subsequent operations other than connect() on the current objet will return the closed error.", - "prefix": "mysql.set_keepalive", - "body": "local ok, err = db:set_keepalive($max_idle_timeout, $pool_size)" - }, - "mysql.get_reused_times": { - "description": "This method returns the (successfully) reused times for the current connection. In case of error, it returns nil and a string describing the error.If the current connection does not come from the built-in connection pool, then this method always returns 0, that is, the connection has never been reused (yet). 
If the connection comes from the connection pool, then the return value is always non-zero. So this method can also be used to determine if the current connection comes from the pool.", - "prefix": "mysql.get_reused_times", - "body": "local times, err = db:get_reused_times()" - }, - "mysql.close": { - "description": "Closes the current mysql connection and returns the status.In case of success, returns 1. In case of errors, returns nil with a string describing the error.", - "prefix": "mysql.close", - "body": "local ok, err = db:close()" - }, - "mysql.send_query": { - "description": "Sends the query to the remote MySQL server without waiting for its replies.Returns the bytes successfully sent out in success and otherwise returns nil and a string describing the error.You should use the read_result method to read the MySQL replies afterwards.", - "prefix": "mysql.send_query", - "body": "local bytes, err = db:send_query($query)" - }, - "mysql.read_result": { - "description": "local res, err, errcode, sqlstate = db:read_result($nrows)\nReads in one result returned from the MySQL server.It returns a Lua table (res) describing the MySQL OK packet or result set packet for the query result.For queries corresponding to a result set, it returns an array holding all the rows. Each row holds key-value pairs for each data fields. For instance, {\n { name = \"Bob\", age = 32, phone = ngx.null },\n { name = \"Marry\", age = 18, phone = \"10666372\"}\n }For queries that do not correspond to a result set, it returns a Lua table like this: {\n insert_id = 0,\n server_status = 2,\n warning_count = 1,\n affected_rows = 32,\n message = nil\n }If more results are following the current result, a second err return value will be given the string again. One should always check this (second) return value and if it is again, then she should call this method again to retrieve more results. 
This usually happens when the original query contains multiple statements (separated by semicolon in the same query string) or calling a MySQL procedure. See also Multi-Resultset Support.In case of errors, this method returns at most 4 values: nil, err, errcode, and sqlstate. The err return value contains a string describing the error, the errcode return value holds the MySQL error code (a numerical value), and finally, the sqlstate return value contains the standard SQL error code that consists of 5 characters. Note that, the errcode and sqlstate might be nil if MySQL does not return them.The optional argument nrows can be used to specify an approximate number of rows for the result set. This value can be used\nto pre-allocate space in the resulting Lua table for the result set. By default, it takes the value 4.", - "prefix": "mysql.read_result", - "body": "local res, err, errcode, sqlstate = db:read_result()" - }, - "mysql.query": { - "description": "local res, err, errcode, sqlstate = db:query($query, $nrows)\nThis is a shortcut for combining the send_query call and the first read_result call.You should always check if the err return value is again in case of success because this method will only call read_result only once for you. See also Multi-Resultset Support.", - "prefix": "mysql.query", - "body": "local res, err, errcode, sqlstate = db:query($query)" - }, - "mysql.server_ver": { - "description": "Returns the MySQL server version string, like \"5.1.64\".You should only call this method after successfully connecting to a MySQL server, otherwise nil will be returned.", - "prefix": "mysql.server_ver", - "body": "local str = db:server_ver()" - }, - "mysql.set_compact_arrays": { - "description": "Sets whether to use the \"compact-arrays\" structure for the resultsets returned by subsequent queries. 
See the compact_arrays option for the connect method for more details.This method was first introduced in the v0.09 release.", - "prefix": "mysql.set_compact_arrays", - "body": "db:set_compact_arrays($boolean)" - }, - "redis.new": { - "description": "Creates a redis object. In case of failures, returns nil and a string describing the error.", - "prefix": "redis.new", - "body": "local red, err = redis:new()" - }, - "redis.connect": { - "description": "local ok, err = red:connect(\"unix:/path/to/unix.sock\", options_table?)\nAttempts to connect to the remote host and port that the redis server is listening to or a local unix domain socket file listened by the redis server.Before actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method.The optional options_table argument is a Lua table holding the following keys:\n\nssl\nIf set to true, then uses SSL to connect to redis (defaults to false).\n\n\nssl_verify\nIf set to true, then verifies the validity of the server SSL certificate (defaults to false). Note that you need to configure the lua_ssl_trusted_certificate to specify the CA (or server) certificate used by your redis server. You may also need to configure lua_ssl_verify_depth accordingly.\n\n\nserver_name\nSpecifies the server name for the new TLS extension Server Name Indication (SNI) when connecting over SSL.\n\n\npool\nSpecifies a custom name for the connection pool being used. If omitted, then the connection pool name will be generated from the string template : or .\n\n\npool_size\nSpecifies the size of the connection pool. If omitted and no backlog option was provided, no pool will be created. If omitted but backlog was provided, the pool will be created with a default size equal to the value of the lua_socket_pool_size directive. 
The connection pool holds up to pool_size alive connections ready to be reused by subsequent calls to connect, but note that there is no upper limit to the total number of opened connections outside of the pool. If you need to restrict the total number of opened connections, specify the backlog option. When the connection pool would exceed its size limit, the least recently used (kept-alive) connection already in the pool will be closed to make room for the current connection. Note that the cosocket connection pool is per Nginx worker process rather than per Nginx server instance, so the size limit specified here also applies to every single Nginx worker process. Also note that the size of the connection pool cannot be changed once it has been created. Note that at least ngx_lua 0.10.14 is required to use this options.\n\n\nbacklog\nIf specified, this module will limit the total number of opened connections for this pool. No more connections than pool_size can be opened for this pool at any time. If the connection pool is full, subsequent connect operations will be queued into a queue equal to this option's value (the \"backlog\" queue). If the number of queued connect operations is equal to backlog, subsequent connect operations will fail and return nil plus the error string \"too many waiting connect operations\". The queued connect operations will be resumed once the number of connections in the pool is less than pool_size. The queued connect operation will abort once they have been queued for more than connect_timeout, controlled by set_timeout, and will return nil plus the error string \"timeout\". 
Note that at least ngx_lua 0.10.14 is required to use this options.\n\n", - "prefix": "redis.connect", - "body": "local ok, err = red:connect($host, $port, $options_table?)" - }, - "redis.set_timeout": { - "description": "Sets the timeout (in ms) protection for subsequent operations, including the connect method.Since version v0.28 of this module, it is advised that\nset_timeouts be used in favor of this method.", - "prefix": "redis.set_timeout", - "body": "red:set_timeout($time)" - }, - "redis.set_timeouts": { - "description": "Respectively sets the connect, send, and read timeout thresholds (in ms), for\nsubsequent socket operations. Setting timeout thresholds with this method\noffers more granularity than set_timeout. As such, it is\npreferred to use set_timeouts over\nset_timeout.This method was added in the v0.28 release.", - "prefix": "redis.set_timeouts", - "body": "red:set_timeouts($connect_timeout, $send_timeout, $read_timeout)" - }, - "redis.set_keepalive": { - "description": "Puts the current Redis connection immediately into the ngx_lua cosocket connection pool.You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process.In case of success, returns 1. In case of errors, returns nil with a string describing the error.Only call this method in the place you would have called the close method instead. Calling this method will immediately turn the current redis object into the closed state. Any subsequent operations other than connect() on the current object will return the closed error.", - "prefix": "redis.set_keepalive", - "body": "local ok, err = red:set_keepalive($max_idle_timeout, $pool_size)" - }, - "redis.get_reused_times": { - "description": "This method returns the (successfully) reused times for the current connection. 
In case of error, it returns nil and a string describing the error.If the current connection does not come from the built-in connection pool, then this method always returns 0, that is, the connection has never been reused (yet). If the connection comes from the connection pool, then the return value is always non-zero. So this method can also be used to determine if the current connection comes from the pool.", - "prefix": "redis.get_reused_times", - "body": "local times, err = red:get_reused_times()" - }, - "redis.close": { - "description": "Closes the current redis connection and returns the status.In case of success, returns 1. In case of errors, returns nil with a string describing the error.", - "prefix": "redis.close", - "body": "local ok, err = red:close()" - }, - "redis.init_pipeline": { - "description": "red:init_pipeline($n)\nEnable the redis pipelining mode. All subsequent calls to Redis command methods will automatically get cached and will send to the server in one run when the commit_pipeline method is called or get cancelled by calling the cancel_pipeline method.This method always succeeds.If the redis object is already in the Redis pipelining mode, then calling this method will discard existing cached Redis queries.The optional n argument specifies the (approximate) number of commands that are going to add to this pipeline, which can make things a little faster.", - "prefix": "redis.init_pipeline", - "body": "red:init_pipeline()" - }, - "redis.commit_pipeline": { - "description": "Quits the pipelining mode by committing all the cached Redis queries to the remote server in a single run. 
All the replies for these queries will be collected automatically and are returned as if a big multi-bulk reply at the highest level.This method returns nil and a Lua string describing the error upon failures.", - "prefix": "redis.commit_pipeline", - "body": "local results, err = red:commit_pipeline()" - }, - "redis.cancel_pipeline": { - "description": "Quits the pipelining mode by discarding all existing cached Redis commands since the last call to the init_pipeline method.This method always succeeds.If the redis object is not in the Redis pipelining mode, then this method is a no-op.", - "prefix": "redis.cancel_pipeline", - "body": "red:cancel_pipeline()" - }, - "redis.hmset": { - "description": "local res, err = red:hmset($myhash, { field1 \nSpecial wrapper for the Redis \"hmset\" command.When there are only three arguments (including the \"red\" object\nitself), then the last argument must be a Lua table holding all the field/value pairs.", - "prefix": "redis.hmset", - "body": "local res, err = red:hmset($myhash, $field1, $value1, $field2, $value2, ...)" - }, - "redis.array_to_hash": { - "description": "Auxiliary function that converts an array-like Lua table into a hash-like table.This method was first introduced in the v0.11 release.", - "prefix": "redis.array_to_hash", - "body": "local hash = red:array_to_hash($array)" - }, - "redis.read_reply": { - "description": "Reading a reply from the redis server. 
This method is mostly useful for the Redis Pub/Sub API, for example, local cjson = require \"cjson\"\n local redis = require \"resty.redis\"\n\n local red = redis:new()\n local red2 = redis:new()\n\n red:set_timeouts(1000, 1000, 1000) -- 1 sec\n red2:set_timeouts(1000, 1000, 1000) -- 1 sec\n\n local ok, err = red:connect(\"127.0.0.1\", 6379)\n if not ok then\n ngx.say(\"1: failed to connect: \", err)\n return\n end\n\n ok, err = red2:connect(\"127.0.0.1\", 6379)\n if not ok then\n ngx.say(\"2: failed to connect: \", err)\n return\n end\n\n local res, err = red:subscribe(\"dog\")\n if not res then\n ngx.say(\"1: failed to subscribe: \", err)\n return\n end\n\n ngx.say(\"1: subscribe: \", cjson.encode(res))\n\n res, err = red2:publish(\"dog\", \"Hello\")\n if not res then\n ngx.say(\"2: failed to publish: \", err)\n return\n end\n\n ngx.say(\"2: publish: \", cjson.encode(res))\n\n res, err = red:read_reply()\n if not res then\n ngx.say(\"1: failed to read reply: \", err)\n return\n end\n\n ngx.say(\"1: receive: \", cjson.encode(res))\n\n red:close()\n red2:close()Running this example gives the output like this:1: subscribe: [\"subscribe\",\"dog\",1]\n2: publish: 1\n1: receive: [\"message\",\"dog\",\"Hello\"]\nThe following class methods are provieded:", - "prefix": "redis.read_reply", - "body": "local res, err = red:read_reply()" - }, - "redis.add_commands": { - "description": "WARNING this method is now deprecated since we already do automatic Lua method generation\nfor any redis commands the user attempts to use and thus we no longer need this.Adds new redis commands to the resty.redis class. 
Here is an example: local redis = require \"resty.redis\"\n\n redis.add_commands(\"foo\", \"bar\")\n\n local red = redis:new()\n\n red:set_timeouts(1000, 1000, 1000) -- 1 sec\n\n local ok, err = red:connect(\"127.0.0.1\", 6379)\n if not ok then\n ngx.say(\"failed to connect: \", err)\n return\n end\n\n local res, err = red:foo(\"a\")\n if not res then\n ngx.say(\"failed to foo: \", err)\n end\n\n res, err = red:bar()\n if not res then\n ngx.say(\"failed to bar: \", err)\n end", - "prefix": "redis.add_commands", - "body": "local hash = redis.add_commands($cmd_name1, $cmd_name2, ...)" - }, - "dns.new": { - "description": "Creates a dns.resolver object. Returns nil and a message string on error.It accepts a opts table argument. The following options are supported:\n\nnameservers\na list of nameservers to be used. Each nameserver entry can be either a single hostname string or a table holding both the hostname string and the port number. The nameserver is picked up by a simple round-robin algorithm for each query method call. This option is required.\n\n\nretrans\nthe total number of times of retransmitting the DNS request when receiving a DNS response times out according to the timeout setting. Defaults to 5 times. When trying to retransmit the query, the next nameserver according to the round-robin algorithm will be picked up.\n\n\ntimeout\nthe time in milliseconds for waiting for the response for a single attempt of request transmission. note that this is ''not'' the maximal total waiting time before giving up, the maximal total waiting time can be calculated by the expression timeout x retrans. The timeout setting can also be changed by calling the set_timeout method. The default timeout setting is 2000 milliseconds, or 2 seconds.\n\n\nno_recurse\na boolean flag controls whether to disable the \"recursion desired\" (RD) flag in the UDP request. 
Defaults to false.\n\n\nno_random\na boolean flag controls whether to randomly pick the nameserver to query first, if true will always start with the first nameserver listed. Defaults to false.\n\n", - "prefix": "dns.new", - "body": "local r, err = class:new($opts)" - }, - "dns.destroy": { - "description": "Destroy the dns.resolver object by releasing all the internal occupied resources.", - "prefix": "dns.destroy", - "body": "r:destroy()" - }, - "dns.query": { - "description": "Performs a DNS standard query to the nameservers specified by the new method,\nand returns all the answer records in an array-like Lua table. In case of errors, it will\nreturn nil and a string describing the error instead.If the server returns a non-zero error code, the fields errcode and errstr will be set accordingly in the Lua table returned.Each entry in the answers returned table value is also a hash-like Lua table\nwhich usually takes some of the following fields:\n\nname\nThe resource record name.\n\n\ntype\nThe current resource record type, possible values are 1 (TYPE_A), 5 (TYPE_CNAME), 28 (TYPE_AAAA), and any other values allowed by RFC 1035.\n\n\naddress\nThe IPv4 or IPv6 address in their textual representations when the resource record type is either 1 (TYPE_A) or 28 (TYPE_AAAA), respectively. Successive 16-bit zero groups in IPv6 addresses will not be compressed by default, if you want that, you need to call the compress_ipv6_addr static method instead.\n\n\nsection\nThe identifier of the section that the current answer record belongs to. Possible values are 1 (SECTION_AN), 2 (SECTION_NS), and 3 (SECTION_AR).\n\n\ncname\nThe (decoded) record data value for CNAME resource records. 
Only present for CNAME records.\n\n\nttl\nThe time-to-live (TTL) value in seconds for the current resource record.\n\n\nclass\nThe current resource record class, possible values are 1 (CLASS_IN) or any other values allowed by RFC 1035.\n\n\npreference\nThe preference integer number for MX resource records. Only present for MX type records.\n\n\nexchange\nThe exchange domain name for MX resource records. Only present for MX type records.\n\n\nnsdname\nA domain-name which specifies a host which should be authoritative for the specified class and domain. Usually present for NS type records.\n\n\nrdata\nThe raw resource data (RDATA) for resource records that are not recognized.\n\n\ntxt\nThe record value for TXT records. When there is only one character string in this record, then this field takes a single Lua string. Otherwise this field takes a Lua table holding all the strings.\n\n\nptrdname\nThe record value for PTR records.\n\nThis method also takes an optional options argument table, which takes the following fields:\n\nqtype\nThe type of the question. Possible values are 1 (TYPE_A), 5 (TYPE_CNAME), 28 (TYPE_AAAA), or any other QTYPE value specified by RFC 1035 and RFC 3596. Default to 1 (TYPE_A).\n\n\nauthority_section\nWhen set to a true value, the answers return value includes the Authority section of the DNS response. Default to false.\n\n\nadditional_section\nWhen set to a true value, the answers return value includes the Additional section of the DNS response. Default to false.\n\nThe optional parameter tries can be provided as an empty table, and will be\nreturned as a third result. The table will be an array with the error message\nfor each (if any) failed try.When data truncation happens, the resolver will automatically retry using the TCP transport mode\nto query the current nameserver. All TCP connections are short lived.", - "prefix": "dns.query", - "body": "local answers, err, tries? 
= r:query($name, $options?, $tries?)" - }, - "dns.tcp_query": { - "description": "Just like the query method, but enforce the TCP transport mode instead of UDP.All TCP connections are short lived.Here is an example: local resolver = require \"resty.dns.resolver\"\n\n local r, err = resolver:new{\n nameservers = { \"8.8.8.8\" }\n }\n if not r then\n ngx.say(\"failed to instantiate resolver: \", err)\n return\n end\n\n local ans, err = r:tcp_query(\"www.google.com\", { qtype = r.TYPE_A })\n if not ans then\n ngx.say(\"failed to query: \", err)\n return\n end\n\n local cjson = require \"cjson\"\n ngx.say(\"records: \", cjson.encode(ans))", - "prefix": "dns.tcp_query", - "body": "local answers, err = r:tcp_query($name, $options?)" - }, - "dns.set_timeout": { - "description": "Overrides the current timeout setting by the time argument in milliseconds for all the nameserver peers.", - "prefix": "dns.set_timeout", - "body": "r:set_timeout($time)" - }, - "dns.compress_ipv6_addr": { - "description": "Compresses the successive 16-bit zero groups in the textual format of the IPv6 address.For example, local resolver = require \"resty.dns.resolver\"\n local compress = resolver.compress_ipv6_addr\n local new_addr = compress(\"FF01:0:0:0:0:0:0:101\")will yield FF01::101 in the new_addr return value.", - "prefix": "dns.compress_ipv6_addr", - "body": "local compressed = resty.dns.resolver.compress_ipv6_addr($address)" - }, - "dns.expand_ipv6_addr": { - "description": "Expands the successive 16-bit zero groups in the textual format of the IPv6 address.For example, local resolver = require \"resty.dns.resolver\"\n local expand = resolver.expand_ipv6_addr\n local new_addr = expand(\"FF01::101\")will yield FF01:0:0:0:0:0:0:101 in the new_addr return value.", - "prefix": "dns.expand_ipv6_addr", - "body": "local expanded = resty.dns.resolver.expand_ipv6_addr($address)" - }, - "dns.arpa_str": { - "description": "Generates the reverse domain name for PTR lookups for both IPv4 and IPv6 
addresses. Compressed IPv6 addresses\nwill be automatically expanded.For example, local resolver = require \"resty.dns.resolver\"\n local ptr4 = resolver.arpa_str(\"1.2.3.4\")\n local ptr6 = resolver.arpa_str(\"FF01::101\")will yield 4.3.2.1.in-addr.arpa for ptr4 and 1.0.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.F.F.ip6.arpa for ptr6.", - "prefix": "dns.arpa_str", - "body": "local arpa_record = resty.dns.resolver.arpa_str($address)" - }, - "dns.reverse_query": { - "description": "Performs a PTR lookup for both IPv4 and IPv6 addresses. This function is basically a wrapper for the query command\nwhich uses the arpa_str command to convert the IP address on the fly.", - "prefix": "dns.reverse_query", - "body": "local answers, err = r:reverse_query($address)" - }, - "websocket.new": { - "description": "local wb, err = server:new($opts)\nPerforms the websocket handshake process on the server side and returns a WebSocket server object.In case of error, it returns nil and a string describing the error.An optional options table can be specified. The following options are as follows:\n\nmax_payload_len\nSpecifies the maximal length of payload allowed when sending and receiving WebSocket frames.\n\n\nsend_masked\nSpecifies whether to send out masked WebSocket frames. When it is true, masked frames are always sent. Default to false.\n\n\ntimeout\nSpecifies the network timeout threshold in milliseconds. You can change this setting later via the set_timeout method call. 
Note that this timeout setting does not affect the HTTP response header sending process for the websocket handshake; you need to configure the send_timeout directive at the same time.\n\n", - "prefix": "websocket.new", - "body": "local wb, err = server:new()" - }, - "websocket.set_timeout": { - "description": "Sets the timeout delay (in milliseconds) for the network-related operations.", - "prefix": "websocket.set_timeout", - "body": "wb:set_timeout($ms)" - }, - "websocket.send_text": { - "description": "Sends the text argument out as an unfragmented data frame of the text type. Returns the number of bytes that have actually been sent on the TCP level.In case of errors, returns nil and a string describing the error.", - "prefix": "websocket.send_text", - "body": "local bytes, err = wb:send_text($text)" - }, - "websocket.send_binary": { - "description": "Sends the data argument out as an unfragmented data frame of the binary type. Returns the number of bytes that have actually been sent on the TCP level.In case of errors, returns nil and a string describing the error.", - "prefix": "websocket.send_binary", - "body": "local bytes, err = wb:send_binary($data)" - }, - "websocket.send_ping": { - "description": "local bytes, err = wb:send_ping($msg)\nSends out a ping frame with an optional message specified by the msg argument. Returns the number of bytes that have actually been sent on the TCP level.In case of errors, returns nil and a string describing the error.Note that this method does not wait for a pong frame from the remote end.", - "prefix": "websocket.send_ping", - "body": "local bytes, err = wb:send_ping()" - }, - "websocket.send_pong": { - "description": "local bytes, err = wb:send_pong($msg)\nSends out a pong frame with an optional message specified by the msg argument. 
Returns the number of bytes that have actually been sent on the TCP level.In case of errors, returns nil and a string describing the error.", - "prefix": "websocket.send_pong", - "body": "local bytes, err = wb:send_pong()" - }, - "websocket.send_close": { - "description": "local bytes, err = wb:send_close($code, $msg)\nSends out a close frame with an optional status code and a message.In case of errors, returns nil and a string describing the error.For a list of valid status code, see the following document:http://tools.ietf.org/html/rfc6455#section-7.4.1Note that this method does not wait for a close frame from the remote end.", - "prefix": "websocket.send_close", - "body": "local bytes, err = wb:send_close()" - }, - "websocket.send_frame": { - "description": "Sends out a raw websocket frame by specifying the fin field (boolean value), the opcode, and the payload.For a list of valid opcode, seehttp://tools.ietf.org/html/rfc6455#section-5.2In case of errors, returns nil and a string describing the error.To control the maximal payload length allowed, you can pass the max_payload_len option to the new constructor.To control whether to send masked frames, you can pass true to the send_masked option in the new constructor method. By default, unmasked frames are sent.", - "prefix": "websocket.send_frame", - "body": "local bytes, err = wb:send_frame($fin, $opcode, $payload)" - }, - "websocket.recv_frame": { - "description": "Receives a WebSocket frame from the wire.In case of an error, returns two nil values and a string describing the error.The second return value is always the frame type, which could be one of continuation, text, binary, close, ping, pong, or nil (for unknown types).For close frames, returns 3 values: the extra status message (which could be an empty string), the string \"close\", and a Lua number for the status code (if any). 
For possible closing status codes, seehttp://tools.ietf.org/html/rfc6455#section-7.4.1For other types of frames, just returns the payload and the type.For fragmented frames, the err return value is the Lua string \"again\".", - "prefix": "websocket.recv_frame", - "body": "local data, typ, err = wb:recv_frame()" - }, - "websocket.client:new": { - "description": "local wb, err = client:new($opts)\nInstantiates a WebSocket client object.In case of error, it returns nil and a string describing the error.An optional options table can be specified. The following options are as follows:\n\nmax_payload_len\nSpecifies the maximal length of payload allowed when sending and receiving WebSocket frames.\n\n\nsend_unmasked\nSpecifies whether to send out an unmasked WebSocket frames. When it is true, unmasked frames are always sent. Default to false. RFC 6455 requires, however, that the client MUST send masked frames to the server, so never set this option to true unless you know what you are doing.\n\n\ntimeout\nSpecifies the default network timeout threshold in milliseconds. You can change this setting later via the set_timeout method call.\n\n", - "prefix": "websocket.client:new", - "body": "local wb, err = client:new()" - }, - "websocket.client:connect": { - "description": "local ok, err = wb:connect(\"wss://:/\")\nlocal ok, err = wb:connect(\"ws://:/\", options)\nlocal ok, err = wb:connect(\"wss://:/\", options)\nConnects to the remote WebSocket service port and performs the websocket handshake process on the client side.Before actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method.An optional Lua table can be specified as the last argument to this method to specify various connect options:\n\nprotocols\nSpecifies all the subprotocols used for the current WebSocket session. 
It could be a Lua table holding all the subprotocol names or just a single Lua string.\n\n\norigin\nSpecifies the value of the Origin request header.\n\n\npool\nSpecifies a custom name for the connection pool being used. If omitted, then the connection pool name will be generated from the string template :.\n\n\npool_size\nspecify the size of the connection pool. If omitted and no\nbacklog option was provided, no pool will be created. If omitted\nbut backlog was provided, the pool will be created with a default\nsize equal to the value of the lua_socket_pool_size\ndirective.\nThe connection pool holds up to pool_size alive connections\nready to be reused by subsequent calls to connect, but\nnote that there is no upper limit to the total number of opened connections\noutside of the pool. If you need to restrict the total number of opened\nconnections, specify the backlog option.\nWhen the connection pool would exceed its size limit, the least recently used\n(kept-alive) connection already in the pool will be closed to make room for\nthe current connection.\nNote that the cosocket connection pool is per Nginx worker process rather\nthan per Nginx server instance, so the size limit specified here also applies\nto every single Nginx worker process. Also note that the size of the connection\npool cannot be changed once it has been created.\nThis option was first introduced in the v0.10.14 release.\n\n\nbacklog\nif specified, this module will limit the total number of opened connections\nfor this pool. No more connections than pool_size can be opened\nfor this pool at any time. 
If the connection pool is full, subsequent\nconnect operations will be queued into a queue equal to this option's\nvalue (the \"backlog\" queue).\nIf the number of queued connect operations is equal to backlog,\nsubsequent connect operations will fail and return nil plus the\nerror string \"too many waiting connect operations\".\nThe queued connect operations will be resumed once the number of connections\nin the pool is less than pool_size.\nThe queued connect operation will abort once they have been queued for more\nthan connect_timeout, controlled by\nsettimeouts, and will return nil plus\nthe error string \"timeout\".\nThis option was first introduced in the v0.10.14 release.\n\n\nssl_verify\nSpecifies whether to perform SSL certificate verification during the\nSSL handshake if the wss:// scheme is used.\n\n\nheaders\nSpecifies custom headers to be sent in the handshake request. The table is expected to contain strings in the format {\"a-header: a header value\", \"another-header: another header value\"}.\n\n\nclient_cert\nSpecifies a client certificate chain cdata object that will be used while TLS handshaking with remote server.\nThese objects can be created using\nngx.ssl.parse_pem_cert\nfunction provided by lua-resty-core.\nNote that specifying the client_cert option requires corresponding client_priv_key be provided too. See below.\n\n\nclient_priv_key\nSpecifies a private key corresponds to the client_cert option above.\nThese objects can be created using\nngx.ssl.parse_pem_priv_key\nfunction provided by lua-resty-core.\n\nThe SSL connection mode (wss://) requires at least ngx_lua 0.9.11 or OpenResty 1.7.4.1.", - "prefix": "websocket.client:connect", - "body": "local ok, err = wb:connect(\"ws://:/\")" - }, - "websocket.client:close": { - "description": "Closes the current WebSocket connection. 
If no close frame is sent yet, then the close frame will be automatically sent.", - "prefix": "websocket.client:close", - "body": "local ok, err = wb:close()" - }, - "websocket.client:set_keepalive": { - "description": "Puts the current WebSocket connection immediately into the ngx_lua cosocket connection pool.You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process.In case of success, returns 1. In case of errors, returns nil with a string describing the error.Only call this method in the place you would have called the close method instead. Calling this method will immediately turn the current WebSocket object into the closed state. Any subsequent operations other than connect() on the current objet will return the closed error.", - "prefix": "websocket.client:set_keepalive", - "body": "local ok, err = wb:set_keepalive($max_idle_timeout, $pool_size)" - }, - "websocket.client:set_timeout": { - "description": "Identical to the set_timeout method of the resty.websocket.server objects.", - "prefix": "websocket.client:set_timeout", - "body": "wb:set_timeout($ms)" - }, - "websocket.client:send_text": { - "description": "Identical to the send_text method of the resty.websocket.server objects.", - "prefix": "websocket.client:send_text", - "body": "local bytes, err = wb:send_text($text)" - }, - "websocket.client:send_binary": { - "description": "Identical to the send_binary method of the resty.websocket.server objects.", - "prefix": "websocket.client:send_binary", - "body": "local bytes, err = wb:send_binary($data)" - }, - "websocket.client:send_ping": { - "description": "local bytes, err = wb:send_ping($msg)\nIdentical to the send_ping method of the resty.websocket.server objects.", - "prefix": "websocket.client:send_ping", - "body": "local bytes, err = wb:send_ping()" - }, - "websocket.client:send_pong": { - "description": "local bytes, err = wb:send_pong($msg)\nIdentical to the send_pong 
method of the resty.websocket.server objects.", - "prefix": "websocket.client:send_pong", - "body": "local bytes, err = wb:send_pong()" - }, - "websocket.client:send_close": { - "description": "local bytes, err = wb:send_close($code, $msg)\nIdentical to the send_close method of the resty.websocket.server objects.", - "prefix": "websocket.client:send_close", - "body": "local bytes, err = wb:send_close()" - }, - "websocket.client:send_frame": { - "description": "Identical to the send_frame method of the resty.websocket.server objects.To control whether to send unmasked frames, you can pass true to the send_unmasked option in the new constructor method. By default, masked frames are sent.", - "prefix": "websocket.client:send_frame", - "body": "local bytes, err = wb:send_frame($fin, $opcode, $payload)" - }, - "websocket.client:recv_frame": { - "description": "Identical to the recv_frame method of the resty.websocket.server objects.", - "prefix": "websocket.client:recv_frame", - "body": "local data, typ, err = wb:recv_frame()" - }, - "websocket.protocol.recv_frame": { - "description": "Receives a WebSocket frame from the wire.", - "prefix": "websocket.protocol.recv_frame", - "body": "local data, typ, err = protocol.recv_frame($socket, $max_payload_len, $force_masking)" - }, - "websocket.protocol.build_frame": { - "description": "Builds a raw WebSocket frame.", - "prefix": "websocket.protocol.build_frame", - "body": "local frame = protocol.build_frame($fin, $opcode, $payload_len, $payload, $masking)" - }, - "websocket.protocol.send_frame": { - "description": "Sends a raw WebSocket frame.", - "prefix": "websocket.protocol.send_frame", - "body": "local bytes, err = protocol.send_frame($socket, $fin, $opcode, $payload, $max_payload_len, $masking)" - }, - "lock.new": { - "description": "local obj, err = lock:new($dict_name, $opts)\nCreates a new lock object instance by specifying the shared dictionary name (created by lua_shared_dict) and an optional options table opts.In 
case of failure, returns nil and a string describing the error.The options table accepts the following options:\nexptime\nSpecifies expiration time (in seconds) for the lock entry in the shared memory dictionary. You can specify up to 0.001 seconds. Default to 30 (seconds). Even if the invoker does not call unlock or the object holding the lock is not GC'd, the lock will be released after this time. So deadlock won't happen even when the worker process holding the lock crashes.\ntimeout\nSpecifies the maximal waiting time (in seconds) for the lock method calls on the current object instance. You can specify up to 0.001 seconds. Default to 5 (seconds). This option value cannot be bigger than exptime. This timeout is to prevent a lock method call from waiting forever.\nYou can specify 0 to make the lock method return immediately without waiting if it cannot acquire the lock right away.\nstep\nSpecifies the initial step (in seconds) of sleeping when waiting for the lock. Default to 0.001 (seconds). When the lock method is waiting on a busy lock, it sleeps by steps. The step size is increased by a ratio (specified by the ratio option) until reaching the step size limit (specified by the max_step option).\nratio\nSpecifies the step increasing ratio. Default to 2, that is, the step size doubles at each waiting iteration.\nmax_step\nSpecifies the maximal step size (i.e., sleep interval, in seconds) allowed. See also the step and ratio options). Default to 0.5 (seconds).\n", - "prefix": "lock.new", - "body": "local obj, err = lock:new($dict_name)" - }, - "lock.lock": { - "description": "Tries to lock a key across all the Nginx worker processes in the current Nginx server instance. Different keys are different locks.The length of the key string must not be larger than 65535 bytes.Returns the waiting time (in seconds) if the lock is successfully acquired. 
Otherwise returns nil and a string describing the error.The waiting time is not from the wallclock, but rather is from simply adding up all the waiting \"steps\". A nonzero elapsed return value indicates that someone else has just hold this lock. But a zero return value cannot gurantee that no one else has just acquired and released the lock.When this method is waiting on fetching the lock, no operating system threads will be blocked and the current Lua \"light thread\" will be automatically yielded behind the scene.It is strongly recommended to always call the unlock() method to actively release the lock as soon as possible.If the unlock() method is never called after this method call, the lock will get released when\nthe current resty.lock object instance is collected automatically by the Lua GC.\nthe exptime for the lock entry is reached.\nCommon errors for this method call is\n\"timeout\"\n: The timeout threshold specified by the timeout option of the new method is exceeded.\n\"locked\"\n: The current resty.lock object instance is already holding a lock (not necessarily of the same key).\nOther possible errors are from ngx_lua's shared dictionary API.It is required to create different resty.lock instances for multiple simultaneous locks (i.e., those around different keys).", - "prefix": "lock.lock", - "body": "local elapsed, err = obj:lock($key)" - }, - "lock.unlock": { - "description": "Releases the lock held by the current resty.lock object instance.Returns 1 on success. Returns nil and a string describing the error otherwise.If you call unlock when no lock is currently held, the error \"unlocked\" will be returned.", - "prefix": "lock.unlock", - "body": "local ok, err = obj:unlock()" - }, - "lock.expire": { - "description": "Sets the TTL of the lock held by the current resty.lock object instance. 
This will reset the\ntimeout of the lock to timeout seconds if it is given, otherwise the timeout provided while\ncalling new will be used.Note that the timeout supplied inside this function is independent from the timeout provided while\ncalling new. Calling expire() will not change the timeout value specified inside new\nand subsequent expire(nil) call will still use the timeout number from new.Returns true on success. Returns nil and a string describing the error otherwise.If you call expire when no lock is currently held, the error \"unlocked\" will be returned.", - "prefix": "lock.expire", - "body": "local ok, err = obj:expire($timeout)" - }, - "lrucache.new": { - "description": "Creates a new cache instance. Upon failure, returns nil and a string\ndescribing the error.The max_items argument specifies the maximal number of items this cache can\nhold.The load-factor argument designates the \"load factor\" of the FFI-based\nhash-table used internally by resty.lrucache.pureffi; the default value is\n0.5 (i.e. 50%); if the load factor is specified, it will be clamped to the\nrange of [0.1, 1] (i.e. if load factor is greater than 1, it will be\nsaturated to 1; likewise, if load-factor is smaller than 0.1, it will be\nclamped to 0.1). This argument is only meaningful for\nresty.lrucache.pureffi.", - "prefix": "lrucache.new", - "body": "local cache, err = lrucache.new($max_items [, $load_factor])" - }, - "lrucache.set": { - "description": "Sets a key with a value and an expiration time.When the cache is full, the cache will automatically evict the least recently\nused item.The optional ttl argument specifies the expiration time. The time value is in\nseconds, but you can also specify the fraction number part (e.g. 0.25). A nil\nttl argument means the value would never expire (which is the default).The optional flags argument specifies a user flags value associated with the\nitem to be stored. It can be retrieved later with the item. 
The user flags are\nstored as an unsigned 32-bit integer internally, and thus must be specified as\na Lua number. If not specified, flags will have a default value of 0. This\nargument was added in the v0.10 release.", - "prefix": "lrucache.set", - "body": "cache:set($key, $value, $ttl?, $flags?)" - }, - "lrucache.get": { - "description": "Fetches a value with the key. If the key does not exist in the cache or has\nalready expired, nil will be returned.Starting from v0.03, the stale data is also returned as the second return\nvalue if available.Starting from v0.10, the user flags value associated with the stored item is\nalso returned as the third return value. If no user flags were given to an\nitem, its default flags will be 0.", - "prefix": "lrucache.get", - "body": "local data, stale_data, flags = cache:get($key)" - }, - "lrucache.delete": { - "description": "Removes an item specified by the key from the cache.", - "prefix": "lrucache.delete", - "body": "cache:delete($key)" - }, - "lrucache.count": { - "description": "Returns the number of items currently stored in the cache including\nexpired items if any.The returned count value will always be greater or equal to 0 and smaller\nthan or equal to the size argument given to cache:new.This method was added in the v0.10 release.", - "prefix": "lrucache.count", - "body": "local count = cache:count()" - }, - "lrucache.capacity": { - "description": "Returns the maximum number of items the cache can hold. The return value is the\nsame as the size argument given to cache:new when the cache was\ncreated.This method was added in the v0.10 release.", - "prefix": "lrucache.capacity", - "body": "local size = cache:capacity()" - }, - "lrucache.get_keys": { - "description": "Fetch the list of keys currently inside the cache up to max_count. 
The keys\nwill be ordered in MRU fashion (Most-Recently-Used keys first).This function returns a Lua (array) table (with integer keys) containing the\nkeys.When max_count is nil or 0, all keys (if any) will be returned.When provided with a res table argument, this function will not allocate a\ntable and will instead insert the keys in res, along with a trailing nil\nvalue.This method was added in the v0.10 release.", - "prefix": "lrucache.get_keys", - "body": "local keys = cache:get_keys($max_count?, $res?)" - }, - "lrucache.flush_all": { - "description": "Flushes all the existing data (if any) in the current cache instance. This is\nan O(1) operation and should be much faster than creating a brand new cache\ninstance.Note however that the flush_all() method of resty.lrucache.pureffi is an\nO(n) operation.", - "prefix": "lrucache.flush_all", - "body": "cache:flush_all()" - }, - "healthcheck.spawn_checker": { - "description": "Spawns background timer-based \"light threads\" to perform periodic healthchecks on\nthe specified NGINX upstream group with the specified shm storage.The healthchecker does not need any client traffic to function. 
The checks are performed actively\nand periodically.This method call is asynchronous and returns immediately.Returns true on success, or nil and a string describing an error otherwise.", - "prefix": "healthcheck.spawn_checker", - "body": "local ok, err = healthcheck.spawn_checker($options)" - }, - "healthcheck.status_page": { - "description": "Generates a detailed status report for all the upstreams defined in the current NGINX server.One typical output isUpstream foo.com\n Primary Peers\n 127.0.0.1:12354 UP\n 127.0.0.1:12355 DOWN\n Backup Peers\n 127.0.0.1:12356 UP\n\nUpstream bar.com\n Primary Peers\n 127.0.0.1:12354 UP\n 127.0.0.1:12355 DOWN\n 127.0.0.1:12357 DOWN\n Backup Peers\n 127.0.0.1:12356 UP\nIf an upstream has no health checkers, then it will be marked by (NO checkers), as inUpstream foo.com (NO checkers)\n Primary Peers\n 127.0.0.1:12354 UP\n 127.0.0.1:12355 UP\n Backup Peers\n 127.0.0.1:12356 UP\nIf you indeed have spawned a healthchecker in init_worker_by_lua*, then you should really\ncheck out the NGINX error log file to see if there is any fatal errors aborting the healthchecker threads.", - "prefix": "healthcheck.status_page", - "body": "local str, err = healthcheck.status_page()" - }, - "balancer.new": { - "description": "Instantiates an object of this class. 
The class value is returned by the call require \"resty.chash\".The id should be table.concat({host, string.char(0), port}) like the nginx chash does,\nwhen we need to keep consistency with nginx chash.The id can be any string value when we do not need to keep consistency with nginx chash.\nThe weight should be a non negative integer.local nodes = {\n -- id => weight\n server1 = 10,\n server2 = 2,\n}\n\nlocal resty_chash = require \"resty.chash\"\n\nlocal chash = resty_chash:new(nodes)\n\nlocal id = chash:find(\"foo\")\n\nngx.say(id)", - "prefix": "balancer.new", - "body": "local obj, err = class.new($nodes)" - }, - "balancer.reinit": { - "description": "Reinit the chash obj with the new nodes.", - "prefix": "balancer.reinit", - "body": "obj:reinit($nodes)" - }, - "balancer.set": { - "description": "Set weight of the id.", - "prefix": "balancer.set", - "body": "obj:set($id, $weight)" - }, - "balancer.delete": { - "description": "Delete the id.", - "prefix": "balancer.delete", - "body": "obj:delete($id)" - }, - "balancer.incr": { - "description": "Increments weight for the id by the step value weight(default to 1).", - "prefix": "balancer.incr", - "body": "obj:incr($id, $weight?)" - }, - "balancer.decr": { - "description": "Decrease weight for the id by the step value weight(default to 1).", - "prefix": "balancer.decr", - "body": "obj:decr($id, $weight?)" - }, - "balancer.find": { - "description": "Find an id by the key, same key always return the same id in the same obj.The second return value index is the index in the chash circle of the hash value of the key.", - "prefix": "balancer.find", - "body": "local id, index = obj:find($key)" - }, - "balancer.next": { - "description": "If we have chance to retry when the first id(server) doesn't work well,\nthen we can use obj:next to get the next id.The new id may be the same as the old one.", - "prefix": "balancer.next", - "body": "local id, new_index = obj:next($old_index)" - }, - "ngx.OK": { - "description": "0", - 
"prefix": "ngx.OK", - "body": "ngx.OK" - }, - "ngx.ERROR": { - "description": "-1", - "prefix": "ngx.ERROR", - "body": "ngx.ERROR" - }, - "ngx.AGAIN": { - "description": "-2", - "prefix": "ngx.AGAIN", - "body": "ngx.AGAIN" - }, - "ngx.DONE": { - "description": "-4", - "prefix": "ngx.DONE", - "body": "ngx.DONE" - }, - "ngx.DECLINED": { - "description": "-5", - "prefix": "ngx.DECLINED", - "body": "ngx.DECLINED" - }, - "ngx.HTTP_GET": { - "description": "HTTP method constants.", - "prefix": "ngx.HTTP_GET", - "body": "ngx.HTTP_GET" - }, - "ngx.HTTP_HEAD": { - "description": "HTTP method constants.", - "prefix": "ngx.HTTP_HEAD", - "body": "ngx.HTTP_HEAD" - }, - "ngx.HTTP_PUT": { - "description": "HTTP method constants.", - "prefix": "ngx.HTTP_PUT", - "body": "ngx.HTTP_PUT" - }, - "ngx.HTTP_POST": { - "description": "HTTP method constants.", - "prefix": "ngx.HTTP_POST", - "body": "ngx.HTTP_POST" - }, - "ngx.HTTP_DELETE": { - "description": "HTTP method constants.", - "prefix": "ngx.HTTP_DELETE", - "body": "ngx.HTTP_DELETE" - }, - "ngx.HTTP_OPTIONS": { - "description": "HTTP method constants.(added in the v0.5.0rc24 release)", - "prefix": "ngx.HTTP_OPTIONS", - "body": "ngx.HTTP_OPTIONS" - }, - "ngx.HTTP_MKCOL": { - "description": "HTTP method constants.(added in the v0.8.2 release)", - "prefix": "ngx.HTTP_MKCOL", - "body": "ngx.HTTP_MKCOL" - }, - "ngx.HTTP_COPY": { - "description": "HTTP method constants.(added in the v0.8.2 release)", - "prefix": "ngx.HTTP_COPY", - "body": "ngx.HTTP_COPY" - }, - "ngx.HTTP_MOVE": { - "description": "HTTP method constants.(added in the v0.8.2 release)", - "prefix": "ngx.HTTP_MOVE", - "body": "ngx.HTTP_MOVE" - }, - "ngx.HTTP_PROPFIND": { - "description": "HTTP method constants.(added in the v0.8.2 release)", - "prefix": "ngx.HTTP_PROPFIND", - "body": "ngx.HTTP_PROPFIND" - }, - "ngx.HTTP_PROPPATCH": { - "description": "HTTP method constants.(added in the v0.8.2 release)", - "prefix": "ngx.HTTP_PROPPATCH", - "body": "ngx.HTTP_PROPPATCH" - }, 
- "ngx.HTTP_LOCK": { - "description": "HTTP method constants.(added in the v0.8.2 release)", - "prefix": "ngx.HTTP_LOCK", - "body": "ngx.HTTP_LOCK" - }, - "ngx.HTTP_UNLOCK": { - "description": "HTTP method constants.(added in the v0.8.2 release)", - "prefix": "ngx.HTTP_UNLOCK", - "body": "ngx.HTTP_UNLOCK" - }, - "ngx.HTTP_PATCH": { - "description": "HTTP method constants.(added in the v0.8.2 release)", - "prefix": "ngx.HTTP_PATCH", - "body": "ngx.HTTP_PATCH" - }, - "ngx.HTTP_TRACE": { - "description": "HTTP method constants.(added in the v0.8.2 release)", - "prefix": "ngx.HTTP_TRACE", - "body": "ngx.HTTP_TRACE" - }, - "ngx.STDERR": { - "description": "Nginx log level constants", - "prefix": "ngx.STDERR", - "body": "ngx.STDERR" - }, - "ngx.EMERG": { - "description": "Nginx log level constants", - "prefix": "ngx.EMERG", - "body": "ngx.EMERG" - }, - "ngx.ALERT": { - "description": "Nginx log level constants", - "prefix": "ngx.ALERT", - "body": "ngx.ALERT" - }, - "ngx.CRIT": { - "description": "Nginx log level constants", - "prefix": "ngx.CRIT", - "body": "ngx.CRIT" - }, - "ngx.ERR": { - "description": "Nginx log level constants", - "prefix": "ngx.ERR", - "body": "ngx.ERR" - }, - "ngx.WARN": { - "description": "Nginx log level constants", - "prefix": "ngx.WARN", - "body": "ngx.WARN" - }, - "ngx.NOTICE": { - "description": "Nginx log level constants", - "prefix": "ngx.NOTICE", - "body": "ngx.NOTICE" - }, - "ngx.INFO": { - "description": "Nginx log level constants", - "prefix": "ngx.INFO", - "body": "ngx.INFO" - }, - "ngx.DEBUG": { - "description": "Nginx log level constants", - "prefix": "ngx.DEBUG", - "body": "ngx.DEBUG" - }, - "ngx.HTTP_CONTINUE": { - "description": "(100) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_CONTINUE", - "body": "ngx.HTTP_CONTINUE" - }, - "ngx.HTTP_SWITCHING_PROTOCOLS": { - "description": "(101)(first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_SWITCHING_PROTOCOLS", - "body": "ngx.HTTP_SWITCHING_PROTOCOLS" - }, - 
"ngx.HTTP_OK": { - "description": "(200)", - "prefix": "ngx.HTTP_OK", - "body": "ngx.HTTP_OK" - }, - "ngx.HTTP_CREATED": { - "description": "(201)", - "prefix": "ngx.HTTP_CREATED", - "body": "ngx.HTTP_CREATED" - }, - "ngx.HTTP_ACCEPTED": { - "description": "(202) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_ACCEPTED", - "body": "ngx.HTTP_ACCEPTED" - }, - "ngx.HTTP_NO_CONTENT": { - "description": "(204) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_NO_CONTENT", - "body": "ngx.HTTP_NO_CONTENT" - }, - "ngx.HTTP_PARTIAL_CONTENT": { - "description": "(206) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_PARTIAL_CONTENT", - "body": "ngx.HTTP_PARTIAL_CONTENT" - }, - "ngx.HTTP_SPECIAL_RESPONSE": { - "description": "(300)", - "prefix": "ngx.HTTP_SPECIAL_RESPONSE", - "body": "ngx.HTTP_SPECIAL_RESPONSE" - }, - "ngx.HTTP_MOVED_PERMANENTLY": { - "description": "(301)", - "prefix": "ngx.HTTP_MOVED_PERMANENTLY", - "body": "ngx.HTTP_MOVED_PERMANENTLY" - }, - "ngx.HTTP_MOVED_TEMPORARILY": { - "description": "(302)", - "prefix": "ngx.HTTP_MOVED_TEMPORARILY", - "body": "ngx.HTTP_MOVED_TEMPORARILY" - }, - "ngx.HTTP_SEE_OTHER": { - "description": "(303)", - "prefix": "ngx.HTTP_SEE_OTHER", - "body": "ngx.HTTP_SEE_OTHER" - }, - "ngx.HTTP_NOT_MODIFIED": { - "description": "(304)", - "prefix": "ngx.HTTP_NOT_MODIFIED", - "body": "ngx.HTTP_NOT_MODIFIED" - }, - "ngx.HTTP_TEMPORARY_REDIRECT": { - "description": "(307) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_TEMPORARY_REDIRECT", - "body": "ngx.HTTP_TEMPORARY_REDIRECT" - }, - "ngx.HTTP_PERMANENT_REDIRECT": { - "description": "(308)", - "prefix": "ngx.HTTP_PERMANENT_REDIRECT", - "body": "ngx.HTTP_PERMANENT_REDIRECT" - }, - "ngx.HTTP_BAD_REQUEST": { - "description": "(400)", - "prefix": "ngx.HTTP_BAD_REQUEST", - "body": "ngx.HTTP_BAD_REQUEST" - }, - "ngx.HTTP_UNAUTHORIZED": { - "description": "(401)", - "prefix": "ngx.HTTP_UNAUTHORIZED", - "body": "ngx.HTTP_UNAUTHORIZED" - }, - 
"ngx.HTTP_PAYMENT_REQUIRED": { - "description": "(402) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_PAYMENT_REQUIRED", - "body": "ngx.HTTP_PAYMENT_REQUIRED" - }, - "ngx.HTTP_FORBIDDEN": { - "description": "(403)", - "prefix": "ngx.HTTP_FORBIDDEN", - "body": "ngx.HTTP_FORBIDDEN" - }, - "ngx.HTTP_NOT_FOUND": { - "description": "(404)", - "prefix": "ngx.HTTP_NOT_FOUND", - "body": "ngx.HTTP_NOT_FOUND" - }, - "ngx.HTTP_NOT_ALLOWED": { - "description": "(405)", - "prefix": "ngx.HTTP_NOT_ALLOWED", - "body": "ngx.HTTP_NOT_ALLOWED" - }, - "ngx.HTTP_NOT_ACCEPTABLE": { - "description": "(406) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_NOT_ACCEPTABLE", - "body": "ngx.HTTP_NOT_ACCEPTABLE" - }, - "ngx.HTTP_REQUEST_TIMEOUT": { - "description": "(408) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_REQUEST_TIMEOUT", - "body": "ngx.HTTP_REQUEST_TIMEOUT" - }, - "ngx.HTTP_CONFLICT": { - "description": "(409) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_CONFLICT", - "body": "ngx.HTTP_CONFLICT" - }, - "ngx.HTTP_GONE": { - "description": "(410)", - "prefix": "ngx.HTTP_GONE", - "body": "ngx.HTTP_GONE" - }, - "ngx.HTTP_UPGRADE_REQUIRED": { - "description": "(426) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_UPGRADE_REQUIRED", - "body": "ngx.HTTP_UPGRADE_REQUIRED" - }, - "ngx.HTTP_TOO_MANY_REQUESTS": { - "description": "(429) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_TOO_MANY_REQUESTS", - "body": "ngx.HTTP_TOO_MANY_REQUESTS" - }, - "ngx.HTTP_CLOSE": { - "description": "(444) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_CLOSE", - "body": "ngx.HTTP_CLOSE" - }, - "ngx.HTTP_ILLEGAL": { - "description": "(451) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_ILLEGAL", - "body": "ngx.HTTP_ILLEGAL" - }, - "ngx.HTTP_INTERNAL_SERVER_ERROR": { - "description": "(500)", - "prefix": "ngx.HTTP_INTERNAL_SERVER_ERROR", - "body": "ngx.HTTP_INTERNAL_SERVER_ERROR" - }, - 
"ngx.HTTP_METHOD_NOT_IMPLEMENTED": { - "description": "(501)", - "prefix": "ngx.HTTP_METHOD_NOT_IMPLEMENTED", - "body": "ngx.HTTP_METHOD_NOT_IMPLEMENTED" - }, - "ngx.HTTP_BAD_GATEWAY": { - "description": "(502) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_BAD_GATEWAY", - "body": "ngx.HTTP_BAD_GATEWAY" - }, - "ngx.HTTP_SERVICE_UNAVAILABLE": { - "description": "(503)", - "prefix": "ngx.HTTP_SERVICE_UNAVAILABLE", - "body": "ngx.HTTP_SERVICE_UNAVAILABLE" - }, - "ngx.HTTP_GATEWAY_TIMEOUT": { - "description": "(504) (first added in the v0.3.1rc38 release)", - "prefix": "ngx.HTTP_GATEWAY_TIMEOUT", - "body": "ngx.HTTP_GATEWAY_TIMEOUT" - }, - "ngx.HTTP_VERSION_NOT_SUPPORTED": { - "description": "(505) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_VERSION_NOT_SUPPORTED", - "body": "ngx.HTTP_VERSION_NOT_SUPPORTED" - }, - "ngx.HTTP_INSUFFICIENT_STORAGE": { - "description": "(507) (first added in the v0.9.20 release)", - "prefix": "ngx.HTTP_INSUFFICIENT_STORAGE", - "body": "ngx.HTTP_INSUFFICIENT_STORAGE" - } + "ngx.arg": { + "description": "When this is used in the context of the set_by_lua* directives, this table is read-only and holds the input arguments to the config directives: value = ngx.arg[n]Here is an example location /foo {\n set $a 32;\n set $b 56;\n\n set_by_lua $sum\n 'return tonumber(ngx.arg[1]) + tonumber(ngx.arg[2])'\n $a $b;\n\n echo $sum;\n }that writes out 88, the sum of 32 and 56.When this table is used in the context of body_filter_by_lua*, the first element holds the input data chunk to the output filter code and the second element holds the boolean flag for the \"eof\" flag indicating the end of the whole output data stream.The data chunk and \"eof\" flag passed to the downstream Nginx output filters can also be overridden by assigning values directly to the corresponding table elements. 
When setting nil or an empty Lua string value to ngx.arg[1], no data chunk will be passed to the downstream Nginx output filters at all.", + "prefix": "ngx.arg", + "body": "local val = ngx.arg[index]" + }, + "ngx.var.VARIABLE": { + "description": "Read and write Nginx variable values. value = ngx.var.some_nginx_variable_name\n ngx.var.some_nginx_variable_name = valueNote that only already defined Nginx variables can be written to.\nFor example: location /foo {\n set $my_var ''; # this line is required to create $my_var at config time\n content_by_lua_block {\n ngx.var.my_var = 123\n ...\n }\n }That is, Nginx variables cannot be created on-the-fly. Here is a list of pre-defined\nNginx variables.Some special Nginx variables like $args and $limit_rate can be assigned a value,\nmany others are not, like $query_string, $arg_PARAMETER, and $http_NAME.Nginx regex group capturing variables $1, $2, $3, and etc, can be read by this\ninterface as well, by writing ngx.var[1], ngx.var[2], ngx.var[3], and etc.Setting ngx.var.Foo to a nil value will unset the $Foo Nginx variable. ngx.var.args = nilCAUTION When reading from an Nginx variable, Nginx will allocate memory in the per-request memory pool which is freed only at request termination. So when you need to read from an Nginx variable repeatedly in your Lua code, cache the Nginx variable value to your own Lua variable, for example, local val = ngx.var.some_var\n --- use the val repeatedly laterto prevent (temporary) memory leaking within the current request's lifetime. 
Another way of caching the result is to use the ngx.ctx table.Undefined Nginx variables are evaluated to nil while uninitialized (but defined) Nginx variables are evaluated to an empty Lua string.This API requires a relatively expensive metamethod call and it is recommended to avoid using it on hot code paths.", + "prefix": "ngx.var.VARIABLE", + "body": "ngx.var.VAR_NAME" + }, + "print": { + "description": "Writes argument values into the Nginx error.log file with the ngx.NOTICE log level.It is equivalent to ngx.log(ngx.NOTICE, ...)Lua nil arguments are accepted and result in literal \"nil\" strings while Lua booleans result in literal \"true\" or \"false\" strings. And the ngx.null constant will yield the \"null\" string output.There is a hard coded 2048 byte limitation on error message lengths in the Nginx core. This limit includes trailing newlines and leading time stamps. If the message size exceeds this limit, Nginx will truncate the message text accordingly. This limit can be manually modified by editing the NGX_MAX_ERROR_STR macro definition in the src/core/ngx_log.h file in the Nginx source tree.", + "prefix": "print", + "body": "print(...)" + }, + "ngx.ctx": { + "description": "This table can be used to store per-request Lua context data and has a life time identical to the current request (as with the Nginx variables).Consider the following example, location /test {\n rewrite_by_lua_block {\n ngx.ctx.foo = 76\n }\n access_by_lua_block {\n ngx.ctx.foo = ngx.ctx.foo + 3\n }\n content_by_lua_block {\n ngx.say(ngx.ctx.foo)\n }\n }Then GET /test will yield the output 79That is, the ngx.ctx.foo entry persists across the rewrite, access, and content phases of a request.Every request, including subrequests, has its own copy of the table. 
For example: location /sub {\n content_by_lua_block {\n ngx.say(\"sub pre: \", ngx.ctx.blah)\n ngx.ctx.blah = 32\n ngx.say(\"sub post: \", ngx.ctx.blah)\n }\n }\n\n location /main {\n content_by_lua_block {\n ngx.ctx.blah = 73\n ngx.say(\"main pre: \", ngx.ctx.blah)\n local res = ngx.location.capture(\"/sub\")\n ngx.print(res.body)\n ngx.say(\"main post: \", ngx.ctx.blah)\n }\n }Then GET /main will give the output main pre: 73\n sub pre: nil\n sub post: 32\n main post: 73Here, modification of the ngx.ctx.blah entry in the subrequest does not affect the one in the parent request. This is because they have two separate versions of ngx.ctx.blah.Internal redirects (triggered by nginx configuration directives like error_page, try_files, index and etc) will destroy the original request ngx.ctx data (if any) and the new request will have an empty ngx.ctx table. For instance, location /new {\n content_by_lua_block {\n ngx.say(ngx.ctx.foo)\n }\n }\n\n location /orig {\n content_by_lua_block {\n ngx.ctx.foo = \"hello\"\n ngx.exec(\"/new\")\n }\n }Then GET /orig will give nilrather than the original \"hello\" value.Because HTTP request is created after SSL handshake, the ngx.ctx created\nin ssl_certificate_by_lua*, ssl_session_store_by_lua*, ssl_session_fetch_by_lua* and ssl_client_hello_by_lua*\nis not available in the following phases like rewrite_by_lua*.Since v0.10.18, the ngx.ctx created during a SSL handshake\nwill be inherited by the requests which share the same TCP connection established by the handshake.\nNote that overwrite values in ngx.ctx in the http request phases (like rewrite_by_lua*) will only take affect in the current http request.Arbitrary data values, including Lua closures and nested tables, can be inserted into this \"magic\" table. 
It also allows the registration of custom meta methods.Overriding ngx.ctx with a new Lua table is also supported, for example, ngx.ctx = { foo = 32, bar = 54 }When being used in the context of init_worker_by_lua*, this table just has the same lifetime of the current Lua handler.The ngx.ctx lookup requires relatively expensive metamethod calls and it is much slower than explicitly passing per-request data along by your own function arguments. So do not abuse this API for saving your own function arguments because it usually has quite some performance impact.Because of the metamethod magic, never \"local\" the ngx.ctx table outside your Lua function scope on the Lua module level due to worker-level data sharing. For example, the following is bad: -- mymodule.lua\n local _M = {}\n\n -- the following line is bad since ngx.ctx is a per-request\n -- data while this ctx variable is on the Lua module level\n -- and thus is per-nginx-worker.\n local ctx = ngx.ctx\n\n function _M.main()\n ctx.foo = \"bar\"\n end\n\n return _MUse the following instead: -- mymodule.lua\n local _M = {}\n\n function _M.main(ctx)\n ctx.foo = \"bar\"\n end\n\n return _MThat is, let the caller pass the ctx table explicitly via a function argument.", + "prefix": "ngx.ctx", + "body": "ngx.ctx" + }, + "ngx.location.capture": { + "description": "Issues a synchronous but still non-blocking Nginx Subrequest using uri.Nginx's subrequests provide a powerful way to make non-blocking internal requests to other locations configured with disk file directory or any other Nginx C modules like ngx_proxy, ngx_fastcgi, ngx_memc,\nngx_postgres, ngx_drizzle, and even ngx_lua itself and etc etc etc.Also note that subrequests just mimic the HTTP interface but there is no extra HTTP/TCP traffic nor IPC involved. 
Everything works internally, efficiently, on the C level.Subrequests are completely different from HTTP 301/302 redirection (via ngx.redirect) and internal redirection (via ngx.exec).You should always read the request body (by either calling ngx.req.read_body or configuring lua_need_request_body on) before initiating a subrequest.This API function (as well as ngx.location.capture_multi) always buffers the whole response body of the subrequest in memory. Thus, you should use cosockets\nand streaming processing instead if you have to handle large subrequest responses.Here is a basic example: res = ngx.location.capture(uri)Returns a Lua table with 4 slots: res.status, res.header, res.body, and res.truncated.res.status holds the response status code for the subrequest response.res.header holds all the response headers of the\nsubrequest and it is a normal Lua table. For multi-value response headers,\nthe value is a Lua (array) table that holds all the values in the order that\nthey appear. For instance, if the subrequest response headers contain the following\nlines: Set-Cookie: a=3\n Set-Cookie: foo=bar\n Set-Cookie: baz=blahThen res.header[\"Set-Cookie\"] will be evaluated to the table value\n{\"a=3\", \"foo=bar\", \"baz=blah\"}.res.body holds the subrequest's response body data, which might be truncated. You always need to check the res.truncated boolean flag to see if res.body contains truncated data. The data truncation here can only be caused by those unrecoverable errors in your subrequests like the cases that the remote end aborts the connection prematurely in the middle of the response body data stream or a read timeout happens when your subrequest is receiving the response body data from the remote.URI query strings can be concatenated to URI itself, for instance, res = ngx.location.capture('/foo/bar?a=3&b=4')Named locations like @foo are not allowed due to a limitation in\nthe Nginx core. 
Use normal locations combined with the internal directive to\nprepare internal-only locations.An optional option table can be fed as the second\nargument, which supports the options:\nmethod\nspecify the subrequest's request method, which only accepts constants like ngx.HTTP_POST.\nbody\nspecify the subrequest's request body (string value only).\nargs\nspecify the subrequest's URI query arguments (both string value and Lua tables are accepted)\nctx\nspecify a Lua table to be the ngx.ctx table for the subrequest. It can be the current request's ngx.ctx table, which effectively makes the parent and its subrequest to share exactly the same context table. This option was first introduced in the v0.3.1rc25 release.\nvars\ntake a Lua table which holds the values to set the specified Nginx variables in the subrequest as this option's value. This option was first introduced in the v0.3.1rc31 release.\ncopy_all_vars\nspecify whether to copy over all the Nginx variable values of the current request to the subrequest in question. modifications of the Nginx variables in the subrequest will not affect the current (parent) request. This option was first introduced in the v0.3.1rc31 release.\nshare_all_vars\nspecify whether to share all the Nginx variables of the subrequest with the current (parent) request. modifications of the Nginx variables in the subrequest will affect the current (parent) request. Enabling this option may lead to hard-to-debug issues due to bad side-effects and is considered bad and harmful. Only enable this option when you completely know what you are doing.\nalways_forward_body\nwhen set to true, the current (parent) request's request body will always be forwarded to the subrequest being created if the body option is not specified. 
The request body read by either ngx.req.read_body() or lua_need_request_body on will be directly forwarded to the subrequest without copying the whole request body data when creating the subrequest (no matter the request body data is buffered in memory buffers or temporary files). By default, this option is false and when the body option is not specified, the request body of the current (parent) request is only forwarded when the subrequest takes the PUT or POST request method.\nIssuing a POST subrequest, for example, can be done as follows res = ngx.location.capture(\n '/foo/bar',\n { method = ngx.HTTP_POST, body = 'hello, world' }\n )See HTTP method constants methods other than POST.\nThe method option is ngx.HTTP_GET by default.The args option can specify extra URI arguments, for instance, ngx.location.capture('/foo?a=1',\n { args = { b = 3, c = ':' } }\n )is equivalent to ngx.location.capture('/foo?a=1&b=3&c=%3a')that is, this method will escape argument keys and values according to URI rules and\nconcatenate them together into a complete query string. The format for the Lua table passed as the args argument is identical to the format used in the ngx.encode_args method.The args option can also take plain query strings: ngx.location.capture('/foo?a=1',\n { args = 'b=3&c=%3a' }\n )This is functionally identical to the previous examples.The share_all_vars option controls whether to share Nginx variables among the current request and its subrequests.\nIf this option is set to true, then the current request and associated subrequests will share the same Nginx variable scope. Hence, changes to Nginx variables made by a subrequest will affect the current request.Care should be taken in using this option as variable scope sharing can have unexpected side effects. 
The args, vars, or copy_all_vars options are generally preferable instead.This option is set to false by default location /other {\n set $dog \"$dog world\";\n echo \"$uri dog: $dog\";\n }\n\n location /lua {\n set $dog 'hello';\n content_by_lua_block {\n res = ngx.location.capture(\"/other\",\n { share_all_vars = true })\n\n ngx.print(res.body)\n ngx.say(ngx.var.uri, \": \", ngx.var.dog)\n }\n }Accessing location /lua gives/other dog: hello world\n/lua: hello world\nThe copy_all_vars option provides a copy of the parent request's Nginx variables to subrequests when such subrequests are issued. Changes made to these variables by such subrequests will not affect the parent request or any other subrequests sharing the parent request's variables. location /other {\n set $dog \"$dog world\";\n echo \"$uri dog: $dog\";\n }\n\n location /lua {\n set $dog 'hello';\n content_by_lua_block {\n res = ngx.location.capture(\"/other\",\n { copy_all_vars = true })\n\n ngx.print(res.body)\n ngx.say(ngx.var.uri, \": \", ngx.var.dog)\n }\n }Request GET /lua will give the output/other dog: hello world\n/lua: hello\nNote that if both share_all_vars and copy_all_vars are set to true, then share_all_vars takes precedence.In addition to the two settings above, it is possible to specify\nvalues for variables in the subrequest using the vars option. These\nvariables are set after the sharing or copying of variables has been\nevaluated, and provides a more efficient method of passing specific\nvalues to a subrequest over encoding them as URL arguments and\nunescaping them in the Nginx config file. 
location /other {\n content_by_lua_block {\n ngx.say(\"dog = \", ngx.var.dog)\n ngx.say(\"cat = \", ngx.var.cat)\n }\n }\n\n location /lua {\n set $dog '';\n set $cat '';\n content_by_lua_block {\n res = ngx.location.capture(\"/other\",\n { vars = { dog = \"hello\", cat = 32 }})\n\n ngx.print(res.body)\n }\n }Accessing /lua will yield the outputdog = hello\ncat = 32\nThe ctx option can be used to specify a custom Lua table to serve as the ngx.ctx table for the subrequest. location /sub {\n content_by_lua_block {\n ngx.ctx.foo = \"bar\";\n }\n }\n location /lua {\n content_by_lua_block {\n local ctx = {}\n res = ngx.location.capture(\"/sub\", { ctx = ctx })\n\n ngx.say(ctx.foo)\n ngx.say(ngx.ctx.foo)\n }\n }Then request GET /lua givesbar\nnil\nIt is also possible to use this ctx option to share the same ngx.ctx table between the current (parent) request and the subrequest: location /sub {\n content_by_lua_block {\n ngx.ctx.foo = \"bar\"\n }\n }\n location /lua {\n content_by_lua_block {\n res = ngx.location.capture(\"/sub\", { ctx = ngx.ctx })\n ngx.say(ngx.ctx.foo)\n }\n }Request GET /lua yields the outputbar\nNote that subrequests issued by ngx.location.capture inherit all the\nrequest headers of the current request by default and that this may have unexpected side effects on the\nsubrequest responses. For example, when using the standard ngx_proxy module to serve\nsubrequests, an \"Accept-Encoding: gzip\" header in the main request may result\nin gzipped responses that cannot be handled properly in Lua code. Original request headers should be ignored by setting\nproxy_pass_request_headers to off in subrequest locations.When the body option is not specified and the always_forward_body option is false (the default value), the POST and PUT subrequests will inherit the request bodies of the parent request (if any).There is a hard-coded upper limit on the number of subrequests possible for every main request. 
In older versions of Nginx, the limit was 50 concurrent subrequests and in more recent versions, Nginx 1.9.5 onwards, the same limit is changed to limit the depth of recursive subrequests. When this limit is exceeded, the following error message is added to the error.log file:[error] 13983#0: *1 subrequests cycle while processing \"/uri\"\nThe limit can be manually modified if required by editing the definition of the NGX_HTTP_MAX_SUBREQUESTS macro in the nginx/src/http/ngx_http_request.h file in the Nginx source tree.Please also refer to restrictions on capturing locations configured by subrequest directives of other modules.", + "prefix": "ngx.location.capture", + "body": "local res = ngx.location.capture(${1:uri}, ${2:options?})" + }, + "ngx.location.capture_multi": { + "description": "Just like ngx.location.capture, but supports multiple subrequests running in parallel.This function issues several parallel subrequests specified by the input table and returns their results in the same order. 
For example, res1, res2, res3 = ngx.location.capture_multi{\n { \"/foo\", { args = \"a=3&b=4\" } },\n { \"/bar\" },\n { \"/baz\", { method = ngx.HTTP_POST, body = \"hello\" } },\n }\n\n if res1.status == ngx.HTTP_OK then\n ...\n end\n\n if res2.body == \"BLAH\" then\n ...\n endThis function will not return until all the subrequests terminate.\nThe total latency is the longest latency of the individual subrequests rather than the sum.Lua tables can be used for both requests and responses when the number of subrequests to be issued is not known in advance: -- construct the requests table\n local reqs = {}\n table.insert(reqs, { \"/mysql\" })\n table.insert(reqs, { \"/postgres\" })\n table.insert(reqs, { \"/redis\" })\n table.insert(reqs, { \"/memcached\" })\n\n -- issue all the requests at once and wait until they all return\n local resps = {\n ngx.location.capture_multi(reqs)\n }\n\n -- loop over the responses table\n for i, resp in ipairs(resps) do\n -- process the response table \"resp\"\n endThe ngx.location.capture function is just a special form\nof this function. Logically speaking, the ngx.location.capture can be implemented like this ngx.location.capture =\n function (uri, args)\n return ngx.location.capture_multi({ {uri, args} })\n endPlease also refer to restrictions on capturing locations configured by subrequest directives of other modules.", + "prefix": "ngx.location.capture_multi", + "body": "local res1, res2, ... = ngx.location.capture_multi({ {uri, options?}, {uri, options?}, ... })" + }, + "ngx.status": { + "description": "Read and write the current request's response status. This should be called\nbefore sending out the response headers. 
ngx.status = ngx.HTTP_CREATED\n status = ngx.statusSetting ngx.status after the response header is sent out has no effect but leaving an error message in your Nginx's error log file:attempt to set ngx.status after sending out response headers\n", + "prefix": "ngx.status", + "body": "ngx.status" + }, + "ngx.header.HEADER": { + "description": "Set, add to, or clear the current request's HEADER response header that is to be sent.Underscores (_) in the header names will be replaced by hyphens (-) by default. This transformation can be turned off via the lua_transform_underscores_in_response_headers directive.The header names are matched case-insensitively. -- equivalent to ngx.header[\"Content-Type\"] = 'text/plain'\n ngx.header.content_type = 'text/plain'\n\n ngx.header[\"X-My-Header\"] = 'blah blah'Multi-value headers can be set this way: ngx.header['Set-Cookie'] = {'a=32; path=/', 'b=4; path=/'}will yield Set-Cookie: a=32; path=/\n Set-Cookie: b=4; path=/in the response headers.Only Lua tables are accepted (Only the last element in the table will take effect for standard headers such as Content-Type that only accept a single value). ngx.header.content_type = {'a', 'b'}is equivalent to ngx.header.content_type = 'b'Setting a slot to nil effectively removes it from the response headers: ngx.header[\"X-My-Header\"] = nilThe same applies to assigning an empty table: ngx.header[\"X-My-Header\"] = {}Setting ngx.header.HEADER after sending out response headers (either explicitly with ngx.send_headers or implicitly with ngx.print and similar) will log an error message.Reading ngx.header.HEADER will return the value of the response header named HEADER.Underscores (_) in the header names will also be replaced by dashes (-) and the header names will be matched case-insensitively. 
If the response header is not present at all, nil will be returned.This is particularly useful in the context of header_filter_by_lua*, for example, location /test {\n set $footer '';\n\n proxy_pass http://some-backend;\n\n header_filter_by_lua_block {\n if ngx.header[\"X-My-Header\"] == \"blah\" then\n ngx.var.footer = \"some value\"\n end\n }\n\n echo_after_body $footer;\n }For multi-value headers, all of the values of header will be collected in order and returned as a Lua table. For example, response headersFoo: bar\nFoo: baz\nwill result in {\"bar\", \"baz\"}to be returned when reading ngx.header.Foo.Note that ngx.header is not a normal Lua table and as such, it is not possible to iterate through it using the Lua ipairs function.Note: this function throws a Lua error if HEADER or\nVALUE contain unsafe characters (control characters).For reading request headers, use the ngx.req.get_headers function instead.", + "prefix": "ngx.header.HEADER", + "body": "local value = ngx.header.HEADER" + }, + "ngx.resp.get_headers": { + "description": "Returns a Lua table holding all the current response headers for the current request. local h, err = ngx.resp.get_headers()\n\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current response here\n end\n\n for k, v in pairs(h) do\n ...\n endThis function has the same signature as ngx.req.get_headers except getting response headers instead of request headers.Note that a maximum of 100 response headers are parsed by default (including those with the same name) and that additional response headers are silently discarded to guard against potential denial of service attacks. 
Since v0.10.13, when the limit is exceeded, it will return a second value which is the string \"truncated\".This API was first introduced in the v0.9.5 release.", + "prefix": "ngx.resp.get_headers", + "body": "local headers, err = ngx.resp.get_headers(${1:max_headers?}, ${2:raw?})" + }, + "ngx.req.is_internal": { + "description": "Returns a boolean indicating whether the current request is an \"internal request\", i.e.,\na request initiated from inside the current Nginx server instead of from the client side.Subrequests are all internal requests and so are requests after internal redirects.This API was first introduced in the v0.9.20 release.", + "prefix": "ngx.req.is_internal", + "body": "local is_internal = ngx.req.is_internal()" + }, + "ngx.req.start_time": { + "description": "Returns a floating-point number representing the timestamp (including milliseconds as the decimal part) when the current request was created.The following example emulates the $request_time variable value (provided by ngx_http_log_module) in pure Lua: local request_time = ngx.now() - ngx.req.start_time()This function was first introduced in the v0.7.7 release.See also ngx.now and ngx.update_time.", + "prefix": "ngx.req.start_time", + "body": "local secs = ngx.req.start_time()" + }, + "ngx.req.http_version": { + "description": "Returns the HTTP version number for the current request as a Lua number.Current possible values are 2.0, 1.0, 1.1, and 0.9. Returns nil for unrecognized values.This method was first introduced in the v0.7.17 release.", + "prefix": "ngx.req.http_version", + "body": "local num = ngx.req.http_version()" + }, + "ngx.req.raw_header": { + "description": "Returns the original raw HTTP protocol header received by the Nginx server.By default, the request line and trailing CR LF terminator will also be included. 
For example, ngx.print(ngx.req.raw_header())gives something like this:GET /t HTTP/1.1\nHost: localhost\nConnection: close\nFoo: bar\nYou can specify the optional\nno_request_line argument as a true value to exclude the request line from the result. For example, ngx.print(ngx.req.raw_header(true))outputs something like this:Host: localhost\nConnection: close\nFoo: bar\nThis method was first introduced in the v0.7.17 release.This method does not work in HTTP/2 requests yet.", + "prefix": "ngx.req.raw_header", + "body": "local str = ngx.req.raw_header(${1:no_request_line?})" + }, + "ngx.req.get_method": { + "description": "Retrieves the current request's request method name. Strings like \"GET\" and \"POST\" are returned instead of numerical method constants.If the current request is an Nginx subrequest, then the subrequest's method name will be returned.This method was first introduced in the v0.5.6 release.See also ngx.req.set_method.", + "prefix": "ngx.req.get_method", + "body": "local method_name = ngx.req.get_method()" + }, + "ngx.req.set_method": { + "description": "Overrides the current request's request method with the method_id argument. Currently only numerical method constants are supported, like ngx.HTTP_POST and ngx.HTTP_GET.If the current request is an Nginx subrequest, then the subrequest's method will be overridden.This method was first introduced in the v0.5.6 release.See also ngx.req.get_method.", + "prefix": "ngx.req.set_method", + "body": "ngx.req.set_method(${1:method_id})" + }, + "ngx.req.set_uri": { + "description": "Rewrite the current request's (parsed) URI by the uri argument. 
The uri argument must be a Lua string and cannot be of zero length, or a Lua exception will be thrown.The optional boolean jump argument can trigger location rematch (or location jump) as ngx_http_rewrite_module's rewrite directive, that is, when jump is true (default to false), this function will never return and it will tell Nginx to try re-searching locations with the new URI value at the later post-rewrite phase and jumping to the new location.Location jump will not be triggered otherwise, and only the current request's URI will be modified, which is also the default behavior. This function will return but with no returned values when the jump argument is false or absent altogether.For example, the following Nginx config snippet rewrite ^ /foo last;can be coded in Lua like this: ngx.req.set_uri(\"/foo\", true)Similarly, Nginx config rewrite ^ /foo break;can be coded in Lua as ngx.req.set_uri(\"/foo\", false)or equivalently, ngx.req.set_uri(\"/foo\")The jump argument can only be set to true in rewrite_by_lua*. Use of jump in other contexts is prohibited and will throw out a Lua exception.A more sophisticated example involving regex substitutions is as follows location /test {\n rewrite_by_lua_block {\n local uri = ngx.re.sub(ngx.var.uri, \"^/test/(.*)\", \"/$1\", \"o\")\n ngx.req.set_uri(uri)\n }\n proxy_pass http://my_backend;\n }which is functionally equivalent to location /test {\n rewrite ^/test/(.*) /$1 break;\n proxy_pass http://my_backend;\n }Note: this function throws a Lua error if the uri argument\ncontains unsafe characters (control characters).Note that it is not possible to use this interface to rewrite URI arguments and that ngx.req.set_uri_args should be used for this instead. For instance, Nginx config rewrite ^ /foo?a=3? 
last;can be coded as ngx.req.set_uri_args(\"a=3\")\n ngx.req.set_uri(\"/foo\", true)or ngx.req.set_uri_args({a = 3})\n ngx.req.set_uri(\"/foo\", true)Starting from 0.10.16 of this module, this function accepts an\noptional boolean binary argument to allow arbitrary binary URI\ndata. By default, this binary argument is false and this function\nwill throw out a Lua error such as the one below when the uri\nargument contains any control characters (ASCII Code 0 ~ 0x08, 0x0A ~ 0x1F and 0x7F).[error] 23430#23430: *1 lua entry thread aborted: runtime error:\ncontent_by_lua(nginx.conf:44):3: ngx.req.set_uri unsafe byte \"0x00\"\nin \"\\x00foo\" (maybe you want to set the 'binary' argument?)\nThis interface was first introduced in the v0.3.1rc14 release.", + "prefix": "ngx.req.set_uri", + "body": "ngx.req.set_uri(${1:uri}, ${2:jump?}, ${3:binary?})" + }, + "ngx.req.set_uri_args": { + "description": "Rewrite the current request's URI query arguments by the args argument. The args argument can be either a Lua string, as in ngx.req.set_uri_args(\"a=3&b=hello%20world\")or a Lua table holding the query arguments' key-value pairs, as in ngx.req.set_uri_args({ a = 3, b = \"hello world\" })In the former case, i.e., when the whole query-string is provided directly,\nthe input Lua string should already be well-formed with the URI encoding.\nFor security considerations, this method will automatically escape any control and\nwhitespace characters (ASCII code 0x00 ~ 0x20 and 0x7F) in the Lua string.In the latter case, this method will escape argument keys and values according to the URI escaping rule.Multi-value arguments are also supported: ngx.req.set_uri_args({ a = 3, b = {5, 6} })which will result in a query string like a=3&b=5&b=6 or b=5&b=6&a=3.Note that when using Lua table as the arg argument, the order of the arguments in the result query string may change from time to time. 
If you would like to get an ordered result, you need to use Lua string as the arg argument.This interface was first introduced in the v0.3.1rc13 release.See also ngx.req.set_uri.", + "prefix": "ngx.req.set_uri_args", + "body": "ngx.req.set_uri_args(${1:args})" + }, + "ngx.req.get_uri_args": { + "description": "Returns a Lua table holding all the current request URL query arguments. An optional tab argument\ncan be used to reuse the table returned by this method. location = /test {\n content_by_lua_block {\n local args, err = ngx.req.get_uri_args()\n\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n end\n\n for key, val in pairs(args) do\n if type(val) == \"table\" then\n ngx.say(key, \": \", table.concat(val, \", \"))\n else\n ngx.say(key, \": \", val)\n end\n end\n }\n }Then GET /test?foo=bar&bar=baz&bar=blah will yield the response body foo: bar\n bar: baz, blahMultiple occurrences of an argument key will result in a table value holding all the values for that key in order.Keys and values are unescaped according to URI escaping rules. In the settings above, GET /test?a%20b=1%61+2 will yield: a b: 1a 2Arguments without the = parts are treated as boolean arguments. GET /test?foo&bar will yield: foo: true\n bar: trueThat is, they will take Lua boolean values true. However, they are different from arguments taking empty string values. GET /test?foo=&bar= will give something like foo:\n bar:Empty key arguments are discarded. 
GET /test?=hello&=world will yield an empty output for instance.Updating query arguments via the Nginx variable $args (or ngx.var.args in Lua) at runtime is also supported: ngx.var.args = \"a=3&b=42\"\n local args, err = ngx.req.get_uri_args()Here the args table will always look like {a = 3, b = 42}regardless of the actual request query string.Note that a maximum of 100 request arguments are parsed by default (including those with the same name) and that additional request arguments are silently discarded to guard against potential denial of service attacks. Since v0.10.13, when the limit is exceeded, it will return a second value which is the string \"truncated\".However, the optional max_args function argument can be used to override this limit: local args, err = ngx.req.get_uri_args(10)\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n endThis argument can be set to zero to remove the limit and to process all request arguments received: local args, err = ngx.req.get_uri_args(0)Removing the max_args cap is strongly discouraged.", + "prefix": "ngx.req.get_uri_args", + "body": "local args, err = ngx.req.get_uri_args(${1:max_args?}, ${2:tab?})" + }, + "ngx.req.get_post_args": { + "description": "Returns a Lua table holding all the current request POST query arguments (of the MIME type application/x-www-form-urlencoded). Call ngx.req.read_body to read the request body first or turn on the lua_need_request_body directive to avoid errors. 
location = /test {\n content_by_lua_block {\n ngx.req.read_body()\n local args, err = ngx.req.get_post_args()\n\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n end\n\n if not args then\n ngx.say(\"failed to get post args: \", err)\n return\n end\n for key, val in pairs(args) do\n if type(val) == \"table\" then\n ngx.say(key, \": \", table.concat(val, \", \"))\n else\n ngx.say(key, \": \", val)\n end\n end\n }\n }Then # Post request with the body 'foo=bar&bar=baz&bar=blah'\n $ curl --data 'foo=bar&bar=baz&bar=blah' localhost/testwill yield the response body like foo: bar\n bar: baz, blahMultiple occurrences of an argument key will result in a table value holding all of the values for that key in order.Keys and values will be unescaped according to URI escaping rules.With the settings above, # POST request with body 'a%20b=1%61+2'\n $ curl -d 'a%20b=1%61+2' localhost/testwill yield: a b: 1a 2Arguments without the = parts are treated as boolean arguments. POST /test with the request body foo&bar will yield: foo: true\n bar: trueThat is, they will take Lua boolean values true. However, they are different from arguments taking empty string values. POST /test with request body foo=&bar= will return something like foo:\n bar:Empty key arguments are discarded. POST /test with body =hello&=world will yield empty outputs for instance.Note that a maximum of 100 request arguments are parsed by default (including those with the same name) and that additional request arguments are silently discarded to guard against potential denial of service attacks. 
Since v0.10.13, when the limit is exceeded, it will return a second value which is the string \"truncated\".However, the optional max_args function argument can be used to override this limit: local args, err = ngx.req.get_post_args(10)\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n endThis argument can be set to zero to remove the limit and to process all request arguments received: local args, err = ngx.req.get_post_args(0)Removing the max_args cap is strongly discouraged.", + "prefix": "ngx.req.get_post_args", + "body": "local args, err = ngx.req.get_post_args(${1:max_args?})" + }, + "ngx.req.get_headers": { + "description": "Returns a Lua table holding all the current request headers. local h, err = ngx.req.get_headers()\n\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n end\n\n for k, v in pairs(h) do\n ...\n endTo read an individual header: ngx.say(\"Host: \", ngx.req.get_headers()[\"Host\"])Note that the ngx.var.HEADER API call, which uses core $http_HEADER variables, may be more preferable for reading individual request headers.For multiple instances of request headers such as: Foo: foo\n Foo: bar\n Foo: bazthe value of ngx.req.get_headers()[\"Foo\"] will be a Lua (array) table such as: {\"foo\", \"bar\", \"baz\"}Note that a maximum of 100 request headers are parsed by default (including those with the same name) and that additional request headers are silently discarded to guard against potential denial of service attacks. 
Since v0.10.13, when the limit is exceeded, it will return a second value which is the string \"truncated\".However, the optional max_headers function argument can be used to override this limit: local headers, err = ngx.req.get_headers(10)\n\n if err == \"truncated\" then\n -- one can choose to ignore or reject the current request here\n endThis argument can be set to zero to remove the limit and to process all request headers received: local headers, err = ngx.req.get_headers(0)Removing the max_headers cap is strongly discouraged.Since the 0.6.9 release, all the header names in the Lua table returned are converted to the pure lower-case form by default, unless the raw argument is set to true (default to false).Also, by default, an __index metamethod is added to the resulting Lua table and will normalize the keys to a pure lowercase form with all underscores converted to dashes in case of a lookup miss. For example, if a request header My-Foo-Header is present, then the following invocations will all pick up the value of this header correctly: ngx.say(headers.my_foo_header)\n ngx.say(headers[\"My-Foo-Header\"])\n ngx.say(headers[\"my-foo-header\"])The __index metamethod will not be added when the raw argument is set to true.", + "prefix": "ngx.req.get_headers", + "body": "local headers, err = ngx.req.get_headers(${1:max_headers?}, ${2:raw?})" + }, + "ngx.req.set_header": { + "description": "Set the current request's request header named header_name to value header_value, overriding any existing ones.The input Lua string header_name and header_value should already be well-formed with the URI encoding.\nFor security considerations, this method will automatically escape \" \", \"\"\", \"(\", \")\", \",\", \"/\", \":\", \";\", \"?\",\n\"<\", \"=\", \">\", \"?\", \"@\", \"[\", \"]\", \"\", \"{\", \"}\", 0x00-0x1F, 0x7F-0xFF in header_name and automatically escape\n\"0x00-0x08, 0x0A-0x0F, 0x7F in header_value.By default, all the subrequests subsequently initiated by 
ngx.location.capture and ngx.location.capture_multi will inherit the new header.It is not a Lua's equivalent of nginx proxy_set_header directive (same is true about ngx.req.clear_header). proxy_set_header only affects the upstream request while ngx.req.set_header change the incoming request. Record the http headers in the access log file will show the difference. But you still can use it as an alternative of nginx proxy_set_header directive as long as you know the difference.Here is an example of setting the Content-Type header: ngx.req.set_header(\"Content-Type\", \"text/css\")The header_value can take an array list of values,\nfor example, ngx.req.set_header(\"Foo\", {\"a\", \"abc\"})will produce two new request headers: Foo: a\n Foo: abcand old Foo headers will be overridden if there is any.When the header_value argument is nil, the request header will be removed. So ngx.req.set_header(\"X-Foo\", nil)is equivalent to ngx.req.clear_header(\"X-Foo\")Note: this function throws a Lua error if header_name or\nheader_value contain unsafe characters (control characters).", + "prefix": "ngx.req.set_header", + "body": "ngx.req.set_header(${1:header_name}, ${2:header_value})" + }, + "ngx.req.clear_header": { + "description": "Clears the current request's request header named header_name. None of the current request's existing subrequests will be affected but subsequently initiated subrequests will inherit the change by default.", + "prefix": "ngx.req.clear_header", + "body": "ngx.req.clear_header(${1:header_name})" + }, + "ngx.req.read_body": { + "description": "Reads the client request body synchronously without blocking the Nginx event loop. 
ngx.req.read_body()\n local args = ngx.req.get_post_args()If the request body is already read previously by turning on lua_need_request_body or by using other modules, then this function does not run and returns immediately.If the request body has already been explicitly discarded, either by the ngx.req.discard_body function or other modules, this function does not run and returns immediately.In case of errors, such as connection errors while reading the data, this method will throw out a Lua exception or terminate the current request with a 500 status code immediately.The request body data read using this function can be retrieved later via ngx.req.get_body_data or, alternatively, the temporary file name for the body data cached to disk using ngx.req.get_body_file. This depends on\nwhether the current request body is already larger than the client_body_buffer_size,\nand whether client_body_in_file_only has been switched on.\nIn cases where current request may have a request body and the request body data is not required, The ngx.req.discard_body function must be used to explicitly discard the request body to avoid breaking things under HTTP 1.1 keepalive or HTTP 1.1 pipelining.This function was first introduced in the v0.3.1rc17 release.", + "prefix": "ngx.req.read_body", + "body": "ngx.req.read_body()" + }, + "ngx.req.discard_body": { + "description": "Explicitly discard the request body, i.e., read the data on the connection and throw it away immediately (without using the request body by any means).This function is an asynchronous call and returns immediately.If the request body has already been read, this function does nothing and returns immediately.This function was first introduced in the v0.3.1rc17 release.See also ngx.req.read_body.", + "prefix": "ngx.req.discard_body", + "body": "ngx.req.discard_body()" + }, + "ngx.req.get_body_data": { + "description": "Retrieves in-memory request body data. 
It returns a Lua string rather than a Lua table holding all the parsed query arguments. Use the ngx.req.get_post_args function instead if a Lua table is required.The optional max_bytes argument can be used when you don't need the entire body.This function returns nil if\nthe request body has not been read,\nthe request body has been read into disk temporary files,\nor the request body has zero size.\nIf the request body has not been read yet, call ngx.req.read_body first (or turn on lua_need_request_body to force this module to read the request body. This is not recommended however).If the request body has been read into disk files, try calling the ngx.req.get_body_file function instead.To force in-memory request bodies, try setting client_body_buffer_size to the same size value in client_max_body_size.Note that calling this function instead of using ngx.var.request_body or ngx.var.echo_request_body is more efficient because it can save one dynamic memory allocation and one data copy.This function was first introduced in the v0.3.1rc17 release.See also ngx.req.get_body_file.", + "prefix": "ngx.req.get_body_data", + "body": "local data = ngx.req.get_body_data(${1:max_bytes?})" + }, + "ngx.req.get_body_file": { + "description": "Retrieves the file name for the in-file request body data. Returns nil if the request body has not been read or has been read into memory.The returned file is read only and is usually cleaned up by Nginx's memory pool. It should not be manually modified, renamed, or removed in Lua code.If the request body has not been read yet, call ngx.req.read_body first (or turn on lua_need_request_body to force this module to read the request body. 
This is not recommended however).If the request body has been read into memory, try calling the ngx.req.get_body_data function instead.To force in-file request bodies, try turning on client_body_in_file_only.This function was first introduced in the v0.3.1rc17 release.See also ngx.req.get_body_data.", + "prefix": "ngx.req.get_body_file", + "body": "local file_name = ngx.req.get_body_file()" + }, + "ngx.req.set_body_data": { + "description": "Set the current request's request body using the in-memory data specified by the data argument.If the request body has not been read yet, call ngx.req.read_body first (or turn on lua_need_request_body to force this module to read the request body. This is not recommended however). Additionally, the request body must not have been previously discarded by ngx.req.discard_body.Whether the previous request body has been read into memory or buffered into a disk file, it will be freed or the disk file will be cleaned up immediately, respectively.This function was first introduced in the v0.3.1rc18 release.See also ngx.req.set_body_file.", + "prefix": "ngx.req.set_body_data", + "body": "ngx.req.set_body_data(${1:data})" + }, + "ngx.req.set_body_file": { + "description": "Set the current request's request body using the in-file data specified by the file_name argument.If the request body has not been read yet, call ngx.req.read_body first (or turn on lua_need_request_body to force this module to read the request body. This is not recommended however). Additionally, the request body must not have been previously discarded by ngx.req.discard_body.If the optional auto_clean argument is given a true value, then this file will be removed at request completion or the next time this function or ngx.req.set_body_data are called in the same request. 
The auto_clean is default to false.Please ensure that the file specified by the file_name argument exists and is readable by an Nginx worker process by setting its permission properly to avoid Lua exception errors.Whether the previous request body has been read into memory or buffered into a disk file, it will be freed or the disk file will be cleaned up immediately, respectively.This function was first introduced in the v0.3.1rc18 release.See also ngx.req.set_body_data.", + "prefix": "ngx.req.set_body_file", + "body": "ngx.req.set_body_file(${1:file_name}, ${2:auto_clean?})" + }, + "ngx.req.init_body": { + "description": "Creates a new blank request body for the current request and initializes the buffer for later request body data writing via the ngx.req.append_body and ngx.req.finish_body APIs.If the buffer_size argument is specified, then its value will be used for the size of the memory buffer for body writing with ngx.req.append_body. If the argument is omitted, then the value specified by the standard client_body_buffer_size directive will be used instead.When the data can no longer be hold in the memory buffer for the request body, then the data will be flushed onto a temporary file just like the standard request body reader in the Nginx core.It is important to always call the ngx.req.finish_body after all the data has been appended onto the current request body. 
Also, when this function is used together with ngx.req.socket, it is required to call ngx.req.socket before this function, or you will get the \"request body already exists\" error message.The usage of this function is often like this: ngx.req.init_body(128 * 1024) -- buffer is 128KB\n for chunk in next_data_chunk() do\n ngx.req.append_body(chunk) -- each chunk can be 4KB\n end\n ngx.req.finish_body()This function can be used with ngx.req.append_body, ngx.req.finish_body, and ngx.req.socket to implement efficient input filters in pure Lua (in the context of rewrite_by_lua* or access_by_lua*), which can be used with other Nginx content handler or upstream modules like ngx_http_proxy_module and ngx_http_fastcgi_module.This function was first introduced in the v0.5.11 release.", + "prefix": "ngx.req.init_body", + "body": "ngx.req.init_body(${1:buffer_size?})" + }, + "ngx.req.append_body": { + "description": "Append new data chunk specified by the data_chunk argument onto the existing request body created by the ngx.req.init_body call.When the data can no longer be hold in the memory buffer for the request body, then the data will be flushed onto a temporary file just like the standard request body reader in the Nginx core.It is important to always call the ngx.req.finish_body after all the data has been appended onto the current request body.This function can be used with ngx.req.init_body, ngx.req.finish_body, and ngx.req.socket to implement efficient input filters in pure Lua (in the context of rewrite_by_lua* or access_by_lua*), which can be used with other Nginx content handler or upstream modules like ngx_http_proxy_module and ngx_http_fastcgi_module.This function was first introduced in the v0.5.11 release.See also ngx.req.init_body.", + "prefix": "ngx.req.append_body", + "body": "ngx.req.append_body(${1:data_chunk})" + }, + "ngx.req.finish_body": { + "description": "Completes the construction process of the new request body created by the ngx.req.init_body and 
ngx.req.append_body calls.This function can be used with ngx.req.init_body, ngx.req.append_body, and ngx.req.socket to implement efficient input filters in pure Lua (in the context of rewrite_by_lua* or access_by_lua*), which can be used with other Nginx content handler or upstream modules like ngx_http_proxy_module and ngx_http_fastcgi_module.This function was first introduced in the v0.5.11 release.See also ngx.req.init_body.", + "prefix": "ngx.req.finish_body", + "body": "ngx.req.finish_body()" + }, + "ngx.req.socket": { + "description": "Returns a read-only cosocket object that wraps the downstream connection. Only receive, receiveany and receiveuntil methods are supported on this object.In case of error, nil will be returned as well as a string describing the error.Note: This method will block while waiting for client request body to be fully received. Block time depends on the client_body_timeout directive and maximum body size specified by the client_max_body_size directive. If read timeout occurs or client body size exceeds the defined limit, this function will not return and 408 Request Time-out or 413 Request Entity Too Large response will be returned to the client instead.The socket object returned by this method is usually used to read the current request's body in a streaming fashion. Do not turn on the lua_need_request_body directive, and do not mix this call with ngx.req.read_body and ngx.req.discard_body.If any request body data has been pre-read into the Nginx core request header buffer, the resulting cosocket object will take care of this to avoid potential data loss resulting from such pre-reading.\nChunked request bodies are not yet supported in this API.Since the v0.9.0 release, this function accepts an optional boolean raw argument. 
When this argument is true, this function returns a full-duplex cosocket object wrapping around the raw downstream connection socket, upon which you can call the receive, receiveany, receiveuntil, and send methods.When the raw argument is true, it is required that no pending data from any previous ngx.say, ngx.print, or ngx.send_headers calls exists. So if you have these downstream output calls previously, you should call ngx.flush(true) before calling ngx.req.socket(true) to ensure that there is no pending output data. If the request body has not been read yet, then this \"raw socket\" can also be used to read the request body.You can use the \"raw request socket\" returned by ngx.req.socket(true) to implement fancy protocols like WebSocket, or just emit your own raw HTTP response header or body data. You can refer to the lua-resty-websocket library for a real world example.This function was first introduced in the v0.5.0rc1 release.", + "prefix": "ngx.req.socket", + "body": "local tcpsock, err = ngx.req.socket(${1:raw})" + }, + "ngx.exec": { + "description": "Does an internal redirect to uri with args and is similar to the echo_exec directive of the echo-nginx-module. ngx.exec('/some-location')\n ngx.exec('/some-location', 'a=3&b=5&c=6')\n ngx.exec('/some-location?a=3&b=5', 'c=6')The optional second args can be used to specify extra URI query arguments, for example: ngx.exec(\"/foo\", \"a=3&b=hello%20world\")Alternatively, a Lua table can be passed for the args argument for ngx_lua to carry out URI escaping and string concatenation. 
ngx.exec(\"/foo\", { a = 3, b = \"hello world\" })The result is exactly the same as the previous example.The format for the Lua table passed as the args argument is identical to the format used in the ngx.encode_args method.Named locations are also supported but the second args argument will be ignored if present and the querystring for the new target is inherited from the referring location (if any).GET /foo/file.php?a=hello will return \"hello\" and not \"goodbye\" in the example below location /foo {\n content_by_lua_block {\n ngx.exec(\"@bar\", \"a=goodbye\")\n }\n }\n\n location @bar {\n content_by_lua_block {\n local args = ngx.req.get_uri_args()\n for key, val in pairs(args) do\n if key == \"a\" then\n ngx.say(val)\n end\n end\n }\n }Note that the ngx.exec method is different from ngx.redirect in that\nit is purely an internal redirect and that no new external HTTP traffic is involved.Also note that this method call terminates the processing of the current request and that it must be called before ngx.send_headers or explicit response body\noutputs by either ngx.print or ngx.say.It is recommended that a coding style that combines this method call with the return statement, i.e., return ngx.exec(...) be adopted when this method call is used in contexts other than header_filter_by_lua* to reinforce the fact that the request processing is being terminated.", + "prefix": "ngx.exec", + "body": "ngx.exec(${1:uri}, ${2:args?})" + }, + "ngx.redirect": { + "description": "Issue an HTTP 301 or 302 redirection to uri.Note: this function throws a Lua error if the uri argument\ncontains unsafe characters (control characters).The optional status parameter specifies the HTTP status code to be used. 
The following status codes are supported right now:\n301\n302 (default)\n303\n307\n308\nIt is 302 (ngx.HTTP_MOVED_TEMPORARILY) by default.Here is an example assuming the current server name is localhost and that it is listening on port 1984: return ngx.redirect(\"/foo\")which is equivalent to return ngx.redirect(\"/foo\", ngx.HTTP_MOVED_TEMPORARILY)Redirecting arbitrary external URLs is also supported, for example: return ngx.redirect(\"http://www.google.com\")We can also use the numerical code directly as the second status argument: return ngx.redirect(\"/foo\", 301)This method is similar to the rewrite directive with the redirect modifier in the standard\nngx_http_rewrite_module, for example, this nginx.conf snippet rewrite ^ /foo? redirect; # nginx configis equivalent to the following Lua code return ngx.redirect('/foo') -- Lua codewhile rewrite ^ /foo? permanent; # nginx configis equivalent to return ngx.redirect('/foo', ngx.HTTP_MOVED_PERMANENTLY) -- Lua codeURI arguments can be specified as well, for example: return ngx.redirect('/foo?a=3&b=4')Note that this method call terminates the processing of the current request and that it must be called before ngx.send_headers or explicit response body\noutputs by either ngx.print or ngx.say.It is recommended that a coding style that combines this method call with the return statement, i.e., return ngx.redirect(...) 
be adopted when this method call is used in contexts other than header_filter_by_lua* to reinforce the fact that the request processing is being terminated.", + "prefix": "ngx.redirect", + "body": "ngx.redirect(${1:uri}, ${2:status?})" + }, + "ngx.send_headers": { + "description": "Explicitly send out the response headers.Since v0.8.3 this function returns 1 on success, or returns nil and a string describing the error otherwise.Note that there is normally no need to manually send out response headers as ngx_lua will automatically send headers out\nbefore content is output with ngx.say or ngx.print or when content_by_lua* exits normally.", + "prefix": "ngx.send_headers", + "body": "local ok, err = ngx.send_headers()" + }, + "ngx.headers_sent": { + "description": "Returns true if the response headers have been sent (by ngx_lua), and false otherwise.This API was first introduced in ngx_lua v0.3.1rc6.", + "prefix": "ngx.headers_sent", + "body": "local value = ngx.headers_sent" + }, + "ngx.print": { + "description": "Emits arguments concatenated to the HTTP client (as response body). If response headers have not been sent, this function will send headers out first and then output body data.Since v0.8.3 this function returns 1 on success, or returns nil and a string describing the error otherwise.Lua nil values will output \"nil\" strings and Lua boolean values will output \"true\" and \"false\" literal strings respectively.Nested arrays of strings are permitted and the elements in the arrays will be sent one by one: local table = {\n \"hello, \",\n {\"world: \", true, \" or \", false,\n {\": \", nil}}\n }\n ngx.print(table)will yield the output hello, world: true or false: nilNon-array table arguments will cause a Lua exception to be thrown.The ngx.null constant will yield the \"null\" string output.This is an asynchronous call and will return immediately without waiting for all the data to be written into the system send buffer. 
To run in synchronous mode, call ngx.flush(true) after calling ngx.print. This can be particularly useful for streaming output. See ngx.flush for more details.Please note that both ngx.print and ngx.say will always invoke the whole Nginx output body filter chain, which is an expensive operation. So be careful when calling either of these two in a tight loop; buffer the data yourself in Lua and save the calls.", + "prefix": "ngx.print", + "body": "local ok, err = ngx.print(...)" + }, + "ngx.say": { + "description": "Just as ngx.print but also emit a trailing newline.", + "prefix": "ngx.say", + "body": "local ok, err = ngx.say(...)" + }, + "ngx.log": { + "description": "Log arguments concatenated to error.log with the given logging level.Lua nil arguments are accepted and result in literal \"nil\" string while Lua booleans result in literal \"true\" or \"false\" string outputs. And the ngx.null constant will yield the \"null\" string output.The log_level argument can take constants like ngx.ERR and ngx.WARN. Check out Nginx log level constants for details.There is a hard coded 2048 byte limitation on error message lengths in the Nginx core. This limit includes trailing newlines and leading time stamps. If the message size exceeds this limit, Nginx will truncate the message text accordingly. This limit can be manually modified by editing the NGX_MAX_ERROR_STR macro definition in the src/core/ngx_log.h file in the Nginx source tree.", + "prefix": "ngx.log", + "body": "ngx.log(${1:log_level}, ...)" + }, + "ngx.flush": { + "description": "Flushes response output to the client.ngx.flush accepts an optional boolean wait argument (Default: false) first introduced in the v0.3.1rc34 release. When called with the default argument, it issues an asynchronous call (Returns immediately without waiting for output data to be written into the system send buffer). 
Calling the function with the wait argument set to true switches to synchronous mode.In synchronous mode, the function will not return until all output data has been written into the system send buffer or until the send_timeout setting has expired. Note that using the Lua coroutine mechanism means that this function does not block the Nginx event loop even in the synchronous mode.When ngx.flush(true) is called immediately after ngx.print or ngx.say, it causes the latter functions to run in synchronous mode. This can be particularly useful for streaming output.Note that ngx.flush is not functional when in the HTTP 1.0 output buffering mode. See HTTP 1.0 support.Since v0.8.3 this function returns 1 on success, or returns nil and a string describing the error otherwise.", + "prefix": "ngx.flush", + "body": "local ok, err = ngx.flush(${1:wait?})" + }, + "ngx.exit": { + "description": "When status >= 200 (i.e., ngx.HTTP_OK and above), it will interrupt the execution of the current request and return status code to Nginx.When status == 0 (i.e., ngx.OK), it will only quit the current phase handler (or the content handler if the content_by_lua* directive is used) and continue to run later phases (if any) for the current request.The status argument can be ngx.OK, ngx.ERROR, ngx.HTTP_NOT_FOUND,\nngx.HTTP_MOVED_TEMPORARILY, or other HTTP status constants.To return an error page with custom contents, use code snippets like this: ngx.status = ngx.HTTP_GONE\n ngx.say(\"This is our own content\")\n -- to cause quit the whole request rather than the current phase handler\n ngx.exit(ngx.HTTP_OK)The effect in action: $ curl -i http://localhost/test\n HTTP/1.1 410 Gone\n Server: nginx/1.0.6\n Date: Thu, 15 Sep 2011 00:51:48 GMT\n Content-Type: text/plain\n Transfer-Encoding: chunked\n Connection: keep-alive\n\n This is our own contentNumber literals can be used directly as the argument, for instance, ngx.exit(501)Note that while this method accepts all HTTP status constants as input, 
it only accepts ngx.OK and ngx.ERROR of the core constants.Also note that this method call terminates the processing of the current request and that it is recommended that a coding style that combines this method call with the return statement, i.e., return ngx.exit(...) be used to reinforce the fact that the request processing is being terminated.When being used in the contexts of header_filter_by_lua*, balancer_by_lua*, and\nssl_session_store_by_lua*, ngx.exit() is\nan asynchronous operation and will return immediately. This behavior may change in future and it is recommended that users always use return in combination as suggested above.", + "prefix": "ngx.exit", + "body": "ngx.exit(${1:status})" + }, + "ngx.eof": { + "description": "Explicitly specify the end of the response output stream. In the case of HTTP 1.1 chunked encoded output, it will just trigger the Nginx core to send out the \"last chunk\".When you disable the HTTP 1.1 keep-alive feature for your downstream connections, you can rely on well written HTTP clients to close the connection actively for you when you call this method. This trick can be used to do background jobs without letting the HTTP clients to wait on the connection, as in the following example: location = /async {\n keepalive_timeout 0;\n content_by_lua_block {\n ngx.say(\"got the task!\")\n ngx.eof() -- well written HTTP clients will close the connection at this point\n -- access MySQL, PostgreSQL, Redis, Memcached, and etc here...\n }\n }But if you create subrequests to access other locations configured by Nginx upstream modules, then you should configure those upstream modules to ignore client connection abortions if they are not by default. 
For example, by default the standard ngx_http_proxy_module will terminate both the subrequest and the main request as soon as the client closes the connection, so it is important to turn on the proxy_ignore_client_abort directive in your location block configured by ngx_http_proxy_module: proxy_ignore_client_abort on;A better way to do background jobs is to use the ngx.timer.at API.Since v0.8.3 this function returns 1 on success, or returns nil and a string describing the error otherwise.", + "prefix": "ngx.eof", + "body": "local ok, err = ngx.eof()" + }, + "ngx.sleep": { + "description": "Sleeps for the specified seconds without blocking. One can specify time resolution up to 0.001 seconds (i.e., one millisecond).Behind the scene, this method makes use of the Nginx timers.Since the 0.7.20 release, The 0 time argument can also be specified.This method was introduced in the 0.5.0rc30 release.", + "prefix": "ngx.sleep", + "body": "ngx.sleep(${1:seconds})" + }, + "ngx.escape_uri": { + "description": "Since v0.10.16, this function accepts an optional type argument.\nIt accepts the following values (defaults to 2):\n0: escapes str as a full URI. And the characters\n (space), #, %,\n?, 0x00 ~ 0x1F, 0x7F ~ 0xFF will be escaped.\n2: escape str as a URI component. All characters except\nalphabetic characters, digits, -, ., _,\n~ will be encoded as %XX.\n", + "prefix": "ngx.escape_uri", + "body": "local newstr = ngx.escape_uri(${1:str}, ${2:type?})" + }, + "ngx.unescape_uri": { + "description": "Unescape str as an escaped URI component.For example, ngx.say(ngx.unescape_uri(\"b%20r56+7\"))gives the outputb r56 7\nInvalid escaping sequences are handled in a conventional way: %s are left unchanged. 
Also, characters that should not appear in escaped string are simply left unchanged.For example, ngx.say(ngx.unescape_uri(\"try %search%%20%again%\"))gives the outputtry %search% %again%\n(Note that %20 following % got unescaped, even it can be considered a part of invalid sequence.)", + "prefix": "ngx.unescape_uri", + "body": "local newstr = ngx.unescape_uri(${1:str})" + }, + "ngx.encode_args": { + "description": "Encode the Lua table to a query args string according to the URI encoded rules.For example, ngx.encode_args({foo = 3, [\"b r\"] = \"hello world\"})yieldsfoo=3&b%20r=hello%20world\nThe table keys must be Lua strings.Multi-value query args are also supported. Just use a Lua table for the argument's value, for example: ngx.encode_args({baz = {32, \"hello\"}})givesbaz=32&baz=hello\nIf the value table is empty and the effect is equivalent to the nil value.Boolean argument values are also supported, for instance, ngx.encode_args({a = true, b = 1})yieldsa&b=1\nIf the argument value is false, then the effect is equivalent to the nil value.This method was first introduced in the v0.3.1rc27 release.", + "prefix": "ngx.encode_args", + "body": "local str = ngx.encode_args(${1:table})" + }, + "ngx.decode_args": { + "description": "Decodes a URI encoded query-string into a Lua table. This is the inverse function of ngx.encode_args.The optional max_args argument can be used to specify the maximum number of arguments parsed from the str argument. By default, a maximum of 100 request arguments are parsed (including those with the same name) and that additional URI arguments are silently discarded to guard against potential denial of service attacks. 
Since v0.10.13, when the limit is exceeded, it will return a second value which is the string \"truncated\".This argument can be set to zero to remove the limit and to process all request arguments received: local args = ngx.decode_args(str, 0)Removing the max_args cap is strongly discouraged.This method was introduced in the v0.5.0rc29.", + "prefix": "ngx.decode_args", + "body": "local table, err = ngx.decode_args(${1:str}, ${2:max_args?})" + }, + "ngx.encode_base64": { + "description": "Encodes str to a base64 digest.Since the 0.9.16 release, an optional boolean-typed no_padding argument can be specified to control whether the base64 padding should be appended to the resulting digest (default to false, i.e., with padding enabled).", + "prefix": "ngx.encode_base64", + "body": "local newstr = ngx.encode_base64(${1:str}, ${2:no_padding?})" + }, + "ngx.decode_base64": { + "description": "Decodes the str argument as a base64 digest to the raw form. Returns nil if str is not well formed.", + "prefix": "ngx.decode_base64", + "body": "local newstr = ngx.decode_base64(${1:str})" + }, + "ngx.crc32_short": { + "description": "Calculates the CRC-32 (Cyclic Redundancy Code) digest for the str argument.This method performs better on relatively short str inputs (i.e., less than 30 ~ 60 bytes), as compared to ngx.crc32_long. The result is exactly the same as ngx.crc32_long.Behind the scene, it is just a thin wrapper around the ngx_crc32_short function defined in the Nginx core.This API was first introduced in the v0.3.1rc8 release.", + "prefix": "ngx.crc32_short", + "body": "local intval = ngx.crc32_short(${1:str})" + }, + "ngx.crc32_long": { + "description": "Calculates the CRC-32 (Cyclic Redundancy Code) digest for the str argument.This method performs better on relatively long str inputs (i.e., longer than 30 ~ 60 bytes), as compared to ngx.crc32_short. 
The result is exactly the same as ngx.crc32_short.Behind the scene, it is just a thin wrapper around the ngx_crc32_long function defined in the Nginx core.This API was first introduced in the v0.3.1rc8 release.", + "prefix": "ngx.crc32_long", + "body": "local intval = ngx.crc32_long(${1:str})" + }, + "ngx.hmac_sha1": { + "description": "Computes the HMAC-SHA1 digest of the argument str and returns the result using the secret key secret_key.The raw binary form of the HMAC-SHA1 digest will be generated, use ngx.encode_base64, for example, to encode the result to a textual representation if desired.For example, local key = \"thisisverysecretstuff\"\n local src = \"some string we want to sign\"\n local digest = ngx.hmac_sha1(key, src)\n ngx.say(ngx.encode_base64(digest))yields the outputR/pvxzHC4NLtj7S+kXFg/NePTmk=\nThis API requires the OpenSSL library enabled in the Nginx build (usually by passing the --with-http_ssl_module option to the ./configure script).This function was first introduced in the v0.3.1rc29 release.", + "prefix": "ngx.hmac_sha1", + "body": "local digest = ngx.hmac_sha1(${1:secret_key}, ${2:str})" + }, + "ngx.md5": { + "description": "Returns the hexadecimal representation of the MD5 digest of the str argument.For example, location = /md5 {\n content_by_lua_block {\n ngx.say(ngx.md5(\"hello\"))\n }\n }yields the output5d41402abc4b2a76b9719d911017c592\nSee ngx.md5_bin if the raw binary MD5 digest is required.", + "prefix": "ngx.md5", + "body": "local digest = ngx.md5(${1:str})" + }, + "ngx.md5_bin": { + "description": "Returns the binary form of the MD5 digest of the str argument.See ngx.md5 if the hexadecimal form of the MD5 digest is required.", + "prefix": "ngx.md5_bin", + "body": "local digest = ngx.md5_bin(${1:str})" + }, + "ngx.sha1_bin": { + "description": "Returns the binary form of the SHA-1 digest of the str argument.This function requires SHA-1 support in the Nginx build. 
(This usually just means OpenSSL should be installed while building Nginx).This function was first introduced in the v0.5.0rc6.", + "prefix": "ngx.sha1_bin", + "body": "local digest = ngx.sha1_bin(${1:str})" + }, + "ngx.quote_sql_str": { + "description": "Returns a quoted SQL string literal according to the MySQL quoting rules.", + "prefix": "ngx.quote_sql_str", + "body": "local quoted_value = ngx.quote_sql_str(${1:raw_value})" + }, + "ngx.today": { + "description": "Returns current date (in the format yyyy-mm-dd) from the Nginx cached time (no syscall involved unlike Lua's date library).This is the local time.", + "prefix": "ngx.today", + "body": "local str = ngx.today()" + }, + "ngx.time": { + "description": "Returns the elapsed seconds from the epoch for the current time stamp from the Nginx cached time (no syscall involved unlike Lua's date library).Updates of the Nginx time cache can be forced by calling ngx.update_time first.", + "prefix": "ngx.time", + "body": "local secs = ngx.time()" + }, + "ngx.now": { + "description": "Returns a floating-point number for the elapsed time in seconds (including milliseconds as the decimal part) from the epoch for the current time stamp from the Nginx cached time (no syscall involved unlike Lua's date library).You can forcibly update the Nginx time cache by calling ngx.update_time first.This API was first introduced in v0.3.1rc32.", + "prefix": "ngx.now", + "body": "local secs = ngx.now()" + }, + "ngx.update_time": { + "description": "Forcibly updates the Nginx current time cache. 
This call involves a syscall and thus has some overhead, so do not abuse it.This API was first introduced in v0.3.1rc32.", + "prefix": "ngx.update_time", + "body": "ngx.update_time()" + }, + "ngx.localtime": { + "description": "Returns the current time stamp (in the format yyyy-mm-dd hh:mm:ss) of the Nginx cached time (no syscall involved unlike Lua's os.date function).This is the local time.", + "prefix": "ngx.localtime", + "body": "local str = ngx.localtime()" + }, + "ngx.utctime": { + "description": "Returns the current time stamp (in the format yyyy-mm-dd hh:mm:ss) of the Nginx cached time (no syscall involved unlike Lua's os.date function).This is the UTC time.", + "prefix": "ngx.utctime", + "body": "local str = ngx.utctime()" + }, + "ngx.cookie_time": { + "description": "Returns a formatted string which can be used as the cookie expiration time. The parameter sec is the time stamp in seconds (like those returned from ngx.time). ngx.say(ngx.cookie_time(1290079655))\n -- yields \"Thu, 18-Nov-10 11:27:35 GMT\"", + "prefix": "ngx.cookie_time", + "body": "local str = ngx.cookie_time(${1:sec})" + }, + "ngx.http_time": { + "description": "Returns a formatted string which can be used as the http header time (for example, being used in Last-Modified header). The parameter sec is the time stamp in seconds (like those returned from ngx.time). ngx.say(ngx.http_time(1290079655))\n -- yields \"Thu, 18 Nov 2010 11:27:35 GMT\"", + "prefix": "ngx.http_time", + "body": "local str = ngx.http_time(${1:sec})" + }, + "ngx.parse_http_time": { + "description": "Parse the http time string (as returned by ngx.http_time) into seconds. Returns the seconds or nil if the input string is in bad forms. 
local time = ngx.parse_http_time(\"Thu, 18 Nov 2010 11:27:35 GMT\")\n if time == nil then\n ...\n end", + "prefix": "ngx.parse_http_time", + "body": "local sec = ngx.parse_http_time(${1:str})" + }, + "ngx.is_subrequest": { + "description": "Returns true if the current request is an Nginx subrequest, or false otherwise.", + "prefix": "ngx.is_subrequest", + "body": "local value = ngx.is_subrequest" + }, + "ngx.re.match": { + "description": "Matches the subject string using the Perl compatible regular expression regex with the optional options.Only the first occurrence of the match is returned, or nil if no match is found. In case of errors, like seeing a bad regular expression or exceeding the PCRE stack limit, nil and a string describing the error will be returned.When a match is found, a Lua table captures is returned, where captures[0] holds the whole substring being matched, and captures[1] holds the first parenthesized sub-pattern's capturing, captures[2] the second, and so on. local m, err = ngx.re.match(\"hello, 1234\", \"[0-9]+\")\n if m then\n -- m[0] == \"1234\"\n\n else\n if err then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n ngx.say(\"match not found\")\n end local m, err = ngx.re.match(\"hello, 1234\", \"([0-9])[0-9]+\")\n -- m[0] == \"1234\"\n -- m[1] == \"1\"Named captures are also supported since the v0.7.14 release\nand are returned in the same Lua table as key-value pairs as the numbered captures. local m, err = ngx.re.match(\"hello, 1234\", \"([0-9])(?[0-9]+)\")\n -- m[0] == \"1234\"\n -- m[1] == \"1\"\n -- m[2] == \"234\"\n -- m[\"remaining\"] == \"234\"Unmatched subpatterns will have false values in their captures table fields. local m, err = ngx.re.match(\"hello, world\", \"(world)|(hello)|(?howdy)\")\n -- m[0] == \"hello\"\n -- m[1] == false\n -- m[2] == \"hello\"\n -- m[3] == false\n -- m[\"named\"] == falseSpecify options to control how the match operation will be performed. 
The following option characters are supported:a anchored mode (only match from the beginning)\n\nd enable the DFA mode (or the longest token match semantics).\n this requires PCRE 6.0+ or else a Lua exception will be thrown.\n first introduced in ngx_lua v0.3.1rc30.\n\nD enable duplicate named pattern support. This allows named\n subpattern names to be repeated, returning the captures in\n an array-like Lua table. for example,\n local m = ngx.re.match(\"hello, world\",\n \"(?\\w+), (?\\w+)\",\n \"D\")\n -- m[\"named\"] == {\"hello\", \"world\"}\n this option was first introduced in the v0.7.14 release.\n this option requires at least PCRE 8.12.\n\ni case insensitive mode (similar to Perl's /i modifier)\n\nj enable PCRE JIT compilation, this requires PCRE 8.21+ which\n must be built with the --enable-jit option. for optimum performance,\n this option should always be used together with the 'o' option.\n first introduced in ngx_lua v0.3.1rc30.\n\nJ enable the PCRE Javascript compatible mode. this option was\n first introduced in the v0.7.14 release. this option requires\n at least PCRE 8.12.\n\nm multi-line mode (similar to Perl's /m modifier)\n\no compile-once mode (similar to Perl's /o modifier),\n to enable the worker-process-level compiled-regex cache\n\ns single-line mode (similar to Perl's /s modifier)\n\nu UTF-8 mode. this requires PCRE to be built with\n the --enable-utf8 option or else a Lua exception will be thrown.\n\nU similar to \"u\" but disables PCRE's UTF-8 validity check on\n the subject string. 
first introduced in ngx_lua v0.8.1.\n\nx extended mode (similar to Perl's /x modifier)\nThese options can be combined: local m, err = ngx.re.match(\"hello, world\", \"HEL LO\", \"ix\")\n -- m[0] == \"hello\" local m, err = ngx.re.match(\"hello, 美好生活\", \"HELLO, (.{2})\", \"iu\")\n -- m[0] == \"hello, 美好\"\n -- m[1] == \"美好\"The o option is useful for performance tuning, because the regex pattern in question will only be compiled once, cached in the worker-process level, and shared among all requests in the current Nginx worker process. The upper limit of the regex cache can be tuned via the lua_regex_cache_max_entries directive.The optional fourth argument, ctx, can be a Lua table holding an optional pos field. When the pos field in the ctx table argument is specified, ngx.re.match will start matching from that offset (starting from 1). Regardless of the presence of the pos field in the ctx table, ngx.re.match will always set this pos field to the position after the substring matched by the whole pattern in case of a successful match. When match fails, the ctx table will be left intact. local ctx = {}\n local m, err = ngx.re.match(\"1234, hello\", \"[0-9]+\", \"\", ctx)\n -- m[0] = \"1234\"\n -- ctx.pos == 5 local ctx = { pos = 2 }\n local m, err = ngx.re.match(\"1234, hello\", \"[0-9]+\", \"\", ctx)\n -- m[0] = \"234\"\n -- ctx.pos == 5The ctx table argument combined with the a regex modifier can be used to construct a lexer atop ngx.re.match.Note that, the options argument is not optional when the ctx argument is specified and that the empty Lua string (\"\") must be used as placeholder for options if no meaningful regex options are required.This method requires the PCRE library enabled in Nginx (Known Issue With Special Escaping Sequences).To confirm that PCRE JIT is enabled, activate the Nginx debug log by adding the --with-debug option to Nginx or OpenResty's ./configure script. Then, enable the \"debug\" error log level in error_log directive. 
The following message will be generated if PCRE JIT is enabled:pcre JIT compiling result: 1\nStarting from the 0.9.4 release, this function also accepts a 5th argument, res_table, for letting the caller supply the Lua table used to hold all the capturing results. Starting from 0.9.6, it is the caller's responsibility to ensure this table is empty. This is very useful for recycling Lua tables and saving GC and table allocation overhead.This feature was introduced in the v0.2.1rc11 release.", + "prefix": "ngx.re.match", + "body": "local captures, err = ngx.re.match(${1:subject}, ${2:regex}, ${3:options?}, ${4:ctx?}, ${5:res_table?})" + }, + "ngx.re.find": { + "description": "Similar to ngx.re.match but only returns the beginning index (from) and end index (to) of the matched substring. The returned indexes are 1-based and can be fed directly into the string.sub API function to obtain the matched substring.In case of errors (like bad regexes or any PCRE runtime errors), this API function returns two nil values followed by a string describing the error.If no match is found, this function just returns a nil value.Below is an example: local s = \"hello, 1234\"\n local from, to, err = ngx.re.find(s, \"([0-9]+)\", \"jo\")\n if from then\n ngx.say(\"from: \", from)\n ngx.say(\"to: \", to)\n ngx.say(\"matched: \", string.sub(s, from, to))\n else\n if err then\n ngx.say(\"error: \", err)\n return\n end\n ngx.say(\"not matched!\")\n endThis example produces the outputfrom: 8\nto: 11\nmatched: 1234\nBecause this API function does not create new Lua strings nor new Lua tables, it is much faster than ngx.re.match. It should be used wherever possible.Since the 0.9.3 release, an optional 5th argument, nth, is supported to specify which (submatch) capture's indexes to return. 
When nth is 0 (which is the default), the indexes for the whole matched substring is returned; when nth is 1, then the 1st submatch capture's indexes are returned; when nth is 2, then the 2nd submatch capture is returned, and so on. When the specified submatch does not have a match, then two nil values will be returned. Below is an example for this: local str = \"hello, 1234\"\n local from, to = ngx.re.find(str, \"([0-9])([0-9]+)\", \"jo\", nil, 2)\n if from then\n ngx.say(\"matched 2nd submatch: \", string.sub(str, from, to)) -- yields \"234\"\n endThis API function was first introduced in the v0.9.2 release.", + "prefix": "ngx.re.find", + "body": "local from, to, err = ngx.re.find(${1:subject}, ${2:regex}, ${3:options?}, ${4:ctx?}, ${5:nth?})" + }, + "ngx.re.gmatch": { + "description": "Similar to ngx.re.match, but returns a Lua iterator instead, so as to let the user programmer iterate all the matches over the string argument with the PCRE regex.In case of errors, like seeing an ill-formed regular expression, nil and a string describing the error will be returned.Here is a small example to demonstrate its basic usage: local iterator, err = ngx.re.gmatch(\"hello, world!\", \"([a-z]+)\", \"i\")\n if not iterator then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n local m\n m, err = iterator() -- m[0] == m[1] == \"hello\"\n if err then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n m, err = iterator() -- m[0] == m[1] == \"world\"\n if err then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n m, err = iterator() -- m == nil\n if err then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n endMore often we just put it into a Lua loop: local it, err = ngx.re.gmatch(\"hello, world!\", \"([a-z]+)\", \"i\")\n if not it then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n while true do\n local m, err = it()\n if err then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n if not m then\n -- no match found (any more)\n break\n end\n\n -- 
found a match\n ngx.say(m[0])\n ngx.say(m[1])\n endThe optional options argument takes exactly the same semantics as the ngx.re.match method.The current implementation requires that the iterator returned should only be used in a single request. That is, one should not assign it to a variable belonging to persistent namespace like a Lua package.This method requires the PCRE library enabled in Nginx (Known Issue With Special Escaping Sequences).This feature was first introduced in the v0.2.1rc12 release.", + "prefix": "ngx.re.gmatch", + "body": "local iterator, err = ngx.re.gmatch(${1:subject}, ${2:regex}, ${3:options?})" + }, + "ngx.re.sub": { + "description": "Substitutes the first match of the Perl compatible regular expression regex on the subject argument string with the string or function argument replace. The optional options argument has exactly the same meaning as in ngx.re.match.This method returns the resulting new string as well as the number of successful substitutions. In case of failures, like syntax errors in the regular expressions or the string argument, it will return nil and a string describing the error.When the replace is a string, then it is treated as a special template for string replacement. 
For example, local newstr, n, err = ngx.re.sub(\"hello, 1234\", \"([0-9])[0-9]\", \"[$0][$1]\")\n if not newstr then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n -- newstr == \"hello, [12][1]34\"\n -- n == 1where $0 referring to the whole substring matched by the pattern and $1 referring to the first parenthesized capturing substring.Curly braces can also be used to disambiguate variable names from the background string literals: local newstr, n, err = ngx.re.sub(\"hello, 1234\", \"[0-9]\", \"${0}00\")\n -- newstr == \"hello, 100234\"\n -- n == 1Literal dollar sign characters ($) in the replace string argument can be escaped by another dollar sign, for instance, local newstr, n, err = ngx.re.sub(\"hello, 1234\", \"[0-9]\", \"$$\")\n -- newstr == \"hello, $234\"\n -- n == 1Do not use backlashes to escape dollar signs; it will not work as expected.When the replace argument is of type \"function\", then it will be invoked with the \"match table\" as the argument to generate the replace string literal for substitution. The \"match table\" fed into the replace function is exactly the same as the return value of ngx.re.match. Here is an example: local func = function (m)\n return \"[\" .. m[0] .. \"][\" .. m[1] .. 
\"]\"\n end\n\n local newstr, n, err = ngx.re.sub(\"hello, 1234\", \"( [0-9] ) [0-9]\", func, \"x\")\n -- newstr == \"hello, [12][1]34\"\n -- n == 1The dollar sign characters in the return value of the replace function argument are not special at all.This method requires the PCRE library enabled in Nginx (Known Issue With Special Escaping Sequences).This feature was first introduced in the v0.2.1rc13 release.", + "prefix": "ngx.re.sub", + "body": "local newstr, n, err = ngx.re.sub(${1:subject}, ${2:regex}, ${3:replace}, ${4:options?})" + }, + "ngx.re.gsub": { + "description": "Just like ngx.re.sub, but does global substitution.Here is some examples: local newstr, n, err = ngx.re.gsub(\"hello, world\", \"([a-z])[a-z]+\", \"[$0,$1]\", \"i\")\n if not newstr then\n ngx.log(ngx.ERR, \"error: \", err)\n return\n end\n\n -- newstr == \"[hello,h], [world,w]\"\n -- n == 2 local func = function (m)\n return \"[\" .. m[0] .. \",\" .. m[1] .. \"]\"\n end\n local newstr, n, err = ngx.re.gsub(\"hello, world\", \"([a-z])[a-z]+\", func, \"i\")\n -- newstr == \"[hello,h], [world,w]\"\n -- n == 2This method requires the PCRE library enabled in Nginx (Known Issue With Special Escaping Sequences).This feature was first introduced in the v0.2.1rc15 release.", + "prefix": "ngx.re.gsub", + "body": "local newstr, n, err = ngx.re.gsub(${1:subject}, ${2:regex}, ${3:replace}, ${4:options?})" + }, + "ngx.shared.DICT": { + "description": "Fetching the shm-based Lua dictionary object for the shared memory zone named DICT defined by the lua_shared_dict directive.Shared memory zones are always shared by all the Nginx worker processes in the current Nginx server instance.The resulting object dict has the following methods:\nget\nget_stale\nset\nsafe_set\nadd\nsafe_add\nreplace\ndelete\nincr\nlpush\nrpush\nlpop\nrpop\nllen\nttl\nexpire\nflush_all\nflush_expired\nget_keys\ncapacity\nfree_space\nAll these methods are atomic operations, that is, safe from concurrent accesses from multiple Nginx 
worker processes for the same lua_shared_dict zone.Here is an example: http {\n lua_shared_dict dogs 10m;\n server {\n location /set {\n content_by_lua_block {\n local dogs = ngx.shared.dogs\n dogs:set(\"Jim\", 8)\n ngx.say(\"STORED\")\n }\n }\n location /get {\n content_by_lua_block {\n local dogs = ngx.shared.dogs\n ngx.say(dogs:get(\"Jim\"))\n }\n }\n }\n }Let us test it: $ curl localhost/set\n STORED\n\n $ curl localhost/get\n 8\n\n $ curl localhost/get\n 8The number 8 will be consistently output when accessing /get regardless of how many Nginx workers there are because the dogs dictionary resides in the shared memory and visible to all of the worker processes.The shared dictionary will retain its contents through a server config reload (either by sending the HUP signal to the Nginx process or by using the -s reload command-line option).The contents in the dictionary storage will be lost, however, when the Nginx server quits.This feature was first introduced in the v0.3.1rc22 release.", + "prefix": "ngx.shared.DICT", + "body": "local dict = ngx.shared[name_var]" + }, + "ngx.shared.DICT.get": { + "description": "Retrieving the value in the dictionary ngx.shared.DICT for the key key. 
If the key does not exist or has expired, then nil will be returned.In case of errors, nil and a string describing the error will be returned.The value returned will have the original data type when they were inserted into the dictionary, for example, Lua booleans, numbers, or strings.The first argument to this method must be the dictionary object itself, for example, local cats = ngx.shared.cats\n local value, flags = cats.get(cats, \"Marry\")or use Lua's syntactic sugar for method calls: local cats = ngx.shared.cats\n local value, flags = cats:get(\"Marry\")These two forms are fundamentally equivalent.If the user flags is 0 (the default), then no flags value will be returned.This feature was first introduced in the v0.3.1rc22 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.get", + "body": "local value, flags = ngx.shared.DICT:get(${1:key})" + }, + "ngx.shared.DICT.get_stale": { + "description": "Similar to the get method but returns the value even if the key has already expired.Returns a 3rd value, stale, indicating whether the key has expired or not.Note that the value of an expired key is not guaranteed to be available so one should never rely on the availability of expired items.This method was first introduced in the 0.8.6 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.get_stale", + "body": "local value, flags, stale = ngx.shared.DICT:get_stale(${1:key})" + }, + "ngx.shared.DICT.set": { + "description": "Unconditionally sets a key-value pair into the shm-based dictionary ngx.shared.DICT. Returns three values:\nsuccess: boolean value to indicate whether the key-value pair is stored or not.\nerr: textual error message, can be \"no memory\".\nforcible: a boolean value to indicate whether other valid items have been removed forcibly when out of storage in the shared memory zone.\nThe value argument inserted can be Lua booleans, numbers, strings, or nil. 
Their value type will also be stored into the dictionary and the same data type can be retrieved later via the get method.The optional exptime argument specifies expiration time (in seconds) for the inserted key-value pair. The time resolution is 0.001 seconds. If the exptime takes the value 0 (which is the default), then the item will never expire.The optional flags argument specifies a user flags value associated with the entry to be stored. It can also be retrieved later with the value. The user flags is stored as an unsigned 32-bit integer internally. Defaults to 0. The user flags argument was first introduced in the v0.5.0rc2 release.When it fails to allocate memory for the current key-value item, then set will try removing existing items in the storage according to the Least-Recently Used (LRU) algorithm. Note that, LRU takes priority over expiration time here. If up to tens of existing items have been removed and the storage left is still insufficient (either due to the total capacity limit specified by lua_shared_dict or memory segmentation), then the err return value will be no memory and success will be false.If the sizes of items in the dictionary are not multiples or even powers of a certain value (like 2), it is easier to encounter no memory error because of memory fragmentation. It is recommended to use different dictionaries for different sizes of items.When you encounter no memory error, you can also evict more least-recently-used items by retrying this method call more times to to make room for the current item.If this method succeeds in storing the current item by forcibly removing other not-yet-expired items in the dictionary via LRU, the forcible return value will be true. 
If it stores the item without forcibly removing other valid items, then the return value forcible will be false.The first argument to this method must be the dictionary object itself, for example, local cats = ngx.shared.cats\n local succ, err, forcible = cats.set(cats, \"Marry\", \"it is a nice cat!\")or use Lua's syntactic sugar for method calls: local cats = ngx.shared.cats\n local succ, err, forcible = cats:set(\"Marry\", \"it is a nice cat!\")These two forms are fundamentally equivalent.This feature was first introduced in the v0.3.1rc22 release.Please note that while internally the key-value pair is set atomically, the atomicity does not go across the method call boundary.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.set", + "body": "local success, err, forcible = ngx.shared.DICT:set(${1:key}, ${2:value}, ${3:exptime?}, ${4:flags?})" + }, + "ngx.shared.DICT.safe_set": { + "description": "Similar to the set method, but never overrides the (least recently used) unexpired items in the store when running out of storage in the shared memory zone. 
In this case, it will immediately return nil and the string \"no memory\".This feature was first introduced in the v0.7.18 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.safe_set", + "body": "local ok, err = ngx.shared.DICT:safe_set(${1:key}, ${2:value}, ${3:exptime?}, ${4:flags?})" + }, + "ngx.shared.DICT.add": { + "description": "Just like the set method, but only stores the key-value pair into the dictionary ngx.shared.DICT if the key does not exist.If the key argument already exists in the dictionary (and not expired for sure), the success return value will be false and the err return value will be \"exists\".This feature was first introduced in the v0.3.1rc22 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.add", + "body": "local success, err, forcible = ngx.shared.DICT:add(${1:key}, ${2:value}, ${3:exptime?}, ${4:flags?})" + }, + "ngx.shared.DICT.safe_add": { + "description": "Similar to the add method, but never overrides the (least recently used) unexpired items in the store when running out of storage in the shared memory zone. 
In this case, it will immediately return nil and the string \"no memory\".This feature was first introduced in the v0.7.18 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.safe_add", + "body": "local ok, err = ngx.shared.DICT:safe_add(${1:key}, ${2:value}, ${3:exptime?}, ${4:flags?})" + }, + "ngx.shared.DICT.replace": { + "description": "Just like the set method, but only stores the key-value pair into the dictionary ngx.shared.DICT if the key does exist.If the key argument does not exist in the dictionary (or expired already), the success return value will be false and the err return value will be \"not found\".This feature was first introduced in the v0.3.1rc22 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.replace", + "body": "local success, err, forcible = ngx.shared.DICT:replace(${1:key}, ${2:value}, ${3:exptime?}, ${4:flags?})" + }, + "ngx.shared.DICT.delete": { + "description": "Unconditionally removes the key-value pair from the shm-based dictionary ngx.shared.DICT.It is equivalent to ngx.shared.DICT:set(key, nil).This feature was first introduced in the v0.3.1rc22 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.delete", + "body": "ngx.shared.DICT:delete(${1:key})" + }, + "ngx.shared.DICT.incr": { + "description": "optional requirement: resty.core.shdict or resty.coreIncrements the (numerical) value for key in the shm-based dictionary ngx.shared.DICT by the step value value. 
Returns the new resulting number if the operation is successfully completed or nil and an error message otherwise.When the key does not exist or has already expired in the shared dictionary,\nif the init argument is not specified or takes the value nil, this method will return nil and the error string \"not found\", or\nif the init argument takes a number value, this method will create a new key with the value init + value.\nLike the add method, it also overrides the (least recently used) unexpired items in the store when running out of storage in the shared memory zone.The optional init_ttl argument specifies expiration time (in seconds) of the value when it is initialized via the init argument. The time resolution is 0.001 seconds. If init_ttl takes the value 0 (which is the default), then the item will never expire. This argument cannot be provided without providing the init argument as well, and has no effect if the value already exists (e.g., if it was previously inserted via set or the likes).Note: Usage of the init_ttl argument requires the resty.core.shdict or resty.core modules from the lua-resty-core library. Example: require \"resty.core\"\n\n local cats = ngx.shared.cats\n local newval, err = cats:incr(\"black_cats\", 1, 0, 0.1)\n\n print(newval) -- 1\n\n ngx.sleep(0.2)\n\n local val, err = cats:get(\"black_cats\")\n print(val) -- nilThe forcible return value will always be nil when the init argument is not specified.If this method succeeds in storing the current item by forcibly removing other not-yet-expired items in the dictionary via LRU, the forcible return value will be true. 
If it stores the item without forcibly removing other valid items, then the return value forcible will be false.If the original value is not a valid Lua number in the dictionary, it will return nil and \"not a number\".The value argument and init argument can be any valid Lua numbers, like negative numbers or floating-point numbers.This method was first introduced in the v0.3.1rc22 release.The optional init parameter was first added in the v0.10.6 release.The optional init_ttl parameter was introduced in the v0.10.12rc2 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.incr", + "body": "local newval, err, forcible? = ngx.shared.DICT:incr(${1:key}, ${2:value}, ${3:init?}, ${4:init_ttl?})" + }, + "ngx.shared.DICT.lpush": { + "description": "Inserts the specified (numerical or string) value at the head of the list named key in the shm-based dictionary ngx.shared.DICT. Returns the number of elements in the list after the push operation.If key does not exist, it is created as an empty list before performing the push operation. When the key already takes a value that is not a list, it will return nil and \"value not a list\".It never overrides the (least recently used) unexpired items in the store when running out of storage in the shared memory zone. 
In this case, it will immediately return nil and the string \"no memory\".This feature was first introduced in the v0.10.6 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.lpush", + "body": "local length, err = ngx.shared.DICT:lpush(${1:key}, ${2:value})" + }, + "ngx.shared.DICT.rpush": { + "description": "Similar to the lpush method, but inserts the specified (numerical or string) value at the tail of the list named key.This feature was first introduced in the v0.10.6 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.rpush", + "body": "local length, err = ngx.shared.DICT:rpush(${1:key}, ${2:value})" + }, + "ngx.shared.DICT.lpop": { + "description": "Removes and returns the first element of the list named key in the shm-based dictionary ngx.shared.DICT.If key does not exist, it will return nil. When the key already takes a value that is not a list, it will return nil and \"value not a list\".This feature was first introduced in the v0.10.6 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.lpop", + "body": "local val, err = ngx.shared.DICT:lpop(${1:key})" + }, + "ngx.shared.DICT.rpop": { + "description": "Removes and returns the last element of the list named key in the shm-based dictionary ngx.shared.DICT.If key does not exist, it will return nil. When the key already takes a value that is not a list, it will return nil and \"value not a list\".This feature was first introduced in the v0.10.6 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.rpop", + "body": "local val, err = ngx.shared.DICT:rpop(${1:key})" + }, + "ngx.shared.DICT.llen": { + "description": "Returns the number of elements in the list named key in the shm-based dictionary ngx.shared.DICT.If key does not exist, it is interpreted as an empty list and 0 is returned. 
When the key already takes a value that is not a list, it will return nil and \"value not a list\".This feature was first introduced in the v0.10.6 release.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.llen", + "body": "local len, err = ngx.shared.DICT:llen(${1:key})" + }, + "ngx.shared.DICT.ttl": { + "description": "requires: resty.core.shdict or resty.coreRetrieves the remaining TTL (time-to-live in seconds) of a key-value pair in the shm-based dictionary ngx.shared.DICT. Returns the TTL as a number if the operation is successfully completed or nil and an error message otherwise.If the key does not exist (or has already expired), this method will return nil and the error string \"not found\".The TTL is originally determined by the exptime argument of the set, add, replace (and the likes) methods. It has a time resolution of 0.001 seconds. A value of 0 means that the item will never expire.Example: require \"resty.core\"\n\n local cats = ngx.shared.cats\n local succ, err = cats:set(\"Marry\", \"a nice cat\", 0.5)\n\n ngx.sleep(0.2)\n\n local ttl, err = cats:ttl(\"Marry\")\n ngx.say(ttl) -- 0.3This feature was first introduced in the v0.10.11 release.Note: This method requires the resty.core.shdict or resty.core modules from the lua-resty-core library.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.ttl", + "body": "local ttl, err = ngx.shared.DICT:ttl(${1:key})" + }, + "ngx.shared.DICT.expire": { + "description": "requires: resty.core.shdict or resty.coreUpdates the exptime (in second) of a key-value pair in the shm-based dictionary ngx.shared.DICT. Returns a boolean indicating success if the operation completes or nil and an error message otherwise.If the key does not exist, this method will return nil and the error string \"not found\".The exptime argument has a resolution of 0.001 seconds. 
If exptime is 0, then the item will never expire.Example: require \"resty.core\"\n\n local cats = ngx.shared.cats\n local succ, err = cats:set(\"Marry\", \"a nice cat\", 0.1)\n\n succ, err = cats:expire(\"Marry\", 0.5)\n\n ngx.sleep(0.2)\n\n local val, err = cats:get(\"Marry\")\n ngx.say(val) -- \"a nice cat\"This feature was first introduced in the v0.10.11 release.Note: This method requires the resty.core.shdict or resty.core modules from the lua-resty-core library.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.expire", + "body": "local success, err = ngx.shared.DICT:expire(${1:key}, ${2:exptime})" + }, + "ngx.shared.DICT.flush_all": { + "description": "Flushes out all the items in the dictionary. This method does not actually free up all the memory blocks in the dictionary but just marks all the existing items as expired.This feature was first introduced in the v0.5.0rc17 release.See also ngx.shared.DICT.flush_expired and ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.flush_all", + "body": "ngx.shared.DICT:flush_all()" + }, + "ngx.shared.DICT.flush_expired": { + "description": "Flushes out the expired items in the dictionary, up to the maximal number specified by the optional max_count argument. When the max_count argument is given 0 or not given at all, then it means unlimited. Returns the number of items that have actually been flushed.Unlike the flush_all method, this method actually frees up the memory used by the expired items.This feature was first introduced in the v0.6.3 release.See also ngx.shared.DICT.flush_all and ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.flush_expired", + "body": "local flushed = ngx.shared.DICT:flush_expired(${1:max_count?})" + }, + "ngx.shared.DICT.get_keys": { + "description": "Fetch a list of the keys from the dictionary, up to .By default, only the first 1024 keys (if any) are returned. 
When the argument is given the value 0, then all the keys will be returned even there is more than 1024 keys in the dictionary.CAUTION Avoid calling this method on dictionaries with a very large number of keys as it may lock the dictionary for significant amount of time and block Nginx worker processes trying to access the dictionary.This feature was first introduced in the v0.7.3 release.", + "prefix": "ngx.shared.DICT.get_keys", + "body": "local keys = ngx.shared.DICT:get_keys(${1:max_count?})" + }, + "ngx.shared.DICT.capacity": { + "description": "requires: resty.core.shdict or resty.coreRetrieves the capacity in bytes for the shm-based dictionary ngx.shared.DICT declared with\nthe lua_shared_dict directive.Example: require \"resty.core.shdict\"\n\n local cats = ngx.shared.cats\n local capacity_bytes = cats:capacity()This feature was first introduced in the v0.10.11 release.Note: This method requires the resty.core.shdict or resty.core modules from the lua-resty-core library.This feature requires at least Nginx core version 0.7.3.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.capacity", + "body": "local capacity_bytes = ngx.shared.DICT:capacity()" + }, + "ngx.shared.DICT.free_space": { + "description": "requires: resty.core.shdict or resty.coreRetrieves the free page size in bytes for the shm-based dictionary ngx.shared.DICT.Note: The memory for ngx.shared.DICT is allocated via the Nginx slab allocator which has each slot for\ndata size ranges like ~8, 9~16, 17~32, ..., 1025~2048, 2048~ bytes. 
And pages are assigned to a slot if there\nis no room in already assigned pages for the slot.So even if the return value of the free_space method is zero, there may be room in already assigned pages, so\nyou may successfully set a new key value pair to the shared dict without getting true for forcible or\nnon nil err from the ngx.shared.DICT.set.On the other hand, if already assigned pages for a slot are full and a new key value pair is added to the\nslot and there is no free page, you may get true for forcible or non nil err from the\nngx.shared.DICT.set method.Example: require \"resty.core.shdict\"\n\n local cats = ngx.shared.cats\n local free_page_bytes = cats:free_space()This feature was first introduced in the v0.10.11 release.Note: This method requires the resty.core.shdict or resty.core modules from the lua-resty-core library.This feature requires at least Nginx core version 1.11.7.See also ngx.shared.DICT.", + "prefix": "ngx.shared.DICT.free_space", + "body": "local free_page_bytes = ngx.shared.DICT:free_space()" + }, + "ngx.socket.udp": { + "description": "Creates and returns a UDP or datagram-oriented unix domain socket object (also known as one type of the \"cosocket\" objects). 
The following methods are supported on this object:\nbind\nsetpeername\nsend\nreceive\nclose\nsettimeout\nIt is intended to be compatible with the UDP API of the LuaSocket library but is 100% nonblocking out of the box.This feature was first introduced in the v0.5.7 release.See also ngx.socket.tcp.", + "prefix": "ngx.socket.udp", + "body": "local udpsock = ngx.socket.udp()" + }, + "udpsock:bind": { + "description": "Just like the standard proxy_bind directive, this api makes the outgoing connection to a upstream server originate from the specified local IP address.Only IP addresses can be specified as the address argument.Here is an example for connecting to a TCP server from the specified local IP address: location /test {\n content_by_lua_block {\n local sock = ngx.socket.udp()\n -- assume \"192.168.1.10\" is the local ip address\n local ok, err = sock:bind(\"192.168.1.10\")\n if not ok then\n ngx.say(\"failed to bind: \", err)\n return\n end\n sock:close()\n }\n }", + "prefix": "udpsock:bind", + "body": "local ok, err = udpsock:bind(${1:address})" + }, + "udpsock:setpeername": { + "description": "Attempts to connect a UDP socket object to a remote server or to a datagram unix domain socket file. Because the datagram protocol is actually connection-less, this method does not really establish a \"connection\", but only just set the name of the remote peer for subsequent read/write operations.Both IP addresses and domain names can be specified as the host argument. In case of domain names, this method will use Nginx core's dynamic resolver to parse the domain name without blocking and it is required to configure the resolver directive in the nginx.conf file like this: resolver 8.8.8.8; # use Google's public DNS nameserverIf the nameserver returns multiple IP addresses for the host name, this method will pick up one randomly.In case of error, the method returns nil followed by a string describing the error. 
In case of success, the method returns 1.Here is an example for connecting to a UDP (memcached) server: location /test {\n resolver 8.8.8.8;\n\n content_by_lua_block {\n local sock = ngx.socket.udp()\n local ok, err = sock:setpeername(\"my.memcached.server.domain\", 11211)\n if not ok then\n ngx.say(\"failed to connect to memcached: \", err)\n return\n end\n ngx.say(\"successfully connected to memcached!\")\n sock:close()\n }\n }Since the v0.7.18 release, connecting to a datagram unix domain socket file is also possible on Linux: local sock = ngx.socket.udp()\n local ok, err = sock:setpeername(\"unix:/tmp/some-datagram-service.sock\")\n if not ok then\n ngx.say(\"failed to connect to the datagram unix domain socket: \", err)\n return\n end\n\n -- do something after connect\n -- such as sock:send or sock:receiveassuming the datagram service is listening on the unix domain socket file /tmp/some-datagram-service.sock and the client socket will use the \"autobind\" feature on Linux.Calling this method on an already connected socket object will cause the original connection to be closed first.This method was first introduced in the v0.5.7 release.", + "prefix": "udpsock:setpeername", + "body": "local ok, err = udpsock:setpeername(\"unix:/path/to/unix-domain.socket\")" + }, + "udpsock:send": { + "description": "Sends data on the current UDP or datagram unix domain socket object.In case of success, it returns 1. Otherwise, it returns nil and a string describing the error.The input argument data can either be a Lua string or a (nested) Lua table holding string fragments. 
In case of table arguments, this method will copy all the string elements piece by piece to the underlying Nginx socket send buffers, which is usually optimal than doing string concatenation operations on the Lua land.This feature was first introduced in the v0.5.7 release.", + "prefix": "udpsock:send", + "body": "local ok, err = udpsock:send(${1:data})" + }, + "udpsock:receive": { + "description": "Receives data from the UDP or datagram unix domain socket object with an optional receive buffer size argument, size.This method is a synchronous operation and is 100% nonblocking.In case of success, it returns the data received; in case of error, it returns nil with a string describing the error.If the size argument is specified, then this method will use this size as the receive buffer size. But when this size is greater than 8192, then 8192 will be used instead.If no argument is specified, then the maximal buffer size, 8192 is assumed.Timeout for the reading operation is controlled by the lua_socket_read_timeout config directive and the settimeout method. And the latter takes priority. For example: sock:settimeout(1000) -- one second timeout\n local data, err = sock:receive()\n if not data then\n ngx.say(\"failed to read a packet: \", err)\n return\n end\n ngx.say(\"successfully read a packet: \", data)It is important here to call the settimeout method before calling this method.This feature was first introduced in the v0.5.7 release.", + "prefix": "udpsock:receive", + "body": "local data, err = udpsock:receive(${1:size?})" + }, + "udpsock:close": { + "description": "Closes the current UDP or datagram unix domain socket. 
It returns the 1 in case of success and returns nil with a string describing the error otherwise.Socket objects that have not invoked this method (and associated connections) will be closed when the socket object is released by the Lua GC (Garbage Collector) or the current client HTTP request finishes processing.This feature was first introduced in the v0.5.7 release.", + "prefix": "udpsock:close", + "body": "local ok, err = udpsock:close()" + }, + "udpsock:settimeout": { + "description": "Set the timeout value in milliseconds for subsequent socket operations (like receive).Settings done by this method takes priority over those config directives, like lua_socket_read_timeout.This feature was first introduced in the v0.5.7 release.", + "prefix": "udpsock:settimeout", + "body": "udpsock:settimeout(${1:time})" + }, + "ngx.socket.stream": { + "description": "Just an alias to ngx.socket.tcp. If the stream-typed cosocket may also connect to a unix domain\nsocket, then this API name is preferred.This API function was first added to the v0.10.1 release.", + "prefix": "ngx.socket.stream", + "body": "ngx.socket.stream" + }, + "ngx.socket.tcp": { + "description": "Creates and returns a TCP or stream-oriented unix domain socket object (also known as one type of the \"cosocket\" objects). The following methods are supported on this object:\nbind\nconnect\nsetclientcert\nsslhandshake\nsend\nreceive\nclose\nsettimeout\nsettimeouts\nsetoption\nreceiveany\nreceiveuntil\nsetkeepalive\ngetreusedtimes\nIt is intended to be compatible with the TCP API of the LuaSocket library but is 100% nonblocking out of the box. Also, we introduce some new APIs to provide more functionalities.The cosocket object created by this API function has exactly the same lifetime as the Lua handler creating it. 
So never pass the cosocket object to any other Lua handler (including ngx.timer callback functions) and never share the cosocket object between different Nginx requests.For every cosocket object's underlying connection, if you do not\nexplicitly close it (via close) or put it back to the connection\npool (via setkeepalive), then it is automatically closed when one of\nthe following two events happens:\nthe current request handler completes, or\nthe Lua cosocket object value gets collected by the Lua GC.\nFatal errors in cosocket operations always automatically close the current\nconnection (note that, read timeout error is the only error that is\nnot fatal), and if you call close on a closed connection, you will get\nthe \"closed\" error.Starting from the 0.9.9 release, the cosocket object here is full-duplex, that is, a reader \"light thread\" and a writer \"light thread\" can operate on a single cosocket object simultaneously (both \"light threads\" must belong to the same Lua handler though, see reasons above). 
But you cannot have two \"light threads\" both reading (or writing or connecting) the same cosocket, otherwise you might get an error like \"socket busy reading\" when calling the methods of the cosocket object.This feature was first introduced in the v0.5.0rc1 release.See also ngx.socket.udp.", + "prefix": "ngx.socket.tcp", + "body": "local tcpsock = ngx.socket.tcp()" + }, + "tcpsock:bind": { + "description": "Just like the standard proxy_bind directive, this api makes the outgoing connection to a upstream server originate from the specified local IP address.Only IP addresses can be specified as the address argument.Here is an example for connecting to a TCP server from the specified local IP address: location /test {\n content_by_lua_block {\n local sock = ngx.socket.tcp()\n -- assume \"192.168.1.10\" is the local ip address\n local ok, err = sock:bind(\"192.168.1.10\")\n if not ok then\n ngx.say(\"failed to bind\")\n return\n end\n local ok, err = sock:connect(\"192.168.1.67\", 80)\n if not ok then\n ngx.say(\"failed to connect server: \", err)\n return\n end\n ngx.say(\"successfully connected!\")\n sock:close()\n }\n }", + "prefix": "tcpsock:bind", + "body": "local ok, err = tcpsock:bind(${1:address})" + }, + "tcpsock:connect": { + "description": "Attempts to connect a TCP socket object to a remote server or to a stream unix domain socket file without blocking.Before actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method (or the ngx.socket.connect function).Both IP addresses and domain names can be specified as the host argument. 
In case of domain names, this method will use Nginx core's dynamic resolver to parse the domain name without blocking and it is required to configure the resolver directive in the nginx.conf file like this: resolver 8.8.8.8; # use Google's public DNS nameserverIf the nameserver returns multiple IP addresses for the host name, this method will pick up one randomly.In case of error, the method returns nil followed by a string describing the error. In case of success, the method returns 1.Here is an example for connecting to a TCP server: location /test {\n resolver 8.8.8.8;\n\n content_by_lua_block {\n local sock = ngx.socket.tcp()\n local ok, err = sock:connect(\"www.google.com\", 80)\n if not ok then\n ngx.say(\"failed to connect to google: \", err)\n return\n end\n ngx.say(\"successfully connected to google!\")\n sock:close()\n }\n }Connecting to a Unix Domain Socket file is also possible: local sock = ngx.socket.tcp()\n local ok, err = sock:connect(\"unix:/tmp/memcached.sock\")\n if not ok then\n ngx.say(\"failed to connect to the memcached unix domain socket: \", err)\n return\n end\n\n -- do something after connect\n -- such as sock:send or sock:receiveassuming memcached (or something else) is listening on the unix domain socket file /tmp/memcached.sock.Timeout for the connecting operation is controlled by the lua_socket_connect_timeout config directive and the settimeout method. And the latter takes priority. For example: local sock = ngx.socket.tcp()\n sock:settimeout(1000) -- one second timeout\n local ok, err = sock:connect(host, port)It is important here to call the settimeout method before calling this method.Calling this method on an already connected socket object will cause the original connection to be closed first.An optional Lua table can be specified as the last argument to this method to specify various connect options:\n\npool\nspecify a custom name for the connection pool being used. 
If omitted, then the connection pool name will be generated from the string template \":\" or \"\".\n\n\npool_size\nspecify the size of the connection pool. If omitted and no\nbacklog option was provided, no pool will be created. If omitted\nbut backlog was provided, the pool will be created with a default\nsize equal to the value of the lua_socket_pool_size\ndirective.\nThe connection pool holds up to pool_size alive connections\nready to be reused by subsequent calls to connect, but\nnote that there is no upper limit to the total number of opened connections\noutside of the pool. If you need to restrict the total number of opened\nconnections, specify the backlog option.\nWhen the connection pool would exceed its size limit, the least recently used\n(kept-alive) connection already in the pool will be closed to make room for\nthe current connection.\nNote that the cosocket connection pool is per Nginx worker process rather\nthan per Nginx server instance, so the size limit specified here also applies\nto every single Nginx worker process. Also note that the size of the connection\npool cannot be changed once it has been created.\nThis option was first introduced in the v0.10.14 release.\n\n\nbacklog\nif specified, this module will limit the total number of opened connections\nfor this pool. No more connections than pool_size can be opened\nfor this pool at any time. 
If the connection pool is full, subsequent\nconnect operations will be queued into a queue equal to this option's\nvalue (the \"backlog\" queue).\nIf the number of queued connect operations is equal to backlog,\nsubsequent connect operations will fail and return nil plus the\nerror string \"too many waiting connect operations\".\nThe queued connect operations will be resumed once the number of connections\nin the pool is less than pool_size.\nThe queued connect operation will abort once they have been queued for more\nthan connect_timeout, controlled by\nsettimeouts, and will return nil plus\nthe error string \"timeout\".\nThis option was first introduced in the v0.10.14 release.\n\nThe support for the options table argument was first introduced in the v0.5.7 release.This method was first introduced in the v0.5.0rc1 release.", + "prefix": "tcpsock:connect", + "body": "local ok, err = tcpsock:connect(\"unix:/path/to/unix-domain.socket\", options_table?)" + }, + "tcpsock:setclientcert": { + "description": "Set client certificate chain and corresponding private key to the TCP socket object.\nThe certificate chain and private key provided will be used later by the tcpsock:sslhandshake method.\ncert specify a client certificate chain cdata object that will be used while handshaking with\nremote server. These objects can be created using ngx.ssl.parse_pem_cert or ngx.ssl.parse_der_cert\nfunction provided by lua-resty-core. Note that specifying the cert option requires\ncorresponding pkey be provided too. 
See below.\npkey specify a private key corresponds to the cert option above.\nThese objects can be created using ngx.ssl.parse_pem_priv_key or ngx.ssl.parse_der_priv_key\nfunction provided by lua-resty-core.\nIf both of cert and pkey are nil, this method will clear any existing client certificate and private key\nthat was previously set on the cosocket object.This method was first introduced in the v0.10.22 release.", + "prefix": "tcpsock:setclientcert", + "body": "local ok, err = tcpsock:setclientcert(${1:cert}, ${2:pkey})" + }, + "tcpsock:sslhandshake": { + "description": "Does SSL/TLS handshake on the currently established connection.The optional reused_session argument can take a former SSL\nsession userdata returned by a previous sslhandshake\ncall for exactly the same target. For short-lived connections, reusing SSL\nsessions can usually speed up the handshake by one order of magnitude but it\nis not so useful if the connection pool is enabled. This argument defaults to\nnil. If this argument takes the boolean false value, no SSL session\nuserdata would return by this call and only a Lua boolean will be returned as\nthe first return value; otherwise the current SSL session will\nalways be returned as the first argument in case of successes.The optional server_name argument is used to specify the server\nname for the new TLS extension Server Name Indication (SNI). Use of SNI can\nmake different servers share the same IP address on the server side. Also,\nwhen SSL verification is enabled, this server_name argument is\nalso used to validate the server name specified in the server certificate sent from\nthe remote.The optional ssl_verify argument takes a Lua boolean value to\ncontrol whether to perform SSL verification. 
When set to true, the server\ncertificate will be verified according to the CA certificates specified by\nthe lua_ssl_trusted_certificate directive.\nYou may also need to adjust the lua_ssl_verify_depth\ndirective to control how deep we should follow along the certificate chain.\nAlso, when the ssl_verify argument is true and the\nserver_name argument is also specified, the latter will be used\nto validate the server name in the server certificate.The optional send_status_req argument takes a boolean that controls whether to send\nthe OCSP status request in the SSL handshake request (which is for requesting OCSP stapling).For connections that have already done SSL/TLS handshake, this method returns\nimmediately.This method was first introduced in the v0.9.11 release.", + "prefix": "tcpsock:sslhandshake", + "body": "local session, err = tcpsock:sslhandshake(${1:reused_session?}, ${2:server_name?}, ${3:ssl_verify?}, ${4:send_status_req?})" + }, + "tcpsock:send": { + "description": "Sends data without blocking on the current TCP or Unix Domain Socket connection.This method is a synchronous operation that will not return until all the data has been flushed into the system socket send buffer or an error occurs.In case of success, it returns the total number of bytes that have been sent. Otherwise, it returns nil and a string describing the error.The input argument data can either be a Lua string or a (nested) Lua table holding string fragments. In case of table arguments, this method will copy all the string elements piece by piece to the underlying Nginx socket send buffers, which is usually optimal than doing string concatenation operations on the Lua land.Timeout for the sending operation is controlled by the lua_socket_send_timeout config directive and the settimeout method. And the latter takes priority. 
For example: sock:settimeout(1000) -- one second timeout\n local bytes, err = sock:send(request)It is important here to call the settimeout method before calling this method.In case of any connection errors, this method always automatically closes the current connection.This feature was first introduced in the v0.5.0rc1 release.", + "prefix": "tcpsock:send", + "body": "local bytes, err = tcpsock:send(${1:data})" + }, + "tcpsock:receive": { + "description": "Receives data from the connected socket according to the reading pattern or size.This method is a synchronous operation just like the send method and is 100% nonblocking.In case of success, it returns the data received; in case of error, it returns nil with a string describing the error and the partial data received so far.If a number-like argument is specified (including strings that look like numbers), then it is interpreted as a size. This method will not return until it reads exactly this size of data or an error occurs.If a non-number-like string argument is specified, then it is interpreted as a \"pattern\". The following patterns are supported:\n'*a': reads from the socket until the connection is closed. No end-of-line translation is performed;\n'*l': reads a line of text from the socket. The line is terminated by a Line Feed (LF) character (ASCII 10), optionally preceded by a Carriage Return (CR) character (ASCII 13). The CR and LF characters are not included in the returned line. In fact, all CR characters are ignored by the pattern.\nIf no argument is specified, then it is assumed to be the pattern '*l', that is, the line reading pattern.Timeout for the reading operation is controlled by the lua_socket_read_timeout config directive and the settimeout method. And the latter takes priority. 
For example: sock:settimeout(1000) -- one second timeout\n local line, err, partial = sock:receive()\n if not line then\n ngx.say(\"failed to read a line: \", err)\n return\n end\n ngx.say(\"successfully read a line: \", line)It is important here to call the settimeout method before calling this method.Since the v0.8.8 release, this method no longer automatically closes the current connection when the read timeout error happens. For other connection errors, this method always automatically closes the connection.This feature was first introduced in the v0.5.0rc1 release.", + "prefix": "tcpsock:receive", + "body": "local data, err, partial = tcpsock:receive(${1:pattern?})" + }, + "tcpsock:receiveany": { + "description": "Returns any data received by the connected socket, at most max bytes.This method is a synchronous operation just like the send method and is 100% nonblocking.In case of success, it returns the data received; in case of error, it returns nil with a string describing the error.If the received data is more than this size, this method will return with exactly this size of data.\nThe remaining data in the underlying receive buffer could be returned in the next reading operation.Timeout for the reading operation is controlled by the lua_socket_read_timeout config directive and the settimeouts method. And the latter takes priority. For example: sock:settimeouts(1000, 1000, 1000) -- one second timeout for connect/read/write\n local data, err = sock:receiveany(10 * 1024) -- read any data, at most 10K\n if not data then\n ngx.say(\"failed to read any data: \", err)\n return\n end\n ngx.say(\"successfully read: \", data)This method doesn't automatically close the current connection when the read timeout error occurs. 
For other connection errors, this method always automatically closes the connection.This feature was first introduced in the v0.10.14 release.", + "prefix": "tcpsock:receiveany", + "body": "local data, err = tcpsock:receiveany(${1:max})" + }, + "tcpsock:receiveuntil": { + "description": "This method returns an iterator Lua function that can be called to read the data stream until it sees the specified pattern or an error occurs.Here is an example for using this method to read a data stream with the boundary sequence --abcedhb: local reader = sock:receiveuntil(\"\\r\\n--abcedhb\")\n local data, err, partial = reader()\n if not data then\n ngx.say(\"failed to read the data stream: \", err)\n end\n ngx.say(\"read the data stream: \", data)When called without any argument, the iterator function returns the received data right before the specified pattern string in the incoming data stream. So for the example above, if the incoming data stream is 'hello, world! -agentzh\\r\\n--abcedhb blah blah', then the string 'hello, world! -agentzh' will be returned.In case of error, the iterator function will return nil along with a string describing the error and the partial data bytes that have been read so far.The iterator function can be called multiple times and can be mixed safely with other cosocket method calls or other iterator function calls.The iterator function behaves differently (i.e., like a real iterator) when it is called with a size argument. That is, it will read that size of data on each invocation and will return nil at the last invocation (either sees the boundary pattern or meets an error). For the last successful invocation of the iterator function, the err return value will be nil too. The iterator function will be reset after the last successful invocation that returns nil data and nil error. 
Consider the following example: local reader = sock:receiveuntil(\"\\r\\n--abcedhb\")\n\n while true do\n local data, err, partial = reader(4)\n if not data then\n if err then\n ngx.say(\"failed to read the data stream: \", err)\n break\n end\n\n ngx.say(\"read done\")\n break\n end\n ngx.say(\"read chunk: [\", data, \"]\")\n endThen for the incoming data stream 'hello, world! -agentzh\\r\\n--abcedhb blah blah', we shall get the following output from the sample code above:read chunk: [hell]\nread chunk: [o, w]\nread chunk: [orld]\nread chunk: [! -a]\nread chunk: [gent]\nread chunk: [zh]\nread done\nNote that, the actual data returned might be a little longer than the size limit specified by the size argument when the boundary pattern has ambiguity for streaming parsing. Near the boundary of the data stream, the data string actually returned could also be shorter than the size limit.Timeout for the iterator function's reading operation is controlled by the lua_socket_read_timeout config directive and the settimeout method. And the latter takes priority. For example: local readline = sock:receiveuntil(\"\\r\\n\")\n\n sock:settimeout(1000) -- one second timeout\n line, err, partial = readline()\n if not line then\n ngx.say(\"failed to read a line: \", err)\n return\n end\n ngx.say(\"successfully read a line: \", line)It is important here to call the settimeout method before calling the iterator function (note that the receiveuntil call is irrelevant here).As from the v0.5.1 release, this method also takes an optional options table argument to control the behavior. The following options are supported:\ninclusive\nThe inclusive takes a boolean value to control whether to include the pattern string in the returned data string. Default to false. 
For example, local reader = tcpsock:receiveuntil(\"_END_\", { inclusive = true })\n local data = reader()\n ngx.say(data)Then for the input data stream \"hello world _END_ blah blah blah\", then the example above will output hello world _END_, including the pattern string _END_ itself.Since the v0.8.8 release, this method no longer automatically closes the current connection when the read timeout error happens. For other connection errors, this method always automatically closes the connection.This method was first introduced in the v0.5.0rc1 release.", + "prefix": "tcpsock:receiveuntil", + "body": "local iterator = tcpsock:receiveuntil(${1:pattern}, ${2:options?})" + }, + "tcpsock:close": { + "description": "Closes the current TCP or stream unix domain socket. It returns the 1 in case of success and returns nil with a string describing the error otherwise.Note that there is no need to call this method on socket objects that have invoked the setkeepalive method because the socket object is already closed (and the current connection is saved into the built-in connection pool).Socket objects that have not invoked this method (and associated connections) will be closed when the socket object is released by the Lua GC (Garbage Collector) or the current client HTTP request finishes processing.This feature was first introduced in the v0.5.0rc1 release.", + "prefix": "tcpsock:close", + "body": "local ok, err = tcpsock:close()" + }, + "tcpsock:settimeout": { + "description": "Set the timeout value in milliseconds for subsequent socket operations (connect, receive, and iterators returned from receiveuntil).Settings done by this method take priority over those specified via config directives (i.e. 
lua_socket_connect_timeout, lua_socket_send_timeout, and lua_socket_read_timeout).Note that this method does not affect the lua_socket_keepalive_timeout setting; the timeout argument to the setkeepalive method should be used for this purpose instead.This feature was first introduced in the v0.5.0rc1 release.", + "prefix": "tcpsock:settimeout", + "body": "tcpsock:settimeout(${1:time})" + }, + "tcpsock:settimeouts": { + "description": "Respectively sets the connect, send, and read timeout thresholds (in milliseconds) for subsequent socket\noperations (connect, send, receive, and iterators returned from receiveuntil).Settings done by this method take priority over those specified via config directives (i.e. lua_socket_connect_timeout, lua_socket_send_timeout, and lua_socket_read_timeout).It is recommended to use settimeouts instead of settimeout.Note that this method does not affect the lua_socket_keepalive_timeout setting; the timeout argument to the setkeepalive method should be used for this purpose instead.This feature was first introduced in the v0.10.7 release.", + "prefix": "tcpsock:settimeouts", + "body": "tcpsock:settimeouts(${1:connect_timeout}, ${2:send_timeout}, ${3:read_timeout})" + }, + "tcpsock:setoption": { + "description": "This function is added for LuaSocket API compatibility, its functionality is implemented in v0.10.18.This feature was first introduced in the v0.5.0rc1 release.In case of success, it returns true. Otherwise, it returns nil and a string describing the error.The option is a string with the option name, and the value depends on the option being set:\n\nkeepalive\nSetting this option to true enables sending of keep-alive messages on\nconnection-oriented sockets. 
Make sure the connect function\nhad been called before, for example,\nlocal ok, err = tcpsock:setoption(\"keepalive\", true)\nif not ok then\n ngx.say(\"setoption keepalive failed: \", err)\nend\n\n\nreuseaddr\nEnabling this option indicates that the rules used in validating addresses\nsupplied in a call to bind should allow reuse of local addresses. Make sure\nthe connect function had been called before, for example,\nlocal ok, err = tcpsock:setoption(\"reuseaddr\", 0)\nif not ok then\n ngx.say(\"setoption reuseaddr failed: \", err)\nend\n\n\ntcp-nodelay\nSetting this option to true disables the Nagle's algorithm for the connection.\nMake sure the connect function had been called before, for example,\nlocal ok, err = tcpsock:setoption(\"tcp-nodelay\", true)\nif not ok then\n ngx.say(\"setoption tcp-nodelay failed: \", err)\nend\n\n\nsndbuf\nSets the maximum socket send buffer in bytes. The kernel doubles this value\n(to allow space for bookkeeping overhead) when it is set using setsockopt().\nMake sure the connect function had been called before, for example,\nlocal ok, err = tcpsock:setoption(\"sndbuf\", 1024 * 10)\nif not ok then\n ngx.say(\"setoption sndbuf failed: \", err)\nend\n\n\nrcvbuf\nSets the maximum socket receive buffer in bytes. The kernel doubles this value\n(to allow space for bookkeeping overhead) when it is set using setsockopt. Make\nsure the connect function had been called before, for example,\nlocal ok, err = tcpsock:setoption(\"rcvbuf\", 1024 * 10)\nif not ok then\n ngx.say(\"setoption rcvbuf failed: \", err)\nend\n\nNOTE: Once the option is set, it will become effective until the connection is closed. 
If you know the connection is from the connection pool and all the in-pool connections already have called the setoption() method with the desired socket option state, then you can just skip calling setoption() again to avoid the overhead of repeated calls, for example, local count, err = tcpsock:getreusedtimes()\n if not count then\n ngx.say(\"getreusedtimes failed: \", err)\n return\n end\n\n if count == 0 then\n local ok, err = tcpsock:setoption(\"rcvbuf\", 1024 * 10)\n if not ok then\n ngx.say(\"setoption rcvbuf failed: \", err)\n return\n end\n endThese options described above are supported in v0.10.18, and more options will be implemented in future.", + "prefix": "tcpsock:setoption", + "body": "local ok, err = tcpsock:setoption(${1:option}, ${2:value?})" + }, + "tcpsock:setkeepalive": { + "description": "Puts the current socket's connection immediately into the cosocket built-in connection pool and keep it alive until other connect method calls request it or the associated maximal idle timeout is expired.The first optional argument, timeout, can be used to specify the maximal idle timeout (in milliseconds) for the current connection. If omitted, the default setting in the lua_socket_keepalive_timeout config directive will be used. 
If the 0 value is given, then the timeout interval is unlimited.The second optional argument size is considered deprecated since\nthe v0.10.14 release of this module, in favor of the\npool_size option of the connect method.\nSince the v0.10.14 release, this option will only take effect if\nthe call to connect did not already create a connection\npool.\nWhen this option takes effect (no connection pool was previously created by\nconnect), it will specify the size of the connection pool,\nand create it.\nIf omitted (and no pool was previously created), the default size is the value\nof the lua_socket_pool_size directive.\nThe connection pool holds up to size alive connections ready to be\nreused by subsequent calls to connect, but note that there\nis no upper limit to the total number of opened connections outside of the\npool.\nWhen the connection pool would exceed its size limit, the least recently used\n(kept-alive) connection already in the pool will be closed to make room for\nthe current connection.\nNote that the cosocket connection pool is per Nginx worker process rather\nthan per Nginx server instance, so the size limit specified here also applies\nto every single Nginx worker process. 
Also note that the size of the connection\npool cannot be changed once it has been created.\nIf you need to restrict the total number of opened connections, specify both\nthe pool_size and backlog option in the call to\nconnect.In case of success, this method returns 1; otherwise, it returns nil and a string describing the error.When the system receive buffer for the current connection has unread data, then this method will return the \"connection in dubious state\" error message (as the second return value) because the previous session has unread data left behind for the next session and the connection is not safe to be reused.This method also makes the current cosocket object enter the \"closed\" state, so there is no need to manually call the close method on it afterwards.This feature was first introduced in the v0.5.0rc1 release.", + "prefix": "tcpsock:setkeepalive", + "body": "local ok, err = tcpsock:setkeepalive(${1:timeout?}, ${2:size?})" + }, + "tcpsock:getreusedtimes": { + "description": "This method returns the (successfully) reused times for the current connection. In case of error, it returns nil and a string describing the error.If the current connection does not come from the built-in connection pool, then this method always returns 0, that is, the connection has never been reused (yet). If the connection comes from the connection pool, then the return value is always non-zero. So this method can also be used to determine if the current connection comes from the pool.This feature was first introduced in the v0.5.0rc1 release.", + "prefix": "tcpsock:getreusedtimes", + "body": "local count, err = tcpsock:getreusedtimes()" + }, + "ngx.socket.connect": { + "description": "This function is a shortcut for combining ngx.socket.tcp() and the connect() method call in a single operation. 
It is actually implemented like this: local sock = ngx.socket.tcp()\n local ok, err = sock:connect(...)\n if not ok then\n return nil, err\n end\n return sockThere is no way to use the settimeout method to specify connecting timeout for this method and the lua_socket_connect_timeout directive must be set at configure time instead.This feature was first introduced in the v0.5.0rc1 release.", + "prefix": "ngx.socket.connect", + "body": "local tcpsock, err = ngx.socket.connect(\"unix:/path/to/unix-domain.socket\")" + }, + "ngx.get_phase": { + "description": "Retrieves the current running phase name. Possible return values are\ninit\nfor the context of init_by_lua*.\ninit_worker\nfor the context of init_worker_by_lua*.\nssl_cert\nfor the context of ssl_certificate_by_lua*.\nssl_session_fetch\nfor the context of ssl_session_fetch_by_lua*.\nssl_session_store\nfor the context of ssl_session_store_by_lua*.\nssl_client_hello\nfor the context of ssl_client_hello_by_lua*.\nset\nfor the context of set_by_lua*.\nrewrite\nfor the context of rewrite_by_lua*.\nbalancer\nfor the context of balancer_by_lua*.\naccess\nfor the context of access_by_lua*.\ncontent\nfor the context of content_by_lua*.\nheader_filter\nfor the context of header_filter_by_lua*.\nbody_filter\nfor the context of body_filter_by_lua*.\nlog\nfor the context of log_by_lua*.\ntimer\nfor the context of user callback functions for ngx.timer.*.\nexit_worker\nfor the context of exit_worker_by_lua*.\nThis API was first introduced in the v0.5.10 release.", + "prefix": "ngx.get_phase", + "body": "local str = ngx.get_phase()" + }, + "ngx.thread.spawn": { + "description": "Spawns a new user \"light thread\" with the Lua function func as well as those optional arguments arg1, arg2, and etc. 
Returns a Lua thread (or Lua coroutine) object represents this \"light thread\".\"Light threads\" are just a special kind of Lua coroutines that are scheduled by the ngx_lua module.Before ngx.thread.spawn returns, the func will be called with those optional arguments until it returns, aborts with an error, or gets yielded due to I/O operations via the Nginx API for Lua (like tcpsock:receive).After ngx.thread.spawn returns, the newly-created \"light thread\" will keep running asynchronously usually at various I/O events.All the Lua code chunks running by rewrite_by_lua, access_by_lua, and content_by_lua are in a boilerplate \"light thread\" created automatically by ngx_lua. Such boilerplate \"light thread\" are also called \"entry threads\".By default, the corresponding Nginx handler (e.g., rewrite_by_lua handler) will not terminate until\nboth the \"entry thread\" and all the user \"light threads\" terminates,\na \"light thread\" (either the \"entry thread\" or a user \"light thread\") aborts by calling ngx.exit, ngx.exec, ngx.redirect, or ngx.req.set_uri(uri, true), or\nthe \"entry thread\" terminates with a Lua error.\nWhen the user \"light thread\" terminates with a Lua error, however, it will not abort other running \"light threads\" like the \"entry thread\" does.Due to the limitation in the Nginx subrequest model, it is not allowed to abort a running Nginx subrequest in general. So it is also prohibited to abort a running \"light thread\" that is pending on one or more Nginx subrequests. You must call ngx.thread.wait to wait for those \"light thread\" to terminate before quitting the \"world\". A notable exception here is that you can abort pending subrequests by calling ngx.exit with and only with the status code ngx.ERROR (-1), 408, 444, or 499.The \"light threads\" are not scheduled in a pre-emptive way. In other words, no time-slicing is performed automatically. 
A \"light thread\" will keep running exclusively on the CPU until\na (nonblocking) I/O operation cannot be completed in a single run,\nit calls coroutine.yield to actively give up execution, or\nit is aborted by a Lua error or an invocation of ngx.exit, ngx.exec, ngx.redirect, or ngx.req.set_uri(uri, true).\nFor the first two cases, the \"light thread\" will usually be resumed later by the ngx_lua scheduler unless a \"stop-the-world\" event happens.User \"light threads\" can create \"light threads\" themselves. And normal user coroutines created by coroutine.create can also create \"light threads\". The coroutine (be it a normal Lua coroutine or a \"light thread\") that directly spawns the \"light thread\" is called the \"parent coroutine\" for the \"light thread\" newly spawned.The \"parent coroutine\" can call ngx.thread.wait to wait on the termination of its child \"light thread\".You can call coroutine.status() and coroutine.yield() on the \"light thread\" coroutines.The status of the \"light thread\" coroutine can be \"zombie\" if\nthe current \"light thread\" already terminates (either successfully or with an error),\nits parent coroutine is still alive, and\nits parent coroutine is not waiting on it with ngx.thread.wait.\nThe following example demonstrates the use of coroutine.yield() in the \"light thread\" coroutines\nto do manual time-slicing: local yield = coroutine.yield\n\n function f()\n local self = coroutine.running()\n ngx.say(\"f 1\")\n yield(self)\n ngx.say(\"f 2\")\n yield(self)\n ngx.say(\"f 3\")\n end\n\n local self = coroutine.running()\n ngx.say(\"0\")\n yield(self)\n\n ngx.say(\"1\")\n ngx.thread.spawn(f)\n\n ngx.say(\"2\")\n yield(self)\n\n ngx.say(\"3\")\n yield(self)\n\n ngx.say(\"4\")Then it will generate the output0\n1\nf 1\n2\nf 2\n3\nf 3\n4\n\"Light threads\" are mostly useful for making concurrent upstream requests in a single Nginx request handler, much like a generalized version of ngx.location.capture_multi that can work with all 
the Nginx API for Lua. The following example demonstrates parallel requests to MySQL, Memcached, and upstream HTTP services in a single Lua handler, and outputting the results in the order that they actually return (similar to Facebook's BigPipe model): -- query mysql, memcached, and a remote http service at the same time,\n -- output the results in the order that they\n -- actually return the results.\n\n local mysql = require \"resty.mysql\"\n local memcached = require \"resty.memcached\"\n\n local function query_mysql()\n local db = mysql:new()\n db:connect{\n host = \"127.0.0.1\",\n port = 3306,\n database = \"test\",\n user = \"monty\",\n password = \"mypass\"\n }\n local res, err, errno, sqlstate =\n db:query(\"select * from cats order by id asc\")\n db:set_keepalive(0, 100)\n ngx.say(\"mysql done: \", cjson.encode(res))\n end\n\n local function query_memcached()\n local memc = memcached:new()\n memc:connect(\"127.0.0.1\", 11211)\n local res, err = memc:get(\"some_key\")\n ngx.say(\"memcached done: \", res)\n end\n\n local function query_http()\n local res = ngx.location.capture(\"/my-http-proxy\")\n ngx.say(\"http done: \", res.body)\n end\n\n ngx.thread.spawn(query_mysql) -- create thread 1\n ngx.thread.spawn(query_memcached) -- create thread 2\n ngx.thread.spawn(query_http) -- create thread 3This API was first enabled in the v0.7.0 release.", + "prefix": "ngx.thread.spawn", + "body": "local co = ngx.thread.spawn(${1:func}, ${2:arg1}, ${3:arg2}, ...)" + }, + "ngx.thread.wait": { + "description": "Waits on one or more child \"light threads\" and returns the results of the first \"light thread\" that terminates (either successfully or with an error).The arguments thread1, thread2, and etc are the Lua thread objects returned by earlier calls of ngx.thread.spawn.The return values have exactly the same meaning as coroutine.resume, that is, the first value returned is a boolean value indicating whether the \"light thread\" terminates successfully or not, and 
subsequent values returned are the return values of the user Lua function that was used to spawn the \"light thread\" (in case of success) or the error object (in case of failure).Only the direct \"parent coroutine\" can wait on its child \"light thread\", otherwise a Lua exception will be raised.The following example demonstrates the use of ngx.thread.wait and ngx.location.capture to emulate ngx.location.capture_multi: local capture = ngx.location.capture\n local spawn = ngx.thread.spawn\n local wait = ngx.thread.wait\n local say = ngx.say\n\n local function fetch(uri)\n return capture(uri)\n end\n\n local threads = {\n spawn(fetch, \"/foo\"),\n spawn(fetch, \"/bar\"),\n spawn(fetch, \"/baz\")\n }\n\n for i = 1, #threads do\n local ok, res = wait(threads[i])\n if not ok then\n say(i, \": failed to run: \", res)\n else\n say(i, \": status: \", res.status)\n say(i, \": body: \", res.body)\n end\n endHere it essentially implements the \"wait all\" model.And below is an example demonstrating the \"wait any\" model: function f()\n ngx.sleep(0.2)\n ngx.say(\"f: hello\")\n return \"f done\"\n end\n\n function g()\n ngx.sleep(0.1)\n ngx.say(\"g: hello\")\n return \"g done\"\n end\n\n local tf, err = ngx.thread.spawn(f)\n if not tf then\n ngx.say(\"failed to spawn thread f: \", err)\n return\n end\n\n ngx.say(\"f thread created: \", coroutine.status(tf))\n\n local tg, err = ngx.thread.spawn(g)\n if not tg then\n ngx.say(\"failed to spawn thread g: \", err)\n return\n end\n\n ngx.say(\"g thread created: \", coroutine.status(tg))\n\n ok, res = ngx.thread.wait(tf, tg)\n if not ok then\n ngx.say(\"failed to wait: \", res)\n return\n end\n\n ngx.say(\"res: \", res)\n\n -- stop the \"world\", aborting other running threads\n ngx.exit(ngx.OK)And it will generate the following output:f thread created: running\ng thread created: running\ng: hello\nres: g done\nThis API was first enabled in the v0.7.0 release.", + "prefix": "ngx.thread.wait", + "body": "local ok, res1, res2, ... 
= ngx.thread.wait(${1:thread1}, ${2:thread2}, ...)" + }, + "ngx.thread.kill": { + "description": "Kills a running \"light thread\" created by ngx.thread.spawn. Returns a true value when successful or nil and a string describing the error otherwise.According to the current implementation, only the parent coroutine (or \"light thread\") can kill a thread. Also, a running \"light thread\" with pending Nginx subrequests (initiated by ngx.location.capture for example) cannot be killed due to a limitation in the Nginx core.This API was first enabled in the v0.9.9 release.", + "prefix": "ngx.thread.kill", + "body": "local ok, err = ngx.thread.kill(${1:thread})" + }, + "ngx.on_abort": { + "description": "Registers a user Lua function as the callback which gets called automatically when the client closes the (downstream) connection prematurely.Returns 1 if the callback is registered successfully or returns nil and a string describing the error otherwise.All the Nginx API for Lua can be used in the callback function because the function is run in a special \"light thread\", just as those \"light threads\" created by ngx.thread.spawn.The callback function can decide what to do with the client abortion event all by itself. For example, it can simply ignore the event by doing nothing and the current Lua request handler will continue executing without interruptions. 
And the callback function can also decide to terminate everything by calling ngx.exit, for example, local function my_cleanup()\n -- custom cleanup work goes here, like cancelling a pending DB transaction\n\n -- now abort all the \"light threads\" running in the current request handler\n ngx.exit(499)\n end\n\n local ok, err = ngx.on_abort(my_cleanup)\n if not ok then\n ngx.log(ngx.ERR, \"failed to register the on_abort callback: \", err)\n ngx.exit(500)\n endWhen lua_check_client_abort is set to off (which is the default), then this function call will always return the error message \"lua_check_client_abort is off\".According to the current implementation, this function can only be called once in a single request handler; subsequent calls will return the error message \"duplicate call\".This API was first introduced in the v0.7.4 release.See also lua_check_client_abort.", + "prefix": "ngx.on_abort", + "body": "local ok, err = ngx.on_abort(${1:callback})" + }, + "ngx.timer.at": { + "description": "Creates an Nginx timer with a user callback function as well as optional user arguments.The first argument, delay, specifies the delay for the timer,\nin seconds. One can specify fractional seconds like 0.001 to mean 1\nmillisecond here. 0 delay can also be specified, in which case the\ntimer will immediately expire when the current handler yields\nexecution.The second argument, callback, can\nbe any Lua function, which will be invoked later in a background\n\"light thread\" after the delay specified. 
The user callback will be\ncalled automatically by the Nginx core with the arguments premature,\nuser_arg1, user_arg2, and etc, where the premature\nargument takes a boolean value indicating whether it is a premature timer\nexpiration or not(for the 0 delay timer it is always false), and user_arg1, user_arg2, and etc, are\nthose (extra) user arguments specified when calling ngx.timer.at\nas the remaining arguments.Premature timer expiration happens when the Nginx worker process is\ntrying to shut down, as in an Nginx configuration reload triggered by\nthe HUP signal or in an Nginx server shutdown. When the Nginx worker\nis trying to shut down, one can no longer call ngx.timer.at to\ncreate new timers with nonzero delays and in that case ngx.timer.at will return a \"conditional false\" value and\na string describing the error, that is, \"process exiting\".Starting from the v0.9.3 release, it is allowed to create zero-delay timers even when the Nginx worker process starts shutting down.When a timer expires, the user Lua code in the timer callback is\nrunning in a \"light thread\" detached completely from the original\nrequest creating the timer. 
So objects with the same lifetime as the\nrequest creating them, like cosockets, cannot be shared between the\noriginal request and the timer user callback function.Here is a simple example: location / {\n ...\n log_by_lua_block {\n local function push_data(premature, uri, args, status)\n -- push the data uri, args, and status to the remote\n -- via ngx.socket.tcp or ngx.socket.udp\n -- (one may want to buffer the data in Lua a bit to\n -- save I/O operations)\n end\n local ok, err = ngx.timer.at(0, push_data,\n ngx.var.uri, ngx.var.args, ngx.header.status)\n if not ok then\n ngx.log(ngx.ERR, \"failed to create timer: \", err)\n return\n end\n\n -- other job in log_by_lua_block\n }\n }One can also create infinite re-occurring timers, for instance, a timer getting triggered every 5 seconds, by calling ngx.timer.at recursively in the timer callback function. Here is such an example, local delay = 5\n local handler\n handler = function (premature)\n -- do some routine job in Lua just like a cron job\n if premature then\n return\n end\n local ok, err = ngx.timer.at(delay, handler)\n if not ok then\n ngx.log(ngx.ERR, \"failed to create the timer: \", err)\n return\n end\n\n -- do something in timer\n end\n\n local ok, err = ngx.timer.at(delay, handler)\n if not ok then\n ngx.log(ngx.ERR, \"failed to create the timer: \", err)\n return\n end\n\n -- do other jobsIt is recommended, however, to use the ngx.timer.every API function\ninstead for creating recurring timers since it is more robust.Because timer callbacks run in the background and their running time\nwill not add to any client request's response time, they can easily\naccumulate in the server and exhaust system resources due to either\nLua programming mistakes or just too much client traffic. To prevent\nextreme consequences like crashing the Nginx server, there are\nbuilt-in limitations on both the number of \"pending timers\" and the\nnumber of \"running timers\" in an Nginx worker process. 
The \"pending\ntimers\" here mean timers that have not yet been expired and \"running\ntimers\" are those whose user callbacks are currently running.The maximal number of pending timers allowed in an Nginx\nworker is controlled by the lua_max_pending_timers\ndirective. The maximal number of running timers is controlled by the\nlua_max_running_timers directive.According to the current implementation, each \"running timer\" will\ntake one (fake) connection record from the global connection record\nlist configured by the standard worker_connections directive in\nnginx.conf. So ensure that the\nworker_connections directive is set to\na large enough value that takes into account both the real connections\nand fake connections required by timer callbacks (as limited by the\nlua_max_running_timers directive).A lot of the Lua APIs for Nginx are enabled in the context of the timer\ncallbacks, like stream/datagram cosockets (ngx.socket.tcp and ngx.socket.udp), shared\nmemory dictionaries (ngx.shared.DICT), user coroutines (coroutine.*),\nuser \"light threads\" (ngx.thread.*), ngx.exit, ngx.now/ngx.time,\nngx.md5/ngx.sha1_bin, are all allowed. But the subrequest API (like\nngx.location.capture), the ngx.req.* API, the downstream output API\n(like ngx.say, ngx.print, and ngx.flush) are explicitly disabled in\nthis context.You must notice that each timer will be based on a fake request (this fake request is also based on a fake connection). Because Nginx's memory release is based on the connection closure, if you run a lot of APIs that apply for memory resources in a timer, such as tcpsock:connect, will cause the accumulation of memory resources. So it is recommended to create a new timer after running several times to release memory resources.You can pass most of the standard Lua values (nils, booleans, numbers, strings, tables, closures, file handles, and etc) into the timer callback, either explicitly as user arguments or implicitly as upvalues for the callback closure. 
There are several exceptions, however: you cannot pass any thread objects returned by coroutine.create and ngx.thread.spawn or any cosocket objects returned by ngx.socket.tcp, ngx.socket.udp, and ngx.req.socket because these objects' lifetime is bound to the request context creating them while the timer callback is detached from the creating request's context (by design) and runs in its own (fake) request context. If you try to share the thread or cosocket objects across the boundary of the creating request, then you will get the \"no co ctx found\" error (for threads) or \"bad request\" (for cosockets). It is fine, however, to create all these objects inside your timer callback.Please note that the timer Lua handler has its own copy of the ngx.ctx magic\ntable. It won't share the same ngx.ctx with the Lua handler creating the timer.\nIf you need to pass data from the timer creator to the timer handler, please\nuse the extra parameters of ngx.timer.at().This API was first introduced in the v0.8.0 release.", + "prefix": "ngx.timer.at", + "body": "local hdl, err = ngx.timer.at(${1:delay}, ${2:callback}, ${3:user_arg1}, ${4:user_arg2}, ...)" + }, + "ngx.timer.every": { + "description": "Similar to the ngx.timer.at API function, but\ndelay cannot be zero,\ntimer will be created every delay seconds until the current Nginx worker process starts exiting.\nLike ngx.timer.at, the callback argument will be called\nautomatically with the arguments premature, user_arg1, user_arg2, etc.When success, returns a \"conditional true\" value (but not a true). 
Otherwise, returns a \"conditional false\" value and a string describing the error.This API also respects the lua_max_pending_timers and lua_max_running_timers.This API was first introduced in the v0.10.9 release.",
+        "prefix": "ngx.timer.every",
+        "body": "local hdl, err = ngx.timer.every(${1:delay}, ${2:callback}, ${3:user_arg1}, ${4:user_arg2}, ...)"
+    },
+    "ngx.timer.running_count": {
+        "description": "Returns the number of timers currently running.This directive was first introduced in the v0.9.20 release.",
+        "prefix": "ngx.timer.running_count",
+        "body": "local count = ngx.timer.running_count()"
+    },
+    "ngx.timer.pending_count": {
+        "description": "Returns the number of pending timers.This directive was first introduced in the v0.9.20 release.",
+        "prefix": "ngx.timer.pending_count",
+        "body": "local count = ngx.timer.pending_count()"
+    },
+    "ngx.config.subsystem": {
+        "description": "This string field indicates the Nginx subsystem the current Lua environment is based on. For this module, this field always takes the string value \"http\". 
For\nngx_stream_lua_module, however, this field takes the value \"stream\".This field was first introduced in the 0.10.1.", + "prefix": "ngx.config.subsystem", + "body": "local subsystem = ngx.config.subsystem" + }, + "ngx.config.debug": { + "description": "This boolean field indicates whether the current Nginx is a debug build, i.e., being built by the ./configure option --with-debug.This field was first introduced in the 0.8.7.", + "prefix": "ngx.config.debug", + "body": "local debug = ngx.config.debug" + }, + "ngx.config.prefix": { + "description": "Returns the Nginx server \"prefix\" path, as determined by the -p command-line option when running the Nginx executable, or the path specified by the --prefix command-line option when building Nginx with the ./configure script.This function was first introduced in the 0.9.2.", + "prefix": "ngx.config.prefix", + "body": "local prefix = ngx.config.prefix()" + }, + "ngx.config.nginx_version": { + "description": "This field take an integral value indicating the version number of the current Nginx core being used. For example, the version number 1.4.3 results in the Lua number 1004003.This API was first introduced in the 0.9.3 release.", + "prefix": "ngx.config.nginx_version", + "body": "local ver = ngx.config.nginx_version" + }, + "ngx.config.nginx_configure": { + "description": "This function returns a string for the Nginx ./configure command's arguments string.This API was first introduced in the 0.9.5 release.", + "prefix": "ngx.config.nginx_configure", + "body": "local str = ngx.config.nginx_configure()" + }, + "ngx.config.ngx_lua_version": { + "description": "This field take an integral value indicating the version number of the current ngx_lua module being used. 
For example, the version number 0.9.3 results in the Lua number 9003.This API was first introduced in the 0.9.3 release.", + "prefix": "ngx.config.ngx_lua_version", + "body": "local ver = ngx.config.ngx_lua_version" + }, + "ngx.worker.exiting": { + "description": "This function returns a boolean value indicating whether the current Nginx worker process already starts exiting. Nginx worker process exiting happens on Nginx server quit or configuration reload (aka HUP reload).This API was first introduced in the 0.9.3 release.", + "prefix": "ngx.worker.exiting", + "body": "local exiting = ngx.worker.exiting()" + }, + "ngx.worker.pid": { + "description": "This function returns a Lua number for the process ID (PID) of the current Nginx worker process. This API is more efficient than ngx.var.pid and can be used in contexts where the ngx.var.VARIABLE API cannot be used (like init_worker_by_lua).This API was first introduced in the 0.9.5 release.", + "prefix": "ngx.worker.pid", + "body": "local pid = ngx.worker.pid()" + }, + "ngx.worker.pids": { + "description": "This function returns a Lua table for all Nginx worker process IDs (PIDs). Nginx uses channel to send the current worker PID to another worker in the worker process start or restart. So this API can get all current worker PIDs. 
Windows does not have this API.This API was first introduced in the 0.10.23 release.", + "prefix": "ngx.worker.pids", + "body": "local pids = ngx.worker.pids()" + }, + "ngx.worker.count": { + "description": "Returns the total number of the Nginx worker processes (i.e., the value configured\nby the worker_processes\ndirective in nginx.conf).This API was first introduced in the 0.9.20 release.", + "prefix": "ngx.worker.count", + "body": "local count = ngx.worker.count()" + }, + "ngx.worker.id": { + "description": "Returns the ordinal number of the current Nginx worker processes (starting from number 0).So if the total number of workers is N, then this method may return a number between 0\nand N - 1 (inclusive).This function returns meaningful values only for Nginx 1.9.1+. With earlier versions of Nginx, it\nalways returns nil.See also ngx.worker.count.This API was first introduced in the 0.9.20 release.", + "prefix": "ngx.worker.id", + "body": "local id = ngx.worker.id()" + }, + "ngx.semaphore": { + "description": "This is a Lua module that implements a classic-style semaphore API for efficient synchronizations among\ndifferent \"light threads\". 
Sharing the same semaphore among different \"light threads\" created in different (request)\ncontexts are also supported as long as the \"light threads\" reside in the same Nginx worker process\nand the lua_code_cache directive is turned on (which is the default).This Lua module does not ship with this ngx_lua module itself rather it is shipped with\nthe\nlua-resty-core library.Please refer to the documentation\nfor this ngx.semaphore Lua module in lua-resty-core\nfor more details.This feature requires at least ngx_lua v0.10.0.", + "prefix": "ngx.semaphore", + "body": "local semaphore = require \"ngx.semaphore\"" + }, + "ngx.balancer": { + "description": "This is a Lua module that provides a Lua API to allow defining completely dynamic load balancers\nin pure Lua.This Lua module does not ship with this ngx_lua module itself rather it is shipped with\nthe\nlua-resty-core library.Please refer to the documentation\nfor this ngx.balancer Lua module in lua-resty-core\nfor more details.This feature requires at least ngx_lua v0.10.0.", + "prefix": "ngx.balancer", + "body": "local balancer = require \"ngx.balancer\"" + }, + "ngx.ssl": { + "description": "This Lua module provides API functions to control the SSL handshake process in contexts like\nssl_certificate_by_lua*.This Lua module does not ship with this ngx_lua module itself rather it is shipped with\nthe\nlua-resty-core library.Please refer to the documentation\nfor this ngx.ssl Lua module for more details.This feature requires at least ngx_lua v0.10.0.", + "prefix": "ngx.ssl", + "body": "local ssl = require \"ngx.ssl\"" + }, + "ngx.ocsp": { + "description": "This Lua module provides API to perform OCSP queries, OCSP response validations, and\nOCSP stapling planting.Usually, this module is used together with the ngx.ssl\nmodule in the\ncontext of ssl_certificate_by_lua*.This Lua module does not ship with this ngx_lua module itself rather it is shipped with\nthe\nlua-resty-core library.Please refer to the 
documentation\nfor this ngx.ocsp Lua module for more details.This feature requires at least ngx_lua v0.10.0.", + "prefix": "ngx.ocsp", + "body": "local ocsp = require \"ngx.ocsp\"" + }, + "ndk.set_var.DIRECTIVE": { + "description": "This mechanism allows calling other Nginx C modules' directives that are implemented by Nginx Devel Kit (NDK)'s set_var submodule's ndk_set_var_value.For example, the following set-misc-nginx-module directives can be invoked this way:\nset_quote_sql_str\nset_quote_pgsql_str\nset_quote_json_str\nset_unescape_uri\nset_escape_uri\nset_encode_base32\nset_decode_base32\nset_encode_base64\nset_decode_base64\nset_encode_hex\nset_decode_hex\nset_sha1\nset_md5\nFor instance, local res = ndk.set_var.set_escape_uri('a/b')\n -- now res == 'a%2fb'Similarly, the following directives provided by encrypted-session-nginx-module can be invoked from within Lua too:\nset_encrypt_session\nset_decrypt_session\nThis feature requires the ngx_devel_kit module.", + "prefix": "ndk.set_var.DIRECTIVE", + "body": "local res = ndk.set_var.DIRECTIVE_NAME" + }, + "coroutine.create": { + "description": "Creates a user Lua coroutines with a Lua function, and returns a coroutine object.Similar to the standard Lua coroutine.create API, but works in the context of the Lua coroutines created by ngx_lua.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first introduced in the v0.6.0 release.", + "prefix": "coroutine.create", + "body": "local co = coroutine.create(${1:f})" + }, + "coroutine.resume": { + "description": "Resumes the execution of a user Lua coroutine object previously yielded or just created.Similar to the standard Lua coroutine.resume API, but works in the context of the Lua coroutines created by ngx_lua.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first introduced in the v0.6.0 release.", + "prefix": "coroutine.resume", + "body": "local ok, ... 
= coroutine.resume(${1:co}, ...)" + }, + "coroutine.yield": { + "description": "Yields the execution of the current user Lua coroutine.Similar to the standard Lua coroutine.yield API, but works in the context of the Lua coroutines created by ngx_lua.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first introduced in the v0.6.0 release.", + "prefix": "coroutine.yield", + "body": "local ... = coroutine.yield(...)" + }, + "coroutine.wrap": { + "description": "Similar to the standard Lua coroutine.wrap API, but works in the context of the Lua coroutines created by ngx_lua.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first introduced in the v0.6.0 release.", + "prefix": "coroutine.wrap", + "body": "local co = coroutine.wrap(${1:f})" + }, + "coroutine.running": { + "description": "Identical to the standard Lua coroutine.running API.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first enabled in the v0.6.0 release.", + "prefix": "coroutine.running", + "body": "local co = coroutine.running()" + }, + "coroutine.status": { + "description": "Identical to the standard Lua coroutine.status API.This API was first usable in the context of init_by_lua* since the 0.9.2.This API was first enabled in the v0.6.0 release.", + "prefix": "coroutine.status", + "body": "local status = coroutine.status(${1:co})" + }, + "ngx.run_worker_thread": { + "description": "This API is still experimental and may change in the future without notice.This API is available only for Linux.Wrap the nginx worker thread to execute lua function. 
The caller coroutine would yield until the function returns.Only the following ngx_lua APIs could be used in function_name function of the module module:\n\nngx.encode_base64\n\n\nngx.decode_base64\n\n\nngx.hmac_sha1\n\n\nngx.encode_args\n\n\nngx.decode_args\n\n\nngx.quote_sql_str\n\n\nngx.crc32_short\n\n\nngx.crc32_long\n\n\nngx.hmac_sha1\n\n\nngx.md5_bin\n\n\nngx.md5\n\n\nngx.config.subsystem\n\n\nngx.config.debug\n\n\nngx.config.prefix\n\n\nngx.config.nginx_version\n\n\nngx.config.nginx_configure\n\n\nngx.config.ngx_lua_version\n\n\nngx.shared.DICT\n\nThe first argument threadpool specifies the Nginx thread pool name defined by thread_pool.The second argument module_name specifies the lua module name to execute in the worker thread, which would return a lua table. The module must be inside the package path, e.g. lua_package_path '/opt/openresty/?.lua;;';The third argument func_name specifies the function field in the module table as the second argument.The type of args must be one of type below:\nboolean\nnumber\nstring\nnil\ntable (the table may be recursive, and contains members of types above.)\nThe ok is in boolean type, which indicate the C land error (failed to get thread from thread pool, pcall the module function failed, .etc). If ok is false, the res1 is the error string.The return values (res1, ...) are returned by invocation of the module function. Normally, the res1 should be in boolean type, so that the caller could inspect the error.This API is useful when you need to execute the below types of tasks:\nCPU bound task, e.g. do md5 calculation\nFile I/O task\nCall os.execute() or blocking C API via ffi\nCall external Lua library not based on cosocket or nginx\nExample1: do md5 calculation. 
location /calc_md5 {\n default_type 'text/plain';\n\n content_by_lua_block {\n local ok, md5_or_err = ngx.run_worker_thread(\"testpool\", \"md5\", \"md5\")\n ngx.say(ok, \" : \", md5_or_err)\n }\n }md5.lualocal function md5()\n return ngx.md5(\"hello\")\nend\n\nreturn { md5=md5, }Example2: write logs into the log file. location /write_log_file {\n default_type 'text/plain';\n\n content_by_lua_block {\n local ok, err = ngx.run_worker_thread(\"testpool\", \"write_log_file\", \"log\", ngx.var.arg_str)\n if not ok then\n ngx.say(ok, \" : \", err)\n return\n end\n ngx.say(ok)\n }\n }write_log_file.lua local function log(str)\n local file, err = io.open(\"/tmp/tmp.log\", \"a\")\n if not file then\n return false, err\n end\n file:write(str)\n file:flush()\n file:close()\n return true\n end\n return {log=log}", + "prefix": "ngx.run_worker_thread", + "body": "local ok, res1, res2, ... = ngx.run_worker_thread(${1:threadpool}, ${2:module_name}, ${3:func_name}, ${4:arg1}, ${5:arg2}, ...)" + }, + "memcached.new": { + "description": "Creates a memcached object. In case of failures, returns nil and a string describing the error.It accepts an optional opts table argument. The following options are supported:\n\nkey_transform\nan array table containing two functions for escaping and unescaping the\nmemcached keys, respectively. 
By default,\nthe memcached keys will be escaped and unescaped as URI components, that is\n\n memcached:new{\n key_transform = { ngx.escape_uri, ngx.unescape_uri }\n }",
+        "prefix": "memcached.new",
+        "body": "local memc, err = memcached:new($opts?)"
+    },
+    "memcached.connect": {
+        "description": "local ok, err = memc:connect(\"unix:/path/to/unix.sock\")\nAttempts to connect to the remote host and port that the memcached server is listening to or a local unix domain socket file listened by the memcached server.Before actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method.",
+        "prefix": "memcached.connect",
+        "body": "local ok, err = memc:connect($host, $port)"
+    },
+    "memcached.sslhandshake": {
+        "description": "Does SSL/TLS handshake on the currently established connection. See the\ntcpsock.sslhandshake\nAPI from OpenResty for more details.",
+        "prefix": "memcached.sslhandshake",
+        "body": "local session, err = memc:sslhandshake($reused_session?, $server_name?, $ssl_verify?, $send_status_req?)"
+    },
+    "memcached.set": {
+        "description": "Inserts an entry into memcached unconditionally. If the key already exists, overrides it.The value argument could also be a Lua table holding multiple Lua\nstrings that are supposed to be concatenated as a whole\n(without any delimiters). For example, memc:set(\"dog\", {\"a \", {\"kind of\"}, \" animal\"})is functionally equivalent to memc:set(\"dog\", \"a kind of animal\")The exptime parameter is optional and defaults to 0 (meaning never expires). 
The expiration time is in seconds.The flags parameter is optional and defaults to 0.", + "prefix": "memcached.set", + "body": "local ok, err = memc:set($key, $value, $exptime, $flags)" + }, + "memcached.set_timeout": { + "description": "Sets the timeout (in ms) protection for subsequent operations, including the connect method.Returns 1 when successful and nil plus a string describing the error otherwise.", + "prefix": "memcached.set_timeout", + "body": "local ok, err = memc:set_timeout($timeout)" + }, + "memcached.set_timeouts": { + "description": "Sets the timeouts (in ms) for connect, send and read operations respectively.Returns 1 when successful and nil plus a string describing the error otherwise.set_keepalivelocal ok, err = memc:set_keepalive($max_idle_timeout, $pool_size)\nPuts the current memcached connection immediately into the ngx_lua cosocket connection pool.You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process.In case of success, returns 1. In case of errors, returns nil with a string describing the error.Only call this method in the place you would have called the close method instead. Calling this method will immediately turn the current memcached object into the closed state. Any subsequent operations other than connect() on the current object will return the closed error.", + "prefix": "memcached.set_timeouts", + "body": "local ok, err = memc:set_timeouts($connect_timeout, $send_timeout, $read_timeout)" + }, + "memcached.set_keepalive": { + "description": "Puts the current memcached connection immediately into the ngx_lua cosocket connection pool.You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process.In case of success, returns 1. In case of errors, returns nil with a string describing the error.Only call this method in the place you would have called the close method instead. 
Calling this method will immediately turn the current memcached object into the closed state. Any subsequent operations other than connect() on the current object will return the closed error.", + "prefix": "memcached.set_keepalive", + "body": "local ok, err = memc:set_keepalive($max_idle_timeout, $pool_size)" + }, + "memcached.get_reused_times": { + "description": "This method returns the (successfully) reused times for the current connection. In case of error, it returns nil and a string describing the error.If the current connection does not come from the built-in connection pool, then this method always returns 0, that is, the connection has never been reused (yet). If the connection comes from the connection pool, then the return value is always non-zero. So this method can also be used to determine if the current connection comes from the pool.", + "prefix": "memcached.get_reused_times", + "body": "local times, err = memc:get_reused_times()" + }, + "memcached.close": { + "description": "Closes the current memcached connection and returns the status.In case of success, returns 1. In case of errors, returns nil with a string describing the error.", + "prefix": "memcached.close", + "body": "local ok, err = memc:close()" + }, + "memcached.add": { + "description": "Inserts an entry into memcached if and only if the key does not exist.The value argument could also be a Lua table holding multiple Lua\nstrings that are supposed to be concatenated as a whole\n(without any delimiters). For example, memc:add(\"dog\", {\"a \", {\"kind of\"}, \" animal\"})is functionally equivalent to memc:add(\"dog\", \"a kind of animal\")The exptime parameter is optional and defaults to 0 (meaning never expires). The expiration time is in seconds.The flags parameter is optional, defaults to 0.In case of success, returns 1. 
In case of errors, returns nil with a string describing the error.", + "prefix": "memcached.add", + "body": "local ok, err = memc:add($key, $value, $exptime, $flags)" + }, + "memcached.replace": { + "description": "Inserts an entry into memcached if and only if the key does exist.The value argument could also be a Lua table holding multiple Lua\nstrings that are supposed to be concatenated as a whole\n(without any delimiters). For example, memc:replace(\"dog\", {\"a \", {\"kind of\"}, \" animal\"})is functionally equivalent to memc:replace(\"dog\", \"a kind of animal\")The exptime parameter is optional and defaults to 0 (meaning never expires). The expiration time is in seconds.The flags parameter is optional, defaults to 0.In case of success, returns 1. In case of errors, returns nil with a string describing the error.", + "prefix": "memcached.replace", + "body": "local ok, err = memc:replace($key, $value, $exptime, $flags)" + }, + "memcached.append": { + "description": "Appends the value to an entry with the same key that already exists in memcached.The value argument could also be a Lua table holding multiple Lua\nstrings that are supposed to be concatenated as a whole\n(without any delimiters). For example, memc:append(\"dog\", {\"a \", {\"kind of\"}, \" animal\"})is functionally equivalent to memc:append(\"dog\", \"a kind of animal\")The exptime parameter is optional and defaults to 0 (meaning never expires). The expiration time is in seconds.The flags parameter is optional, defaults to 0.In case of success, returns 1. 
In case of errors, returns nil with a string describing the error.", + "prefix": "memcached.append", + "body": "local ok, err = memc:append($key, $value, $exptime, $flags)" + }, + "memcached.prepend": { + "description": "Prepends the value to an entry with the same key that already exists in memcached.The value argument could also be a Lua table holding multiple Lua\nstrings that are supposed to be concatenated as a whole\n(without any delimiters). For example, memc:prepend(\"dog\", {\"a \", {\"kind of\"}, \" animal\"})is functionally equivalent to memc:prepend(\"dog\", \"a kind of animal\")The exptime parameter is optional and defaults to 0 (meaning never expires). The expiration time is in seconds.The flags parameter is optional and defaults to 0.In case of success, returns 1. In case of errors, returns nil with a string describing the error.", + "prefix": "memcached.prepend", + "body": "local ok, err = memc:prepend($key, $value, $exptime, $flags)" + }, + "memcached.get": { + "description": "Get a single entry or multiple entries in the memcached server via a single key or a table of keys.Let us first discuss the case When the key is a single string.The key's value and associated flags value will be returned if the entry is found and no error happens.In case of errors, nil values will be turned for value and flags and a 3rd (string) value will also be returned for describing the error.If the entry is not found, then three nil values will be returned.Then let us discuss the case when the a Lua table of multiple keys are provided.In this case, a Lua table holding the key-result pairs will be always returned in case of success. Each value corresponding each key in the table is also a table holding two values, the key's value and the key's flags. 
If a key does not exist, then there is no responding entries in the results table.In case of errors, nil will be returned, and the second return value will be a string describing the error.", + "prefix": "memcached.get", + "body": "local value, flags, err = memc:get($key)" + }, + "memcached.gets": { + "description": "local results, err = memc:gets($keys)\nJust like the get method, but will also return the CAS unique value associated with the entry in addition to the key's value and flags.This method is usually used together with the cas method.", + "prefix": "memcached.gets", + "body": "local value, flags, cas_unique, err = memc:gets($key)" + }, + "memcached.cas": { + "description": "Just like the set method but does a check and set operation, which means \"store this data but\nonly if no one else has updated since I last fetched it.\"The cas_unique argument can be obtained from the gets method.", + "prefix": "memcached.cas", + "body": "local ok, err = memc:cas($key, $value, $cas_unique, $exptime?, $flags?)" + }, + "memcached.touch": { + "description": "Update the expiration time of an existing key.Returns 1 for success or nil with a string describing the error otherwise.This method was first introduced in the v0.11 release.", + "prefix": "memcached.touch", + "body": "local ok, err = memc:touch($key, $exptime)" + }, + "memcached.flush_all": { + "description": "Flushes (or invalidates) all the existing entries in the memcached server immediately (by default) or after the expiration\nspecified by the time argument (in seconds).In case of success, returns 1. In case of errors, returns nil with a string describing the error.", + "prefix": "memcached.flush_all", + "body": "local ok, err = memc:flush_all($time?)" + }, + "memcached.delete": { + "description": "Deletes the key from memcached immediately.The key to be deleted must already exist in memcached.In case of success, returns 1. 
In case of errors, returns nil with a string describing the error.", + "prefix": "memcached.delete", + "body": "local ok, err = memc:delete($key)" + }, + "memcached.incr": { + "description": "Increments the value of the specified key by the integer value specified in the delta argument.Returns the new value after incrementation in success, and nil with a string describing the error in case of failures.", + "prefix": "memcached.incr", + "body": "local new_value, err = memc:incr($key, $delta)" + }, + "memcached.decr": { + "description": "Decrements the value of the specified key by the integer value specified in the delta argument.Returns the new value after decrementation in success, and nil with a string describing the error in case of failures.", + "prefix": "memcached.decr", + "body": "local new_value, err = memc:decr($key, $value)" + }, + "memcached.stats": { + "description": "Returns memcached server statistics information with an optional args argument.In case of success, this method returns a lua table holding all of the lines of the output; in case of failures, it returns nil with a string describing the error.If the args argument is omitted, general server statistics is returned. Possible args argument values are items, sizes, slabs, among others.", + "prefix": "memcached.stats", + "body": "local lines, err = memc:stats($args?)" + }, + "memcached.version": { + "description": "Returns the server version number, like 1.2.8.In case of error, it returns nil with a string describing the error.", + "prefix": "memcached.version", + "body": "local version, err = memc:version($args?)" + }, + "memcached.quit": { + "description": "Tells the server to close the current memcached connection.Returns 1 in case of success and nil other wise. 
In case of failures, another string value will also be returned to describe the error.Generally you can just directly call the close method to achieve the same effect.", + "prefix": "memcached.quit", + "body": "local ok, err = memc:quit()" + }, + "memcached.verbosity": { + "description": "Sets the verbosity level used by the memcached server. The level argument should be given integers only.Returns 1 in case of success and nil otherwise. In case of failures, another string value will also be returned to describe the error.", + "prefix": "memcached.verbosity", + "body": "local ok, err = memc:verbosity($level)" + }, + "memcached.init_pipeline": { + "description": "Enable the Memcache pipelining mode. All subsequent calls to Memcache command methods will automatically get buffered and will be sent to the server in one run when the commit_pipeline method is called or get cancelled by calling the cancel_pipeline method.The optional parameter n is the buffer table size. Default value is 4.", + "prefix": "memcached.init_pipeline", + "body": "local err = memc:init_pipeline($n?)" + }, + "memcached.commit_pipeline": { + "description": "Quits the pipelining mode by committing all the cached Memcache queries to the remote server in a single run. All the replies for these queries will be collected automatically and are returned as if a big multi-bulk reply at the highest level.On success, this method returns a Lua table; on failure, it returns a Lua string describing the error.", + "prefix": "memcached.commit_pipeline", + "body": "local results, err = memc:commit_pipeline()" + }, + "memcached.cancel_pipeline": { + "description": "Quits the pipelining mode by discarding all existing buffered Memcache commands since the last call to the init_pipeline method.This method returns nothing and always succeeds.", + "prefix": "memcached.cancel_pipeline", + "body": "memc:cancel_pipeline()" + }, + "mysql.new": { + "description": "Creates a MySQL connection object. 
In case of failures, returns nil and a string describing the error.", + "prefix": "mysql.new", + "body": "local db, err = mysql:new()" + }, + "mysql.connect": { + "description": "Attempts to connect to the remote MySQL server.The options argument is a Lua table holding the following keys:\n\nhost\nthe host name for the MySQL server.\n\n\nport\nthe port that the MySQL server is listening on. Default to 3306.\n\n\npath\nthe path of the unix socket file listened by the MySQL server.\n\n\ndatabase\nthe MySQL database name.\n\n\nuser\nMySQL account name for login.\n\n\npassword\nMySQL account password for login (in clear text).\n\n\ncharset\nthe character set used on the MySQL connection, which can be different from the default charset setting.\nThe following values are accepted: big5, dec8, cp850, hp8, koi8r, latin1, latin2,\nswe7, ascii, ujis, sjis, hebrew, tis620, euckr, koi8u, gb2312, greek,\ncp1250, gbk, latin5, armscii8, utf8, ucs2, cp866, keybcs2, macce,\nmacroman, cp852, latin7, utf8mb4, cp1251, utf16, utf16le, cp1256,\ncp1257, utf32, binary, geostd8, cp932, eucjpms, gb18030.\n\n\nmax_packet_size\nthe upper limit for the reply packets sent from the MySQL server (default to 1MB).\n\n\nssl\nIf set to true, then uses SSL to connect to MySQL (default to false). If the MySQL\nserver does not have SSL support\n(or just disabled), the error string \"ssl disabled on server\" will be returned.\n\n\nssl_verify\nIf set to true, then verifies the validity of the server SSL certificate (default to false).\nNote that you need to configure the lua_ssl_trusted_certificate\nto specify the CA (or server) certificate used by your MySQL server. You may also\nneed to configure lua_ssl_verify_depth\naccordingly.\n\n\npool\nthe name for the MySQL connection pool. if omitted, an ambiguous pool name will be generated automatically with the string template user:database:host:port or user:database:path. 
(this option was first introduced in v0.08.)\n\n\npool_size\nSpecifies the size of the connection pool. If omitted and no backlog option was provided, no pool will be created. If omitted but backlog was provided, the pool will be created with a default size equal to the value of the lua_socket_pool_size directive. The connection pool holds up to pool_size alive connections ready to be reused by subsequent calls to connect, but note that there is no upper limit to the total number of opened connections outside of the pool. If you need to restrict the total number of opened connections, specify the backlog option. When the connection pool would exceed its size limit, the least recently used (kept-alive) connection already in the pool will be closed to make room for the current connection. Note that the cosocket connection pool is per Nginx worker process rather than per Nginx server instance, so the size limit specified here also applies to every single Nginx worker process. Also note that the size of the connection pool cannot be changed once it has been created. Note that at least ngx_lua 0.10.14 is required to use this options.\n\n\nbacklog\nIf specified, this module will limit the total number of opened connections for this pool. No more connections than pool_size can be opened for this pool at any time. If the connection pool is full, subsequent connect operations will be queued into a queue equal to this option's value (the \"backlog\" queue). If the number of queued connect operations is equal to backlog, subsequent connect operations will fail and return nil plus the error string \"too many waiting connect operations\". The queued connect operations will be resumed once the number of connections in the pool is less than pool_size. The queued connect operation will abort once they have been queued for more than connect_timeout, controlled by set_timeout, and will return nil plus the error string \"timeout\". 
Note that at least ngx_lua 0.10.14 is required to use this options.\n\n\ncompact_arrays\nwhen this option is set to true, then the query and read_result methods will return the array-of-arrays structure for the resultset, rather than the default array-of-hashes structure.\n\nBefore actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method.", + "prefix": "mysql.connect", + "body": "local ok, err, errcode, sqlstate = db:connect($options)" + }, + "mysql.set_timeout": { + "description": "Sets the timeout (in ms) protection for subsequent operations, including the connect method.", + "prefix": "mysql.set_timeout", + "body": "db:set_timeout($time)" + }, + "mysql.set_keepalive": { + "description": "Puts the current MySQL connection immediately into the ngx_lua cosocket connection pool.You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process.In case of success, returns 1. In case of errors, returns nil with a string describing the error.Only call this method in the place you would have called the close method instead. Calling this method will immediately turn the current resty.mysql object into the closed state. Any subsequent operations other than connect() on the current objet will return the closed error.", + "prefix": "mysql.set_keepalive", + "body": "local ok, err = db:set_keepalive($max_idle_timeout, $pool_size)" + }, + "mysql.get_reused_times": { + "description": "This method returns the (successfully) reused times for the current connection. In case of error, it returns nil and a string describing the error.If the current connection does not come from the built-in connection pool, then this method always returns 0, that is, the connection has never been reused (yet). 
If the connection comes from the connection pool, then the return value is always non-zero. So this method can also be used to determine if the current connection comes from the pool.", + "prefix": "mysql.get_reused_times", + "body": "local times, err = db:get_reused_times()" + }, + "mysql.close": { + "description": "Closes the current mysql connection and returns the status.In case of success, returns 1. In case of errors, returns nil with a string describing the error.", + "prefix": "mysql.close", + "body": "local ok, err = db:close()" + }, + "mysql.send_query": { + "description": "Sends the query to the remote MySQL server without waiting for its replies.Returns the bytes successfully sent out in success and otherwise returns nil and a string describing the error.You should use the read_result method to read the MySQL replies afterwards.", + "prefix": "mysql.send_query", + "body": "local bytes, err = db:send_query($query)" + }, + "mysql.read_result": { + "description": "local res, err, errcode, sqlstate = db:read_result($nrows)\nReads in one result returned from the MySQL server.It returns a Lua table (res) describing the MySQL OK packet or result set packet for the query result.For queries corresponding to a result set, it returns an array holding all the rows. Each row holds key-value pairs for each data fields. For instance, {\n { name = \"Bob\", age = 32, phone = ngx.null },\n { name = \"Marry\", age = 18, phone = \"10666372\"}\n }For queries that do not correspond to a result set, it returns a Lua table like this: {\n insert_id = 0,\n server_status = 2,\n warning_count = 1,\n affected_rows = 32,\n message = nil\n }If more results are following the current result, a second err return value will be given the string again. One should always check this (second) return value and if it is again, then she should call this method again to retrieve more results. 
This usually happens when the original query contains multiple statements (separated by semicolon in the same query string) or calling a MySQL procedure. See also Multi-Resultset Support.In case of errors, this method returns at most 4 values: nil, err, errcode, and sqlstate. The err return value contains a string describing the error, the errcode return value holds the MySQL error code (a numerical value), and finally, the sqlstate return value contains the standard SQL error code that consists of 5 characters. Note that, the errcode and sqlstate might be nil if MySQL does not return them.The optional argument nrows can be used to specify an approximate number of rows for the result set. This value can be used\nto pre-allocate space in the resulting Lua table for the result set. By default, it takes the value 4.", + "prefix": "mysql.read_result", + "body": "local res, err, errcode, sqlstate = db:read_result()" + }, + "mysql.query": { + "description": "local res, err, errcode, sqlstate = db:query($query, $nrows)\nThis is a shortcut for combining the send_query call and the first read_result call.You should always check if the err return value is again in case of success because this method will only call read_result only once for you. See also Multi-Resultset Support.", + "prefix": "mysql.query", + "body": "local res, err, errcode, sqlstate = db:query($query)" + }, + "mysql.server_ver": { + "description": "Returns the MySQL server version string, like \"5.1.64\".You should only call this method after successfully connecting to a MySQL server, otherwise nil will be returned.", + "prefix": "mysql.server_ver", + "body": "local str = db:server_ver()" + }, + "mysql.set_compact_arrays": { + "description": "Sets whether to use the \"compact-arrays\" structure for the resultsets returned by subsequent queries. 
See the compact_arrays option for the connect method for more details.This method was first introduced in the v0.09 release.", + "prefix": "mysql.set_compact_arrays", + "body": "db:set_compact_arrays($boolean)" + }, + "redis.new": { + "description": "Creates a redis object. In case of failures, returns nil and a string describing the error.", + "prefix": "redis.new", + "body": "local red, err = redis:new()" + }, + "redis.connect": { + "description": "local ok, err = red:connect(\"unix:/path/to/unix.sock\", options_table?)\nAttempts to connect to the remote host and port that the redis server is listening to or a local unix domain socket file listened by the redis server.Before actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method.The optional options_table argument is a Lua table holding the following keys:\n\nssl\nIf set to true, then uses SSL to connect to redis (defaults to false).\n\n\nssl_verify\nIf set to true, then verifies the validity of the server SSL certificate (defaults to false). Note that you need to configure the lua_ssl_trusted_certificate to specify the CA (or server) certificate used by your redis server. You may also need to configure lua_ssl_verify_depth accordingly.\n\n\nserver_name\nSpecifies the server name for the new TLS extension Server Name Indication (SNI) when connecting over SSL.\n\n\npool\nSpecifies a custom name for the connection pool being used. If omitted, then the connection pool name will be generated from the string template : or .\n\n\npool_size\nSpecifies the size of the connection pool. If omitted and no backlog option was provided, no pool will be created. If omitted but backlog was provided, the pool will be created with a default size equal to the value of the lua_socket_pool_size directive. 
The connection pool holds up to pool_size alive connections ready to be reused by subsequent calls to connect, but note that there is no upper limit to the total number of opened connections outside of the pool. If you need to restrict the total number of opened connections, specify the backlog option. When the connection pool would exceed its size limit, the least recently used (kept-alive) connection already in the pool will be closed to make room for the current connection. Note that the cosocket connection pool is per Nginx worker process rather than per Nginx server instance, so the size limit specified here also applies to every single Nginx worker process. Also note that the size of the connection pool cannot be changed once it has been created. Note that at least ngx_lua 0.10.14 is required to use this options.\n\n\nbacklog\nIf specified, this module will limit the total number of opened connections for this pool. No more connections than pool_size can be opened for this pool at any time. If the connection pool is full, subsequent connect operations will be queued into a queue equal to this option's value (the \"backlog\" queue). If the number of queued connect operations is equal to backlog, subsequent connect operations will fail and return nil plus the error string \"too many waiting connect operations\". The queued connect operations will be resumed once the number of connections in the pool is less than pool_size. The queued connect operation will abort once they have been queued for more than connect_timeout, controlled by set_timeout, and will return nil plus the error string \"timeout\". 
Note that at least ngx_lua 0.10.14 is required to use this options.\n\n", + "prefix": "redis.connect", + "body": "local ok, err = red:connect($host, $port, $options_table?)" + }, + "redis.set_timeout": { + "description": "Sets the timeout (in ms) protection for subsequent operations, including the connect method.Since version v0.28 of this module, it is advised that\nset_timeouts be used in favor of this method.", + "prefix": "redis.set_timeout", + "body": "red:set_timeout($time)" + }, + "redis.set_timeouts": { + "description": "Respectively sets the connect, send, and read timeout thresholds (in ms), for\nsubsequent socket operations. Setting timeout thresholds with this method\noffers more granularity than set_timeout. As such, it is\npreferred to use set_timeouts over\nset_timeout.This method was added in the v0.28 release.", + "prefix": "redis.set_timeouts", + "body": "red:set_timeouts($connect_timeout, $send_timeout, $read_timeout)" + }, + "redis.set_keepalive": { + "description": "Puts the current Redis connection immediately into the ngx_lua cosocket connection pool.You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process.In case of success, returns 1. In case of errors, returns nil with a string describing the error.Only call this method in the place you would have called the close method instead. Calling this method will immediately turn the current redis object into the closed state. Any subsequent operations other than connect() on the current object will return the closed error.", + "prefix": "redis.set_keepalive", + "body": "local ok, err = red:set_keepalive($max_idle_timeout, $pool_size)" + }, + "redis.get_reused_times": { + "description": "This method returns the (successfully) reused times for the current connection. 
In case of error, it returns nil and a string describing the error.If the current connection does not come from the built-in connection pool, then this method always returns 0, that is, the connection has never been reused (yet). If the connection comes from the connection pool, then the return value is always non-zero. So this method can also be used to determine if the current connection comes from the pool.", + "prefix": "redis.get_reused_times", + "body": "local times, err = red:get_reused_times()" + }, + "redis.close": { + "description": "Closes the current redis connection and returns the status.In case of success, returns 1. In case of errors, returns nil with a string describing the error.", + "prefix": "redis.close", + "body": "local ok, err = red:close()" + }, + "redis.init_pipeline": { + "description": "red:init_pipeline($n)\nEnable the redis pipelining mode. All subsequent calls to Redis command methods will automatically get cached and will send to the server in one run when the commit_pipeline method is called or get cancelled by calling the cancel_pipeline method.This method always succeeds.If the redis object is already in the Redis pipelining mode, then calling this method will discard existing cached Redis queries.The optional n argument specifies the (approximate) number of commands that are going to add to this pipeline, which can make things a little faster.", + "prefix": "redis.init_pipeline", + "body": "red:init_pipeline()" + }, + "redis.commit_pipeline": { + "description": "Quits the pipelining mode by committing all the cached Redis queries to the remote server in a single run. 
All the replies for these queries will be collected automatically and are returned as if a big multi-bulk reply at the highest level.This method returns nil and a Lua string describing the error upon failures.", + "prefix": "redis.commit_pipeline", + "body": "local results, err = red:commit_pipeline()" + }, + "redis.cancel_pipeline": { + "description": "Quits the pipelining mode by discarding all existing cached Redis commands since the last call to the init_pipeline method.This method always succeeds.If the redis object is not in the Redis pipelining mode, then this method is a no-op.", + "prefix": "redis.cancel_pipeline", + "body": "red:cancel_pipeline()" + }, + "redis.hmset": { + "description": "local res, err = red:hmset($myhash, { field1 \nSpecial wrapper for the Redis \"hmset\" command.When there are only three arguments (including the \"red\" object\nitself), then the last argument must be a Lua table holding all the field/value pairs.", + "prefix": "redis.hmset", + "body": "local res, err = red:hmset($myhash, $field1, $value1, $field2, $value2, ...)" + }, + "redis.array_to_hash": { + "description": "Auxiliary function that converts an array-like Lua table into a hash-like table.This method was first introduced in the v0.11 release.", + "prefix": "redis.array_to_hash", + "body": "local hash = red:array_to_hash($array)" + }, + "redis.read_reply": { + "description": "Reading a reply from the redis server. 
This method is mostly useful for the Redis Pub/Sub API, for example, local cjson = require \"cjson\"\n local redis = require \"resty.redis\"\n\n local red = redis:new()\n local red2 = redis:new()\n\n red:set_timeouts(1000, 1000, 1000) -- 1 sec\n red2:set_timeouts(1000, 1000, 1000) -- 1 sec\n\n local ok, err = red:connect(\"127.0.0.1\", 6379)\n if not ok then\n ngx.say(\"1: failed to connect: \", err)\n return\n end\n\n ok, err = red2:connect(\"127.0.0.1\", 6379)\n if not ok then\n ngx.say(\"2: failed to connect: \", err)\n return\n end\n\n local res, err = red:subscribe(\"dog\")\n if not res then\n ngx.say(\"1: failed to subscribe: \", err)\n return\n end\n\n ngx.say(\"1: subscribe: \", cjson.encode(res))\n\n res, err = red2:publish(\"dog\", \"Hello\")\n if not res then\n ngx.say(\"2: failed to publish: \", err)\n return\n end\n\n ngx.say(\"2: publish: \", cjson.encode(res))\n\n res, err = red:read_reply()\n if not res then\n ngx.say(\"1: failed to read reply: \", err)\n return\n end\n\n ngx.say(\"1: receive: \", cjson.encode(res))\n\n red:close()\n red2:close()Running this example gives the output like this:1: subscribe: [\"subscribe\",\"dog\",1]\n2: publish: 1\n1: receive: [\"message\",\"dog\",\"Hello\"]\nThe following class methods are provieded:", + "prefix": "redis.read_reply", + "body": "local res, err = red:read_reply()" + }, + "redis.add_commands": { + "description": "WARNING this method is now deprecated since we already do automatic Lua method generation\nfor any redis commands the user attempts to use and thus we no longer need this.Adds new redis commands to the resty.redis class. 
Here is an example: local redis = require \"resty.redis\"\n\n redis.add_commands(\"foo\", \"bar\")\n\n local red = redis:new()\n\n red:set_timeouts(1000, 1000, 1000) -- 1 sec\n\n local ok, err = red:connect(\"127.0.0.1\", 6379)\n if not ok then\n ngx.say(\"failed to connect: \", err)\n return\n end\n\n local res, err = red:foo(\"a\")\n if not res then\n ngx.say(\"failed to foo: \", err)\n end\n\n res, err = red:bar()\n if not res then\n ngx.say(\"failed to bar: \", err)\n end", + "prefix": "redis.add_commands", + "body": "local hash = redis.add_commands($cmd_name1, $cmd_name2, ...)" + }, + "dns.new": { + "description": "Creates a dns.resolver object. Returns nil and a message string on error.It accepts a opts table argument. The following options are supported:\n\nnameservers\na list of nameservers to be used. Each nameserver entry can be either a single hostname string or a table holding both the hostname string and the port number. The nameserver is picked up by a simple round-robin algorithm for each query method call. This option is required.\n\n\nretrans\nthe total number of times of retransmitting the DNS request when receiving a DNS response times out according to the timeout setting. Defaults to 5 times. When trying to retransmit the query, the next nameserver according to the round-robin algorithm will be picked up.\n\n\ntimeout\nthe time in milliseconds for waiting for the response for a single attempt of request transmission. note that this is ''not'' the maximal total waiting time before giving up, the maximal total waiting time can be calculated by the expression timeout x retrans. The timeout setting can also be changed by calling the set_timeout method. The default timeout setting is 2000 milliseconds, or 2 seconds.\n\n\nno_recurse\na boolean flag controls whether to disable the \"recursion desired\" (RD) flag in the UDP request. 
Defaults to false.\n\n\nno_random\na boolean flag controls whether to randomly pick the nameserver to query first, if true will always start with the first nameserver listed. Defaults to false.\n\n", + "prefix": "dns.new", + "body": "local r, err = class:new($opts)" + }, + "dns.destroy": { + "description": "Destroy the dns.resolver object by releasing all the internal occupied resources.", + "prefix": "dns.destroy", + "body": "r:destroy()" + }, + "dns.query": { + "description": "Performs a DNS standard query to the nameservers specified by the new method,\nand returns all the answer records in an array-like Lua table. In case of errors, it will\nreturn nil and a string describing the error instead.If the server returns a non-zero error code, the fields errcode and errstr will be set accordingly in the Lua table returned.Each entry in the answers returned table value is also a hash-like Lua table\nwhich usually takes some of the following fields:\n\nname\nThe resource record name.\n\n\ntype\nThe current resource record type, possible values are 1 (TYPE_A), 5 (TYPE_CNAME), 28 (TYPE_AAAA), and any other values allowed by RFC 1035.\n\n\naddress\nThe IPv4 or IPv6 address in their textual representations when the resource record type is either 1 (TYPE_A) or 28 (TYPE_AAAA), respectively. Successive 16-bit zero groups in IPv6 addresses will not be compressed by default, if you want that, you need to call the compress_ipv6_addr static method instead.\n\n\nsection\nThe identifier of the section that the current answer record belongs to. Possible values are 1 (SECTION_AN), 2 (SECTION_NS), and 3 (SECTION_AR).\n\n\ncname\nThe (decoded) record data value for CNAME resource records. 
Only present for CNAME records.\n\n\nttl\nThe time-to-live (TTL) value in seconds for the current resource record.\n\n\nclass\nThe current resource record class, possible values are 1 (CLASS_IN) or any other values allowed by RFC 1035.\n\n\npreference\nThe preference integer number for MX resource records. Only present for MX type records.\n\n\nexchange\nThe exchange domain name for MX resource records. Only present for MX type records.\n\n\nnsdname\nA domain-name which specifies a host which should be authoritative for the specified class and domain. Usually present for NS type records.\n\n\nrdata\nThe raw resource data (RDATA) for resource records that are not recognized.\n\n\ntxt\nThe record value for TXT records. When there is only one character string in this record, then this field takes a single Lua string. Otherwise this field takes a Lua table holding all the strings.\n\n\nptrdname\nThe record value for PTR records.\n\nThis method also takes an optional options argument table, which takes the following fields:\n\nqtype\nThe type of the question. Possible values are 1 (TYPE_A), 5 (TYPE_CNAME), 28 (TYPE_AAAA), or any other QTYPE value specified by RFC 1035 and RFC 3596. Default to 1 (TYPE_A).\n\n\nauthority_section\nWhen set to a true value, the answers return value includes the Authority section of the DNS response. Default to false.\n\n\nadditional_section\nWhen set to a true value, the answers return value includes the Additional section of the DNS response. Default to false.\n\nThe optional parameter tries can be provided as an empty table, and will be\nreturned as a third result. The table will be an array with the error message\nfor each (if any) failed try.When data truncation happens, the resolver will automatically retry using the TCP transport mode\nto query the current nameserver. All TCP connections are short lived.", + "prefix": "dns.query", + "body": "local answers, err, tries? 
= r:query($name, $options?, $tries?)" + }, + "dns.tcp_query": { + "description": "Just like the query method, but enforce the TCP transport mode instead of UDP.All TCP connections are short lived.Here is an example: local resolver = require \"resty.dns.resolver\"\n\n local r, err = resolver:new{\n nameservers = { \"8.8.8.8\" }\n }\n if not r then\n ngx.say(\"failed to instantiate resolver: \", err)\n return\n end\n\n local ans, err = r:tcp_query(\"www.google.com\", { qtype = r.TYPE_A })\n if not ans then\n ngx.say(\"failed to query: \", err)\n return\n end\n\n local cjson = require \"cjson\"\n ngx.say(\"records: \", cjson.encode(ans))", + "prefix": "dns.tcp_query", + "body": "local answers, err = r:tcp_query($name, $options?)" + }, + "dns.set_timeout": { + "description": "Overrides the current timeout setting by the time argument in milliseconds for all the nameserver peers.", + "prefix": "dns.set_timeout", + "body": "r:set_timeout($time)" + }, + "dns.compress_ipv6_addr": { + "description": "Compresses the successive 16-bit zero groups in the textual format of the IPv6 address.For example, local resolver = require \"resty.dns.resolver\"\n local compress = resolver.compress_ipv6_addr\n local new_addr = compress(\"FF01:0:0:0:0:0:0:101\")will yield FF01::101 in the new_addr return value.", + "prefix": "dns.compress_ipv6_addr", + "body": "local compressed = resty.dns.resolver.compress_ipv6_addr($address)" + }, + "dns.expand_ipv6_addr": { + "description": "Expands the successive 16-bit zero groups in the textual format of the IPv6 address.For example, local resolver = require \"resty.dns.resolver\"\n local expand = resolver.expand_ipv6_addr\n local new_addr = expand(\"FF01::101\")will yield FF01:0:0:0:0:0:0:101 in the new_addr return value.", + "prefix": "dns.expand_ipv6_addr", + "body": "local expanded = resty.dns.resolver.expand_ipv6_addr($address)" + }, + "dns.arpa_str": { + "description": "Generates the reverse domain name for PTR lookups for both IPv4 and IPv6 
addresses. Compressed IPv6 addresses\nwill be automatically expanded.For example, local resolver = require \"resty.dns.resolver\"\n local ptr4 = resolver.arpa_str(\"1.2.3.4\")\n local ptr6 = resolver.arpa_str(\"FF01::101\")will yield 4.3.2.1.in-addr.arpa for ptr4 and 1.0.1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.F.F.ip6.arpa for ptr6.", + "prefix": "dns.arpa_str", + "body": "local arpa_record = resty.dns.resolver.arpa_str($address)" + }, + "dns.reverse_query": { + "description": "Performs a PTR lookup for both IPv4 and IPv6 addresses. This function is basically a wrapper for the query command\nwhich uses the arpa_str command to convert the IP address on the fly.", + "prefix": "dns.reverse_query", + "body": "local answers, err = r:reverse_query($address)" + }, + "websocket.new": { + "description": "local wb, err = server:new($opts)\nPerforms the websocket handshake process on the server side and returns a WebSocket server object.In case of error, it returns nil and a string describing the error.An optional options table can be specified. The following options are as follows:\n\nmax_payload_len\nSpecifies the maximal length of payload allowed when sending and receiving WebSocket frames.\n\n\nsend_masked\nSpecifies whether to send out masked WebSocket frames. When it is true, masked frames are always sent. Default to false.\n\n\ntimeout\nSpecifies the network timeout threshold in milliseconds. You can change this setting later via the set_timeout method call. 
Note that this timeout setting does not affect the HTTP response header sending process for the websocket handshake; you need to configure the send_timeout directive at the same time.\n\n", + "prefix": "websocket.new", + "body": "local wb, err = server:new()" + }, + "websocket.set_timeout": { + "description": "Sets the timeout delay (in milliseconds) for the network-related operations.", + "prefix": "websocket.set_timeout", + "body": "wb:set_timeout($ms)" + }, + "websocket.send_text": { + "description": "Sends the text argument out as an unfragmented data frame of the text type. Returns the number of bytes that have actually been sent on the TCP level.In case of errors, returns nil and a string describing the error.", + "prefix": "websocket.send_text", + "body": "local bytes, err = wb:send_text($text)" + }, + "websocket.send_binary": { + "description": "Sends the data argument out as an unfragmented data frame of the binary type. Returns the number of bytes that have actually been sent on the TCP level.In case of errors, returns nil and a string describing the error.", + "prefix": "websocket.send_binary", + "body": "local bytes, err = wb:send_binary($data)" + }, + "websocket.send_ping": { + "description": "local bytes, err = wb:send_ping($msg)\nSends out a ping frame with an optional message specified by the msg argument. Returns the number of bytes that have actually been sent on the TCP level.In case of errors, returns nil and a string describing the error.Note that this method does not wait for a pong frame from the remote end.", + "prefix": "websocket.send_ping", + "body": "local bytes, err = wb:send_ping()" + }, + "websocket.send_pong": { + "description": "local bytes, err = wb:send_pong($msg)\nSends out a pong frame with an optional message specified by the msg argument. 
Returns the number of bytes that have actually been sent on the TCP level.In case of errors, returns nil and a string describing the error.", + "prefix": "websocket.send_pong", + "body": "local bytes, err = wb:send_pong()" + }, + "websocket.send_close": { + "description": "local bytes, err = wb:send_close($code, $msg)\nSends out a close frame with an optional status code and a message.In case of errors, returns nil and a string describing the error.For a list of valid status code, see the following document:http://tools.ietf.org/html/rfc6455#section-7.4.1Note that this method does not wait for a close frame from the remote end.", + "prefix": "websocket.send_close", + "body": "local bytes, err = wb:send_close()" + }, + "websocket.send_frame": { + "description": "Sends out a raw websocket frame by specifying the fin field (boolean value), the opcode, and the payload.For a list of valid opcode, seehttp://tools.ietf.org/html/rfc6455#section-5.2In case of errors, returns nil and a string describing the error.To control the maximal payload length allowed, you can pass the max_payload_len option to the new constructor.To control whether to send masked frames, you can pass true to the send_masked option in the new constructor method. By default, unmasked frames are sent.", + "prefix": "websocket.send_frame", + "body": "local bytes, err = wb:send_frame($fin, $opcode, $payload)" + }, + "websocket.recv_frame": { + "description": "Receives a WebSocket frame from the wire.In case of an error, returns two nil values and a string describing the error.The second return value is always the frame type, which could be one of continuation, text, binary, close, ping, pong, or nil (for unknown types).For close frames, returns 3 values: the extra status message (which could be an empty string), the string \"close\", and a Lua number for the status code (if any). 
For possible closing status codes, seehttp://tools.ietf.org/html/rfc6455#section-7.4.1For other types of frames, just returns the payload and the type.For fragmented frames, the err return value is the Lua string \"again\".", + "prefix": "websocket.recv_frame", + "body": "local data, typ, err = wb:recv_frame()" + }, + "websocket.client:new": { + "description": "local wb, err = client:new($opts)\nInstantiates a WebSocket client object.In case of error, it returns nil and a string describing the error.An optional options table can be specified. The following options are as follows:\n\nmax_payload_len\nSpecifies the maximal length of payload allowed when sending and receiving WebSocket frames.\n\n\nsend_unmasked\nSpecifies whether to send out an unmasked WebSocket frames. When it is true, unmasked frames are always sent. Default to false. RFC 6455 requires, however, that the client MUST send masked frames to the server, so never set this option to true unless you know what you are doing.\n\n\ntimeout\nSpecifies the default network timeout threshold in milliseconds. You can change this setting later via the set_timeout method call.\n\n", + "prefix": "websocket.client:new", + "body": "local wb, err = client:new()" + }, + "websocket.client:connect": { + "description": "local ok, err = wb:connect(\"wss://:/\")\nlocal ok, err = wb:connect(\"ws://:/\", options)\nlocal ok, err = wb:connect(\"wss://:/\", options)\nConnects to the remote WebSocket service port and performs the websocket handshake process on the client side.Before actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method.An optional Lua table can be specified as the last argument to this method to specify various connect options:\n\nprotocols\nSpecifies all the subprotocols used for the current WebSocket session. 
It could be a Lua table holding all the subprotocol names or just a single Lua string.\n\n\norigin\nSpecifies the value of the Origin request header.\n\n\npool\nSpecifies a custom name for the connection pool being used. If omitted, then the connection pool name will be generated from the string template :.\n\n\npool_size\nspecify the size of the connection pool. If omitted and no\nbacklog option was provided, no pool will be created. If omitted\nbut backlog was provided, the pool will be created with a default\nsize equal to the value of the lua_socket_pool_size\ndirective.\nThe connection pool holds up to pool_size alive connections\nready to be reused by subsequent calls to connect, but\nnote that there is no upper limit to the total number of opened connections\noutside of the pool. If you need to restrict the total number of opened\nconnections, specify the backlog option.\nWhen the connection pool would exceed its size limit, the least recently used\n(kept-alive) connection already in the pool will be closed to make room for\nthe current connection.\nNote that the cosocket connection pool is per Nginx worker process rather\nthan per Nginx server instance, so the size limit specified here also applies\nto every single Nginx worker process. Also note that the size of the connection\npool cannot be changed once it has been created.\nThis option was first introduced in the v0.10.14 release.\n\n\nbacklog\nif specified, this module will limit the total number of opened connections\nfor this pool. No more connections than pool_size can be opened\nfor this pool at any time. 
If the connection pool is full, subsequent\nconnect operations will be queued into a queue equal to this option's\nvalue (the \"backlog\" queue).\nIf the number of queued connect operations is equal to backlog,\nsubsequent connect operations will fail and return nil plus the\nerror string \"too many waiting connect operations\".\nThe queued connect operations will be resumed once the number of connections\nin the pool is less than pool_size.\nThe queued connect operation will abort once they have been queued for more\nthan connect_timeout, controlled by\nsettimeouts, and will return nil plus\nthe error string \"timeout\".\nThis option was first introduced in the v0.10.14 release.\n\n\nssl_verify\nSpecifies whether to perform SSL certificate verification during the\nSSL handshake if the wss:// scheme is used.\n\n\nheaders\nSpecifies custom headers to be sent in the handshake request. The table is expected to contain strings in the format {\"a-header: a header value\", \"another-header: another header value\"}.\n\n\nclient_cert\nSpecifies a client certificate chain cdata object that will be used while TLS handshaking with remote server.\nThese objects can be created using\nngx.ssl.parse_pem_cert\nfunction provided by lua-resty-core.\nNote that specifying the client_cert option requires corresponding client_priv_key be provided too. See below.\n\n\nclient_priv_key\nSpecifies a private key corresponds to the client_cert option above.\nThese objects can be created using\nngx.ssl.parse_pem_priv_key\nfunction provided by lua-resty-core.\n\nThe SSL connection mode (wss://) requires at least ngx_lua 0.9.11 or OpenResty 1.7.4.1.", + "prefix": "websocket.client:connect", + "body": "local ok, err = wb:connect(\"ws://:/\")" + }, + "websocket.client:close": { + "description": "Closes the current WebSocket connection. 
If no close frame is sent yet, then the close frame will be automatically sent.", + "prefix": "websocket.client:close", + "body": "local ok, err = wb:close()" + }, + "websocket.client:set_keepalive": { + "description": "Puts the current WebSocket connection immediately into the ngx_lua cosocket connection pool.You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process.In case of success, returns 1. In case of errors, returns nil with a string describing the error.Only call this method in the place you would have called the close method instead. Calling this method will immediately turn the current WebSocket object into the closed state. Any subsequent operations other than connect() on the current objet will return the closed error.", + "prefix": "websocket.client:set_keepalive", + "body": "local ok, err = wb:set_keepalive($max_idle_timeout, $pool_size)" + }, + "websocket.client:set_timeout": { + "description": "Identical to the set_timeout method of the resty.websocket.server objects.", + "prefix": "websocket.client:set_timeout", + "body": "wb:set_timeout($ms)" + }, + "websocket.client:send_text": { + "description": "Identical to the send_text method of the resty.websocket.server objects.", + "prefix": "websocket.client:send_text", + "body": "local bytes, err = wb:send_text($text)" + }, + "websocket.client:send_binary": { + "description": "Identical to the send_binary method of the resty.websocket.server objects.", + "prefix": "websocket.client:send_binary", + "body": "local bytes, err = wb:send_binary($data)" + }, + "websocket.client:send_ping": { + "description": "local bytes, err = wb:send_ping($msg)\nIdentical to the send_ping method of the resty.websocket.server objects.", + "prefix": "websocket.client:send_ping", + "body": "local bytes, err = wb:send_ping()" + }, + "websocket.client:send_pong": { + "description": "local bytes, err = wb:send_pong($msg)\nIdentical to the send_pong 
method of the resty.websocket.server objects.", + "prefix": "websocket.client:send_pong", + "body": "local bytes, err = wb:send_pong()" + }, + "websocket.client:send_close": { + "description": "local bytes, err = wb:send_close($code, $msg)\nIdentical to the send_close method of the resty.websocket.server objects.", + "prefix": "websocket.client:send_close", + "body": "local bytes, err = wb:send_close()" + }, + "websocket.client:send_frame": { + "description": "Identical to the send_frame method of the resty.websocket.server objects.To control whether to send unmasked frames, you can pass true to the send_unmasked option in the new constructor method. By default, masked frames are sent.", + "prefix": "websocket.client:send_frame", + "body": "local bytes, err = wb:send_frame($fin, $opcode, $payload)" + }, + "websocket.client:recv_frame": { + "description": "Identical to the recv_frame method of the resty.websocket.server objects.", + "prefix": "websocket.client:recv_frame", + "body": "local data, typ, err = wb:recv_frame()" + }, + "websocket.protocol.recv_frame": { + "description": "Receives a WebSocket frame from the wire.", + "prefix": "websocket.protocol.recv_frame", + "body": "local data, typ, err = protocol.recv_frame($socket, $max_payload_len, $force_masking)" + }, + "websocket.protocol.build_frame": { + "description": "Builds a raw WebSocket frame.", + "prefix": "websocket.protocol.build_frame", + "body": "local frame = protocol.build_frame($fin, $opcode, $payload_len, $payload, $masking)" + }, + "websocket.protocol.send_frame": { + "description": "Sends a raw WebSocket frame.", + "prefix": "websocket.protocol.send_frame", + "body": "local bytes, err = protocol.send_frame($socket, $fin, $opcode, $payload, $max_payload_len, $masking)" + }, + "lock.new": { + "description": "local obj, err = lock:new($dict_name, $opts)\nCreates a new lock object instance by specifying the shared dictionary name (created by lua_shared_dict) and an optional options table opts.In 
case of failure, returns nil and a string describing the error.The options table accepts the following options:\nexptime\nSpecifies expiration time (in seconds) for the lock entry in the shared memory dictionary. You can specify up to 0.001 seconds. Default to 30 (seconds). Even if the invoker does not call unlock or the object holding the lock is not GC'd, the lock will be released after this time. So deadlock won't happen even when the worker process holding the lock crashes.\ntimeout\nSpecifies the maximal waiting time (in seconds) for the lock method calls on the current object instance. You can specify up to 0.001 seconds. Default to 5 (seconds). This option value cannot be bigger than exptime. This timeout is to prevent a lock method call from waiting forever.\nYou can specify 0 to make the lock method return immediately without waiting if it cannot acquire the lock right away.\nstep\nSpecifies the initial step (in seconds) of sleeping when waiting for the lock. Default to 0.001 (seconds). When the lock method is waiting on a busy lock, it sleeps by steps. The step size is increased by a ratio (specified by the ratio option) until reaching the step size limit (specified by the max_step option).\nratio\nSpecifies the step increasing ratio. Default to 2, that is, the step size doubles at each waiting iteration.\nmax_step\nSpecifies the maximal step size (i.e., sleep interval, in seconds) allowed. See also the step and ratio options). Default to 0.5 (seconds).\n", + "prefix": "lock.new", + "body": "local obj, err = lock:new($dict_name)" + }, + "lock.lock": { + "description": "Tries to lock a key across all the Nginx worker processes in the current Nginx server instance. Different keys are different locks.The length of the key string must not be larger than 65535 bytes.Returns the waiting time (in seconds) if the lock is successfully acquired. 
Otherwise returns nil and a string describing the error.The waiting time is not from the wallclock, but rather is from simply adding up all the waiting \"steps\". A nonzero elapsed return value indicates that someone else has just hold this lock. But a zero return value cannot gurantee that no one else has just acquired and released the lock.When this method is waiting on fetching the lock, no operating system threads will be blocked and the current Lua \"light thread\" will be automatically yielded behind the scene.It is strongly recommended to always call the unlock() method to actively release the lock as soon as possible.If the unlock() method is never called after this method call, the lock will get released when\nthe current resty.lock object instance is collected automatically by the Lua GC.\nthe exptime for the lock entry is reached.\nCommon errors for this method call is\n\"timeout\"\n: The timeout threshold specified by the timeout option of the new method is exceeded.\n\"locked\"\n: The current resty.lock object instance is already holding a lock (not necessarily of the same key).\nOther possible errors are from ngx_lua's shared dictionary API.It is required to create different resty.lock instances for multiple simultaneous locks (i.e., those around different keys).", + "prefix": "lock.lock", + "body": "local elapsed, err = obj:lock($key)" + }, + "lock.unlock": { + "description": "Releases the lock held by the current resty.lock object instance.Returns 1 on success. Returns nil and a string describing the error otherwise.If you call unlock when no lock is currently held, the error \"unlocked\" will be returned.", + "prefix": "lock.unlock", + "body": "local ok, err = obj:unlock()" + }, + "lock.expire": { + "description": "Sets the TTL of the lock held by the current resty.lock object instance. 
This will reset the\ntimeout of the lock to timeout seconds if it is given, otherwise the timeout provided while\ncalling new will be used.Note that the timeout supplied inside this function is independent from the timeout provided while\ncalling new. Calling expire() will not change the timeout value specified inside new\nand subsequent expire(nil) call will still use the timeout number from new.Returns true on success. Returns nil and a string describing the error otherwise.If you call expire when no lock is currently held, the error \"unlocked\" will be returned.", + "prefix": "lock.expire", + "body": "local ok, err = obj:expire($timeout)" + }, + "lrucache.new": { + "description": "Creates a new cache instance. Upon failure, returns nil and a string\ndescribing the error.The max_items argument specifies the maximal number of items this cache can\nhold.The load-factor argument designates the \"load factor\" of the FFI-based\nhash-table used internally by resty.lrucache.pureffi; the default value is\n0.5 (i.e. 50%); if the load factor is specified, it will be clamped to the\nrange of [0.1, 1] (i.e. if load factor is greater than 1, it will be\nsaturated to 1; likewise, if load-factor is smaller than 0.1, it will be\nclamped to 0.1). This argument is only meaningful for\nresty.lrucache.pureffi.", + "prefix": "lrucache.new", + "body": "local cache, err = lrucache.new($max_items [, $load_factor])" + }, + "lrucache.set": { + "description": "Sets a key with a value and an expiration time.When the cache is full, the cache will automatically evict the least recently\nused item.The optional ttl argument specifies the expiration time. The time value is in\nseconds, but you can also specify the fraction number part (e.g. 0.25). A nil\nttl argument means the value would never expire (which is the default).The optional flags argument specifies a user flags value associated with the\nitem to be stored. It can be retrieved later with the item. 
The user flags are\nstored as an unsigned 32-bit integer internally, and thus must be specified as\na Lua number. If not specified, flags will have a default value of 0. This\nargument was added in the v0.10 release.", + "prefix": "lrucache.set", + "body": "cache:set($key, $value, $ttl?, $flags?)" + }, + "lrucache.get": { + "description": "Fetches a value with the key. If the key does not exist in the cache or has\nalready expired, nil will be returned.Starting from v0.03, the stale data is also returned as the second return\nvalue if available.Starting from v0.10, the user flags value associated with the stored item is\nalso returned as the third return value. If no user flags were given to an\nitem, its default flags will be 0.", + "prefix": "lrucache.get", + "body": "local data, stale_data, flags = cache:get($key)" + }, + "lrucache.delete": { + "description": "Removes an item specified by the key from the cache.", + "prefix": "lrucache.delete", + "body": "cache:delete($key)" + }, + "lrucache.count": { + "description": "Returns the number of items currently stored in the cache including\nexpired items if any.The returned count value will always be greater or equal to 0 and smaller\nthan or equal to the size argument given to cache:new.This method was added in the v0.10 release.", + "prefix": "lrucache.count", + "body": "local count = cache:count()" + }, + "lrucache.capacity": { + "description": "Returns the maximum number of items the cache can hold. The return value is the\nsame as the size argument given to cache:new when the cache was\ncreated.This method was added in the v0.10 release.", + "prefix": "lrucache.capacity", + "body": "local size = cache:capacity()" + }, + "lrucache.get_keys": { + "description": "Fetch the list of keys currently inside the cache up to max_count. 
The keys\nwill be ordered in MRU fashion (Most-Recently-Used keys first).This function returns a Lua (array) table (with integer keys) containing the\nkeys.When max_count is nil or 0, all keys (if any) will be returned.When provided with a res table argument, this function will not allocate a\ntable and will instead insert the keys in res, along with a trailing nil\nvalue.This method was added in the v0.10 release.", + "prefix": "lrucache.get_keys", + "body": "local keys = cache:get_keys($max_count?, $res?)" + }, + "lrucache.flush_all": { + "description": "Flushes all the existing data (if any) in the current cache instance. This is\nan O(1) operation and should be much faster than creating a brand new cache\ninstance.Note however that the flush_all() method of resty.lrucache.pureffi is an\nO(n) operation.", + "prefix": "lrucache.flush_all", + "body": "cache:flush_all()" + }, + "healthcheck.spawn_checker": { + "description": "Spawns background timer-based \"light threads\" to perform periodic healthchecks on\nthe specified NGINX upstream group with the specified shm storage.The healthchecker does not need any client traffic to function. 
The checks are performed actively\nand periodically.This method call is asynchronous and returns immediately.Returns true on success, or nil and a string describing an error otherwise.", + "prefix": "healthcheck.spawn_checker", + "body": "local ok, err = healthcheck.spawn_checker($options)" + }, + "healthcheck.status_page": { + "description": "Generates a detailed status report for all the upstreams defined in the current NGINX server.One typical output isUpstream foo.com\n Primary Peers\n 127.0.0.1:12354 UP\n 127.0.0.1:12355 DOWN\n Backup Peers\n 127.0.0.1:12356 UP\n\nUpstream bar.com\n Primary Peers\n 127.0.0.1:12354 UP\n 127.0.0.1:12355 DOWN\n 127.0.0.1:12357 DOWN\n Backup Peers\n 127.0.0.1:12356 UP\nIf an upstream has no health checkers, then it will be marked by (NO checkers), as inUpstream foo.com (NO checkers)\n Primary Peers\n 127.0.0.1:12354 UP\n 127.0.0.1:12355 UP\n Backup Peers\n 127.0.0.1:12356 UP\nIf you indeed have spawned a healthchecker in init_worker_by_lua*, then you should really\ncheck out the NGINX error log file to see if there is any fatal errors aborting the healthchecker threads.", + "prefix": "healthcheck.status_page", + "body": "local str, err = healthcheck.status_page()" + }, + "balancer.new": { + "description": "Instantiates an object of this class. 
The class value is returned by the call require \"resty.chash\".The id should be table.concat({host, string.char(0), port}) like the nginx chash does,\nwhen we need to keep consistency with nginx chash.The id can be any string value when we do not need to keep consistency with nginx chash.\nThe weight should be a non negative integer.local nodes = {\n -- id => weight\n server1 = 10,\n server2 = 2,\n}\n\nlocal resty_chash = require \"resty.chash\"\n\nlocal chash = resty_chash:new(nodes)\n\nlocal id = chash:find(\"foo\")\n\nngx.say(id)", + "prefix": "balancer.new", + "body": "local obj, err = class.new($nodes)" + }, + "balancer.reinit": { + "description": "Reinit the chash obj with the new nodes.", + "prefix": "balancer.reinit", + "body": "obj:reinit($nodes)" + }, + "balancer.set": { + "description": "Set weight of the id.", + "prefix": "balancer.set", + "body": "obj:set($id, $weight)" + }, + "balancer.delete": { + "description": "Delete the id.", + "prefix": "balancer.delete", + "body": "obj:delete($id)" + }, + "balancer.incr": { + "description": "Increments weight for the id by the step value weight(default to 1).", + "prefix": "balancer.incr", + "body": "obj:incr($id, $weight?)" + }, + "balancer.decr": { + "description": "Decrease weight for the id by the step value weight(default to 1).", + "prefix": "balancer.decr", + "body": "obj:decr($id, $weight?)" + }, + "balancer.find": { + "description": "Find an id by the key, same key always return the same id in the same obj.The second return value index is the index in the chash circle of the hash value of the key.", + "prefix": "balancer.find", + "body": "local id, index = obj:find($key)" + }, + "balancer.next": { + "description": "If we have chance to retry when the first id(server) doesn't work well,\nthen we can use obj:next to get the next id.The new id may be the same as the old one.", + "prefix": "balancer.next", + "body": "local id, new_index = obj:next($old_index)" + }, + "ngx.OK": { + "description": "0", + 
"prefix": "ngx.OK", + "body": "ngx.OK" + }, + "ngx.ERROR": { + "description": "-1", + "prefix": "ngx.ERROR", + "body": "ngx.ERROR" + }, + "ngx.AGAIN": { + "description": "-2", + "prefix": "ngx.AGAIN", + "body": "ngx.AGAIN" + }, + "ngx.DONE": { + "description": "-4", + "prefix": "ngx.DONE", + "body": "ngx.DONE" + }, + "ngx.DECLINED": { + "description": "-5", + "prefix": "ngx.DECLINED", + "body": "ngx.DECLINED" + }, + "ngx.HTTP_GET": { + "description": "HTTP method constants.", + "prefix": "ngx.HTTP_GET", + "body": "ngx.HTTP_GET" + }, + "ngx.HTTP_HEAD": { + "description": "HTTP method constants.", + "prefix": "ngx.HTTP_HEAD", + "body": "ngx.HTTP_HEAD" + }, + "ngx.HTTP_PUT": { + "description": "HTTP method constants.", + "prefix": "ngx.HTTP_PUT", + "body": "ngx.HTTP_PUT" + }, + "ngx.HTTP_POST": { + "description": "HTTP method constants.", + "prefix": "ngx.HTTP_POST", + "body": "ngx.HTTP_POST" + }, + "ngx.HTTP_DELETE": { + "description": "HTTP method constants.", + "prefix": "ngx.HTTP_DELETE", + "body": "ngx.HTTP_DELETE" + }, + "ngx.HTTP_OPTIONS": { + "description": "HTTP method constants.(added in the v0.5.0rc24 release)", + "prefix": "ngx.HTTP_OPTIONS", + "body": "ngx.HTTP_OPTIONS" + }, + "ngx.HTTP_MKCOL": { + "description": "HTTP method constants.(added in the v0.8.2 release)", + "prefix": "ngx.HTTP_MKCOL", + "body": "ngx.HTTP_MKCOL" + }, + "ngx.HTTP_COPY": { + "description": "HTTP method constants.(added in the v0.8.2 release)", + "prefix": "ngx.HTTP_COPY", + "body": "ngx.HTTP_COPY" + }, + "ngx.HTTP_MOVE": { + "description": "HTTP method constants.(added in the v0.8.2 release)", + "prefix": "ngx.HTTP_MOVE", + "body": "ngx.HTTP_MOVE" + }, + "ngx.HTTP_PROPFIND": { + "description": "HTTP method constants.(added in the v0.8.2 release)", + "prefix": "ngx.HTTP_PROPFIND", + "body": "ngx.HTTP_PROPFIND" + }, + "ngx.HTTP_PROPPATCH": { + "description": "HTTP method constants.(added in the v0.8.2 release)", + "prefix": "ngx.HTTP_PROPPATCH", + "body": "ngx.HTTP_PROPPATCH" + }, 
+ "ngx.HTTP_LOCK": { + "description": "HTTP method constants.(added in the v0.8.2 release)", + "prefix": "ngx.HTTP_LOCK", + "body": "ngx.HTTP_LOCK" + }, + "ngx.HTTP_UNLOCK": { + "description": "HTTP method constants.(added in the v0.8.2 release)", + "prefix": "ngx.HTTP_UNLOCK", + "body": "ngx.HTTP_UNLOCK" + }, + "ngx.HTTP_PATCH": { + "description": "HTTP method constants.(added in the v0.8.2 release)", + "prefix": "ngx.HTTP_PATCH", + "body": "ngx.HTTP_PATCH" + }, + "ngx.HTTP_TRACE": { + "description": "HTTP method constants.(added in the v0.8.2 release)", + "prefix": "ngx.HTTP_TRACE", + "body": "ngx.HTTP_TRACE" + }, + "ngx.STDERR": { + "description": "Nginx log level constants", + "prefix": "ngx.STDERR", + "body": "ngx.STDERR" + }, + "ngx.EMERG": { + "description": "Nginx log level constants", + "prefix": "ngx.EMERG", + "body": "ngx.EMERG" + }, + "ngx.ALERT": { + "description": "Nginx log level constants", + "prefix": "ngx.ALERT", + "body": "ngx.ALERT" + }, + "ngx.CRIT": { + "description": "Nginx log level constants", + "prefix": "ngx.CRIT", + "body": "ngx.CRIT" + }, + "ngx.ERR": { + "description": "Nginx log level constants", + "prefix": "ngx.ERR", + "body": "ngx.ERR" + }, + "ngx.WARN": { + "description": "Nginx log level constants", + "prefix": "ngx.WARN", + "body": "ngx.WARN" + }, + "ngx.NOTICE": { + "description": "Nginx log level constants", + "prefix": "ngx.NOTICE", + "body": "ngx.NOTICE" + }, + "ngx.INFO": { + "description": "Nginx log level constants", + "prefix": "ngx.INFO", + "body": "ngx.INFO" + }, + "ngx.DEBUG": { + "description": "Nginx log level constants", + "prefix": "ngx.DEBUG", + "body": "ngx.DEBUG" + }, + "ngx.HTTP_CONTINUE": { + "description": "(100) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_CONTINUE", + "body": "ngx.HTTP_CONTINUE" + }, + "ngx.HTTP_SWITCHING_PROTOCOLS": { + "description": "(101)(first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_SWITCHING_PROTOCOLS", + "body": "ngx.HTTP_SWITCHING_PROTOCOLS" + }, + 
"ngx.HTTP_OK": { + "description": "(200)", + "prefix": "ngx.HTTP_OK", + "body": "ngx.HTTP_OK" + }, + "ngx.HTTP_CREATED": { + "description": "(201)", + "prefix": "ngx.HTTP_CREATED", + "body": "ngx.HTTP_CREATED" + }, + "ngx.HTTP_ACCEPTED": { + "description": "(202) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_ACCEPTED", + "body": "ngx.HTTP_ACCEPTED" + }, + "ngx.HTTP_NO_CONTENT": { + "description": "(204) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_NO_CONTENT", + "body": "ngx.HTTP_NO_CONTENT" + }, + "ngx.HTTP_PARTIAL_CONTENT": { + "description": "(206) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_PARTIAL_CONTENT", + "body": "ngx.HTTP_PARTIAL_CONTENT" + }, + "ngx.HTTP_SPECIAL_RESPONSE": { + "description": "(300)", + "prefix": "ngx.HTTP_SPECIAL_RESPONSE", + "body": "ngx.HTTP_SPECIAL_RESPONSE" + }, + "ngx.HTTP_MOVED_PERMANENTLY": { + "description": "(301)", + "prefix": "ngx.HTTP_MOVED_PERMANENTLY", + "body": "ngx.HTTP_MOVED_PERMANENTLY" + }, + "ngx.HTTP_MOVED_TEMPORARILY": { + "description": "(302)", + "prefix": "ngx.HTTP_MOVED_TEMPORARILY", + "body": "ngx.HTTP_MOVED_TEMPORARILY" + }, + "ngx.HTTP_SEE_OTHER": { + "description": "(303)", + "prefix": "ngx.HTTP_SEE_OTHER", + "body": "ngx.HTTP_SEE_OTHER" + }, + "ngx.HTTP_NOT_MODIFIED": { + "description": "(304)", + "prefix": "ngx.HTTP_NOT_MODIFIED", + "body": "ngx.HTTP_NOT_MODIFIED" + }, + "ngx.HTTP_TEMPORARY_REDIRECT": { + "description": "(307) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_TEMPORARY_REDIRECT", + "body": "ngx.HTTP_TEMPORARY_REDIRECT" + }, + "ngx.HTTP_PERMANENT_REDIRECT": { + "description": "(308)", + "prefix": "ngx.HTTP_PERMANENT_REDIRECT", + "body": "ngx.HTTP_PERMANENT_REDIRECT" + }, + "ngx.HTTP_BAD_REQUEST": { + "description": "(400)", + "prefix": "ngx.HTTP_BAD_REQUEST", + "body": "ngx.HTTP_BAD_REQUEST" + }, + "ngx.HTTP_UNAUTHORIZED": { + "description": "(401)", + "prefix": "ngx.HTTP_UNAUTHORIZED", + "body": "ngx.HTTP_UNAUTHORIZED" + }, + 
"ngx.HTTP_PAYMENT_REQUIRED": { + "description": "(402) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_PAYMENT_REQUIRED", + "body": "ngx.HTTP_PAYMENT_REQUIRED" + }, + "ngx.HTTP_FORBIDDEN": { + "description": "(403)", + "prefix": "ngx.HTTP_FORBIDDEN", + "body": "ngx.HTTP_FORBIDDEN" + }, + "ngx.HTTP_NOT_FOUND": { + "description": "(404)", + "prefix": "ngx.HTTP_NOT_FOUND", + "body": "ngx.HTTP_NOT_FOUND" + }, + "ngx.HTTP_NOT_ALLOWED": { + "description": "(405)", + "prefix": "ngx.HTTP_NOT_ALLOWED", + "body": "ngx.HTTP_NOT_ALLOWED" + }, + "ngx.HTTP_NOT_ACCEPTABLE": { + "description": "(406) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_NOT_ACCEPTABLE", + "body": "ngx.HTTP_NOT_ACCEPTABLE" + }, + "ngx.HTTP_REQUEST_TIMEOUT": { + "description": "(408) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_REQUEST_TIMEOUT", + "body": "ngx.HTTP_REQUEST_TIMEOUT" + }, + "ngx.HTTP_CONFLICT": { + "description": "(409) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_CONFLICT", + "body": "ngx.HTTP_CONFLICT" + }, + "ngx.HTTP_GONE": { + "description": "(410)", + "prefix": "ngx.HTTP_GONE", + "body": "ngx.HTTP_GONE" + }, + "ngx.HTTP_UPGRADE_REQUIRED": { + "description": "(426) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_UPGRADE_REQUIRED", + "body": "ngx.HTTP_UPGRADE_REQUIRED" + }, + "ngx.HTTP_TOO_MANY_REQUESTS": { + "description": "(429) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_TOO_MANY_REQUESTS", + "body": "ngx.HTTP_TOO_MANY_REQUESTS" + }, + "ngx.HTTP_CLOSE": { + "description": "(444) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_CLOSE", + "body": "ngx.HTTP_CLOSE" + }, + "ngx.HTTP_ILLEGAL": { + "description": "(451) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_ILLEGAL", + "body": "ngx.HTTP_ILLEGAL" + }, + "ngx.HTTP_INTERNAL_SERVER_ERROR": { + "description": "(500)", + "prefix": "ngx.HTTP_INTERNAL_SERVER_ERROR", + "body": "ngx.HTTP_INTERNAL_SERVER_ERROR" + }, + 
"ngx.HTTP_METHOD_NOT_IMPLEMENTED": { + "description": "(501)", + "prefix": "ngx.HTTP_METHOD_NOT_IMPLEMENTED", + "body": "ngx.HTTP_METHOD_NOT_IMPLEMENTED" + }, + "ngx.HTTP_BAD_GATEWAY": { + "description": "(502) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_BAD_GATEWAY", + "body": "ngx.HTTP_BAD_GATEWAY" + }, + "ngx.HTTP_SERVICE_UNAVAILABLE": { + "description": "(503)", + "prefix": "ngx.HTTP_SERVICE_UNAVAILABLE", + "body": "ngx.HTTP_SERVICE_UNAVAILABLE" + }, + "ngx.HTTP_GATEWAY_TIMEOUT": { + "description": "(504) (first added in the v0.3.1rc38 release)", + "prefix": "ngx.HTTP_GATEWAY_TIMEOUT", + "body": "ngx.HTTP_GATEWAY_TIMEOUT" + }, + "ngx.HTTP_VERSION_NOT_SUPPORTED": { + "description": "(505) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_VERSION_NOT_SUPPORTED", + "body": "ngx.HTTP_VERSION_NOT_SUPPORTED" + }, + "ngx.HTTP_INSUFFICIENT_STORAGE": { + "description": "(507) (first added in the v0.9.20 release)", + "prefix": "ngx.HTTP_INSUFFICIENT_STORAGE", + "body": "ngx.HTTP_INSUFFICIENT_STORAGE" + } } diff --git a/src/downloader/http_headers.ts b/src/downloader/http_headers.ts index cbf687e..3c8182c 100644 --- a/src/downloader/http_headers.ts +++ b/src/downloader/http_headers.ts @@ -252,7 +252,12 @@ async function main() { const handleGermanRow = ($row: Cheerio, type: ManifestItemType) => { const $cols = $row.find("td"); if ($cols.length === 0) return; - const headerNames = normalizeHeaderName($cols.eq(0).text()); + + let headerName = $cols.eq(0).text(); + if(headerName.indexOf("[") > -1){ + headerName = headerName.split("[")[0] + } + const headerNames = normalizeHeaderName(headerName); const description = getDescriptionMarkdown($cols.eq(2), baseUrl); if (!description) print.warn(`header ${headerNames[0]} has no description`); for (let j = 0; j < headerNames.length; j++) { @@ -261,10 +266,10 @@ async function main() { } }; - const $reqH2 = $("h2 #Anfrage-Headerfelder"); + const $reqH2 = $("h2#Anfrage-Headerfelder"); assertLength("request 
fields h2", $reqH2, 1); const $tables = getNextTables($reqH2.parent(), "h2"); - assertLength("request fields table", $tables, 1); + assertLength("request fields table", $tables, 4); for (const element of $tables) { const $rows = element.find("tr"); for (let row = 0; row < $rows.length; row++) {