diff --git a/apisix/plugins/limit-conn.lua b/apisix/plugins/limit-conn.lua
index 2f174c9ac3ae..40a85c1b0740 100644
--- a/apisix/plugins/limit-conn.lua
+++ b/apisix/plugins/limit-conn.lua
@@ -25,6 +25,7 @@ local schema = {
conn = {type = "integer", exclusiveMinimum = 0},
burst = {type = "integer", minimum = 0},
default_conn_delay = {type = "number", exclusiveMinimum = 0},
+ only_use_default_delay = {type = "boolean", default = false},
key = {type = "string",
enum = {"remote_addr", "server_addr", "http_x_real_ip",
"http_x_forwarded_for", "consumer_name"},
diff --git a/apisix/plugins/limit-conn/init.lua b/apisix/plugins/limit-conn/init.lua
index e462f1dc7a9d..16a771745f91 100644
--- a/apisix/plugins/limit-conn/init.lua
+++ b/apisix/plugins/limit-conn/init.lua
@@ -62,7 +62,7 @@ function _M.increase(conf, ctx)
ctx.limit_conn = core.tablepool.fetch("plugin#limit-conn", 0, 6)
end
- core.table.insert_tail(ctx.limit_conn, lim, key, delay)
+ core.table.insert_tail(ctx.limit_conn, lim, key, delay, conf.only_use_default_delay)
end
if delay >= 0.001 then
@@ -77,18 +77,20 @@ function _M.decrease(conf, ctx)
return
end
- for i = 1, #limit_conn, 3 do
+ for i = 1, #limit_conn, 4 do
local lim = limit_conn[i]
local key = limit_conn[i + 1]
local delay = limit_conn[i + 2]
+ local use_delay = limit_conn[i + 3]
local latency
- if ctx.proxy_passed then
- latency = ctx.var.upstream_response_time
- else
- latency = ctx.var.request_time - delay
+ if not use_delay then
+ if ctx.proxy_passed then
+ latency = ctx.var.upstream_response_time
+ else
+ latency = ctx.var.request_time - delay
+ end
end
-
core.log.debug("request latency is ", latency) -- for test
local conn, err = lim:leaving(key, latency)
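
A minimal OpenResty sketch of the behaviour this change relies on, assuming a `lua_shared_dict` named `limit_conn_store` and the `resty.limit.conn` module from lua-resty-limit-traffic (the library the plugin builds on): when `only_use_default_delay` is set, `latency` stays `nil`, so `lim:leaving(key, nil)` only decrements the connection counter and never feeds a measured latency back into the delay estimate, keeping the delay pinned to `default_conn_delay`. The dict name and 0.3s value below are illustrative, not part of the patch.

```lua
local limit_conn = require("resty.limit.conn")

-- conn = 1, burst = 0, default_conn_delay = 0.3 (same values as the test route below)
local lim, err = limit_conn.new("limit_conn_store", 1, 0, 0.3)
if not lim then
    error("failed to instantiate resty.limit.conn: " .. (err or ""))
end

local key = ngx.var.remote_addr
local delay, err = lim:incoming(key, true)
if not delay then
    return ngx.exit(err == "rejected" and 503 or 500)   -- over conn + burst, or shdict error
end

if delay >= 0.001 then
    ngx.sleep(delay)        -- delay derived from default_conn_delay
end

-- ... request is proxied here ...

local only_use_default_delay = true
local latency               -- stays nil in strict mode
if not only_use_default_delay then
    -- non-strict mode: feed the measured request time back into the estimator
    latency = tonumber(ngx.var.request_time) - delay
end
-- leaving() decrements the counter; a nil latency skips the moving-average update
local conn, err = lim:leaving(key, latency)
```
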
diff --git a/apisix/stream/plugins/limit-conn.lua b/apisix/stream/plugins/limit-conn.lua
index 6f949c3d081c..d2bd25e97d57 100644
--- a/apisix/stream/plugins/limit-conn.lua
+++ b/apisix/stream/plugins/limit-conn.lua
@@ -25,6 +25,7 @@ local schema = {
conn = {type = "integer", exclusiveMinimum = 0},
burst = {type = "integer", minimum = 0},
default_conn_delay = {type = "number", exclusiveMinimum = 0},
+ only_use_default_delay = {type = "boolean", default = false},
key = {
type = "string",
enum = {"remote_addr", "server_addr"}
diff --git a/docs/en/latest/plugins/limit-conn.md b/docs/en/latest/plugins/limit-conn.md
index b402a268ab07..bba984ef1cc7 100644
--- a/docs/en/latest/plugins/limit-conn.md
+++ b/docs/en/latest/plugins/limit-conn.md
@@ -40,6 +40,7 @@ Limiting request concurrency plugin.
| conn | integer | required | | conn > 0 | the maximum number of concurrent requests allowed. Requests exceeding this ratio (and below `conn` + `burst`) will get delayed(the latency seconds is configured by `default_conn_delay`) to conform to this threshold. |
| burst | integer | required | | burst >= 0 | the number of excessive concurrent requests (or connections) allowed to be delayed. |
| default_conn_delay | number | required | | default_conn_delay > 0 | the latency seconds of request when concurrent requests exceeding `conn` but below (`conn` + `burst`). |
+| only_use_default_delay | boolean | optional | false | [true,false] | enable strict mode for the delay time. When set to `true`, requests will be delayed strictly according to the configured `default_conn_delay`, with no extra latency calculation. |
| key | object | required | | ["remote_addr", "server_addr", "http_x_real_ip", "http_x_forwarded_for", "consumer_name"] | to limit the concurrency level. For example, one can use the host name (or server zone) as the key so that we limit concurrency per host name. Otherwise, we can also use the client address as the key so that we can avoid a single client from flooding our service with too many parallel connections or requests. Now accept those as key: "remote_addr"(client's IP), "server_addr"(server's IP), "X-Forwarded-For/X-Real-IP" in request header, "consumer_name"(consumer's username). |
| rejected_code | string | optional | 503 | [200,...,599] | returned when the request exceeds `conn` + `burst` will be rejected. |
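
A route-level usage sketch for the new parameter, assuming the same Admin API path, upstream address, and `lib.test_admin` helper that appear in t/plugin/limit-conn2.t later in this patch (run inside a `content_by_lua_block`):

```lua
-- Sketch: enable strict-delay mode on a route via the Admin API (mirrors TEST 5 below).
local t = require("lib.test_admin").test

local code, body = t('/apisix/admin/routes/1',
    ngx.HTTP_PUT,
    [[{
        "uri": "/hello1",
        "plugins": {
            "limit-conn": {
                "conn": 1,
                "burst": 0,
                "default_conn_delay": 0.3,
                "only_use_default_delay": true,
                "rejected_code": 503,
                "key": "remote_addr"
            }
        },
        "upstream": {
            "nodes": {"127.0.0.1:1982": 1},
            "type": "roundrobin"
        }
    }]]
)

if code >= 300 then
    ngx.status = code
end
ngx.say(body)
```
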
diff --git a/docs/zh/latest/plugins/limit-conn.md b/docs/zh/latest/plugins/limit-conn.md
index ef02c93c0cbd..7dfb2fa952af 100644
--- a/docs/zh/latest/plugins/limit-conn.md
+++ b/docs/zh/latest/plugins/limit-conn.md
@@ -30,6 +30,7 @@ title: limit-conn
| conn | integer | required | | conn > 0 | 允许的最大并发请求数。超过 `conn` 的限制、但是低于 `conn` + `burst` 的请求,将被延迟处理。 |
| burst | integer | required | | burst >= 0 | 允许被延迟处理的并发请求数。 |
| default_conn_delay | number | required | | default_conn_delay > 0 | 默认的典型连接(或请求)的处理延迟时间。 |
+| only_use_default_delay | boolean | optional | false | [true,false] | 延迟时间的严格模式。设置为 `true` 时,将严格按照设置的 `default_conn_delay` 时间进行延迟,不再进行额外的计算。 |
| key | object | required | | ["remote_addr", "server_addr", "http_x_real_ip", "http_x_forwarded_for", "consumer_name"] | 用户指定的限制并发级别的关键字,可以是客户端 IP 或服务端 IP。 例如,可以使用主机名(或服务器区域)作为关键字,以便限制每个主机名的并发性。 否则,我们也可以使用客户端地址作为关键字,这样我们就可以避免单个客户端用太多的并行连接或请求淹没我们的服务。 当前接受的 key 有:"remote_addr"(客户端 IP 地址), "server_addr"(服务端 IP 地址), 请求头中的 "X-Forwarded-For" 或 "X-Real-IP", "consumer_name"(consumer 的 username)。 |
| rejected_code | string | optional | 503 | [200,...,599] | 当请求超过 `conn` + `burst` 这个阈值时,返回的 HTTP 状态码 |
diff --git a/t/plugin/limit-conn2.t b/t/plugin/limit-conn2.t
index c1d8f0683cd0..bbad64894cd4 100644
--- a/t/plugin/limit-conn2.t
+++ b/t/plugin/limit-conn2.t
@@ -178,3 +178,53 @@ qr/request latency is/
--- grep_error_log_out
request latency is
request latency is
+
+
+
+=== TEST 5: set only_use_default_delay option to true in specific route
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "uri": "/hello1",
+ "plugins": {
+ "limit-conn": {
+ "conn": 1,
+ "burst": 0,
+ "default_conn_delay": 0.3,
+ "only_use_default_delay": true,
+ "rejected_code": 503,
+ "key": "remote_addr"
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1982": 1
+ },
+ "type": "roundrobin"
+ }
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 6: hit route
+--- log_level: debug
+--- request
+GET /hello1
+--- grep_error_log eval
+qr/request latency is nil/
+--- grep_error_log_out
+request latency is nil