diff --git a/apisix/plugins/limit-count.lua b/apisix/plugins/limit-count.lua index cbce3a798c6c..5b0e8c37c9f4 100644 --- a/apisix/plugins/limit-count.lua +++ b/apisix/plugins/limit-count.lua @@ -16,6 +16,11 @@ -- local limit_local_new = require("resty.limit.count").new local core = require("apisix.core") +local tab_insert = table.insert +local ipairs = ipairs +local pairs = pairs + + local plugin_name = "limit-count" local limit_redis_cluster_new local limit_redis_new @@ -29,13 +34,60 @@ end local lrucache = core.lrucache.new({ type = 'plugin', serial_creating = true, }) +local group_conf_lru = core.lrucache.new({ + type = 'plugin', +}) +local policy_to_additional_properties = { + redis = { + properties = { + redis_host = { + type = "string", minLength = 2 + }, + redis_port = { + type = "integer", minimum = 1, default = 6379, + }, + redis_password = { + type = "string", minLength = 0, + }, + redis_database = { + type = "integer", minimum = 0, default = 0, + }, + redis_timeout = { + type = "integer", minimum = 1, default = 1000, + }, + }, + required = {"redis_host"}, + }, + ["redis-cluster"] = { + properties = { + redis_cluster_nodes = { + type = "array", + minItems = 2, + items = { + type = "string", minLength = 2, maxLength = 100 + }, + }, + redis_password = { + type = "string", minLength = 0, + }, + redis_timeout = { + type = "integer", minimum = 1, default = 1000, + }, + redis_cluster_name = { + type = "string", + }, + }, + required = {"redis_cluster_nodes", "redis_cluster_name"}, + }, +} local schema = { type = "object", properties = { count = {type = "integer", exclusiveMinimum = 0}, time_window = {type = "integer", exclusiveMinimum = 0}, + group = {type = "string"}, key = {type = "string", default = "remote_addr"}, key_type = {type = "string", enum = {"var", "var_combination"}, @@ -66,53 +118,20 @@ local schema = { }, }, }, - { + core.table.merge({ properties = { policy = { enum = {"redis"}, }, - redis_host = { - type = "string", minLength = 2 - }, - redis_port = { - type = "integer", minimum = 1, default = 6379, - }, - redis_password = { - type = "string", minLength = 0, - }, - redis_database = { - type = "integer", minimum = 0, default = 0, - }, - redis_timeout = { - type = "integer", minimum = 1, default = 1000, - }, }, - required = {"redis_host"}, - }, - { + }, policy_to_additional_properties.redis), + core.table.merge({ properties = { policy = { enum = {"redis-cluster"}, }, - redis_cluster_nodes = { - type = "array", - minItems = 2, - items = { - type = "string", minLength = 2, maxLength = 100 - }, - }, - redis_password = { - type = "string", minLength = 0, - }, - redis_timeout = { - type = "integer", minimum = 1, default = 1000, - }, - redis_cluster_name = { - type = "string", - }, }, - required = {"redis_cluster_nodes", "redis_cluster_name"}, - } + }, policy_to_additional_properties["redis-cluster"]), } } } @@ -127,12 +146,42 @@ local _M = { } +local function group_conf(conf) + return conf +end + + function _M.check_schema(conf) local ok, err = core.schema.check(schema, conf) if not ok then return false, err end + if conf.group then + local fields = {} + for k in pairs(schema.properties) do + tab_insert(fields, k) + end + local extra = policy_to_additional_properties[conf.policy] + if extra then + for k in pairs(extra.properties) do + tab_insert(fields, k) + end + end + + local prev_conf = group_conf_lru(conf.group, "", group_conf, conf) + + for _, field in ipairs(fields) do + if not core.table.deep_eq(prev_conf[field], conf[field]) then + core.log.error("previous limit-conn 
group ", prev_conf.group, + " conf: ", core.json.encode(prev_conf)) + core.log.error("current limit-conn group ", conf.group, + " conf: ", core.json.encode(conf)) + return false, "group conf mismatched" + end + end + end + return true end @@ -161,7 +210,14 @@ end function _M.access(conf, ctx) core.log.info("ver: ", ctx.conf_version) - local lim, err = core.lrucache.plugin_ctx(lrucache, ctx, conf.policy, create_limit_obj, conf) + + local lim, err + if not conf.group then + lim, err = core.lrucache.plugin_ctx(lrucache, ctx, conf.policy, create_limit_obj, conf) + else + lim, err = lrucache(conf.group, "", create_limit_obj, conf) + end + if not lim then core.log.error("failed to fetch limit.count object: ", err) if conf.allow_degradation then @@ -192,7 +248,12 @@ function _M.access(conf, ctx) key = ctx.var["remote_addr"] end - key = key .. ctx.conf_type .. ctx.conf_version + if not conf.group then + key = key .. ctx.conf_type .. ctx.conf_version + else + key = key .. conf.group + end + core.log.info("limit key: ", key) local delay, remaining = lim:incoming(key, true) diff --git a/docs/en/latest/plugins/limit-count.md b/docs/en/latest/plugins/limit-count.md index 5e094d540dba..0062596b3912 100644 --- a/docs/en/latest/plugins/limit-count.md +++ b/docs/en/latest/plugins/limit-count.md @@ -46,6 +46,7 @@ Limit request rate by a fixed number of requests in a given time window. | policy | string | optional | "local" | ["local", "redis", "redis-cluster"] | The rate-limiting policies to use for retrieving and incrementing the limits. Available values are `local`(the counters will be stored locally in-memory on the node), `redis`(counters are stored on a Redis server and will be shared across the nodes, usually use it to do the global speed limit), and `redis-cluster` which works the same as `redis` but with redis cluster. | | allow_degradation | boolean | optional | false | | Whether to enable plugin degradation when the limit-count function is temporarily unavailable(e.g. redis timeout). Allow requests to continue when the value is set to true, default false. | | show_limit_quota_header | boolean | optional | true | | Whether show `X-RateLimit-Limit` and `X-RateLimit-Remaining` (which mean the total number of requests and the remaining number of requests that can be sent) in the response header, default true. | +| group | string | optional | | non-empty | Route configured with the same group will share the same counter | | redis_host | string | required for `redis` | | | When using the `redis` policy, this property specifies the address of the Redis server. | | redis_port | integer | optional | 6379 | [1,...] | When using the `redis` policy, this property specifies the port of the Redis server. | | redis_password | string | optional | | | When using the `redis` or `redis-cluster` policy, this property specifies the password of the Redis server. | @@ -107,6 +108,54 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 You also can complete the above operation through the web interface, first add a route, then add limit-count plugin: ![Add limit-count plugin.](../../../assets/images/plugin/limit-count-1.png) +It is possible to share the same limit counter across different routes. 
For example,
+
+```
+curl -i http://127.0.0.1:9080/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins": {
+        "limit-count": {
+            "count": 1,
+            "time_window": 60,
+            "rejected_code": 503,
+            "key": "remote_addr",
+            "group": "services_1#1640140620"
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```
+
+Every route whose group name is "services_1#1640140620" shares the same counter: at most `1` request per minute for each `remote_addr`.
+
+```
+$ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "service_id": "1",
+    "uri": "/hello"
+}'
+
+$ curl -i http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "service_id": "1",
+    "uri": "/hello2"
+}'
+
+$ curl -i http://127.0.0.1:9080/hello
+HTTP/1.1 200 ...
+
+$ curl -i http://127.0.0.1:9080/hello2
+HTTP/1.1 503 ...
+```
+
+Note that every limit-count configuration in the same group must be identical.
+Therefore, whenever you update the configuration, you also need to update the group name.
+
 If you need a cluster-level precision traffic limit, then we can do it with the redis server. The rate limit of the traffic will be shared between different APISIX nodes to limit the rate of cluster traffic.

 Here is the example if we use single `redis` policy:
diff --git a/docs/zh/latest/plugins/limit-count.md b/docs/zh/latest/plugins/limit-count.md
index a3e9c7aad6c1..4d7273d3e1e6 100644
--- a/docs/zh/latest/plugins/limit-count.md
+++ b/docs/zh/latest/plugins/limit-count.md
@@ -49,6 +49,7 @@ title: limit-count
 | policy | string | 可选 | "local" | ["local", "redis", "redis-cluster"] | 用于检索和增加限制的速率限制策略。可选的值有:`local`(计数器被以内存方式保存在节点本地,默认选项) 和 `redis`(计数器保存在 Redis 服务节点上,从而可以跨节点共享结果,通常用它来完成全局限速);以及`redis-cluster`,跟 redis 功能一样,只是使用 redis 集群方式。 |
 | allow_degradation | boolean | 可选 | false | | 当限流插件功能临时不可用时(例如,Redis 超时)是否允许请求继续。当值设置为 true 时则自动允许请求继续,默认值是 false。|
 | show_limit_quota_header | boolean | 可选 | true | | 是否在响应头中显示 `X-RateLimit-Limit` 和 `X-RateLimit-Remaining` (限制的总请求数和剩余还可以发送的请求数),默认值是 true。 |
+| group | string | 可选 | | 非空 | 配置同样的 group 的 Route 将共享同样的限流计数器 |
 | redis_host | string | `redis` 必须 | | | 当使用 `redis` 限速策略时,该属性是 Redis 服务节点的地址。 |
 | redis_port | integer | 可选 | 6379 | [1,...] 
| 当使用 `redis` 限速策略时,该属性是 Redis 服务节点的端口 | | redis_password | string | 可选 | | | 当使用 `redis` 或者 `redis-cluster` 限速策略时,该属性是 Redis 服务节点的密码。 | @@ -112,6 +113,54 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 你也可以通过 web 界面来完成上面的操作,先增加一个 route,然后在插件页面中添加 limit-count 插件: ![添加插件](../../../assets/images/plugin/limit-count-1.png) +我们也支持在多个 Route 间共享同一个限流计数器。举个例子, + +``` +curl -i http://127.0.0.1:9080/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "limit-count": { + "count": 1, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "group": "services_1#1640140620" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +每个配置了 `group` 为 `services_1#1640140620` 的 Route 都将共享同一个每个 IP 地址每分钟只能访问一次的计数器。 + +``` +$ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "service_id": "1", + "uri": "/hello" +}' + +$ curl -i http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "service_id": "1", + "uri": "/hello2" +}' + +$ curl -i http://127.0.0.1:9080/hello +HTTP/1.1 200 ... + +$ curl -i http://127.0.0.1:9080/hello2 +HTTP/1.1 503 ... +``` + +注意同一个 group 里面的 limit-count 配置必须一样。 +所以,一旦修改了配置,我们需要更新对应的 group 的值。 + 如果你需要一个集群级别的流量控制,我们可以借助 redis server 来完成。不同的 APISIX 节点之间将共享流量限速结果,实现集群流量限速。 如果启用单 redis 策略,请看下面例子: diff --git a/t/plugin/limit-count-redis2.t b/t/plugin/limit-count-redis2.t index 5be2efe6ec51..a9f23e858f7b 100644 --- a/t/plugin/limit-count-redis2.t +++ b/t/plugin/limit-count-redis2.t @@ -30,6 +30,19 @@ repeat_each(1); no_long_string(); no_shuffle(); no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + run_tests; __DATA__ @@ -70,12 +83,8 @@ __DATA__ ngx.say(body) } } ---- request -GET /t --- response_body passed ---- no_error_log -[error] @@ -116,12 +125,8 @@ passed ngx.say(body) } } ---- request -GET /t --- response_body passed ---- no_error_log -[error] @@ -172,12 +177,8 @@ failed to limit count: failed to change redis db, err: ERR invalid DB index ngx.say(body) } } ---- request -GET /t --- response_body passed ---- no_error_log -[error] @@ -186,8 +187,6 @@ passed ["GET /hello", "GET /hello", "GET /hello", "GET /hello"] --- error_code eval [200, 200, 503, 503] ---- no_error_log -[error] @@ -229,12 +228,8 @@ passed ngx.say(body) } } ---- request -GET /t --- response_body passed ---- no_error_log -[error] @@ -243,6 +238,8 @@ passed GET /hello --- response_body hello world +--- error_log +connection refused @@ -284,12 +281,8 @@ hello world ngx.say(body) } } ---- request -GET /t --- response_body passed ---- no_error_log -[error] @@ -298,3 +291,82 @@ passed GET /hello --- raw_response_headers_unlike eval qr/X-RateLimit-Limit/ + + + +=== TEST 10: configuration from the same group should be the same +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "show_limit_quota_header": false, + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_database": 1, + "redis_timeout": 
1001, + "group": "redis" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "show_limit_quota_header": false, + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_database": 2, + "redis_timeout": 1001, + "group": "redis" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- error_log +[error] +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: group conf mismatched"} diff --git a/t/plugin/limit-count2.t b/t/plugin/limit-count2.t index ea4a675c2db9..3f6bdf325b58 100644 --- a/t/plugin/limit-count2.t +++ b/t/plugin/limit-count2.t @@ -214,12 +214,8 @@ GET /hello ngx.say(body) } } ---- request -GET /t --- response_body passed ---- no_error_log -[error] @@ -244,10 +240,6 @@ passed ngx.say(json.encode(ress)) } } ---- request -GET /t ---- no_error_log -[error] --- response_body [200,200,503,503] @@ -274,10 +266,6 @@ GET /t ngx.say(json.encode(ress)) } } ---- request -GET /t ---- no_error_log -[error] --- response_body [200,200,503,503] @@ -316,12 +304,8 @@ GET /t ngx.say(body) } } ---- request -GET /t --- response_body passed ---- no_error_log -[error] @@ -346,10 +330,6 @@ passed ngx.say(json.encode(ress)) } } ---- request -GET /t ---- no_error_log -[error] --- response_body [200,200,503,503] @@ -376,10 +356,6 @@ GET /t ngx.say(json.encode(ress)) } } ---- request -GET /t ---- no_error_log -[error] --- response_body [503,200] @@ -406,10 +382,6 @@ GET /t ngx.say(json.encode(ress)) } } ---- request -GET /t ---- no_error_log -[error] --- response_body [200,200,503,503] --- error_log @@ -450,12 +422,8 @@ The value of the configured key is empty, use client IP instead ngx.say(body) } } ---- request -GET /t --- response_body passed ---- no_error_log -[error] @@ -480,11 +448,240 @@ passed ngx.say(json.encode(ress)) } } ---- request -GET /t ---- no_error_log -[error] --- response_body [200,200,503,503] --- error_log The value of the configured key is empty, use client IP instead + + + +=== TEST 15: limit count in group +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "group": "services_1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "group": "services_1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello_chunked" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: hit multiple paths +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri1 = 
"http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello_chunked" + local ress = {} + for i = 1, 4 do + local httpc = http.new() + local uri + if i % 2 == 1 then + uri = uri1 + else + uri = uri2 + end + + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200,503,503] + + + +=== TEST 17: limit count in group, configuration is from services +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "group": "afafafhao" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": "1", + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "service_id": "1", + "uri": "/hello_chunked" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: hit multiple paths +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello_chunked" + local ress = {} + for i = 1, 4 do + local httpc = http.new() + local uri + if i % 2 == 1 then + uri = uri1 + else + uri = uri2 + end + + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200,503,503] + + + +=== TEST 19: configuration from the same group should be the same +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 1, + "time_window": 60, + "rejected_code": 503, + "group": "afafafhao" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- error_log +[error] +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: group conf mismatched"}