feat(limit-count): allow sharing counter #5881

Merged · 4 commits · Dec 24, 2021
139 changes: 100 additions & 39 deletions apisix/plugins/limit-count.lua
@@ -16,6 +16,11 @@
--
local limit_local_new = require("resty.limit.count").new
local core = require("apisix.core")
local tab_insert = table.insert
local ipairs = ipairs
local pairs = pairs


local plugin_name = "limit-count"
local limit_redis_cluster_new
local limit_redis_new
@@ -29,13 +34,60 @@ end
local lrucache = core.lrucache.new({
type = 'plugin', serial_creating = true,
})
local group_conf_lru = core.lrucache.new({
type = 'plugin',
})


local policy_to_additional_properties = {
redis = {
properties = {
redis_host = {
type = "string", minLength = 2
},
redis_port = {
type = "integer", minimum = 1, default = 6379,
},
redis_password = {
type = "string", minLength = 0,
},
redis_database = {
type = "integer", minimum = 0, default = 0,
},
redis_timeout = {
type = "integer", minimum = 1, default = 1000,
},
},
required = {"redis_host"},
},
["redis-cluster"] = {
properties = {
redis_cluster_nodes = {
type = "array",
minItems = 2,
items = {
type = "string", minLength = 2, maxLength = 100
},
},
redis_password = {
type = "string", minLength = 0,
},
redis_timeout = {
type = "integer", minimum = 1, default = 1000,
},
redis_cluster_name = {
type = "string",
},
},
required = {"redis_cluster_nodes", "redis_cluster_name"},
},
}
local schema = {
type = "object",
properties = {
count = {type = "integer", exclusiveMinimum = 0},
time_window = {type = "integer", exclusiveMinimum = 0},
group = {type = "string"},
key = {type = "string", default = "remote_addr"},
key_type = {type = "string",
enum = {"var", "var_combination"},
@@ -66,53 +118,20 @@ local schema = {
},
},
},
{
core.table.merge({
properties = {
policy = {
enum = {"redis"},
},
redis_host = {
type = "string", minLength = 2
},
redis_port = {
type = "integer", minimum = 1, default = 6379,
},
redis_password = {
type = "string", minLength = 0,
},
redis_database = {
type = "integer", minimum = 0, default = 0,
},
redis_timeout = {
type = "integer", minimum = 1, default = 1000,
},
},
required = {"redis_host"},
},
{
}, policy_to_additional_properties.redis),
core.table.merge({
properties = {
policy = {
enum = {"redis-cluster"},
},
redis_cluster_nodes = {
type = "array",
minItems = 2,
items = {
type = "string", minLength = 2, maxLength = 100
},
},
redis_password = {
type = "string", minLength = 0,
},
redis_timeout = {
type = "integer", minimum = 1, default = 1000,
},
redis_cluster_name = {
type = "string",
},
},
required = {"redis_cluster_nodes", "redis_cluster_name"},
}
}, policy_to_additional_properties["redis-cluster"]),
}
}
}
@@ -127,12 +146,42 @@ local _M = {
}


local function group_conf(conf)
return conf
end


function _M.check_schema(conf)
local ok, err = core.schema.check(schema, conf)
if not ok then
return false, err
end

if conf.group then
local fields = {}
for k in pairs(schema.properties) do
tab_insert(fields, k)
end
local extra = policy_to_additional_properties[conf.policy]
if extra then
for k in pairs(extra.properties) do
tab_insert(fields, k)
end
end

local prev_conf = group_conf_lru(conf.group, "", group_conf, conf)

for _, field in ipairs(fields) do
if not core.table.deep_eq(prev_conf[field], conf[field]) then
core.log.error("previous limit-conn group ", prev_conf.group,
" conf: ", core.json.encode(prev_conf))
core.log.error("current limit-conn group ", conf.group,
" conf: ", core.json.encode(conf))
return false, "group conf mismatched"
end
end
end

return true
end

@@ -161,7 +210,14 @@ end

function _M.access(conf, ctx)
core.log.info("ver: ", ctx.conf_version)
local lim, err = core.lrucache.plugin_ctx(lrucache, ctx, conf.policy, create_limit_obj, conf)

local lim, err
if not conf.group then
lim, err = core.lrucache.plugin_ctx(lrucache, ctx, conf.policy, create_limit_obj, conf)
else
lim, err = lrucache(conf.group, "", create_limit_obj, conf)
end

if not lim then
core.log.error("failed to fetch limit.count object: ", err)
if conf.allow_degradation then
@@ -192,7 +248,12 @@ function _M.access(conf, ctx)
key = ctx.var["remote_addr"]
end

key = key .. ctx.conf_type .. ctx.conf_version
if not conf.group then
key = key .. ctx.conf_type .. ctx.conf_version
else
key = key .. conf.group

Contributor commented:
So traffic will eventually be limited according to the key generated by the combination of key and group.
But the key cannot be empty (if key is empty, remote_addr is used as the default key),
so it is never possible to limit the rate according to the group alone.

Member (author) replied:
Yes, the previous limit-count is two-dimensional:
per-route x per-key

This PR allows us to use:
group x per-key

If we want to limit only according to the group, we can use a constant variable, like hostname, so the per-key dimension will always be a single point.

In the next PR, I will introduce a new key type that is equal to a constant variable, so we don't need a special configuration for the key part.
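
For illustration only, here is a minimal sketch of that workaround (the route id, count, and group name below are made up, and `hostname` is assumed to stay effectively constant on a single node):

```
# Hypothetical route: the "group" makes all routes in it share one counter,
# while keying on "hostname" keeps the per-key dimension a single point.
curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
    "uri": "/hello",
    "plugins": {
        "limit-count": {
            "count": 10,
            "time_window": 60,
            "rejected_code": 503,
            "key": "hostname",
            "group": "group-only-limit"
        }
    },
    "upstream": {
        "type": "roundrobin",
        "nodes": {
            "127.0.0.1:1980": 1
        }
    }
}'
```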

Member (author) added:
Looks like the hostname is not constant if the cluster limit-count is used. Anyway, we can solve it in the next PR.

end

core.log.info("limit key: ", key)

local delay, remaining = lim:incoming(key, true)
49 changes: 49 additions & 0 deletions docs/en/latest/plugins/limit-count.md
@@ -46,6 +46,7 @@ Limit request rate by a fixed number of requests in a given time window.
| policy | string | optional | "local" | ["local", "redis", "redis-cluster"] | The rate-limiting policy used for retrieving and incrementing the limits. Available values are `local` (the counters are stored locally in memory on the node), `redis` (counters are stored on a Redis server and shared across the nodes, usually used for global rate limiting), and `redis-cluster`, which works the same as `redis` but with a Redis cluster. |
| allow_degradation | boolean | optional | false | | Whether to enable plugin degradation when the limit-count function is temporarily unavailable (e.g. a Redis timeout). Requests are allowed to continue when the value is set to true; default false. |
| show_limit_quota_header | boolean | optional | true | | Whether to show `X-RateLimit-Limit` and `X-RateLimit-Remaining` (the total number of requests allowed and the remaining number of requests that can be sent) in the response header; default true. |
| group | string | optional | | non-empty | Routes configured with the same group will share the same counter. |
| redis_host | string | required for `redis` | | | When using the `redis` policy, this property specifies the address of the Redis server. |
| redis_port | integer | optional | 6379 | [1,...] | When using the `redis` policy, this property specifies the port of the Redis server. |
| redis_password | string | optional | | | When using the `redis` or `redis-cluster` policy, this property specifies the password of the Redis server. |
@@ -107,6 +108,54 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335
You can also complete the above operation through the web interface: first add a route, then add the limit-count plugin:
![Add limit-count plugin.](../../../assets/images/plugin/limit-count-1.png)

It is possible to share the same limit counter across different routes. For example,

```
curl -i http://127.0.0.1:9080/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"plugins": {
"limit-count": {
"count": 1,
"time_window": 60,
"rejected_code": 503,
"key": "remote_addr",
"group": "services_1#1640140620"
}
},
"upstream": {
"type": "roundrobin",
"nodes": {
"127.0.0.1:1980": 1
}
}
}'
```

Every route whose group name is "services_1#1640140620" will share the same counter, allowing at most `1` request per `remote_addr` in each time window.

```
$ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"service_id": "1",
"uri": "/hello"
}'

$ curl -i http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"service_id": "1",
"uri": "/hello2"
}'

$ curl -i http://127.0.0.1:9080/hello
HTTP/1.1 200 ...

$ curl -i http://127.0.0.1:9080/hello2
HTTP/1.1 503 ...
```

Note that every limit-count configuration within the same group must be identical.
Therefore, whenever you update the configuration, you also need to update the group name.
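
As a hedged illustration of that rule (route id `3`, the uri `/hello3`, and the changed `count` value are made up for this sketch): based on the `check_schema` change in this PR, reusing an existing group name with a different configuration is expected to be rejected with a "group conf mismatched" error, though the exact Admin API response wording may differ.

```
# Suppose group "services_1#1640140620" was first registered with count = 1.
# Submitting the same group with count = 5 should fail the plugin's schema check.
curl -i http://127.0.0.1:9080/apisix/admin/routes/3 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
    "uri": "/hello3",
    "plugins": {
        "limit-count": {
            "count": 5,
            "time_window": 60,
            "rejected_code": 503,
            "key": "remote_addr",
            "group": "services_1#1640140620"
        }
    },
    "upstream": {
        "type": "roundrobin",
        "nodes": {
            "127.0.0.1:1980": 1
        }
    }
}'
```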

If you need cluster-level precision for the traffic limit, you can achieve it with a Redis server. The rate-limiting counter will be shared between different APISIX nodes to limit the traffic of the whole cluster.

Here is an example using the single-instance `redis` policy:
49 changes: 49 additions & 0 deletions docs/zh/latest/plugins/limit-count.md
@@ -49,6 +49,7 @@ title: limit-count
| policy | string | optional | "local" | ["local", "redis", "redis-cluster"] | The rate-limiting policy used to retrieve and increment the limit. Available values are `local` (the counter is kept in memory on the local node, the default), `redis` (the counter is kept on a Redis server and shared across nodes, usually used for global rate limiting), and `redis-cluster`, which works like `redis` but uses a Redis cluster. |
| allow_degradation | boolean | optional | false | | Whether to allow requests to continue when the rate-limiting plugin is temporarily unavailable (e.g. a Redis timeout). Requests continue automatically when the value is set to true; default false. |
| show_limit_quota_header | boolean | optional | true | | Whether to show `X-RateLimit-Limit` and `X-RateLimit-Remaining` (the total number of requests allowed and the remaining number that can still be sent) in the response header; default true. |
| group | string | optional | | non-empty | Routes configured with the same group will share the same rate-limiting counter. |
| redis_host | string | required for `redis` | | | When the `redis` policy is used, this property is the address of the Redis server. |
| redis_port | integer | optional | 6379 | [1,...] | When the `redis` policy is used, this property is the port of the Redis server. |
| redis_password | string | optional | | | When the `redis` or `redis-cluster` policy is used, this property is the password of the Redis server. |
@@ -112,6 +113,54 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335
You can also complete the above operation through the web interface: first add a route, then add the limit-count plugin on the plugin page:
![Add limit-count plugin.](../../../assets/images/plugin/limit-count-1.png)

We also support sharing the same rate-limiting counter across multiple Routes. For example,

```
curl -i http://127.0.0.1:9080/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"plugins": {
"limit-count": {
"count": 1,
"time_window": 60,
"rejected_code": 503,
"key": "remote_addr",
"group": "services_1#1640140620"
}
},
"upstream": {
"type": "roundrobin",
"nodes": {
"127.0.0.1:1980": 1
}
}
}'
```

Every Route configured with `group` set to `services_1#1640140620` will share the same counter, allowing each client IP address at most `1` request per time window.

```
$ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"service_id": "1",
"uri": "/hello"
}'

$ curl -i http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"service_id": "1",
"uri": "/hello2"
}'

$ curl -i http://127.0.0.1:9080/hello
HTTP/1.1 200 ...

$ curl -i http://127.0.0.1:9080/hello2
HTTP/1.1 503 ...
```

Note that the limit-count configuration within the same group must be identical.
Therefore, once the configuration is changed, the group name needs to be updated as well.

If you need cluster-level traffic control, you can achieve it with a Redis server. The rate-limiting result will be shared between different APISIX nodes to limit the traffic of the whole cluster.

If you use the single `redis` policy, see the example below: