diff --git a/apisix/plugins/limit-count.lua b/apisix/plugins/limit-count.lua index 56ff34cb21d4..0eafd64235b3 100644 --- a/apisix/plugins/limit-count.lua +++ b/apisix/plugins/limit-count.lua @@ -14,271 +14,24 @@ -- See the License for the specific language governing permissions and -- limitations under the License. -- -local limit_local_new = require("resty.limit.count").new -local core = require("apisix.core") -local apisix_plugin = require("apisix.plugin") -local tab_insert = table.insert -local ipairs = ipairs -local pairs = pairs - +local limit_count = require("apisix.plugins.limit-count.init") local plugin_name = "limit-count" -local limit_redis_cluster_new -local limit_redis_new -do - local redis_src = "apisix.plugins.limit-count.limit-count-redis" - limit_redis_new = require(redis_src).new - - local cluster_src = "apisix.plugins.limit-count.limit-count-redis-cluster" - limit_redis_cluster_new = require(cluster_src).new -end -local lrucache = core.lrucache.new({ - type = 'plugin', serial_creating = true, -}) -local group_conf_lru = core.lrucache.new({ - type = 'plugin', -}) - - -local policy_to_additional_properties = { - redis = { - properties = { - redis_host = { - type = "string", minLength = 2 - }, - redis_port = { - type = "integer", minimum = 1, default = 6379, - }, - redis_password = { - type = "string", minLength = 0, - }, - redis_database = { - type = "integer", minimum = 0, default = 0, - }, - redis_timeout = { - type = "integer", minimum = 1, default = 1000, - }, - }, - required = {"redis_host"}, - }, - ["redis-cluster"] = { - properties = { - redis_cluster_nodes = { - type = "array", - minItems = 2, - items = { - type = "string", minLength = 2, maxLength = 100 - }, - }, - redis_password = { - type = "string", minLength = 0, - }, - redis_timeout = { - type = "integer", minimum = 1, default = 1000, - }, - redis_cluster_name = { - type = "string", - }, - }, - required = {"redis_cluster_nodes", "redis_cluster_name"}, - }, -} -local schema = { - type = "object", - properties = { - count = {type = "integer", exclusiveMinimum = 0}, - time_window = {type = "integer", exclusiveMinimum = 0}, - group = {type = "string"}, - key = {type = "string", default = "remote_addr"}, - key_type = {type = "string", - enum = {"var", "var_combination", "constant"}, - default = "var", - }, - rejected_code = { - type = "integer", minimum = 200, maximum = 599, default = 503 - }, - rejected_msg = { - type = "string", minLength = 1 - }, - policy = { - type = "string", - enum = {"local", "redis", "redis-cluster"}, - default = "local", - }, - allow_degradation = {type = "boolean", default = false}, - show_limit_quota_header = {type = "boolean", default = true} - }, - required = {"count", "time_window"}, - ["if"] = { - properties = { - policy = { - enum = {"redis"}, - }, - }, - }, - ["then"] = policy_to_additional_properties.redis, - ["else"] = { - ["if"] = { - properties = { - policy = { - enum = {"redis-cluster"}, - }, - }, - }, - ["then"] = policy_to_additional_properties["redis-cluster"], - } -} - -local schema_copy = core.table.deepcopy(schema) - local _M = { version = 0.4, priority = 1002, name = plugin_name, - schema = schema, + schema = limit_count.schema, } -local function group_conf(conf) - return conf -end - - function _M.check_schema(conf) - local ok, err = core.schema.check(schema, conf) - if not ok then - return false, err - end - - if conf.group then - local fields = {} - -- When the goup field is configured, - -- we will use schema_copy to get the whitelist of properties, - -- so that we 
can avoid getting injected properties. - for k in pairs(schema_copy.properties) do - tab_insert(fields, k) - end - local extra = policy_to_additional_properties[conf.policy] - if extra then - for k in pairs(extra.properties) do - tab_insert(fields, k) - end - end - - local prev_conf = group_conf_lru(conf.group, "", group_conf, conf) - - for _, field in ipairs(fields) do - if not core.table.deep_eq(prev_conf[field], conf[field]) then - core.log.error("previous limit-conn group ", prev_conf.group, - " conf: ", core.json.encode(prev_conf)) - core.log.error("current limit-conn group ", conf.group, - " conf: ", core.json.encode(conf)) - return false, "group conf mismatched" - end - end - end - - return true -end - - -local function create_limit_obj(conf) - core.log.info("create new limit-count plugin instance") - - if not conf.policy or conf.policy == "local" then - return limit_local_new("plugin-" .. plugin_name, conf.count, - conf.time_window) - end - - if conf.policy == "redis" then - return limit_redis_new("plugin-" .. plugin_name, - conf.count, conf.time_window, conf) - end - - if conf.policy == "redis-cluster" then - return limit_redis_cluster_new("plugin-" .. plugin_name, conf.count, - conf.time_window, conf) - end - - return nil + return limit_count.check_schema(conf) end function _M.access(conf, ctx) - core.log.info("ver: ", ctx.conf_version) - - local lim, err - if not conf.group then - lim, err = core.lrucache.plugin_ctx(lrucache, ctx, conf.policy, create_limit_obj, conf) - else - lim, err = lrucache(conf.group, "", create_limit_obj, conf) - end - - if not lim then - core.log.error("failed to fetch limit.count object: ", err) - if conf.allow_degradation then - return - end - return 500 - end - - local conf_key = conf.key - local key - if conf.key_type == "var_combination" then - local err, n_resolved - key, err, n_resolved = core.utils.resolve_var(conf_key, ctx.var) - if err then - core.log.error("could not resolve vars in ", conf_key, " error: ", err) - end - - if n_resolved == 0 then - key = nil - end - elseif conf.key_type == "constant" then - key = conf_key - else - key = ctx.var[conf_key] - end - - if key == nil then - core.log.info("The value of the configured key is empty, use client IP instead") - -- When the value of key is empty, use client IP instead - key = ctx.var["remote_addr"] - end - - -- here we add a separator ':' to mark the boundary of the prefix and the key itself - if not conf.group then - -- Here we use plugin-level conf version to prevent the counter from being resetting - -- because of the change elsewhere. - -- A route which reuses a previous route's ID will inherits its counter. - key = ctx.conf_type .. ctx.conf_id .. ':' .. apisix_plugin.conf_version(conf) .. ':' .. key - else - key = conf.group .. ':' .. 
key - end - - core.log.info("limit key: ", key) - - local delay, remaining = lim:incoming(key, true) - if not delay then - local err = remaining - if err == "rejected" then - if conf.rejected_msg then - return conf.rejected_code, { error_msg = conf.rejected_msg } - end - return conf.rejected_code - end - - core.log.error("failed to limit count: ", err) - if conf.allow_degradation then - return - end - return 500, {error_msg = "failed to limit count"} - end - - if conf.show_limit_quota_header then - core.response.set_header("X-RateLimit-Limit", conf.count, - "X-RateLimit-Remaining", remaining) - end + return limit_count.rate_limit(conf, ctx) end diff --git a/apisix/plugins/limit-count/init.lua b/apisix/plugins/limit-count/init.lua new file mode 100644 index 000000000000..c9051d2e14ef --- /dev/null +++ b/apisix/plugins/limit-count/init.lua @@ -0,0 +1,310 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local limit_local_new = require("resty.limit.count").new +local core = require("apisix.core") +local apisix_plugin = require("apisix.plugin") +local tab_insert = table.insert +local ipairs = ipairs +local pairs = pairs + + +local plugin_name = "limit-count" +local limit_redis_cluster_new +local limit_redis_new +do + local redis_src = "apisix.plugins.limit-count.limit-count-redis" + limit_redis_new = require(redis_src).new + + local cluster_src = "apisix.plugins.limit-count.limit-count-redis-cluster" + limit_redis_cluster_new = require(cluster_src).new +end +local lrucache = core.lrucache.new({ + type = 'plugin', serial_creating = true, +}) +local group_conf_lru = core.lrucache.new({ + type = 'plugin', +}) + + +local policy_to_additional_properties = { + redis = { + properties = { + redis_host = { + type = "string", minLength = 2 + }, + redis_port = { + type = "integer", minimum = 1, default = 6379, + }, + redis_password = { + type = "string", minLength = 0, + }, + redis_database = { + type = "integer", minimum = 0, default = 0, + }, + redis_timeout = { + type = "integer", minimum = 1, default = 1000, + }, + }, + required = {"redis_host"}, + }, + ["redis-cluster"] = { + properties = { + redis_cluster_nodes = { + type = "array", + minItems = 2, + items = { + type = "string", minLength = 2, maxLength = 100 + }, + }, + redis_password = { + type = "string", minLength = 0, + }, + redis_timeout = { + type = "integer", minimum = 1, default = 1000, + }, + redis_cluster_name = { + type = "string", + }, + }, + required = {"redis_cluster_nodes", "redis_cluster_name"}, + }, +} +local schema = { + type = "object", + properties = { + count = {type = "integer", exclusiveMinimum = 0}, + time_window = {type = "integer", exclusiveMinimum = 0}, + group = {type = "string"}, + key = {type = "string", default = "remote_addr"}, + key_type = {type = "string", + enum 
= {"var", "var_combination", "constant"}, + default = "var", + }, + rejected_code = { + type = "integer", minimum = 200, maximum = 599, default = 503 + }, + rejected_msg = { + type = "string", minLength = 1 + }, + policy = { + type = "string", + enum = {"local", "redis", "redis-cluster"}, + default = "local", + }, + allow_degradation = {type = "boolean", default = false}, + show_limit_quota_header = {type = "boolean", default = true} + }, + required = {"count", "time_window"}, + ["if"] = { + properties = { + policy = { + enum = {"redis"}, + }, + }, + }, + ["then"] = policy_to_additional_properties.redis, + ["else"] = { + ["if"] = { + properties = { + policy = { + enum = {"redis-cluster"}, + }, + }, + }, + ["then"] = policy_to_additional_properties["redis-cluster"], + } +} + +local schema_copy = core.table.deepcopy(schema) + +local _M = { + schema = schema +} + + +local function group_conf(conf) + return conf +end + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + if conf.group then + -- means that call by some plugin not support + if conf._vid then + return false, "group is not supported" + end + + local fields = {} + -- When the goup field is configured, + -- we will use schema_copy to get the whitelist of properties, + -- so that we can avoid getting injected properties. + for k in pairs(schema_copy.properties) do + tab_insert(fields, k) + end + local extra = policy_to_additional_properties[conf.policy] + if extra then + for k in pairs(extra.properties) do + tab_insert(fields, k) + end + end + + local prev_conf = group_conf_lru(conf.group, "", group_conf, conf) + + for _, field in ipairs(fields) do + if not core.table.deep_eq(prev_conf[field], conf[field]) then + core.log.error("previous limit-conn group ", prev_conf.group, + " conf: ", core.json.encode(prev_conf)) + core.log.error("current limit-conn group ", conf.group, + " conf: ", core.json.encode(conf)) + return false, "group conf mismatched" + end + end + end + + return true +end + + +local function create_limit_obj(conf) + core.log.info("create new limit-count plugin instance") + + if not conf.policy or conf.policy == "local" then + return limit_local_new("plugin-" .. plugin_name, conf.count, + conf.time_window) + end + + if conf.policy == "redis" then + return limit_redis_new("plugin-" .. plugin_name, + conf.count, conf.time_window, conf) + end + + if conf.policy == "redis-cluster" then + return limit_redis_cluster_new("plugin-" .. plugin_name, conf.count, + conf.time_window, conf) + end + + return nil +end + + +local function gen_limit_key(conf, ctx, key) + if conf.group then + return conf.group .. ':' .. key + end + + -- here we add a separator ':' to mark the boundary of the prefix and the key itself + -- Here we use plugin-level conf version to prevent the counter from being resetting + -- because of the change elsewhere. + -- A route which reuses a previous route's ID will inherits its counter. + local new_key = ctx.conf_type .. ctx.conf_id .. ':' .. apisix_plugin.conf_version(conf) + .. ':' .. key + if conf._vid then + -- conf has _vid means it's from workflow plugin, add _vid to the key + -- so that the counter is unique per action. + return new_key .. ':' .. conf._vid + end + + return new_key +end + + +local function gen_limit_obj(conf, ctx) + if conf.group then + return lrucache(conf.group, "", create_limit_obj, conf) + end + + local extra_key + if conf._vid then + extra_key = conf.policy .. '#' .. 
conf._vid + else + extra_key = conf.policy + end + + return core.lrucache.plugin_ctx(lrucache, ctx, extra_key, create_limit_obj, conf) +end + + +function _M.rate_limit(conf, ctx) + core.log.info("ver: ", ctx.conf_version) + + local lim, err = gen_limit_obj(conf, ctx) + + if not lim then + core.log.error("failed to fetch limit.count object: ", err) + if conf.allow_degradation then + return + end + return 500 + end + + local conf_key = conf.key + local key + if conf.key_type == "var_combination" then + local err, n_resolved + key, err, n_resolved = core.utils.resolve_var(conf_key, ctx.var) + if err then + core.log.error("could not resolve vars in ", conf_key, " error: ", err) + end + + if n_resolved == 0 then + key = nil + end + elseif conf.key_type == "constant" then + key = conf_key + else + key = ctx.var[conf_key] + end + + if key == nil then + core.log.info("The value of the configured key is empty, use client IP instead") + -- When the value of key is empty, use client IP instead + key = ctx.var["remote_addr"] + end + + key = gen_limit_key(conf, ctx, key) + core.log.info("limit key: ", key) + + local delay, remaining = lim:incoming(key, true) + if not delay then + local err = remaining + if err == "rejected" then + if conf.rejected_msg then + return conf.rejected_code, { error_msg = conf.rejected_msg } + end + return conf.rejected_code + end + + core.log.error("failed to limit count: ", err) + if conf.allow_degradation then + return + end + return 500, {error_msg = "failed to limit count"} + end + + if conf.show_limit_quota_header then + core.response.set_header("X-RateLimit-Limit", conf.count, + "X-RateLimit-Remaining", remaining) + end +end + + +return _M diff --git a/apisix/plugins/workflow.lua b/apisix/plugins/workflow.lua index a303826f6fb0..a586a923b9b7 100644 --- a/apisix/plugins/workflow.lua +++ b/apisix/plugins/workflow.lua @@ -14,10 +14,10 @@ -- See the License for the specific language governing permissions and -- limitations under the License. -- -local core = require("apisix.core") -local expr = require("resty.expr.v1") -local ipairs = ipairs -local tonumber = tonumber +local core = require("apisix.core") +local limit_count = require("apisix.plugins.limit-count.init") +local expr = require("resty.expr.v1") +local ipairs = ipairs local schema = { type = "object", @@ -71,17 +71,34 @@ local return_schema = { } +local function check_return_schema(conf) + local ok, err = core.schema.check(return_schema, conf) + if not ok then + return false, err + end + return true +end + + local function exit(conf) - local code = tonumber(conf.code) - return code, {error_msg = "rejected by workflow"} + return conf.code, {error_msg = "rejected by workflow"} +end + + +local function rate_limit(conf, ctx) + return limit_count.rate_limit(conf, ctx) end local support_action = { ["return"] = { - handler = exit, - schema = return_schema, + handler = exit, + check_schema = check_return_schema, }, + ["limit-count"] = { + handler = rate_limit, + check_schema = limit_count.check_schema, + } } @@ -91,7 +108,7 @@ function _M.check_schema(conf) return false, err end - for _, rule in ipairs(conf.rules) do + for idx, rule in ipairs(conf.rules) do local ok, err = expr.new(rule.case) if not ok then return false, "failed to validate the 'case' expression: " .. err @@ -104,7 +121,9 @@ function _M.check_schema(conf) return false, "unsupported action: " .. 
action[1] end - local ok, err = core.schema.check(support_action[action[1]].schema, action[2]) + -- use the action's idx as an identifier to isolate between confs + action[2]["_vid"] = idx + local ok, err = support_action[action[1]].check_schema(action[2], plugin_name) if not ok then return false, "failed to validate the '" .. action[1] .. "' action: " .. err end @@ -123,7 +142,7 @@ function _M.access(conf, ctx) if match_result then -- only one action is currently supported local action = rule.actions[1] - return support_action[action[1]].handler(action[2]) + return support_action[action[1]].handler(action[2], ctx) end end end diff --git a/t/plugin/workflow.t b/t/plugin/workflow.t index dac3fdc8ba9b..e1bf77a1f26c 100644 --- a/t/plugin/workflow.t +++ b/t/plugin/workflow.t @@ -37,238 +37,143 @@ run_tests(); __DATA__ -=== TEST 1: sanity +=== TEST 1: schema check --- config location /t { content_by_lua_block { local plugin = require("apisix.plugins.workflow") - local ok, err = plugin.check_schema({ - rules = { - { - case = { - {"uri", "==", "/hello"} - }, - actions = { - { - "return", + local data = { + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { { - code = 403 + "return", + { + code = 403 + } } } } } - } - }) - if not ok then - ngx.say(err) - end - - ngx.say("done") - } - } ---- response_body -done - - - -=== TEST 2: missing actions ---- config - location /t { - content_by_lua_block { - local plugin = require("apisix.plugins.workflow") - local ok, err = plugin.check_schema({ - rules = { - { - case = { - {"uri", "==", "/hello"} + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + } } } - } - }) - if not ok then - ngx.say(err) - return - end - - ngx.say("done") - } - } ---- response_body eval -qr/property "actions" is required/ - - - -=== TEST 3: actions have at least 1 items ---- config - location /t { - content_by_lua_block { - local plugin = require("apisix.plugins.workflow") - local ok, err = plugin.check_schema({ - rules = { - { - case = { - {"uri", "==", "/hello"} - }, - actions = { - { + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + } } } } - } - }) - if not ok then - ngx.say(err) - return - end - - ngx.say("done") - } - } ---- response_body eval -qr/expect array to have at least 1 items/ - - - -=== TEST 4: code is needed if action is return ---- config - location /t { - content_by_lua_block { - local plugin = require("apisix.plugins.workflow") - local ok, err = plugin.check_schema({ - rules = { - { - case = { - {"uri", "==", "/hello"} - }, - actions = { - { - "return", + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { { - status = 403 + "return", + { + status = 403 + } } } } } - } - }) - if not ok then - ngx.say(err) - return - end - - ngx.say("done") - } - } ---- response_body eval -qr/property "code" is required/ - - - -=== TEST 5: the required type of code is number ---- config - location /t { - content_by_lua_block { - local plugin = require("apisix.plugins.workflow") - local ok, err = plugin.check_schema({ - rules = { - { - case = { - {"uri", "==", "/hello"} - }, - actions = { - { - "return", + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { { - code = "403" + "return", + { + code = "403" + } } } } } - } - }) - if not ok then - ngx.say(err) - return - end - - ngx.say("done") - } - } ---- response_body eval -qr/property "code" validation failed: wrong type: expected integer, got string/ - - - -=== TEST 6: bad conf of case ---- config - 
location /t { - content_by_lua_block { - local plugin = require("apisix.plugins.workflow") - local ok, err = plugin.check_schema({ - rules = { - { - case = { + }, + { + rules = { + { + case = { - }, - actions = { - { - "return", + }, + actions = { { - code = 403 + "return", + { + code = 403 + } } } } } - } - }) - if not ok then - ngx.say(err) - end - - ngx.say("done") - } - } ---- response_body eval -qr/property "case" validation failed: expect array to have at least 1 items/ - - - -=== TEST 7: unsupported action ---- config - location /t { - content_by_lua_block { - local plugin = require("apisix.plugins.workflow") - local ok, err = plugin.check_schema({ - rules = { - { - case = { - {"uri", "==", "/hello"} - }, - actions = { - { - "fake", + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { { - code = 403 + "fake", + { + code = 403 + } } } } } } - }) - if not ok then - ngx.say(err) - return - end + } - ngx.say("done") + for _, conf in ipairs(data) do + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + end } } --- response_body +done +property "rules" validation failed: failed to validate item 1: property "actions" is required +property "rules" validation failed: failed to validate item 1: property "actions" validation failed: failed to validate item 1: expect array to have at least 1 items +failed to validate the 'return' action: property "code" is required +failed to validate the 'return' action: property "code" validation failed: wrong type: expected integer, got string +property "rules" validation failed: failed to validate item 1: property "case" validation failed: expect array to have at least 1 items unsupported action: fake -=== TEST 8: set plugin +=== TEST 2: set plugin --- config location /t { content_by_lua_block { @@ -317,14 +222,14 @@ passed -=== TEST 9: trigger workflow +=== TEST 3: trigger workflow --- request GET /hello --- error_code: 403 -=== TEST 10: multiple conditions in one case +=== TEST 4: multiple conditions in one case --- config location /t { content_by_lua_block { @@ -374,13 +279,13 @@ passed -=== TEST 11: missing match the only case +=== TEST 5: missing match the only case --- request GET /hello?foo=bad -=== TEST 12: trigger workflow +=== TEST 6: trigger workflow --- request GET /hello?foo=bar --- error_code: 403 @@ -389,7 +294,7 @@ GET /hello?foo=bar -=== TEST 13: multiple cases with different actions +=== TEST 7: multiple cases with different actions --- config location /t { content_by_lua_block { @@ -453,21 +358,21 @@ passed -=== TEST 14: trigger one case +=== TEST 8: trigger one case --- request GET /hello --- error_code: 403 -=== TEST 15: trigger another case +=== TEST 9: trigger another case --- request GET /hello2 --- error_code: 401 -=== TEST 16: match case in order +=== TEST 10: match case in order # rules is an array, match in the order of the index of the array, # when cases are matched, actions are executed and do not continue --- config @@ -533,22 +438,252 @@ passed -=== TEST 17: both case 1&2 matched, trigger the first cases +=== TEST 11: both case 1&2 matched, trigger the first cases --- request GET /hello?foo=bar --- error_code: 403 -=== TEST 18: case 1 mismatched, trigger the second cases +=== TEST 12: case 1 mismatched, trigger the second cases --- request GET /hello?foo=bad --- error_code: 401 -=== TEST 19: all cases mismatched, pass to upstream +=== TEST 13: all cases mismatched, pass to upstream --- request GET /hello1 --- response_body hello1 world + + + 
+=== TEST 14: schema check(limit-count) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.workflow") + local data = { + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + {count = 2, time_window = 60, rejected_code = 503, key = 'remote_addr'} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + {count = 2} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + {time_window = 60} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 2, + time_window = 60, + rejected_code = 503, + group = "services_1" + } + } + } + } + } + } + } + + for _, conf in ipairs(data) do + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + end + } + } +--- response_body +done +failed to validate the 'limit-count' action: property "time_window" is required +failed to validate the 'limit-count' action: property "count" is required +failed to validate the 'limit-count' action: group is not supported + + + +=== TEST 15: set actions as limit-count +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "case": [ + ["uri", "==", "/hello"] + ], + "actions": [ + [ + "limit-count", + { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 503] + + + +=== TEST 17: the conf in actions is isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: cross-hit case 1 and case 2, up limit by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1", +"GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 200, 200, 200, 200, 503, 503] diff --git a/t/plugin/workflow2.t b/t/plugin/workflow2.t new file mode 100644 index 000000000000..b30567532832 --- /dev/null +++ b/t/plugin/workflow2.t @@ -0,0 +1,285 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests(); + + +__DATA__ + +=== TEST 1: multiple cases with different actions(return & limit-count) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 1, + time_window = 60, + rejected_code = 503 + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: cross-hit case 1 and case 2, trigger actions by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello1"] +--- error_code eval +[403, 200, 503] + + + +=== TEST 3: the conf in actions is isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: cross-hit case 1 and case 2, trigger actions by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 5: cross-hit case 1 and case 2, up limit by isolation 2 +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET 
/hello", "GET /hello1"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 6: different actions with different limit count conf, up limit by isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 1, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 2, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: case 1 up limit, case 2 psssed +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 503, 200]