diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 95e72fe578b9..1219be782dd4 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -28,6 +28,9 @@ jobs:
           - ubuntu-20.04
         os_name:
           - linux_openresty
+        events_module:
+          - lua-resty-worker-events
+          - lua-resty-events
         test_dir:
           - t/plugin/[a-k]*
           - t/plugin/[l-z]*
@@ -169,6 +172,7 @@ jobs:
       - name: Linux Script
         env:
           TEST_FILE_SUB_DIR: ${{ matrix.test_dir }}
+          TEST_EVENTS_MODULE: ${{ matrix.events_module }}
         run: sudo -E ./ci/${{ matrix.os_name }}_runner.sh script

       - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
diff --git a/.github/workflows/centos7-ci.yml b/.github/workflows/centos7-ci.yml
index d2c447ef37ff..0200bd1e2be5 100644
--- a/.github/workflows/centos7-ci.yml
+++ b/.github/workflows/centos7-ci.yml
@@ -27,6 +27,9 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
+        events_module:
+          - lua-resty-worker-events
+          - lua-resty-events
         test_dir:
           - t/plugin/[a-k]*
           - t/plugin/[l-z]*
@@ -111,8 +114,9 @@ jobs:
     - name: Run centos7 docker and mapping apisix into container
      env:
        TEST_FILE_SUB_DIR: ${{ matrix.test_dir }}
+        TEST_EVENTS_MODULE: ${{ matrix.events_module }}
      run: |
-        docker run -itd -v ${{ github.workspace }}:/apisix --env TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --name centos7Instance --net="host" --dns 8.8.8.8 --dns-search apache.org docker.io/centos:7 /bin/bash
+        docker run -itd -v ${{ github.workspace }}:/apisix --env TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --env TEST_EVENTS_MODULE="$TEST_EVENTS_MODULE" --name centos7Instance --net="host" --dns 8.8.8.8 --dns-search apache.org docker.io/centos:7 /bin/bash
        # docker exec centos7Instance bash -c "cp -r /tmp/apisix ./"

    - name: Cache images
diff --git a/.github/workflows/redhat-ci.yaml b/.github/workflows/redhat-ci.yaml
index cc9d819bebe4..e38705772b9b 100644
--- a/.github/workflows/redhat-ci.yaml
+++ b/.github/workflows/redhat-ci.yaml
@@ -23,6 +23,9 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
+        events_module:
+          - lua-resty-worker-events
+          - lua-resty-events
         test_dir:
           - t/plugin/[a-k]*
           - t/plugin/[l-z]*
@@ -107,8 +110,9 @@ jobs:
     - name: Run redhat docker and mapping apisix into container
      env:
        TEST_FILE_SUB_DIR: ${{ matrix.test_dir }}
+        TEST_EVENTS_MODULE: ${{ matrix.events_module }}
      run: |
-        docker run -itd -v ${{ github.workspace }}:/apisix --env TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --name ubiInstance --net="host" --dns 8.8.8.8 --dns-search apache.org registry.access.redhat.com/ubi8/ubi:8.6 /bin/bash
+        docker run -itd -v ${{ github.workspace }}:/apisix --env TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --env TEST_EVENTS_MODULE="$TEST_EVENTS_MODULE" --name ubiInstance --net="host" --dns 8.8.8.8 --dns-search apache.org registry.access.redhat.com/ubi8/ubi:8.6 /bin/bash

    - name: Cache images
      id: cache-images
diff --git a/apisix/cli/ngx_tpl.lua b/apisix/cli/ngx_tpl.lua
index 3e1aadd9b543..41374cefd6a0 100644
--- a/apisix/cli/ngx_tpl.lua
+++ b/apisix/cli/ngx_tpl.lua
@@ -199,6 +199,17 @@ stream {
         apisix.stream_init_worker()
     }

+    {% if (events.module or "") == "lua-resty-events" then %}
+    # the server block for lua-resty-events
+    server {
+        listen unix:{*apisix_lua_home*}/logs/stream_worker_events.sock;
+        access_log off;
+        content_by_lua_block {
+            require("resty.events.compat").run()
+        }
+    }
+    {% end %}
+
     server {
         {% for _, item in ipairs(stream_proxy.tcp or {}) do %}
         listen {*item.addr*} {% if item.tls then %} ssl {% end %} {% if enable_reuseport then %} reuseport {% end %} {% if proxy_protocol and proxy_protocol.enable_tcp_pp then %} proxy_protocol {% end %};
@@ -483,6 +494,19 @@ http {
         apisix.http_exit_worker()
     }

+    {% if (events.module or "") == "lua-resty-events" then %}
+    # the server block for lua-resty-events
+    server {
+        listen unix:{*apisix_lua_home*}/logs/worker_events.sock;
+        access_log off;
+        location / {
+            content_by_lua_block {
+                require("resty.events.compat").run()
+            }
+        }
+    }
+    {% end %}
+
     {% if enable_control then %}
     server {
         listen {* control_server_addr *};
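The two template hunks above give lua-resty-events one dedicated `server` block per subsystem: every worker connects to the unix socket as a client, and the worker hosting the broker serves those connections through `resty.events.compat.run()`. A minimal standalone sketch of that pairing, assuming the same compat API this patch configures in `apisix/events.lua` below (the socket path here is illustrative, not from the patch):

```lua
-- Sketch: how lua-resty-events clients and the broker meet on one socket.
local events = require("resty.events.compat")

-- In init_worker (every nginx worker): configure the client side.
-- The worker whose id equals broker_id also hosts the broker.
assert(events.configure({
    unique_timeout = 5,                          -- lifetime of unique event data
    broker_id = 0,                               -- nginx worker #0 runs the broker
    listening = "unix:/tmp/worker_events.sock",  -- illustrative socket path
}))

-- In the generated server{} block, the broker loop is then started with:
--     require("resty.events.compat").run()
```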
diff --git a/apisix/events.lua b/apisix/events.lua
index 7045a9bf5e9e..dac71ac15f61 100644
--- a/apisix/events.lua
+++ b/apisix/events.lua
@@ -15,18 +15,27 @@
 -- limitations under the License.
 --

-local require = require
-local error = error
-local ngx = ngx
-
-local _M = {}
+local require      = require
+local error        = error
+local assert       = assert
+local tostring     = tostring
+local pairs        = pairs
+local setmetatable = setmetatable
+local ngx          = ngx
+local core         = require("apisix.core")
+
+local _M = {
+    events_module = nil,
+}

 _M.EVENTS_MODULE_LUA_RESTY_WORKER_EVENTS = 'lua-resty-worker-events'
 _M.EVENTS_MODULE_LUA_RESTY_EVENTS = 'lua-resty-events'

 -- use lua-resty-worker-events
-local function init_worker_events()
+local function init_resty_worker_events()
+    _M.events_module = _M.EVENTS_MODULE_LUA_RESTY_WORKER_EVENTS
+
     local we = require("resty.worker.events")
     local shm = ngx.config.subsystem == "http" and "worker-events" or "worker-events-stream"
     local ok, err = we.configure({shm = shm, interval = 0.1})
@@ -38,17 +47,53 @@ local function init_worker_events()
 end


+-- use lua-resty-events
+local function init_resty_events()
+    _M.events_module = _M.EVENTS_MODULE_LUA_RESTY_EVENTS
+
+    local listening = "unix:" .. ngx.config.prefix() .. "logs/"
+    if ngx.config.subsystem == "http" then
+        listening = listening .. "worker_events.sock"
+    else
+        listening = listening .. "stream_worker_events.sock"
+    end
+    core.log.info("subsystem: " .. ngx.config.subsystem .. " listening sock: " .. listening)
+
+    local opts = {
+        unique_timeout = 5,     -- life time of unique event data in lrucache
+        broker_id = 0,          -- broker server runs in nginx worker #0
+        listening = listening,  -- unix socket for broker listening
+    }
+
+    local we = require("resty.events.compat")
+    assert(we.configure(opts))
+    assert(we.configured())
+
+    return we
+end
+
+
 function _M.init_worker()
     if _M.inited then
-        -- Prevent duplicate initializations in the same worker to
-        -- avoid potentially unanticipated behavior
+        -- prevent duplicate initializations in the same worker to
+        -- avoid potentially unexpected behavior
        return
    end

    _M.inited = true

-    -- use lua-resty-worker-events default now
-    _M.worker_events = init_worker_events()
+    local conf = core.config.local_conf()
+    local module_name = core.table.try_read_attr(conf, "apisix", "events", "module")
+                        or _M.EVENTS_MODULE_LUA_RESTY_WORKER_EVENTS
+
+    if module_name == _M.EVENTS_MODULE_LUA_RESTY_EVENTS then
+        -- use lua-resty-events as the events module, selected via the
+        -- apisix.events.module key in the configuration file
+        _M.worker_events = init_resty_events()
+    else
+        -- use lua-resty-worker-events by default
+        _M.worker_events = init_resty_worker_events()
+    end
 end


@@ -57,8 +102,23 @@ function _M.register(self, ...)
 end


-function _M.event_list(self, ...)
-    return self.worker_events.event_list(...)
+function _M.event_list(self, source, ...)
+    -- a patch for lua-resty-events to support event_list;
+    -- this snippet is copied from the lua-resty-worker-events lib
+    if self.events_module == _M.EVENTS_MODULE_LUA_RESTY_EVENTS then
+        local events = { _source = source }
+        for _, event in pairs({...}) do
+            events[event] = event
+        end
+        return setmetatable(events, {
+            __index = function(_, key)
+                error("event '"..tostring(key).."' is an unknown event", 2)
+            end
+        })
+    end
+
+    -- lua-resty-worker-events has a built-in event_list implementation
+    return self.worker_events.event_list(source, ...)
 end


@@ -67,4 +127,13 @@ function _M.post(self, ...)
 end


+function _M.get_healthcheck_events_module(self)
+    if self.events_module == _M.EVENTS_MODULE_LUA_RESTY_EVENTS then
+        return "resty.events"
+    else
+        return "resty.worker.events"
+    end
+end
+
+
 return _M
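The wrapper keeps every call site agnostic of which module was selected: `register`, `post`, and `event_list` behave the same over lua-resty-worker-events and lua-resty-events. A usage sketch under that assumption; the `"demo-source"`/`"reload"` names and the handler are hypothetical, not part of the patch:

```lua
-- Sketch: consuming the apisix.events wrapper defined above.
local events = require("apisix.events")

local function handle_reload(data, event, source)
    ngx.log(ngx.INFO, "got ", source, "/", event, ": ", tostring(data))
end

-- in the init_worker phase of every nginx worker:
events.init_worker()  -- picks the module from apisix.events.module

-- declare the events of a source up front; unknown names then raise an error
local ev = events:event_list("demo-source", "reload", "shutdown")

-- subscribe; the handler runs in every worker that registered it
events:register(handle_reload, "demo-source", ev.reload)

-- broadcast to all workers (delivery is asynchronous)
local ok, err = events:post("demo-source", ev.reload, "v2")
if not ok then
    ngx.log(ngx.ERR, "failed to post event: ", err)
end
```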
diff --git a/apisix/upstream.lua b/apisix/upstream.lua
index d8e3f3a98750..5a92879db24f 100644
--- a/apisix/upstream.lua
+++ b/apisix/upstream.lua
@@ -19,6 +19,7 @@ local core = require("apisix.core")
 local discovery = require("apisix.discovery.init").discovery
 local upstream_util = require("apisix.utils.upstream")
 local apisix_ssl = require("apisix.ssl")
+local events = require("apisix.events")
 local error = error
 local tostring = tostring
 local ipairs = ipairs
@@ -110,10 +111,18 @@ local function create_checker(upstream)
     end

     upstream.is_creating_checker = true

+    core.log.debug("events module used by the healthcheck: ", events.events_module,
+                   ", module name: ", events:get_healthcheck_events_module())
+
     local checker, err = healthcheck.new({
         name = get_healthchecker_name(healthcheck_parent),
         shm_name = "upstream-healthcheck",
         checks = upstream.checks,
+        -- events.init_worker() runs in the init_worker phase and sets
+        -- events.events_module; the healthcheck object is only created later,
+        -- in the http access phase, so the module name is available here
+        events_module = events:get_healthcheck_events_module(),
     })

     if not checker then
diff --git a/ci/centos7-ci.sh b/ci/centos7-ci.sh
index beb3750a1f56..821fbcdc55c6 100755
--- a/ci/centos7-ci.sh
+++ b/ci/centos7-ci.sh
@@ -40,12 +40,19 @@ install_dependencies() {
     # install openresty to make apisix's rpm test work
     yum install -y yum-utils && yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo

-    wget "https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/build-apisix-runtime-debug-centos7.sh"
-    wget "https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/build-apisix-runtime.sh"
+
+    # TODO: disabled temporarily, waiting for APISIX 3.8 to be released to synchronize the apisix-runtime version
+    #wget "https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/build-apisix-runtime-debug-centos7.sh"
+    #wget "https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/build-apisix-runtime.sh"
+    wget "https://raw.githubusercontent.com/api7/apisix-build-tools/master/build-apisix-runtime-debug-centos7.sh"
+    wget "https://raw.githubusercontent.com/api7/apisix-build-tools/master/build-apisix-runtime.sh"
     chmod +x build-apisix-runtime-debug-centos7.sh
     chmod +x build-apisix-runtime.sh
     ./build-apisix-runtime-debug-centos7.sh

+    # patch lua-resty-events
+    sed -i 's/log(ERR, "event worker failed: ", perr)/log(ngx.WARN, "event worker failed: ", perr)/' /usr/local/openresty/lualib/resty/events/worker.lua
+
     # install luarocks
     ./utils/linux-install-luarocks.sh

@@ -95,7 +102,7 @@ run_case()
 {
     make init
     set_coredns
     # run test cases
-    FLUSH_ETCD=1 prove --timer -Itest-nginx/lib -I./ -r ${TEST_FILE_SUB_DIR} | tee /tmp/test.result
+    FLUSH_ETCD=1 TEST_EVENTS_MODULE=$TEST_EVENTS_MODULE prove --timer -Itest-nginx/lib -I./ -r ${TEST_FILE_SUB_DIR} | tee /tmp/test.result
     rerun_flaky_tests /tmp/test.result
 }
diff --git a/ci/linux-install-openresty.sh b/ci/linux-install-openresty.sh
index 59807fbc8bff..36ca62089b82 100755
--- a/ci/linux-install-openresty.sh
+++ b/ci/linux-install-openresty.sh
@@ -80,6 +80,11 @@ if [ "$OPENRESTY_VERSION" == "source" ]; then
     fi
 fi

-wget "https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/build-apisix-runtime.sh"
+# TODO: disabled temporarily, waiting for APISIX 3.8 to be released to synchronize the apisix-runtime version
+#wget "https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/build-apisix-runtime.sh"
+wget "https://raw.githubusercontent.com/api7/apisix-build-tools/master/build-apisix-runtime.sh"
 chmod +x build-apisix-runtime.sh
 ./build-apisix-runtime.sh latest
+
+# patch lua-resty-events
+sudo sed -i 's/log(ERR, "event worker failed: ", perr)/log(ngx.WARN, "event worker failed: ", perr)/' /usr/local/openresty/lualib/resty/events/worker.lua
diff --git a/ci/linux_openresty_common_runner.sh b/ci/linux_openresty_common_runner.sh
index 466fe8b69651..dc15fd7ca27b 100755
--- a/ci/linux_openresty_common_runner.sh
+++ b/ci/linux_openresty_common_runner.sh
@@ -75,7 +75,7 @@ script() {
     start_grpc_server_example

     # APISIX_ENABLE_LUACOV=1 PERL5LIB=.:$PERL5LIB prove -Itest-nginx/lib -r t
-    FLUSH_ETCD=1 prove --timer -Itest-nginx/lib -I./ -r $TEST_FILE_SUB_DIR | tee /tmp/test.result
+    FLUSH_ETCD=1 TEST_EVENTS_MODULE=$TEST_EVENTS_MODULE prove --timer -Itest-nginx/lib -I./ -r $TEST_FILE_SUB_DIR | tee /tmp/test.result

     rerun_flaky_tests /tmp/test.result
 }
diff --git a/ci/redhat-ci.sh b/ci/redhat-ci.sh
index d40ccbfeb495..4a5963e256a4 100755
--- a/ci/redhat-ci.sh
+++ b/ci/redhat-ci.sh
@@ -37,12 +37,18 @@ install_dependencies() {
     yum install -y openresty-openssl111 openresty-openssl111-devel pcre pcre pcre-devel xz
     yum -y install https://repos.apiseven.com/packages/centos/apache-apisix-repo-1.0-1.noarch.rpm

-    wget "https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/build-apisix-runtime-debug-centos7.sh"
-    wget "https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/build-apisix-runtime.sh"
+    # TODO: disabled temporarily, waiting for APISIX 3.8 to be released to synchronize the apisix-runtime version
+    #wget "https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/build-apisix-runtime-debug-centos7.sh"
+    #wget "https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/build-apisix-runtime.sh"
+    wget "https://raw.githubusercontent.com/api7/apisix-build-tools/master/build-apisix-runtime-debug-centos7.sh"
+    wget "https://raw.githubusercontent.com/api7/apisix-build-tools/master/build-apisix-runtime.sh"
     chmod +x build-apisix-runtime.sh
     chmod +x build-apisix-runtime-debug-centos7.sh
     ./build-apisix-runtime-debug-centos7.sh

+    # patch lua-resty-events
+    sed -i 's/log(ERR, "event worker failed: ", perr)/log(ngx.WARN, "event worker failed: ", perr)/' /usr/local/openresty/lualib/resty/events/worker.lua
+
     # install luarocks
     ./utils/linux-install-luarocks.sh

@@ -94,7 +100,7 @@ run_case() {
     make init
     set_coredns
     # run test cases
-    FLUSH_ETCD=1 prove --timer -Itest-nginx/lib -I./ -r ${TEST_FILE_SUB_DIR} | tee /tmp/test.result
+    FLUSH_ETCD=1 TEST_EVENTS_MODULE=$TEST_EVENTS_MODULE prove --timer -Itest-nginx/lib -I./ -r ${TEST_FILE_SUB_DIR} | tee /tmp/test.result
     rerun_flaky_tests /tmp/test.result
 }
diff --git a/conf/config-default.yaml b/conf/config-default.yaml
index 8717d1398b3f..d53f2ce59b1c 100755
--- a/conf/config-default.yaml
+++ b/conf/config-default.yaml
@@ -139,6 +139,10 @@ apisix:
                                       # with the new key. Removing the old keys directly can render the data
                                       # unrecoverable.

+  events:                             # Event distribution module configuration
+    module: lua-resty-events          # Sets the name of the events module used.
+                                      # Supported modules: lua-resty-worker-events and lua-resty-events
+
 nginx_config:                         # Config for render the template to generate nginx.conf
   # user: root                        # Set the execution user of the worker process. This is only
   #                                   # effective if the master process runs with super-user privileges.
diff --git a/t/APISIX.pm b/t/APISIX.pm
index a8c49348f563..630ac6311cbc 100644
--- a/t/APISIX.pm
+++ b/t/APISIX.pm
@@ -78,10 +78,12 @@ if ($custom_dns_server) {
 }

+my $events_module = $ENV{TEST_EVENTS_MODULE} // "lua-resty-events";
 my $default_yaml_config = read_file("conf/config-default.yaml");
 # enable example-plugin as some tests require it
 $default_yaml_config =~ s/#- example-plugin/- example-plugin/;
 $default_yaml_config =~ s/enable_export_server: true/enable_export_server: false/;
+$default_yaml_config =~ s/module: lua-resty-events/module: $events_module/;

 my $user_yaml_config = read_file("conf/config.yaml");
 my $ssl_crt = read_file("t/certs/apisix.crt");
@@ -437,6 +439,14 @@ _EOC_

     $extra_stream_config

+    server {
+        listen unix:$apisix_home/t/servroot/logs/stream_worker_events.sock;
+        access_log off;
+        content_by_lua_block {
+            require("resty.events.compat").run()
+        }
+    }
+
     # fake server, only for test
     server {
         listen 1995;
@@ -687,6 +697,18 @@ _EOC_
         }
     }

+_EOC_
+
+    $http_config .= <<_EOC_;
+    server {
+        listen unix:$apisix_home/t/servroot/logs/worker_events.sock;
+        access_log off;
+        location / {
+            content_by_lua_block {
+                require("resty.events.compat").run()
+            }
+        }
+    }
 _EOC_

     $block->set_value("http_config", $http_config);
diff --git a/t/cli/test_cmd.sh b/t/cli/test_cmd.sh
index 81864aeb9a18..2f3a46cd2070 100755
--- a/t/cli/test_cmd.sh
+++ b/t/cli/test_cmd.sh
@@ -181,7 +181,7 @@ fi

 bin/apisix quit

-sleep 0.5
+sleep 2

 if ps -ef | grep "worker process is shutting down" | grep -v "grep"; then
     echo "all workers should exited"
@@ -200,7 +200,7 @@ fi

 bin/apisix reload

-sleep 0.5
+sleep 3

 if ps -ef | grep "worker process is shutting down" | grep -v "grep"; then
     echo "old workers should exited"
diff --git a/t/discovery/consul_dump.t b/t/discovery/consul_dump.t
index c74e5b1500bd..42c8f7eb24a4 100644
--- a/t/discovery/consul_dump.t
+++ b/t/discovery/consul_dump.t
@@ -471,6 +471,7 @@ location /v1/agent {
 --- response_body eval
 --- error_code eval
 [200, 200, 200, 200, 200, 200]
+--- wait: 2
diff --git a/t/node/healthcheck-discovery.t b/t/node/healthcheck-discovery.t
index e8e21b04240f..8a9b0e9769fc 100644
--- a/t/node/healthcheck-discovery.t
+++ b/t/node/healthcheck-discovery.t
@@ -153,8 +153,6 @@ qr/(create new checker|try to release checker): table/
 create new checker: table
 try to release checker: table
 create new checker: table
---- no_error_log
-all upstream nodes is unhealthy, use default
diff --git a/t/node/healthcheck-ipv6.t b/t/node/healthcheck-ipv6.t
index 69e375adc9c6..dc33dece2633 100644
--- a/t/node/healthcheck-ipv6.t
+++ b/t/node/healthcheck-ipv6.t
@@ -94,6 +94,8 @@ qr/^.*?\[error\](?!.*process exiting).*/
 --- config
     location /t {
         content_by_lua_block {
+            ngx.sleep(3) -- wait for worker replacement to complete
+
             local http = require "resty.http"
             local uri = "http://127.0.0.1:" .. ngx.var.server_port
                         .. "/server_port"
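Note the interplay between the default and the fallback in the `conf/config-default.yaml` and `t/APISIX.pm` hunks above: the shipped default is now `lua-resty-events`, the Lua code falls back to `lua-resty-worker-events` only when the `apisix.events.module` attribute is absent altogether, and the test harness rewrites the shipped default to whatever `TEST_EVENTS_MODULE` selects. A condensed view of that resolution order, matching the `init_worker` hunk earlier in the patch:

```lua
-- Sketch: events module resolution at worker start (names from the patch).
local core = require("apisix.core")

local conf = core.config.local_conf()
-- conf/config-default.yaml ships "lua-resty-events"; in tests, t/APISIX.pm
-- substitutes the TEST_EVENTS_MODULE value before this attribute is read
local module_name = core.table.try_read_attr(conf, "apisix", "events", "module")
                    or "lua-resty-worker-events"  -- fallback when the key is absent

-- "lua-resty-events" -> init_resty_events()         (unix-socket broker)
-- anything else      -> init_resty_worker_events()  (shared-memory polling)
```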
diff --git a/t/node/healthcheck-multiple-worker.t b/t/node/healthcheck-multiple-worker.t
index 469eb22f40c6..fa6076e07cf0 100644
--- a/t/node/healthcheck-multiple-worker.t
+++ b/t/node/healthcheck-multiple-worker.t
@@ -90,6 +90,8 @@ qr/^.*?\[error\](?!.*process exiting).*/
 --- config
     location /t {
         content_by_lua_block {
+            ngx.sleep(3) -- wait for worker replacement to complete
+
             local http = require "resty.http"
             local uri = "http://127.0.0.1:" .. ngx.var.server_port
                         .. "/server_port"
@@ -136,4 +138,4 @@ qr/unhealthy TCP increment/
 --- grep_error_log_out
 unhealthy TCP increment
 unhealthy TCP increment
---- timeout: 10
+--- timeout: 20
diff --git a/t/node/healthcheck-passive-resty-events.t b/t/node/healthcheck-passive-resty-events.t
new file mode 100644
index 000000000000..d90cbece7459
--- /dev/null
+++ b/t/node/healthcheck-passive-resty-events.t
@@ -0,0 +1,382 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+BEGIN {
+    if ($ENV{TEST_EVENTS_MODULE} ne "lua-resty-events") {
+        $SkipReason = "Only for lua-resty-events events module";
+    }
+}
+
+use Test::Nginx::Socket::Lua $SkipReason ? (skip_all => $SkipReason) : ();
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+log_level('info');
+no_root_location();
+no_shuffle();
+worker_connections(256);
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: set route(passive)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1',
+                ngx.HTTP_PUT,
+                [[{
+                    "uri": "/server_port",
+                    "upstream": {
+                        "type": "roundrobin",
+                        "nodes": {
+                            "127.0.0.1:1980": 0,
+                            "127.0.0.1:1": 1
+                        },
+                        "retries": 0,
+                        "checks": {
+                            "active": {
+                                "http_path": "/status",
+                                "host": "foo.com",
+                                "healthy": {
+                                    "interval": 100,
+                                    "successes": 1
+                                },
+                                "unhealthy": {
+                                    "interval": 100,
+                                    "http_failures": 2
+                                }
+                            },]] .. [[
+                            "passive": {
+                                "healthy": {
+                                    "http_statuses": [200, 201],
+                                    "successes": 3
+                                },
+                                "unhealthy": {
+                                    "http_statuses": [502],
+                                    "http_failures": 1,
+                                    "tcp_failures": 1
+                                }
+                            }
+                        }
+                    }
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
"/server_port" + + local httpc = http.new() + -- Since a failed request attempt triggers a passive health check to report + -- a non-health condition, a request is first triggered manually here to + -- trigger a passive health check to refresh the monitoring state of the build + -- + -- The reason for this is to avoid delays in event synchronization timing due + -- to non-deterministic asynchronous connections when using lua-resty-events + -- as an events module. + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + ngx.sleep(1) -- Wait for health check unhealthy events sync + + local ports_count = {} + for i = 1, 6 do + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + local status = tostring(res.status) + ports_count[status] = (ports_count[status] or 0) + 1 + end + + ngx.say(json_sort.encode(ports_count)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +{"200":5,"502":1} +--- error_log +(upstream#/apisix/routes/1) unhealthy HTTP increment (1/1) +--- timeout: 10 + + + +=== TEST 3: set route(only passive) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [502], + "http_failures": 1, + "tcp_failures": 1 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: object matches none of the required: [\"active\"] or [\"active\",\"passive\"]"} + + + +=== TEST 4: set route(only active + active & passive) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello_", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + },]] .. 
+                            "passive": {
+                                "healthy": {
+                                    "http_statuses": [200, 201],
+                                    "successes": 3
+                                },
+                                "unhealthy": {
+                                    "http_statuses": [502],
+                                    "http_failures": 1,
+                                    "tcp_failures": 1
+                                }
+                            }
+                        }
+                    }
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 5: only one route should have passive healthcheck
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local json_sort = require("toolkit.json")
+            local http = require("resty.http")
+            local uri = "http://127.0.0.1:" .. ngx.var.server_port
+
+            local ports_count = {}
+            local httpc = http.new()
+            local res, err = httpc:request_uri(uri .. "/hello_")
+            if not res then
+                ngx.say(err)
+                return
+            end
+            ngx.say(res.status)
+
+            -- only /hello_ has passive healthcheck
+            local res, err = httpc:request_uri(uri .. "/hello")
+            if not res then
+                ngx.say(err)
+                return
+            end
+            ngx.say(res.status)
+        }
+    }
+--- request
+GET /t
+--- response_body
+502
+502
+--- grep_error_log eval
+qr/enabled healthcheck passive/
+--- grep_error_log_out
+enabled healthcheck passive
+
+
+
+=== TEST 6: make sure passive healthcheck works (conf is not corrupted by the default value)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local json_sort = require("toolkit.json")
+            local http = require("resty.http")
+            local uri = "http://127.0.0.1:" .. ngx.var.server_port
+
+            local httpc = http.new()
+            local res, err = httpc:request_uri(uri .. "/hello")
+            if not res then
+                ngx.say(err)
+                return
+            end
+            ngx.say(res.status)
+
+            -- The first request to /hello_ ensures that the event that triggers
+            -- the healthchecker to perform add_target has been sent and
+            -- processed correctly.
+            --
+            -- lua-resty-events relies on the kernel and the Nginx event loop to
+            -- process socket connections. When lua-resty-healthcheck handles a
+            -- passive healthcheck with lua-resty-events as the events module,
+            -- the synchronization of the first event has usually not yet
+            -- completed when the passive healthcheck starts. So when the check
+            -- finishes and the healthchecker tries to record the health status,
+            -- it cannot find an existing target (the synchronization event has
+            -- not finished yet), which leads to some anomalies that deviate
+            -- from the original test case; hence the compatibility steps here.
+            local res, err = httpc:request_uri(uri .. "/hello_")
+            if not res then
+                ngx.say(err)
+                return
+            end
+            ngx.say(res.status)
+
+            ngx.sleep(1) -- wait for the unhealthy events to be synced
+
+            -- The second request to /hello_
+            local res, err = httpc:request_uri(uri .. "/hello_")
+            if not res then
+                ngx.say(err)
+                return
+            end
+            ngx.say(res.status)
+        }
+    }
+--- request
+GET /t
+--- response_body
+502
+502
+502
+--- grep_error_log eval
+qr/\[healthcheck\] \([^)]+\) unhealthy HTTP increment/
+--- grep_error_log_out
+[healthcheck] (upstream#/apisix/routes/2) unhealthy HTTP increment
diff --git a/t/node/healthcheck-passive.t b/t/node/healthcheck-passive.t
index f3f694b9f38f..7404ff0169fb 100644
--- a/t/node/healthcheck-passive.t
+++ b/t/node/healthcheck-passive.t
@@ -15,6 +15,13 @@
 # limitations under the License.
 #

+BEGIN {
+    if ($ENV{TEST_EVENTS_MODULE} ne "lua-resty-worker-events") {
+        $SkipReason = "Only for lua-resty-worker-events events module";
+    }
+}
+
+use Test::Nginx::Socket::Lua $SkipReason ? (skip_all => $SkipReason) : ();
 use t::APISIX 'no_plan';

 repeat_each(1);
diff --git a/t/node/healthcheck.t b/t/node/healthcheck.t
index 3053a0bb713f..546b06db4870 100644
--- a/t/node/healthcheck.t
+++ b/t/node/healthcheck.t
@@ -80,15 +80,22 @@ qr/^.*?\[error\](?!.*process exiting).*/
 --- config
     location /t {
         content_by_lua_block {
-            ngx.sleep(2) -- wait for sync
+            ngx.sleep(3) -- wait for sync

             local http = require "resty.http"
+            local httpc = http.new()
             local uri = "http://127.0.0.1:" .. ngx.var.server_port
                         .. "/server_port"

+            -- hit the route before starting the test loop
+            local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false})
+            if not res then
+                ngx.say(err)
+                return
+            end
+
             local ports_count = {}
             for i = 1, 12 do
-                local httpc = http.new()
                 local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false})
                 if not res then
                     ngx.say(err)
@@ -118,7 +125,7 @@ GET /t
 --- grep_error_log eval
 qr/^.*?\[error\](?!.*process exiting).*/
 --- grep_error_log_out
---- timeout: 6
+--- timeout: 10
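The added sleeps, longer timeouts, and priming requests in these tests all compensate for the same property: with lua-resty-events, posting returns before delivery completes, because the payload travels worker → broker (worker #0) → unix socket → subscribed workers through the nginx event loop. A sketch of the pattern the tests guard against, reusing the compat API from earlier; the source/event names here are hypothetical:

```lua
-- Sketch: why assertions must wait after posting with lua-resty-events.
local events = require("resty.events.compat")

-- subscriber side (runs in every worker), e.g. a checker updating peer state
events.register(function(data)
    ngx.log(ngx.INFO, "peer state changed: ", data)
end, "demo-healthcheck", "unhealthy")  -- hypothetical source/event names

-- publisher side: post() enqueues the event and returns immediately
events.post("demo-healthcheck", "unhealthy", "127.0.0.1:1980")

-- a read right here may still observe the old state; hence the tests add
-- ngx.sleep(...) or send a priming request before asserting on results
```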