diff --git a/apisix/cli/ops.lua b/apisix/cli/ops.lua
index ef069f815010..5ce51dab3b32 100644
--- a/apisix/cli/ops.lua
+++ b/apisix/cli/ops.lua
@@ -185,12 +185,9 @@ local function init(env)
local checked_admin_key = false
local allow_admin = yaml_conf.deployment.admin and
yaml_conf.deployment.admin.allow_admin
- if yaml_conf.apisix.enable_admin and allow_admin then
- for _, allow_ip in ipairs(allow_admin) do
- if allow_ip == "127.0.0.0/24" then
- checked_admin_key = true
- end
- end
+ if yaml_conf.apisix.enable_admin and allow_admin
+ and #allow_admin == 1 and allow_admin[1] == "127.0.0.0/24" then
+ checked_admin_key = true
end
if yaml_conf.apisix.enable_admin and not checked_admin_key then
diff --git a/apisix/control/v1.lua b/apisix/control/v1.lua
index fd031a473ca2..3143ae5948fc 100644
--- a/apisix/control/v1.lua
+++ b/apisix/control/v1.lua
@@ -14,6 +14,7 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
+local require = require
local core = require("apisix.core")
local plugin = require("apisix.plugin")
local get_routes = require("apisix.router").http_routes
@@ -22,6 +23,7 @@ local upstream_mod = require("apisix.upstream")
local get_upstreams = upstream_mod.upstreams
local collectgarbage = collectgarbage
local ipairs = ipairs
+local pcall = pcall
local str_format = string.format
local ngx_var = ngx.var
@@ -62,52 +64,137 @@ function _M.schema()
end
-local function extra_checker_info(value, src_type)
- local checker = value.checker
- local upstream = value.checker_upstream
- local host = upstream.checks and upstream.checks.active and upstream.checks.active.host
- local port = upstream.checks and upstream.checks.active and upstream.checks.active.port
- local nodes = upstream.nodes
- local healthy_nodes = core.table.new(#nodes, 0)
- for _, node in ipairs(nodes) do
- local ok = checker:get_target_status(node.host, port or node.port, host)
- if ok then
- core.table.insert(healthy_nodes, node)
- end
+local healthcheck
+local function extra_checker_info(value)
+ if not healthcheck then
+ healthcheck = require("resty.healthcheck")
end
- local conf = value.value
+ local name = upstream_mod.get_healthchecker_name(value)
+ local nodes, err = healthcheck.get_target_list(name, "upstream-healthcheck")
+ if err then
+ core.log.error("healthcheck.get_target_list failed: ", err)
+ end
return {
- name = upstream_mod.get_healthchecker_name(value),
- src_id = conf.id,
- src_type = src_type,
+ name = value.key,
nodes = nodes,
- healthy_nodes = healthy_nodes,
}
end
-local function iter_and_add_healthcheck_info(infos, values, src_type)
+local function get_checker_type(checks)
+ if checks.active and checks.active.type then
+ return checks.active.type
+ elseif checks.passive and checks.passive.type then
+ return checks.passive.type
+ end
+end
+
+
+local function iter_and_add_healthcheck_info(infos, values)
if not values then
return
end
for _, value in core.config_util.iterate_values(values) do
- if value.checker then
- core.table.insert(infos, extra_checker_info(value, src_type))
+ local checks = value.value.checks or (value.value.upstream and value.value.upstream.checks)
+ if checks then
+ local info = extra_checker_info(value)
+ info.type = get_checker_type(checks)
+ core.table.insert(infos, info)
end
end
end
-function _M.get_health_checkers()
+local HTML_TEMPLATE = [[
+<html>
+<head>
+    <title>APISIX upstream check status</title>
+</head>
+<body>
+<h1>APISIX upstream check status</h1>
+<table style="background-color:white" cellspacing="0" cellpadding="3" border="1">
+  <tr bgcolor="#C0C0C0">
+    <th>Index</th>
+    <th>Upstream</th>
+    <th>Check type</th>
+    <th>Host</th>
+    <th>Status</th>
+    <th>Success counts</th>
+    <th>TCP Failures</th>
+    <th>HTTP Failures</th>
+    <th>TIMEOUT Failures</th>
+  </tr>
+{% local i = 0 %}
+{% for _, stat in ipairs(stats) do %}
+{% for _, node in ipairs(stat.nodes) do %}
+{% i = i + 1 %}
+  {% if node.status == "healthy" then %}
+  <tr>
+  {% else %}
+  <tr bgcolor="#FF0000">
+  {% end %}
+    <td>{* i *}</td>
+    <td>{* stat.name *}</td>
+    <td>{* stat.type *}</td>
+    <td>{* node.ip .. ":" .. node.port *}</td>
+    <td>{* node.status *}</td>
+    <td>{* node.counter.success *}</td>
+    <td>{* node.counter.tcp_failure *}</td>
+    <td>{* node.counter.http_failure *}</td>
+    <td>{* node.counter.timeout_failure *}</td>
+  </tr>
+{% end %}
+{% end %}
+</table>
+</body>
+</html>
+]]
+
+local html_render
+
+local function try_render_html(data)
+ if not html_render then
+ local template = require("resty.template")
+ html_render = template.compile(HTML_TEMPLATE)
+ end
+ local accept = ngx_var.http_accept
+ if accept and accept:find("text/html") then
+ local ok, out = pcall(html_render, data)
+ if not ok then
+ local err = str_format("HTML template rendering: %s", out)
+ core.log.error(err)
+ return nil, err
+ end
+ return out
+ end
+end
+
+
+local function _get_health_checkers()
local infos = {}
local routes = get_routes()
- iter_and_add_healthcheck_info(infos, routes, "routes")
+ iter_and_add_healthcheck_info(infos, routes)
local services = get_services()
- iter_and_add_healthcheck_info(infos, services, "services")
+ iter_and_add_healthcheck_info(infos, services)
local upstreams = get_upstreams()
- iter_and_add_healthcheck_info(infos, upstreams, "upstreams")
+ iter_and_add_healthcheck_info(infos, upstreams)
+ return infos
+end
+
+
+function _M.get_health_checkers()
+ local infos = _get_health_checkers()
+ local out, err = try_render_html({stats=infos})
+ if out then
+ core.response.set_header("Content-Type", "text/html")
+ return 200, out
+ end
+ if err then
+ return 503, {error_msg = err}
+ end
+
return 200, infos
end
@@ -119,11 +206,15 @@ local function iter_and_find_healthcheck_info(values, src_type, src_id)
for _, value in core.config_util.iterate_values(values) do
if value.value.id == src_id then
- if not value.checker then
+ local checks = value.value.checks or
+ (value.value.upstream and value.value.upstream.checks)
+ if not checks then
return nil, str_format("no checker for %s[%s]", src_type, src_id)
end
- return extra_checker_info(value, src_type)
+ local info = extra_checker_info(value)
+ info.type = get_checker_type(checks)
+ return info
end
end
@@ -155,6 +246,16 @@ function _M.get_health_checker()
if not info then
return 404, {error_msg = err}
end
+
+ local out, err = try_render_html({stats={info}})
+ if out then
+ core.response.set_header("Content-Type", "text/html")
+ return 200, out
+ end
+ if err then
+ return 503, {error_msg = err}
+ end
+
return 200, info
end
@@ -372,5 +473,6 @@ return {
methods = {"GET"},
uris = {"/plugin_metadata/*"},
handler = _M.dump_plugin_metadata,
- }
+ },
+ get_health_checkers = _get_health_checkers,
}
diff --git a/apisix/plugins/prometheus/exporter.lua b/apisix/plugins/prometheus/exporter.lua
index 45ff94c3f631..1cb4a534cb9f 100644
--- a/apisix/plugins/prometheus/exporter.lua
+++ b/apisix/plugins/prometheus/exporter.lua
@@ -17,6 +17,7 @@
local base_prometheus = require("prometheus")
local core = require("apisix.core")
local plugin = require("apisix.plugin")
+local control = require("apisix.control.v1")
local ipairs = ipairs
local pairs = pairs
local ngx = ngx
@@ -158,6 +159,10 @@ function _M.http_init(prometheus_enabled_in_stream)
"The free space of each nginx shared DICT since APISIX start",
{"name"})
+ metrics.upstream_status = prometheus:gauge("upstream_status",
+ "Upstream status from health check",
+ {"name", "ip", "port"})
+
-- per service
-- The consumer label indicates the name of consumer corresponds to the
@@ -458,6 +463,15 @@ local function collect(ctx, stream_only)
metrics.node_info:set(1, gen_arr(hostname))
+ -- update upstream_status metrics
+ local stats = control.get_health_checkers()
+ for _, stat in ipairs(stats) do
+ for _, node in ipairs(stat.nodes) do
+ metrics.upstream_status:set((node.status == "healthy") and 1 or 0,
+ gen_arr(stat.name, node.ip, node.port))
+ end
+ end
+
core.response.set_header("content_type", "text/plain")
return 200, core.table.concat(prometheus:metric_data())
end
diff --git a/apisix/upstream.lua b/apisix/upstream.lua
index 79214c5ea724..4e66b701c4f1 100644
--- a/apisix/upstream.lua
+++ b/apisix/upstream.lua
@@ -135,7 +135,7 @@ local function create_checker(upstream)
local host = upstream.checks and upstream.checks.active and upstream.checks.active.host
local port = upstream.checks and upstream.checks.active and upstream.checks.active.port
local up_hdr = upstream.pass_host == "rewrite" and upstream.upstream_host
- local use_node_hdr = upstream.pass_host == "node"
+ local use_node_hdr = upstream.pass_host == "node" or nil
for _, node in ipairs(upstream.nodes) do
local host_hdr = up_hdr or (use_node_hdr and node.domain)
local ok, err = checker:add_target(node.host, port or node.port, host,
diff --git a/docs/assets/images/health_check_status_page.png b/docs/assets/images/health_check_status_page.png
new file mode 100644
index 000000000000..ed4aebead0fd
Binary files /dev/null and b/docs/assets/images/health_check_status_page.png differ
diff --git a/docs/en/latest/control-api.md b/docs/en/latest/control-api.md
index c6944f2b5421..a068d4411fb3 100644
--- a/docs/en/latest/control-api.md
+++ b/docs/en/latest/control-api.md
@@ -98,71 +98,50 @@ Returns a [health check](./tutorials/health-check.md) of the APISIX instance.
```json
[
- {
- "healthy_nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- }
- ],
- "name": "upstream#/upstreams/1",
- "nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- },
- {
- "host": "127.0.0.2",
- "port": 1988,
- "priority": 0,
- "weight": 1
- }
- ],
- "src_id": "1",
- "src_type": "upstreams"
- },
- {
- "healthy_nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- }
- ],
- "name": "upstream#/routes/1",
- "nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- },
- {
- "host": "127.0.0.1",
- "port": 1988,
- "priority": 0,
- "weight": 1
- }
- ],
- "src_id": "1",
- "src_type": "routes"
- }
+ {
+ "nodes": [
+ {
+ "ip": "52.86.68.46",
+ "counter": {
+ "http_failure": 0,
+ "success": 0,
+ "timeout_failure": 0,
+ "tcp_failure": 0
+ },
+ "port": 80,
+ "status": "healthy"
+ },
+ {
+ "ip": "100.24.156.8",
+ "counter": {
+ "http_failure": 5,
+ "success": 0,
+ "timeout_failure": 0,
+ "tcp_failure": 0
+ },
+ "port": 80,
+ "status": "unhealthy"
+ }
+ ],
+ "name": "/apisix/routes/1",
+ "type": "http"
+ }
]
+
```
Each of the returned objects contain the following fields:
-* src_type: where the health checker is reporting from. Value is one of `["routes", "services", "upstreams"]`.
-* src_id: id of the object creating the health checker. For example, if an Upstream
-object with id `1` creates a health checker, the `src_type` is `upstreams` and the `src_id` is `1`.
-* name: name of the health checker.
+* name: the id of the resource (route, service, or upstream) the health checker reports on.
+* type: health check type: `["http", "https", "tcp"]`.
* nodes: target nodes of the health checker.
-* healthy_nodes: healthy nodes discovered by the health checker.
+* nodes[i].ip: ip address.
+* nodes[i].port: port number.
+* nodes[i].status: health check result: `["healthy", "unhealthy", "mostly_healthy", "mostly_unhealthy"]`.
+* nodes[i].counter.success: success health check count.
+* nodes[i].counter.http_failure: http failures count.
+* nodes[i].counter.tcp_failure: tcp connect/read/write failures count.
+* nodes[i].counter.timeout_failure: timeout count.
You can also use `/v1/healthcheck/$src_type/$src_id` to get the health status of specific nodes.
@@ -170,40 +149,50 @@ For example, `GET /v1/healthcheck/upstreams/1` returns:
```json
{
- "healthy_nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- }
- ],
- "name": "upstream#/upstreams/1",
- "nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- },
- {
- "host": "127.0.0.2",
- "port": 1988,
- "priority": 0,
- "weight": 1
- }
- ],
- "src_id": "1",
- "src_type": "upstreams"
+ "nodes": [
+ {
+ "ip": "52.86.68.46",
+ "counter": {
+ "http_failure": 0,
+ "success": 2,
+ "timeout_failure": 0,
+ "tcp_failure": 0
+ },
+ "port": 80,
+ "status": "healthy"
+ },
+ {
+ "ip": "100.24.156.8",
+ "counter": {
+ "http_failure": 5,
+ "success": 0,
+ "timeout_failure": 0,
+ "tcp_failure": 0
+ },
+ "port": 80,
+ "status": "unhealthy"
+ }
+ ],
+  "type": "http",
+ "name": "/apisix/routes/1"
}
+
```
:::note
-As APISIX uses multiple-process architecture, if the process never handles the request of a specific upstream, then the upstream's health check information will not appear on the process. This may result in the health check API can't get all data during testing.
+An upstream's status is shown in the result list
+only when it satisfies both of the conditions below:
+
+* The upstream is configured with a health checker
+* The upstream has served requests in any worker process
:::
+If you use browser to access the control API URL, then you will get the HTML output:
+
+![Health Check Status Page](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/health_check_status_page.png)
+
### POST /v1/gc
Introduced in [v2.8](https://github.com/apache/apisix/releases/tag/2.8).
diff --git a/docs/en/latest/mtls.md b/docs/en/latest/mtls.md
index 1a1180a0978f..fe0f43ef7ef9 100644
--- a/docs/en/latest/mtls.md
+++ b/docs/en/latest/mtls.md
@@ -1,5 +1,10 @@
---
title: Mutual TLS Authentication
+keywords:
+ - Apache APISIX
+ - Mutual TLS
+ - mTLS
+description: This document describes how you can secure communication to and within APISIX with mTLS.
---