Merge branch 'upstream/master' into github/master

* upstream/master:
  fix: limit count plugin conf parameter undefined error (apache#8902)
  chore: remove duplicate kubernetes test case (apache#8882)
  feat: add 'range_id' algorithm for 'request-id' plugin (apache#8790)
  chore(error-log-logger): add kafka meta_refresh_interval (apache#8821)
  chore(deps): bump golang.org/x/net from 0.2.0 to 0.7.0 in /t/grpc_server_example (apache#8881)
  docs: Update getting-started.md (apache#8763)
  fix(admin): fix wrong http code for patch method (apache#8855)
  feat: stream subsystem support tars service discovery (apache#8826)
  feat(ci): implement image caching to reduce ci build time. (apache#8735)
  feat(admin): add head method support to /apisix/admin (apache#8752)
  feat: opentelemetry plugin config collector.address support specify https scheme (apache#8823)
  fix: add admin schema to control_plane config (apache#8809)
hongbinhsu committed Feb 23, 2023
2 parents a7e20e7 + 4ab50da commit 211a51a
Showing 39 changed files with 778 additions and 915 deletions.
55 changes: 35 additions & 20 deletions .github/workflows/build.yml
@@ -99,30 +99,36 @@ jobs:
rm -rf $(ls -1 --ignore=*.tgz --ignore=ci --ignore=t --ignore=utils --ignore=.github)
tar zxvf ${{ steps.branch_env.outputs.fullname }}
- name: Start CI env (FIRST_TEST)
if: steps.test_env.outputs.type == 'first'
run: |
# launch deps env
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
- name: Cache images
id: cache-images
uses: actions/cache@v3
env:
cache-name: cache-apisix-docker-images
with:
path: docker-images-backup
key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.test_env.outputs.type }}-${{ matrix.os_name }}-${{ hashFiles(format('./ci/pod/docker-compose.{0}.yml', steps.test_env.outputs.type )) }}

- name: Start CI env (PLUGIN_TEST)
if: steps.test_env.outputs.type == 'plugin'
- if: ${{ steps.cache-images.outputs.cache-hit == 'true' }}
name: Load saved docker images
run: |
# download keycloak cas provider
sudo wget https://github.com/jacekkow/keycloak-protocol-cas/releases/download/18.0.2/keycloak-protocol-cas-18.0.2.jar -O /opt/keycloak-protocol-cas-18.0.2.jar
./ci/pod/openfunction/build-function-image.sh
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
- name: Start CI env (LAST_TEST)
if: steps.test_env.outputs.type == 'last'
if [[ -f docker-images-backup/apisix-images.tar ]]; then
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
docker load --input docker-images-backup/apisix-images.tar
rm docker-images-backup/apisix-images.tar
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
echo "loaded docker images"
if [[ ${{ steps.test_env.outputs.type }} != first ]]; then
sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
fi
fi
- if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
name: Linux launch services
run: |
# generating SSL certificates for Kafka
sudo keytool -genkeypair -keyalg RSA -dname "CN=127.0.0.1" -alias 127.0.0.1 -keystore ./ci/pod/kafka/kafka-server/selfsigned.jks -validity 365 -keysize 2048 -storepass changeit
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
[[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
echo "Linux launch services, done."
- name: Start Dubbo Backend
if: matrix.os_name == 'linux_openresty' && steps.test_env.outputs.type == 'plugin'
run: |
@@ -158,3 +164,12 @@ jobs:
env:
TEST_FILE_SUB_DIR: ${{ matrix.test_dir }}
run: sudo -E ./ci/${{ matrix.os_name }}_runner.sh script

- if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
name: Save docker images
run: |
# free disk space
bash ./ci/free_disk_space.sh
echo "start backing up, $(date)"
bash ./ci/backup-docker-images.sh ${{ steps.test_env.outputs.type }}
echo "backup done, $(date)"
25 changes: 5 additions & 20 deletions .github/workflows/centos7-ci.yml
@@ -103,28 +103,13 @@ jobs:
docker run -itd -v /home/runner/work/apisix/apisix:/apisix --env TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --name centos7Instance --net="host" --dns 8.8.8.8 --dns-search apache.org docker.io/centos:7 /bin/bash
# docker exec centos7Instance bash -c "cp -r /tmp/apisix ./"
- name: Start CI env (FIRST_TEST)
if: steps.test_env.outputs.type == 'first'
- name: Linux launch services
run: |
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
[[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
- name: Start CI env (PLUGIN_TEST)
if: steps.test_env.outputs.type == 'plugin'
run: |
# download keycloak cas provider
sudo wget https://github.com/jacekkow/keycloak-protocol-cas/releases/download/18.0.2/keycloak-protocol-cas-18.0.2.jar -O /opt/keycloak-protocol-cas-18.0.2.jar
./ci/pod/openfunction/build-function-image.sh
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
- name: Start CI env (LAST_TEST)
if: steps.test_env.outputs.type == 'last'
run: |
# generating SSL certificates for Kafka
keytool -genkeypair -keyalg RSA -dname "CN=127.0.0.1" -alias 127.0.0.1 -keystore ./ci/pod/kafka/kafka-server/selfsigned.jks -validity 365 -keysize 2048 -storepass changeit
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
echo "Linux launch services, done"
- name: Install dependencies
run: |
27 changes: 6 additions & 21 deletions .github/workflows/fips.yml
@@ -89,29 +89,14 @@ jobs:
run: |
make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
- name: Start CI env (FIRST_TEST)
if: steps.test_env.outputs.type == 'first'
- name: Linux launch services
run: |
# launch deps env
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
[[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
- name: Start CI env (PLUGIN_TEST)
if: steps.test_env.outputs.type == 'plugin'
run: |
# download keycloak cas provider
sudo wget https://github.com/jacekkow/keycloak-protocol-cas/releases/download/18.0.2/keycloak-protocol-cas-18.0.2.jar -O /opt/keycloak-protocol-cas-18.0.2.jar
./ci/pod/openfunction/build-function-image.sh
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
- name: Start CI env (LAST_TEST)
if: steps.test_env.outputs.type == 'last'
run: |
# generating SSL certificates for Kafka
sudo keytool -genkeypair -keyalg RSA -dname "CN=127.0.0.1" -alias 127.0.0.1 -keystore ./ci/pod/kafka/kafka-server/selfsigned.jks -validity 365 -keysize 2048 -storepass changeit
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
echo "make ci-env-up, done"
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
echo "Linux launch services, done"
- name: Linux Before install
run: sudo ./ci/${{ matrix.os_name }}_runner.sh before_install
26 changes: 5 additions & 21 deletions .github/workflows/gm-cron.yaml
@@ -86,29 +86,13 @@ jobs:
make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
sudo ./ci/init-common-test-service.sh
- name: Start CI env (FIRST_TEST)
if: steps.test_env.outputs.type == 'first'
- name: Linux launch services
run: |
# launch deps env
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
[[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
- name: Start CI env (PLUGIN_TEST)
if: steps.test_env.outputs.type == 'plugin'
run: |
# download keycloak cas provider
sudo wget https://github.com/jacekkow/keycloak-protocol-cas/releases/download/18.0.2/keycloak-protocol-cas-18.0.2.jar -O /opt/keycloak-protocol-cas-18.0.2.jar
./ci/pod/openfunction/build-function-image.sh
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
- name: Start CI env (LAST_TEST)
if: steps.test_env.outputs.type == 'last'
run: |
# generating SSL certificates for Kafka
sudo keytool -genkeypair -keyalg RSA -dname "CN=127.0.0.1" -alias 127.0.0.1 -keystore ./ci/pod/kafka/kafka-server/selfsigned.jks -validity 365 -keysize 2048 -storepass changeit
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
echo "Linux launch services, done."
- name: Start Dubbo Backend
if: steps.test_env.outputs.type == 'plugin'
10 changes: 10 additions & 0 deletions apisix/admin/init.lua
@@ -130,6 +130,11 @@ local function strip_etcd_resp(data)
end


local function head()
core.response.exit(200)
end


local function run()
local api_ctx = {}
core.ctx.set_vars_meta(api_ctx)
@@ -365,6 +370,11 @@ end


local uri_route = {
{
paths = [[/apisix/admin]],
methods = {"HEAD"},
handler = head,
},
{
paths = [[/apisix/admin/*]],
methods = {"GET", "PUT", "POST", "DELETE", "PATCH"},
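
The new `head` handler above answers `HEAD /apisix/admin` with a bare 200 (apache#8752 in the merge list), giving load balancers and probes a cheap way to tell that the Admin API is up. A minimal check, assuming a local instance with the default Admin API port 9180 and the stock admin key; both may differ in your config.yaml:

# HEAD probe against the Admin API. The handler exits with 200 before any
# resource lookup, so the response body is empty.
curl -I http://127.0.0.1:9180/apisix/admin \
  -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1'
# expected status line on this commit: HTTP/1.1 200 OK
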
4 changes: 4 additions & 0 deletions apisix/cli/ngx_tpl.lua
@@ -141,6 +141,10 @@ stream {
lua_shared_dict etcd-cluster-health-check-stream {* stream.lua_shared_dict["etcd-cluster-health-check-stream"] *};
lua_shared_dict worker-events-stream {* stream.lua_shared_dict["worker-events-stream"] *};
{% if enabled_discoveries["tars"] then %}
lua_shared_dict tars-stream {* stream.lua_shared_dict["tars-stream"] *};
{% end %}
{% if enabled_stream_plugins["limit-conn"] then %}
lua_shared_dict plugin-limit-conn-stream {* stream.lua_shared_dict["plugin-limit-conn-stream"] *};
{% end %}
1 change: 1 addition & 0 deletions apisix/cli/schema.lua
@@ -375,6 +375,7 @@ local deployment_schema = {
control_plane = {
properties = {
etcd = etcd_schema,
admin = admin_schema,
role_control_plane = {
properties = {
config_provider = {
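
This one-line schema change (apache#8809 above) lets a `control_plane` deployment carry an `admin` section without failing validation. Below is a sketch of the kind of config.yaml fragment it is meant to accept; the `admin_key` layout follows the usual deployment schema but is an assumption, since this diff only shows the schema hook itself.

# Append a control-plane deployment block to conf/config.yaml (sketch).
cat <<'EOF' >> conf/config.yaml
deployment:
  role: control_plane
  role_control_plane:
    config_provider: etcd
  admin:
    admin_key:
      - name: admin
        key: edd1c9f034335f136f87ad84b625c8f1
        role: admin
  etcd:
    host:
      - "http://127.0.0.1:2379"
EOF
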
1 change: 0 additions & 1 deletion apisix/core/etcd.lua
@@ -507,7 +507,6 @@ function _M.atomic_set(key, value, ttl, mod_revision)
key = key,
value = value,
}
res.status = 201

return res, nil
end
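
Dropping the hard-coded `res.status = 201` in `atomic_set` is the heart of apache#8855 in the merge list: the Admin API stops reporting "201 Created" for operations that merely modified an existing resource. Roughly, assuming stock defaults (Admin API on 9180, default key) and a route with id 1 already present:

# Status-code check for PATCH on an existing route.
curl -s -o /dev/null -w '%{http_code}\n' \
  -X PATCH http://127.0.0.1:9180/apisix/admin/routes/1 \
  -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \
  -d '{"desc": "patched"}'
# prints 200 after this fix; before it, the forced res.status surfaced as 201
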
20 changes: 18 additions & 2 deletions apisix/discovery/tars/init.lua
@@ -22,7 +22,8 @@ local tonumber = tonumber
local local_conf = require("apisix.core.config_local").local_conf()
local core = require("apisix.core")
local mysql = require("resty.mysql")
local process = require("ngx.process")
local is_http = ngx.config.subsystem == "http"
local support_process, process = pcall(require, "ngx.process")

local endpoint_dict

@@ -331,9 +332,24 @@ function _M.nodes(servant)
return get_endpoint(servant)
end

local function get_endpoint_dict()
local shm = "tars"

if not is_http then
shm = shm .. "-stream"
end

return ngx.shared[shm]
end

function _M.init_worker()
endpoint_dict = ngx.shared.tars
if not support_process then
core.log.error("tars discovery not support in subsystem: ", ngx.config.subsystem,
", please check if your openresty version >= 1.19.9.1 or not")
return
end

endpoint_dict = get_endpoint_dict()
if not endpoint_dict then
error("failed to get lua_shared_dict: tars, please check your APISIX version")
end
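
With `get_endpoint_dict()` choosing between the `tars` and `tars-stream` shared dicts (matching the `ngx_tpl.lua` change above), the same discovery module now serves both the http and stream subsystems (apache#8826), guarded by the `ngx.process` check since the stream subsystem only exposes it on OpenResty >= 1.19.9.1. Enabling it is a config.yaml matter; the field names below follow the tars discovery documentation of this period and are an assumption, as this diff does not show them.

# Sketch: enable tars discovery (registry data lives in the tars MySQL db).
cat <<'EOF' >> conf/config.yaml
discovery:
  tars:
    db_conf:
      host: 127.0.0.1
      port: 3306
      database: db_tars
      user: root
      password: tars2022
    full_fetch_interval: 90
    incremental_fetch_interval: 5
EOF
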
2 changes: 2 additions & 0 deletions apisix/plugins/error-log-logger.lua
@@ -123,6 +123,7 @@ local metadata_schema = {
-- in lua-resty-kafka, cluster_name is defined as number
-- see https://github.com/doujiang24/lua-resty-kafka#new-1
cluster_name = {type = "integer", minimum = 1, default = 1},
meta_refresh_interval = {type = "integer", minimum = 1, default = 30},
},
required = {"brokers", "kafka_topic"},
},
@@ -370,6 +371,7 @@ local function send_to_kafka(log_message)
broker_config["request_timeout"] = config.timeout * 1000
broker_config["producer_type"] = config.kafka.producer_type
broker_config["required_acks"] = config.kafka.required_acks
broker_config["refresh_interval"] = config.kafka.meta_refresh_interval * 1000

-- reuse producer via kafka_prod_lrucache to avoid unbalanced partitions of messages in kafka
local prod, err = kafka_prod_lrucache(plugin_name, metadata.modifiedIndex,
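
`meta_refresh_interval` is accepted in seconds in the plugin metadata and handed to lua-resty-kafka as a millisecond `refresh_interval`, so stale broker metadata is re-fetched on that cadence (apache#8821). A sketch of setting it, assuming stock Admin API defaults; the broker-list syntax follows this plugin's metadata schema of the period and is an assumption:

# Point error-log shipping at Kafka and refresh its metadata every 10 s.
curl -X PUT http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger \
  -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \
  -d '{
    "kafka": {
      "brokers": [{"host": "127.0.0.1", "port": 9092}],
      "kafka_topic": "apisix-error-logs",
      "meta_refresh_interval": 10
    }
  }'
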
4 changes: 2 additions & 2 deletions apisix/plugins/limit-count/limit-count-local.lua
@@ -51,11 +51,11 @@ local function read_reset(self, key)
return reset
end

function _M.new(plugin_name, limit, window, conf)
function _M.new(plugin_name, limit, window)
assert(limit > 0 and window > 0)

local self = {
limit_count = limit_local_new(plugin_name, limit, window, conf),
limit_count = limit_local_new(plugin_name, limit, window),
dict = ngx.shared["plugin-limit-count-reset-header"]
}

33 changes: 14 additions & 19 deletions apisix/plugins/opentelemetry.lua
Expand Up @@ -36,10 +36,9 @@ local span_status = require("opentelemetry.trace.span_status")
local resource_new = require("opentelemetry.resource").new
local attr = require("opentelemetry.attribute")

local context_storage = require("opentelemetry.context_storage")
local context = require("opentelemetry.context").new(context_storage)
local carrier_new = require("opentelemetry.trace.propagation.carrier").new
local trace_context = require("opentelemetry.trace.propagation.trace_context")
local context = require("opentelemetry.context").new()
local trace_context_propagator =
require("opentelemetry.trace.propagation.text_map.trace_context_propagator").new()

local ngx = ngx
local ngx_var = ngx.var
@@ -310,7 +309,7 @@ function _M.rewrite(conf, api_ctx)
end

-- extract trace context from the headers of downstream HTTP request
local upstream_context = trace_context.extract(context, carrier_new())
local upstream_context = trace_context_propagator:extract(context, ngx.req)
local attributes = {
attr.string("service", api_ctx.service_name),
attr.string("route", api_ctx.route_name),
@@ -333,27 +332,24 @@
kind = span_kind.server,
attributes = attributes,
})
ctx:attach()
api_ctx.otel_context_token = ctx:attach()

-- inject trace context into the headers of upstream HTTP request
trace_context.inject(ctx, carrier_new())
trace_context_propagator:inject(ctx, ngx.req)
end


function _M.delayed_body_filter(conf, api_ctx)
if ngx.arg[2] then
if api_ctx.otel_context_token and ngx.arg[2] then
local ctx = context:current()
if not ctx then
return
end

local upstream_status = core.response.get_upstream_status(api_ctx)
ctx:detach()
ctx:detach(api_ctx.otel_context_token)
api_ctx.otel_context_token = nil

-- get span from current context
local span = ctx:span()
local upstream_status = core.response.get_upstream_status(api_ctx)
if upstream_status and upstream_status >= 500 then
span:set_status(span_status.error,
span:set_status(span_status.ERROR,
"upstream response status: " .. upstream_status)
end

Expand All @@ -365,15 +361,14 @@ end
-- body_filter maybe not called because of empty http body response
-- so we need to check if the span has finished in log phase
function _M.log(conf, api_ctx)
local ctx = context:current()
if ctx then
if api_ctx.otel_context_token then
-- ctx:detach() is not necessary, because of ctx is stored in ngx.ctx
local upstream_status = core.response.get_upstream_status(api_ctx)

-- get span from current context
local span = ctx:span()
local span = context:current():span()
if upstream_status and upstream_status >= 500 then
span:set_status(span_status.error,
span:set_status(span_status.ERROR,
"upstream response status: " .. upstream_status)
end

(Diffs for the remaining 27 of the 39 changed files are not shown here.)