diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index bd199672e7ad..21d185cbe455 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -31,7 +31,7 @@ jobs:
- linux_openresty_1_17
test_dir:
- t/plugin
- - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
+ - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc
- t/node t/pubsub t/router t/script t/stream-node t/utils t/wasm t/xds-library t/xrpc
runs-on: ${{ matrix.platform }}
@@ -67,6 +67,21 @@ jobs:
echo "##[set-output name=version;]$(echo ${GITHUB_REF##*/})"
echo "##[set-output name=fullname;]$(echo apache-apisix-${GITHUB_REF##*/}-src.tgz)"
+ - name: Extract test type
+ shell: bash
+ id: test_env
+ run: |
+ test_dir="${{ matrix.test_dir }}"
+ if [[ $test_dir =~ 't/plugin' ]]; then
+ echo "##[set-output name=type;]$(echo 'plugin')"
+ fi
+ if [[ $test_dir =~ 't/admin ' ]]; then
+ echo "##[set-output name=type;]$(echo 'first')"
+ fi
+ if [[ $test_dir =~ ' t/xrpc' ]]; then
+ echo "##[set-output name=type;]$(echo 'last')"
+ fi
+
- name: Linux launch common services
run: |
make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
@@ -82,32 +97,28 @@ jobs:
rm -rf $(ls -1 --ignore=*.tgz --ignore=ci --ignore=t --ignore=utils --ignore=.github)
tar zxvf ${{ steps.branch_env.outputs.fullname }}
- - name: Build wasm code
- if: matrix.os_name == 'linux_openresty'
+ - name: Start CI env (FIRST_TEST)
+ if: steps.test_env.outputs.type == 'first'
run: |
- export TINYGO_VER=0.20.0
- wget https://github.com/tinygo-org/tinygo/releases/download/v${TINYGO_VER}/tinygo_${TINYGO_VER}_amd64.deb 2>/dev/null
- sudo dpkg -i tinygo_${TINYGO_VER}_amd64.deb
- cd t/wasm && find . -type f -name "*.go" | xargs -Ip tinygo build -o p.wasm -scheduler=none -target=wasi p
+ # launch deps env
+ make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
- - name: Build xDS library
+ - name: Start CI env (PLUGIN_TEST)
+ if: steps.test_env.outputs.type == 'plugin'
run: |
- cd t/xds-library
- go build -o libxds.so -buildmode=c-shared main.go export.go
+ make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
+ sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
- - name: Linux Before install
- run: sudo ./ci/${{ matrix.os_name }}_runner.sh before_install
-
- - name: Start CI env
+ - name: Start CI env (LAST_TEST)
+ if: steps.test_env.outputs.type == 'last'
run: |
# generating SSL certificates for Kafka
sudo keytool -genkeypair -keyalg RSA -dname "CN=127.0.0.1" -alias 127.0.0.1 -keystore ./ci/pod/kafka/kafka-server/selfsigned.jks -validity 365 -keysize 2048 -storepass changeit
- # launch deps env
- make ci-env-up
- sudo ./ci/linux-ci-init-service.sh
+ make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
+ sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
- name: Start Dubbo Backend
- if: matrix.os_name == 'linux_openresty'
+ if: matrix.os_name == 'linux_openresty' && steps.test_env.outputs.type == 'plugin'
run: |
sudo apt install -y maven
cd t/lib/dubbo-backend
@@ -115,6 +126,23 @@ jobs:
cd dubbo-backend-provider/target
java -Djava.net.preferIPv4Stack=true -jar dubbo-demo-provider.one-jar.jar > /tmp/java.log &
+ - name: Build xDS library
+ if: steps.test_env.outputs.type == 'last'
+ run: |
+ cd t/xds-library
+ go build -o libxds.so -buildmode=c-shared main.go export.go
+
+ - name: Build wasm code
+ if: matrix.os_name == 'linux_openresty' && steps.test_env.outputs.type == 'last'
+ run: |
+ export TINYGO_VER=0.20.0
+ wget https://github.com/tinygo-org/tinygo/releases/download/v${TINYGO_VER}/tinygo_${TINYGO_VER}_amd64.deb 2>/dev/null
+ sudo dpkg -i tinygo_${TINYGO_VER}_amd64.deb
+ cd t/wasm && find . -type f -name "*.go" | xargs -Ip tinygo build -o p.wasm -scheduler=none -target=wasi p
+
+ - name: Linux Before install
+ run: sudo ./ci/${{ matrix.os_name }}_runner.sh before_install
+
- name: Linux Install
run: |
sudo --preserve-env=OPENRESTY_VERSION \
diff --git a/.github/workflows/centos7-ci.yml b/.github/workflows/centos7-ci.yml
index 9b2f8fc81b9d..b308c79fb95b 100644
--- a/.github/workflows/centos7-ci.yml
+++ b/.github/workflows/centos7-ci.yml
@@ -29,7 +29,7 @@ jobs:
matrix:
test_dir:
- t/plugin
- - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
+ - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc
- t/node t/pubsub t/router t/script t/stream-node t/utils t/wasm t/xds-library
steps:
@@ -45,6 +45,21 @@ jobs:
run: |
echo "##[set-output name=version;]$(echo ${GITHUB_REF##*/})"
+ - name: Extract test type
+ shell: bash
+ id: test_env
+ run: |
+ test_dir="${{ matrix.test_dir }}"
+ if [[ $test_dir =~ 't/plugin' ]]; then
+ echo "##[set-output name=type;]$(echo 'plugin')"
+ fi
+ if [[ $test_dir =~ 't/admin ' ]]; then
+ echo "##[set-output name=type;]$(echo 'first')"
+ fi
+ if [[ $test_dir =~ ' t/xds-library' ]]; then
+ echo "##[set-output name=type;]$(echo 'last')"
+ fi
+
- name: Linux launch common services
run: |
make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
@@ -66,6 +81,7 @@ jobs:
rm -rf $(ls -1 --ignore=apisix-build-tools --ignore=t --ignore=utils --ignore=ci --ignore=Makefile --ignore=rockspec)
- name: Build xDS library
+ if: steps.test_env.outputs.type == 'last'
run: |
cd t/xds-library
go build -o libxds.so -buildmode=c-shared main.go export.go
@@ -77,12 +93,24 @@ jobs:
docker run -itd -v /home/runner/work/apisix/apisix:/apisix --env TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --name centos7Instance --net="host" --dns 8.8.8.8 --dns-search apache.org docker.io/centos:7 /bin/bash
# docker exec centos7Instance bash -c "cp -r /tmp/apisix ./"
- - name: Run other docker containers for test
+ - name: Start CI env (FIRST_TEST)
+ if: steps.test_env.outputs.type == 'first'
+ run: |
+ make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
+
+ - name: Start CI env (PLUGIN_TEST)
+ if: steps.test_env.outputs.type == 'plugin'
+ run: |
+ make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
+ ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
+
+ - name: Start CI env (LAST_TEST)
+ if: steps.test_env.outputs.type == 'last'
run: |
# generating SSL certificates for Kafka
keytool -genkeypair -keyalg RSA -dname "CN=127.0.0.1" -alias 127.0.0.1 -keystore ./ci/pod/kafka/kafka-server/selfsigned.jks -validity 365 -keysize 2048 -storepass changeit
- make ci-env-up
- ./ci/linux-ci-init-service.sh
+ make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
+ ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh
- name: Install dependencies
run: |
diff --git a/.github/workflows/chaos.yml b/.github/workflows/chaos.yml
index 677b6150d6ee..20b45f602c90 100644
--- a/.github/workflows/chaos.yml
+++ b/.github/workflows/chaos.yml
@@ -40,9 +40,8 @@ jobs:
- name: Creating minikube cluster
run: |
bash ./t/chaos/utils/setup_chaos_utils.sh start_minikube
- wget https://raw.githubusercontent.com/apache/apisix-docker/master/alpine-local/Dockerfile
mkdir logs
- docker build -t apache/apisix:alpine-local --build-arg APISIX_PATH=. -f Dockerfile .
+ docker build -t apache/apisix:alpine-local --build-arg APISIX_PATH=. -f ./t/chaos/utils/Dockerfile .
minikube cache add apache/apisix:alpine-local -v 7 --alsologtostderr
- name: Print cluster information
diff --git a/.github/workflows/doc-lint.yml b/.github/workflows/doc-lint.yml
index cd71d8bdffff..d6b64921b0da 100644
--- a/.github/workflows/doc-lint.yml
+++ b/.github/workflows/doc-lint.yml
@@ -18,7 +18,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: 🚀 Use Node.js
- uses: actions/setup-node@v3.2.0
+ uses: actions/setup-node@v3.3.0
with:
node-version: '12.x'
- run: npm install -g markdownlint-cli@0.25.0
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 431801b6a849..2338100168a7 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -32,7 +32,7 @@ jobs:
uses: actions/checkout@v3
- name: Setup Nodejs env
- uses: actions/setup-node@v3.2.0
+ uses: actions/setup-node@v3.3.0
with:
node-version: '12'
diff --git a/.licenserc.yaml b/.licenserc.yaml
index 5822d7fd25bb..85f1c69e4722 100644
--- a/.licenserc.yaml
+++ b/.licenserc.yaml
@@ -19,7 +19,7 @@ header:
spdx-id: Apache-2.0
copyright-owner: Apache Software Foundation
- license-location-threshold: 250
+ license-location-threshold: 350
paths-ignore:
- '.gitignore'
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3f1a8ab3464c..63e5737651b6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ title: Changelog
- [2.14.1](#2141)
- [2.14.0](#2140)
+- [2.13.2](#2132)
- [2.13.1](#2131)
- [2.13.0](#2130)
- [2.12.1](#2121)
@@ -60,9 +61,9 @@ title: Changelog
## 2.14.1
-**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.14` branch.**
+### Bugfix
-[https://github.com/apache/apisix/blob/release/2.14/CHANGELOG.md#2141](https://github.com/apache/apisix/blob/release/2.14/CHANGELOG.md#2141)
+- The "unix:" in the `real_ip_from` configuration should not break the batch-requests plugin: [#7106](https://github.com/apache/apisix/pull/7106)
## 2.14.0
@@ -120,6 +121,12 @@ title: Changelog
- [#6686](https://github.com/apache/apisix/pull/6686)
- Admin API rejects unknown stream plugin: [#6813](https://github.com/apache/apisix/pull/6813)
+## 2.13.2
+
+**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.13` branch.**
+
+[https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2132](https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2132)
+
## 2.13.1
**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.13` branch.**
diff --git a/Makefile b/Makefile
index 989fe3714f8a..6c82a6a94341 100644
--- a/Makefile
+++ b/Makefile
@@ -24,7 +24,6 @@ SHELL := /bin/bash -o pipefail
# Project basic setting
VERSION ?= master
project_name ?= apache-apisix
-project_compose_ci ?= ci/pod/docker-compose.yml
project_release_name ?= $(project_name)-$(VERSION)-src
diff --git a/README.md b/README.md
index e3522db2b2eb..e28bef917c13 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@
#
-->
-# Apache APISIX
+# Apache APISIX API Gateway
@@ -27,11 +27,11 @@
[![Average time to resolve an issue](http://isitmaintained.com/badge/resolution/apache/apisix.svg)](http://isitmaintained.com/project/apache/apisix "Average time to resolve an issue")
[![Percentage of issues still open](http://isitmaintained.com/badge/open/apache/apisix.svg)](http://isitmaintained.com/project/apache/apisix "Percentage of issues still open")
-**Apache APISIX** is a dynamic, real-time, high-performance API gateway.
+**Apache APISIX** is a dynamic, real-time, high-performance API Gateway.
-APISIX provides rich traffic management features such as load balancing, dynamic upstream, canary release, circuit breaking, authentication, observability, and more.
+APISIX API Gateway provides rich traffic management features such as load balancing, dynamic upstream, canary release, circuit breaking, authentication, observability, and more.
-You can use Apache APISIX to handle traditional north-south traffic,
+You can use **APISIX API Gateway** to handle traditional north-south traffic,
as well as east-west traffic between services.
It can also be used as a [k8s ingress controller](https://github.com/apache/apisix-ingress-controller).
@@ -45,25 +45,18 @@ The technical architecture of Apache APISIX:
- QQ Group - 552030619, 781365357
- Slack Workspace - [invitation link](https://join.slack.com/t/the-asf/shared_invite/zt-vlfbf7ch-HkbNHiU_uDlcH_RvaHv9gQ) (Please open an [issue](https://apisix.apache.org/docs/general/submit-issue) if this link is expired), and then join the #apisix channel (Channels -> Browse channels -> search for "apisix").
- ![Twitter Follow](https://img.shields.io/twitter/follow/ApacheAPISIX?style=social) - follow and interact with us using hashtag `#ApacheAPISIX`
-- **Good first issues**:
- - [Apache APISIX®](https://github.com/apache/apisix/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
- - [Apache APISIX® Ingress Controller](https://github.com/apache/apisix-ingress-controller/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
- - [Apache APISIX® dashboard](https://github.com/apache/apisix-dashboard/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
- - [Apache APISIX® Helm Chart](https://github.com/apache/apisix-helm-chart/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
- - [Docker distribution for Apache APISIX®](https://github.com/apache/apisix-docker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
- - [Apache APISIX® Website](https://github.com/apache/apisix-website/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
- - [Apache APISIX® Java Plugin Runner](https://github.com/apache/apisix-java-plugin-runner/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
- - [Apache APISIX® Go Plugin Runner](https://github.com/apache/apisix-go-plugin-runner/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
- - [Apache APISIX® Python Plugin Runner](https://github.com/apache/apisix-python-plugin-runner/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
+- [Documentation](https://apisix.apache.org/docs/)
+- [Discussions](https://github.com/apache/apisix/discussions)
+- [Blog](https://apisix.apache.org/blog)
## Features
-You can use Apache APISIX as a traffic entrance to process all business data, including dynamic routing, dynamic upstream, dynamic certificates,
+You can use APISIX API Gateway as a traffic entrance to process all business data, including dynamic routing, dynamic upstream, dynamic certificates,
A/B testing, canary release, blue-green deployment, limit rate, defense against malicious attacks, metrics, monitoring alarms, service observability, service governance, etc.
- **All platforms**
- - Cloud-Native: Platform agnostic, No vendor lock-in, APISIX can run from bare-metal to Kubernetes.
+ - Cloud-Native: Platform agnostic, No vendor lock-in, APISIX API Gateway can run from bare-metal to Kubernetes.
- Supports ARM64: Don't worry about the lock-in of the infra technology.
- **Multi protocols**
@@ -194,6 +187,8 @@ Using AWS's eight-core server, APISIX's QPS reaches 140,000 with a latency of on
[Benchmark script](benchmark/run.sh) has been open sourced, welcome to try and contribute.
+[The APISIX API Gateway also works perfectly on AWS Graviton3 C7g.](https://apisix.apache.org/blog/2022/06/07/installation-performance-test-of-apigateway-apisix-on-aws-graviton3)
+
## Contributor Over Time
> [visit here](https://www.apiseven.com/contributor-graph) to generate Contributor Over Time.
@@ -206,9 +201,9 @@ Using AWS's eight-core server, APISIX's QPS reaches 140,000 with a latency of on
- [Copernicus Reference System Software](https://github.com/COPRS/infrastructure/wiki/Networking-trade-off)
- [More Stories](https://apisix.apache.org/blog/tags/user-case)
-## Who Uses APISIX?
+## Who Uses APISIX API Gateway?
-A wide variety of companies and organizations use APISIX for research, production and commercial product, below are some of them:
+A wide variety of companies and organizations use APISIX API Gateway for research, production, and commercial products; below are some of them:
- Airwallex
- Bilibili
@@ -224,7 +219,7 @@ A wide variety of companies and organizations use APISIX for research, productio
- Tencent Game
- Travelsky
- VIVO
-- weibo
+- Sina Weibo
- WPS
## Landscape
diff --git a/apisix/admin/ssl.lua b/apisix/admin/ssl.lua
index 341e03004d1a..9a73107c9f10 100644
--- a/apisix/admin/ssl.lua
+++ b/apisix/admin/ssl.lua
@@ -46,7 +46,7 @@ local function check_conf(id, conf, need_id)
conf.id = id
core.log.info("schema: ", core.json.delay_encode(core.schema.ssl))
- core.log.info("conf : ", core.json.delay_encode(conf))
+ core.log.info("conf: ", core.json.delay_encode(conf))
local ok, err = apisix_ssl.check_ssl_conf(false, conf)
if not ok then
diff --git a/apisix/cli/etcd.lua b/apisix/cli/etcd.lua
index 9edfbcd51876..43aa4f84ae26 100644
--- a/apisix/cli/etcd.lua
+++ b/apisix/cli/etcd.lua
@@ -197,8 +197,10 @@ function _M.init(env, args)
local res, err
local retry_time = 0
- local health_check_retry = tonumber(yaml_conf.etcd.health_check_retry) or 2
- while retry_time < health_check_retry do
+ local etcd = yaml_conf.etcd
+ -- TODO: remove deprecated health_check_retry option in APISIX v3
+ local max_retry = tonumber(etcd.startup_retry or etcd.health_check_retry) or 2
+ while retry_time < max_retry do
res, err = request(version_url, yaml_conf)
-- In case of failure, request returns nil followed by an error message.
-- Else the first return value is the response body
diff --git a/apisix/cli/file.lua b/apisix/cli/file.lua
index 66600b54b41b..9c528005e1fd 100644
--- a/apisix/cli/file.lua
+++ b/apisix/cli/file.lua
@@ -251,6 +251,13 @@ function _M.read_yaml_conf(apisix_home)
end
end
+ if default_conf.deployment
+ and default_conf.deployment.role == "traditional"
+ and default_conf.deployment.etcd
+ then
+ default_conf.etcd = default_conf.deployment.etcd
+ end
+
return default_conf
end
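
A minimal sketch of the aliasing above, with an illustrative config table (not taken from this PR): in traditional mode the etcd settings under `deployment` are mirrored to the top-level `etcd` field, so code that still reads `conf.etcd` keeps working.

```lua
-- hypothetical config table, for illustration only
local conf = {
    deployment = {
        role = "traditional",
        etcd = {
            prefix = "/apisix",
            host = {"http://127.0.0.1:2379"},
        },
    },
}

-- the same aliasing as read_yaml_conf() above
if conf.deployment
   and conf.deployment.role == "traditional"
   and conf.deployment.etcd
then
    conf.etcd = conf.deployment.etcd
end

assert(conf.etcd.prefix == "/apisix")
```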
diff --git a/apisix/cli/ngx_tpl.lua b/apisix/cli/ngx_tpl.lua
index 4709362e5913..f22280766982 100644
--- a/apisix/cli/ngx_tpl.lua
+++ b/apisix/cli/ngx_tpl.lua
@@ -66,6 +66,7 @@ lua {
{% if enabled_stream_plugins["prometheus"] and not enable_http then %}
http {
+ {% if enabled_stream_plugins["prometheus"] then %}
init_worker_by_lua_block {
require("apisix.plugins.prometheus.exporter").http_init(true)
}
@@ -82,14 +83,17 @@ http {
}
}
- {% if with_module_status then %}
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
stub_status;
}
- {% end %}
}
+ {% end %}
+
+ {% if conf_server then %}
+ {* conf_server *}
+ {% end %}
}
{% end %}
@@ -503,13 +507,11 @@ http {
}
}
- {% if with_module_status then %}
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
stub_status;
}
- {% end %}
}
{% end %}
@@ -574,6 +576,10 @@ http {
}
{% end %}
+ {% if conf_server then %}
+ {* conf_server *}
+ {% end %}
+
server {
{% for _, item in ipairs(node_listen) do %}
listen {* item.ip *}:{* item.port *} default_server {% if item.enable_http2 then %} http2 {% end %} {% if enable_reuseport then %} reuseport {% end %};
@@ -618,14 +624,12 @@ http {
{% end %}
# http server configuration snippet ends
- {% if with_module_status then %}
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
access_log off;
stub_status;
}
- {% end %}
{% if enable_admin and not admin_server_addr then %}
location /apisix/admin {
diff --git a/apisix/cli/ops.lua b/apisix/cli/ops.lua
index 937d741060cb..d2275bed5813 100644
--- a/apisix/cli/ops.lua
+++ b/apisix/cli/ops.lua
@@ -21,6 +21,7 @@ local file = require("apisix.cli.file")
local schema = require("apisix.cli.schema")
local ngx_tpl = require("apisix.cli.ngx_tpl")
local cli_ip = require("apisix.cli.ip")
+local snippet = require("apisix.cli.snippet")
local profile = require("apisix.core.profile")
local template = require("resty.template")
local argparse = require("argparse")
@@ -65,6 +66,7 @@ stop: stop the apisix server
quit: stop the apisix server gracefully
restart: restart the apisix server
reload: reload the apisix server
+test: test the generated nginx.conf
version: print the version of apisix
]])
end
@@ -244,12 +246,9 @@ Please modify "admin_key" in conf/config.yaml .
end
local or_info = util.execute_cmd("openresty -V 2>&1")
- local with_module_status = true
if or_info and not or_info:find("http_stub_status_module", 1, true) then
- stderr:write("'http_stub_status_module' module is missing in ",
- "your openresty, please check it out. Without this ",
- "module, there will be fewer monitoring indicators.\n")
- with_module_status = false
+ util.die("'http_stub_status_module' module is missing in ",
+ "your openresty, please check it out.\n")
end
local use_apisix_openresty = true
@@ -541,6 +540,15 @@ Please modify "admin_key" in conf/config.yaml .
proxy_mirror_timeouts = yaml_conf.plugin_attr["proxy-mirror"].timeout
end
+ local conf_server, err = snippet.generate_conf_server(env, yaml_conf)
+ if err then
+ util.die(err, "\n")
+ end
+
+ if yaml_conf.deployment and yaml_conf.deployment.role then
+ env.deployment_role = yaml_conf.deployment.role
+ end
+
-- Using template.render
local sys_conf = {
use_openresty_1_17 = use_openresty_1_17,
@@ -548,7 +556,6 @@ Please modify "admin_key" in conf/config.yaml .
lua_cpath = env.pkg_cpath_org,
os_name = util.trim(util.execute_cmd("uname")),
apisix_lua_home = env.apisix_home,
- with_module_status = with_module_status,
use_apisix_openresty = use_apisix_openresty,
error_log = {level = "warn"},
enable_http = enable_http,
@@ -561,6 +568,7 @@ Please modify "admin_key" in conf/config.yaml .
control_server_addr = control_server_addr,
prometheus_server_addr = prometheus_server_addr,
proxy_mirror_timeouts = proxy_mirror_timeouts,
+ conf_server = conf_server,
}
if not yaml_conf.apisix then
@@ -806,7 +814,10 @@ local function start(env, ...)
end
init(env)
- init_etcd(env, args)
+
+ if env.deployment_role ~= "data_plane" then
+ init_etcd(env, args)
+ end
util.execute_cmd(env.openresty_args)
end
diff --git a/apisix/cli/schema.lua b/apisix/cli/schema.lua
index 8c7a873214c1..db4f47477de5 100644
--- a/apisix/cli/schema.lua
+++ b/apisix/cli/schema.lua
@@ -22,6 +22,44 @@ local require = require
local _M = {}
+local etcd_schema = {
+ type = "object",
+ properties = {
+ resync_delay = {
+ type = "integer",
+ },
+ user = {
+ type = "string",
+ },
+ password = {
+ type = "string",
+ },
+ tls = {
+ type = "object",
+ properties = {
+ cert = {
+ type = "string",
+ },
+ key = {
+ type = "string",
+ },
+ },
+ },
+ prefix = {
+ type = "string",
+ pattern = [[^/[^/]+$]]
+ },
+ host = {
+ type = "array",
+ items = {
+ type = "string",
+ pattern = [[^https?://]]
+ },
+ minItems = 1,
+ }
+ },
+ required = {"prefix", "host"}
+}
local config_schema = {
type = "object",
properties = {
@@ -190,31 +228,7 @@ local config_schema = {
}
}
},
- etcd = {
- type = "object",
- properties = {
- resync_delay = {
- type = "integer",
- },
- user = {
- type = "string",
- },
- password = {
- type = "string",
- },
- tls = {
- type = "object",
- properties = {
- cert = {
- type = "string",
- },
- key = {
- type = "string",
- },
- }
- }
- }
- },
+ etcd = etcd_schema,
wasm = {
type = "object",
properties = {
@@ -243,8 +257,25 @@ local config_schema = {
}
}
},
+ deployment = {
+ type = "object",
+ properties = {
+ role = {
+ enum = {"traditional", "control_plane", "data_plane", "standalone"}
+ }
+ },
+ required = {"role"},
+ },
}
}
+local deployment_schema = {
+ traditional = {
+ properties = {
+ etcd = etcd_schema,
+ },
+ required = {"etcd"}
+ },
+}
function _M.validate(yaml_conf)
@@ -267,6 +298,15 @@ function _M.validate(yaml_conf)
end
end
+ if yaml_conf.deployment then
+ local role = yaml_conf.deployment.role
+ local validator = jsonschema.generate_validator(deployment_schema[role])
+ local ok, err = validator(yaml_conf.deployment)
+ if not ok then
+ return false, "invalid deployment " .. role .. " configuration: " .. err
+ end
+ end
+
return true
end
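
A hedged sketch of how the role-specific validation above behaves, reusing the `jsonschema` module already required in this file; the config below is illustrative.

```lua
local jsonschema = require("jsonschema")

local deployment_schema = {
    traditional = {
        properties = { etcd = { type = "object" } },
        required = {"etcd"},
    },
}

-- deliberately missing the etcd section required by the traditional role
local deployment = { role = "traditional" }

local validator = jsonschema.generate_validator(deployment_schema[deployment.role])
local ok, err = validator(deployment)
-- ok == false; err reports that "etcd" is required, which validate()
-- above wraps as "invalid deployment traditional configuration: ..."
```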
diff --git a/apisix/cli/snippet.lua b/apisix/cli/snippet.lua
new file mode 100644
index 000000000000..bfaf973a026c
--- /dev/null
+++ b/apisix/cli/snippet.lua
@@ -0,0 +1,119 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local template = require("resty.template")
+local pl_path = require("pl.path")
+local ipairs = ipairs
+
+
+-- this module provides methods to generate snippets used in the nginx.conf template
+local _M = {}
+
+
+function _M.generate_conf_server(env, conf)
+ if not (conf.deployment and conf.deployment.role == "traditional") then
+ return nil, nil
+ end
+
+ -- we use the proxy even when the role is traditional so that we can test it in daily development
+ local etcd = conf.deployment.etcd
+ local servers = etcd.host
+ local enable_https = false
+ local prefix = "https://"
+ if servers[1]:find(prefix, 1, true) then
+ enable_https = true
+ end
+ -- there is no compatible way to verify upstream TLS like the one we do in cosocket,
+ -- so we just ignore it here as the verification is already done in the init phase
+ for i, s in ipairs(servers) do
+ if (s:find(prefix, 1, true) ~= nil) ~= enable_https then
+ return nil, "all nodes in the etcd cluster should enable/disable TLS together"
+ end
+
+ local _, to = s:find("://", 1, true)
+ if not to then
+ return nil, "bad etcd endpoint format"
+ end
+ end
+
+ local conf_render = template.compile([[
+ upstream apisix_conf_backend {
+ server 0.0.0.0:80;
+ balancer_by_lua_block {
+ local conf_server = require("apisix.conf_server")
+ conf_server.balancer()
+ }
+ }
+ server {
+ listen unix:{* home *}/conf/config_listen.sock;
+ access_log off;
+
+ set $upstream_host '';
+
+ access_by_lua_block {
+ local conf_server = require("apisix.conf_server")
+ conf_server.access()
+ }
+
+ location / {
+ {% if enable_https then %}
+ proxy_pass https://apisix_conf_backend;
+ proxy_ssl_server_name on;
+ {% if sni then %}
+ proxy_ssl_name {* sni *};
+ {% else %}
+ proxy_ssl_name $upstream_host;
+ {% end %}
+ proxy_ssl_protocols TLSv1.2 TLSv1.3;
+ {% if client_cert then %}
+ proxy_ssl_certificate {* client_cert *};
+ proxy_ssl_certificate_key {* client_cert_key *};
+ {% end %}
+ {% else %}
+ proxy_pass http://apisix_conf_backend;
+ {% end %}
+
+ proxy_http_version 1.1;
+ proxy_set_header Connection "";
+ proxy_set_header Host $upstream_host;
+ }
+
+ log_by_lua_block {
+ local conf_server = require("apisix.conf_server")
+ conf_server.log()
+ }
+ }
+ ]])
+
+ local tls = etcd.tls
+ local client_cert
+ local client_cert_key
+ if tls and tls.cert then
+ client_cert = pl_path.abspath(tls.cert)
+ client_cert_key = pl_path.abspath(tls.key)
+ end
+
+ return conf_render({
+ sni = etcd.tls and etcd.tls.sni,
+ enable_https = enable_https,
+ home = env.apisix_home or ".",
+ client_cert = client_cert,
+ client_cert_key = client_cert_key,
+ })
+end
+
+
+return _M
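
A usage sketch for the new module, under the assumption that it is driven from `apisix/cli/ops.lua` as shown later in this diff; paths and hosts are illustrative.

```lua
local snippet = require("apisix.cli.snippet")

local env = { apisix_home = "/usr/local/apisix" } -- illustrative home
local conf = {
    deployment = {
        role = "traditional",
        etcd = {
            host = {"http://127.0.0.1:2379"},
        },
    },
}

local conf_server, err = snippet.generate_conf_server(env, conf)
if err then
    error(err)
end
-- conf_server now holds the rendered upstream/server block, listening on
-- unix:/usr/local/apisix/conf/config_listen.sock, ready to be injected
-- into nginx.conf via the {* conf_server *} template hook.
```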
diff --git a/apisix/conf_server.lua b/apisix/conf_server.lua
new file mode 100644
index 000000000000..40cf2895158b
--- /dev/null
+++ b/apisix/conf_server.lua
@@ -0,0 +1,209 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local fetch_local_conf = require("apisix.core.config_local").local_conf
+local picker = require("apisix.balancer.least_conn")
+local balancer = require("ngx.balancer")
+local error = error
+local ipairs = ipairs
+local ngx = ngx
+local ngx_var = ngx.var
+
+
+local _M = {}
+local servers = {}
+local resolved_results = {}
+local server_picker
+local has_domain = false
+
+
+local function create_resolved_result(server)
+ local host, port = core.utils.parse_addr(server)
+ return {
+ host = host,
+ port = port,
+ server = server,
+ }
+end
+
+
+function _M.init()
+ local conf = fetch_local_conf()
+ if not (conf.deployment and conf.deployment.etcd) then
+ return
+ end
+
+ local etcd = conf.deployment.etcd
+ for i, s in ipairs(etcd.host) do
+ local _, to = core.string.find(s, "://")
+ if not to then
+ error("bad etcd endpoint format")
+ end
+
+ local addr = s:sub(to + 1)
+ local host, _, err = core.utils.parse_addr(addr)
+ if err then
+ error("failed to parse host: ".. err)
+ end
+
+ resolved_results[i] = create_resolved_result(addr)
+ servers[i] = addr
+
+ if not core.utils.parse_ipv4(host) and not core.utils.parse_ipv6(host) then
+ has_domain = true
+ resolved_results[i].domain = host
+ end
+ end
+
+ if #servers > 1 then
+ local nodes = {}
+ for _, s in ipairs(servers) do
+ nodes[s] = 1
+ end
+ server_picker = picker.new(nodes, {})
+ end
+end
+
+
+local function response_err(err)
+ ngx.log(ngx.ERR, "failure in conf server: ", err)
+ ngx.say(core.json.encode({error = err}))
+ ngx.exit(0)
+end
+
+
+local function resolve_servers(ctx)
+ if not has_domain then
+ return
+ end
+
+ local changed = false
+ for _, res in ipairs(resolved_results) do
+ local domain = res.domain
+ if not domain then
+ goto CONTINUE
+ end
+
+ local ip, err = core.resolver.parse_domain(domain)
+ if ip and res.host ~= ip then
+ res.host = ip
+ changed = true
+ core.log.info(domain, " is resolved to: ", ip)
+ end
+
+ if err then
+ core.log.error("dns resolver resolves domain: ", domain, " error: ", err)
+ end
+
+ ::CONTINUE::
+ end
+
+ if not changed then
+ return
+ end
+
+ if #servers > 1 then
+ local nodes = {}
+ for _, res in ipairs(resolved_results) do
+ local s = res.server
+ nodes[s] = 1
+ end
+ server_picker = picker.new(nodes, {})
+ end
+end
+
+
+local function pick_node(ctx)
+ local res
+ if server_picker then
+ local server, err = server_picker.get(ctx)
+ if not server then
+ err = err or "no valid upstream node"
+ return nil, "failed to find valid upstream server, " .. err
+ end
+
+ ctx.server_picker = server_picker
+ ctx.balancer_server = server
+
+ for _, r in ipairs(resolved_results) do
+ if r.server == server then
+ res = r
+ break
+ end
+ end
+ else
+ res = resolved_results[1]
+ end
+
+ ctx.balancer_ip = res.host
+ ctx.balancer_port = res.port
+
+ ngx_var.upstream_host = res.domain or res.host
+ if balancer.recreate_request and ngx.get_phase() == "balancer" then
+ balancer.recreate_request()
+ end
+
+ return true
+end
+
+
+function _M.access()
+ local ctx = ngx.ctx
+ -- Nginx's DNS resolver doesn't support the search option,
+ -- so we have to use our own resolver
+ resolve_servers(ctx)
+ local ok, err = pick_node(ctx)
+ if not ok then
+ return response_err(err)
+ end
+end
+
+
+function _M.balancer()
+ local ctx = ngx.ctx
+ if not ctx.balancer_run then
+ ctx.balancer_run = true
+ local retries = #servers - 1
+ local ok, err = balancer.set_more_tries(retries)
+ if not ok then
+ core.log.error("could not set upstream retries: ", err)
+ elseif err then
+ core.log.warn("could not set upstream retries: ", err)
+ end
+ else
+ local ok, err = pick_node(ctx)
+ if not ok then
+ return response_err(err)
+ end
+ end
+
+ local ok, err = balancer.set_current_peer(ctx.balancer_ip, ctx.balancer_port)
+ if not ok then
+ return response_err(err)
+ end
+end
+
+
+function _M.log()
+ local ctx = ngx.ctx
+ if ctx.server_picker and ctx.server_picker.after_balance then
+ ctx.server_picker.after_balance(ctx, false)
+ end
+end
+
+
+return _M
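
A small sketch of the endpoint parsing done in `_M.init()` above, assuming `core.utils.parse_addr` splits `host:port` pairs; the endpoint is illustrative.

```lua
local s = "https://etcd.example.com:2379"     -- illustrative endpoint
local _, to = s:find("://", 1, true)
local addr = s:sub(to + 1)                    -- "etcd.example.com:2379"
-- parse_addr would yield host = "etcd.example.com", port = 2379; since
-- the host is a domain rather than an IP, it is remembered so that
-- resolve_servers() can refresh it via the DNS resolver on each access.
```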
diff --git a/apisix/core/config_etcd.lua b/apisix/core/config_etcd.lua
index f022fa96552e..183c52aac338 100644
--- a/apisix/core/config_etcd.lua
+++ b/apisix/core/config_etcd.lua
@@ -523,6 +523,7 @@ local function _automatic_fetch(premature, self)
end
if not (health_check.conf and health_check.conf.shm_name) then
+ -- used for worker processes to synchronize configuration
local _, err = health_check.init({
shm_name = health_check_shm_name,
fail_timeout = self.health_check_timeout,
@@ -816,9 +817,13 @@ function _M.init()
return nil, "failed to start a etcd instance: " .. err
end
+ -- don't go through the proxy during startup because the proxy is not available yet
+ local proxy = etcd_cli.unix_socket_proxy
+ etcd_cli.unix_socket_proxy = nil
local etcd_conf = local_conf.etcd
local prefix = etcd_conf.prefix
local res, err = readdir(etcd_cli, prefix, create_formatter(prefix))
+ etcd_cli.unix_socket_proxy = proxy
if not res then
return nil, err
end
diff --git a/apisix/core/config_xds.lua b/apisix/core/config_xds.lua
index e5e452f7eec6..793592b6fb4c 100644
--- a/apisix/core/config_xds.lua
+++ b/apisix/core/config_xds.lua
@@ -23,6 +23,7 @@ local config_local = require("apisix.core.config_local")
local string = require("apisix.core.string")
local log = require("apisix.core.log")
local json = require("apisix.core.json")
+local os = require("apisix.core.os")
local ngx_sleep = require("apisix.core.utils").sleep
local check_schema = require("apisix.core.schema").check
local new_tab = require("table.new")
@@ -67,10 +68,7 @@ end
ffi.cdef[[
-typedef unsigned int useconds_t;
-
extern void initial(void* config_zone, void* version_zone);
-int usleep(useconds_t usec);
]]
local created_obj = {}
@@ -323,7 +321,7 @@ function _M.new(key, opts)
-- blocking until xds completes initial configuration
while true do
- C.usleep(0.1)
+ os.usleep(1000)
fetch_version()
if latest_version then
break
diff --git a/apisix/core/etcd.lua b/apisix/core/etcd.lua
index 9d289bd5d6e5..a57a5d0c86bf 100644
--- a/apisix/core/etcd.lua
+++ b/apisix/core/etcd.lua
@@ -19,15 +19,19 @@
--
-- @module core.etcd
-local fetch_local_conf = require("apisix.core.config_local").local_conf
-local array_mt = require("apisix.core.json").array_mt
-local etcd = require("resty.etcd")
-local clone_tab = require("table.clone")
-local health_check = require("resty.etcd.health_check")
-local ipairs = ipairs
-local setmetatable = setmetatable
-local string = string
-local tonumber = tonumber
+local fetch_local_conf = require("apisix.core.config_local").local_conf
+local array_mt = require("apisix.core.json").array_mt
+local etcd = require("resty.etcd")
+local clone_tab = require("table.clone")
+local health_check = require("resty.etcd.health_check")
+local ipairs = ipairs
+local setmetatable = setmetatable
+local string = string
+local tonumber = tonumber
+local ngx_config_prefix = ngx.config.prefix()
+
+
+local is_http = ngx.config.subsystem == "http"
local _M = {}
@@ -38,7 +42,33 @@ local function new()
return nil, nil, err
end
- local etcd_conf = clone_tab(local_conf.etcd)
+ local etcd_conf
+ local proxy_by_conf_server = false
+
+ if local_conf.deployment then
+ etcd_conf = clone_tab(local_conf.deployment.etcd)
+
+ if local_conf.deployment.role == "traditional"
+ -- we proxy the etcd requests in traditional mode so we can test the CP's behavior in
+ -- daily development. However, a stream proxy can't be the CP.
+ -- Hence, generating an HTTP conf server to proxy etcd requests in the stream
+ -- subsystem is unnecessary and inefficient.
+ and is_http
+ then
+ local sock_prefix = ngx_config_prefix
+ etcd_conf.unix_socket_proxy =
+ "unix:" .. sock_prefix .. "/conf/config_listen.sock"
+ etcd_conf.host = {"http://127.0.0.1:2379"}
+ proxy_by_conf_server = true
+
+ elseif local_conf.deployment.role == "control_plane" then
+ -- TODO: add the proxy conf in control_plane
+ proxy_by_conf_server = true
+ end
+ else
+ etcd_conf = clone_tab(local_conf.etcd)
+ end
+
local prefix = etcd_conf.prefix
etcd_conf.http_host = etcd_conf.host
etcd_conf.host = nil
@@ -63,10 +93,15 @@ local function new()
end
end
- -- enable etcd health check retry for curr worker
- if not health_check.conf then
+ -- if an unhealthy etcd node is selected for a single admin read/write etcd operation,
+ -- the health check's retry mechanism can select another healthy etcd node
+ -- to complete the operation.
+ if proxy_by_conf_server then
+ -- health check is done in conf server
+ health_check.disable()
+ elseif not health_check.conf then
health_check.init({
- max_fails = #etcd_conf.http_host,
+ max_fails = 1,
retry = true,
})
end
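
A hedged sketch of the rewrite performed in `new()` above for the traditional role: the client is pointed at a placeholder host and tunnels through the conf server's unix socket (values illustrative).

```lua
local etcd_conf = {
    prefix = "/apisix",
    host = {"https://etcd.example.com:2379"},
}

local sock_prefix = "/usr/local/apisix"  -- ngx.config.prefix() at runtime
etcd_conf.unix_socket_proxy =
    "unix:" .. sock_prefix .. "/conf/config_listen.sock"
-- placeholder address; the real etcd node is picked by the conf server
etcd_conf.host = {"http://127.0.0.1:2379"}
```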
diff --git a/apisix/core/os.lua b/apisix/core/os.lua
index ae721e883435..4a922d01e43d 100644
--- a/apisix/core/os.lua
+++ b/apisix/core/os.lua
@@ -23,6 +23,9 @@ local ffi = require("ffi")
local ffi_str = ffi.string
local ffi_errno = ffi.errno
local C = ffi.C
+local ceil = math.ceil
+local floor = math.floor
+local error = error
local tostring = tostring
local type = type
@@ -71,6 +74,20 @@ function _M.setenv(name, value)
end
+---
+-- sleep blockingly for the given number of microseconds
+--
+-- @function core.os.usleep
+-- @tparam number us The number of microseconds.
+local function usleep(us)
+ if ceil(us) ~= floor(us) then
+ error("bad microseconds: " .. us)
+ end
+ C.usleep(us)
+end
+_M.usleep = usleep
+
+
local function waitpid_nohang(pid)
local res = C.waitpid(pid, nil, WNOHANG)
if res == -1 then
@@ -86,7 +103,7 @@ function _M.waitpid(pid, timeout)
local total = timeout * 1000 * 1000
while step * count < total do
count = count + 1
- C.usleep(step)
+ usleep(step)
local ok, err = waitpid_nohang(pid)
if err then
return nil, err
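
A usage sketch for the `usleep` helper above: only whole microsecond counts are accepted, so fractional values fail loudly instead of silently truncating (this is what the `config_xds.lua` fix earlier in this diff relies on).

```lua
local core_os = require("apisix.core.os")

core_os.usleep(1000)                       -- sleep for 1ms
local ok = pcall(core_os.usleep, 0.1)      -- false: "bad microseconds: 0.1"
```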
diff --git a/apisix/debug.lua b/apisix/debug.lua
index 363aee172e4f..72c101635881 100644
--- a/apisix/debug.lua
+++ b/apisix/debug.lua
@@ -20,6 +20,7 @@ local log = require("apisix.core.log")
local profile = require("apisix.core.profile")
local lfs = require("lfs")
local inspect = require("inspect")
+local jsonschema = require("jsonschema")
local io = io
local ngx = ngx
local re_find = ngx.re.find
@@ -38,6 +39,51 @@ local debug_yaml_ctime
local _M = {version = 0.1}
+local config_schema = {
+ type = "object",
+ properties = {
+ basic = {
+ properties = {
+ enable = {
+ type = "boolean",
+ },
+ }
+ },
+ http_filter = {
+ properties = {
+ enable = {
+ type = "boolean",
+ },
+ enable_header_name = {
+ type = "string",
+ },
+ }
+ },
+ hook_conf = {
+ properties = {
+ enable = {
+ type = "boolean",
+ },
+ name = {
+ type = "string",
+ },
+ log_level = {
+ enum = {"debug", "info", "notice", "warn", "error",
+ "crit", "alert","emerg"},
+ },
+ is_print_input_args = {
+ type = "boolean",
+ },
+ is_print_return_value = {
+ type = "boolean",
+ },
+ }
+ },
+ },
+ required = {"basic", "http_filter", "hook_conf"},
+}
+
+
local function read_debug_yaml()
local attributes, err = lfs.attributes(debug_yaml_path)
if not attributes then
@@ -93,6 +139,16 @@ local function read_debug_yaml()
debug_yaml_new.hooks = debug_yaml_new.hooks or {}
debug_yaml = debug_yaml_new
debug_yaml_ctime = last_change_time
+
+ -- validate the debug yaml config
+ local validator = jsonschema.generate_validator(config_schema)
+ local ok, err = validator(debug_yaml)
+ if not ok then
+ log.error("failed to validate debug config " .. err)
+ return
+ end
+
+ return true
end
@@ -204,7 +260,10 @@ local function sync_debug_status(premature)
return
end
- read_debug_yaml()
+ if not read_debug_yaml() then
+ return
+ end
+
sync_debug_hooks()
end
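
A hedged example of a `debug.yaml` that passes the schema above, shown as the Lua table it becomes after parsing; the field values follow the stock `conf/debug.yaml` but are illustrative here.

```lua
local debug_yaml = {
    basic = { enable = false },
    http_filter = {
        enable = false,
        enable_header_name = "X-APISIX-Dynamic-Debug",
    },
    hook_conf = {
        enable = false,
        name = "hook_phase",
        log_level = "warn",
        is_print_input_args = true,
        is_print_return_value = true,
    },
}
-- all three top-level sections are required; dropping any of them makes
-- read_debug_yaml() log the validation error and return nil.
```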
diff --git a/apisix/init.lua b/apisix/init.lua
index d68e31ba5666..25d9d5aa2bfb 100644
--- a/apisix/init.lua
+++ b/apisix/init.lua
@@ -27,6 +27,7 @@ require("jit.opt").start("minstitch=2", "maxtrace=4000",
require("apisix.patch").patch()
local core = require("apisix.core")
+local conf_server = require("apisix.conf_server")
local plugin = require("apisix.plugin")
local plugin_config = require("apisix.plugin_config")
local script = require("apisix.script")
@@ -95,6 +96,7 @@ function _M.http_init(args)
end
xrpc.init()
+ conf_server.init()
end
@@ -227,9 +229,9 @@ end
local function set_upstream_headers(api_ctx, picked_server)
set_upstream_host(api_ctx, picked_server)
- local hdr = core.request.header(api_ctx, "X-Forwarded-Proto")
- if hdr then
- api_ctx.var.var_x_forwarded_proto = hdr
+ local proto = api_ctx.var.http_x_forwarded_proto
+ if proto then
+ api_ctx.var.var_x_forwarded_proto = proto
end
end
@@ -445,9 +447,10 @@ function _M.http_access_phase()
if changed then
api_ctx.matched_route = route
core.table.clear(api_ctx.plugins)
- api_ctx.plugins = plugin.filter(api_ctx, route, api_ctx.plugins)
+ local phase = "rewrite_in_consumer"
+ api_ctx.plugins = plugin.filter(api_ctx, route, api_ctx.plugins, nil, phase)
-- rerun rewrite phase for newly added plugins in consumer
- plugin.run_plugin("rewrite_in_consumer", api_ctx.plugins, api_ctx)
+ plugin.run_plugin(phase, api_ctx.plugins, api_ctx)
end
end
plugin.run_plugin("access", plugins, api_ctx)
@@ -486,15 +489,35 @@ function _M.http_access_phase()
end
local route_val = route.value
- if route_val.upstream and route_val.upstream.enable_websocket then
- enable_websocket = true
- end
api_ctx.matched_upstream = (route.dns_value and
route.dns_value.upstream)
or route_val.upstream
end
+ if api_ctx.matched_upstream and api_ctx.matched_upstream.tls and
+ api_ctx.matched_upstream.tls.client_cert_id then
+
+ local cert_id = api_ctx.matched_upstream.tls.client_cert_id
+ local upstream_ssl = router.router_ssl.get_by_id(cert_id)
+ if not upstream_ssl or upstream_ssl.type ~= "client" then
+ local err = upstream_ssl and
+ "ssl type should be 'client'" or
+ "ssl id [" .. cert_id .. "] not exits"
+ core.log.error("failed to get ssl cert: ", err)
+
+ if is_http then
+ return core.response.exit(502)
+ end
+
+ return ngx_exit(1)
+ end
+
+ core.log.info("matched ssl: ",
+ core.json.delay_encode(upstream_ssl, true))
+ api_ctx.upstream_ssl = upstream_ssl
+ end
+
if enable_websocket then
api_ctx.var.upstream_upgrade = api_ctx.var.http_upgrade
api_ctx.var.upstream_connection = api_ctx.var.http_connection
diff --git a/apisix/plugin.lua b/apisix/plugin.lua
index 5aad12e8926c..d8f4d538c83d 100644
--- a/apisix/plugin.lua
+++ b/apisix/plugin.lua
@@ -41,7 +41,7 @@ local merged_route = core.lrucache.new({
ttl = 300, count = 512
})
local local_conf
-
+local check_plugin_metadata
local _M = {
version = 0.3,
@@ -68,6 +68,10 @@ local function sort_plugin(l, r)
return l.priority > r.priority
end
+local function custom_sort_plugin(l, r)
+ return l._meta.priority > r._meta.priority
+end
+
local PLUGIN_TYPE_HTTP = 1
local PLUGIN_TYPE_STREAM = 2
@@ -151,6 +155,9 @@ local function load_plugin(name, plugins_list, plugin_type)
properties._meta = plugin_injected_schema._meta
-- new injected fields should be added under `_meta`
+ -- 1. so we won't break users' code when adding any new injected fields
+ -- 2. the semantics are clear, especially in the doc and on the caller side
+ -- TODO: move the `disable` to `_meta` too
plugin.schema['$comment'] = plugin_injected_schema['$comment']
end
@@ -365,7 +372,7 @@ local function trace_plugins_info_for_debug(ctx, plugins)
end
-function _M.filter(ctx, conf, plugins, route_conf)
+function _M.filter(ctx, conf, plugins, route_conf, phase)
local user_plugin_conf = conf.value.plugins
if user_plugin_conf == nil or
core.table.nkeys(user_plugin_conf) == 0 then
@@ -375,6 +382,7 @@ function _M.filter(ctx, conf, plugins, route_conf)
return plugins or core.tablepool.fetch("plugins", 0, 0)
end
+ local custom_sort = false
local route_plugin_conf = route_conf and route_conf.value.plugins
plugins = plugins or core.tablepool.fetch("plugins", 32, 0)
for _, plugin_obj in ipairs(local_plugins) do
@@ -389,6 +397,9 @@ function _M.filter(ctx, conf, plugins, route_conf)
end
end
+ if plugin_conf._meta and plugin_conf._meta.priority then
+ custom_sort = true
+ end
core.table.insert(plugins, plugin_obj)
core.table.insert(plugins, plugin_conf)
@@ -398,6 +409,51 @@ function _M.filter(ctx, conf, plugins, route_conf)
trace_plugins_info_for_debug(ctx, plugins)
+ if custom_sort then
+ local tmp_plugin_objs = core.tablepool.fetch("tmp_plugin_objs", 0, #plugins / 2)
+ local tmp_plugin_confs = core.tablepool.fetch("tmp_plugin_confs", #plugins / 2, 0)
+
+ for i = 1, #plugins, 2 do
+ local plugin_obj = plugins[i]
+ local plugin_conf = plugins[i + 1]
+
+ -- in the rewrite phase, the plugins execute in the following order:
+ -- 1. execute the rewrite phase of the plugins on the route (including the auth plugins)
+ -- 2. merge plugins from consumer and route
+ -- 3. execute the rewrite phase of the plugins on the consumer (phase: rewrite_in_consumer)
+ -- in this case, we need to skip the plugins that were already executed in step 1
+ if phase == "rewrite_in_consumer" and not plugin_conf._from_consumer then
+ plugin_conf._skip_rewrite_in_consumer = true
+ end
+
+ tmp_plugin_objs[plugin_conf] = plugin_obj
+ core.table.insert(tmp_plugin_confs, plugin_conf)
+
+ if not plugin_conf._meta then
+ plugin_conf._meta = core.table.new(0, 1)
+ plugin_conf._meta.priority = plugin_obj.priority
+ else
+ if not plugin_conf._meta.priority then
+ plugin_conf._meta.priority = plugin_obj.priority
+ end
+ end
+ end
+
+ sort_tab(tmp_plugin_confs, custom_sort_plugin)
+
+ local index
+ for i = 1, #tmp_plugin_confs do
+ index = i * 2 - 1
+ local plugin_conf = tmp_plugin_confs[i]
+ local plugin_obj = tmp_plugin_objs[plugin_conf]
+ plugins[index] = plugin_obj
+ plugins[index + 1] = plugin_conf
+ end
+
+ core.tablepool.release("tmp_plugin_objs", tmp_plugin_objs)
+ core.tablepool.release("tmp_plugin_confs", tmp_plugin_confs)
+ end
+
return plugins
end
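
A minimal, self-contained sketch of the custom ordering above: every conf ends up with an effective `_meta.priority` (falling back to the plugin object's built-in priority) and is sorted descending. Names and numbers are illustrative.

```lua
local confs = {
    { name = "a", _meta = { priority = 100 } },
    { name = "b", _meta = { priority = 20000 } },
    { name = "c", _meta = { priority = 1008 } },
}

-- same comparator as custom_sort_plugin() above
table.sort(confs, function(l, r)
    return l._meta.priority > r._meta.priority
end)

-- order is now b (20000), c (1008), a (100): a larger custom priority
-- makes the plugin run earlier, regardless of its built-in priority.
```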
@@ -579,7 +635,10 @@ function _M.init_worker()
end
local plugin_metadatas, err = core.config.new("/plugin_metadata",
- {automatic = true}
+ {
+ automatic = true,
+ checker = check_plugin_metadata
+ }
)
if not plugin_metadatas then
error("failed to create etcd instance for fetching /plugin_metadatas : "
@@ -633,39 +692,55 @@ function _M.conf_version(conf)
end
-local function check_schema(plugins_conf, schema_type, skip_disabled_plugin)
- for name, plugin_conf in pairs(plugins_conf) do
- core.log.info("check plugin schema, name: ", name, ", configurations: ",
- core.json.delay_encode(plugin_conf, true))
- if type(plugin_conf) ~= "table" then
- return false, "invalid plugin conf " ..
- core.json.encode(plugin_conf, true) ..
- " for plugin [" .. name .. "]"
+local function check_single_plugin_schema(name, plugin_conf, schema_type, skip_disabled_plugin)
+ core.log.info("check plugin schema, name: ", name, ", configurations: ",
+ core.json.delay_encode(plugin_conf, true))
+ if type(plugin_conf) ~= "table" then
+ return false, "invalid plugin conf " ..
+ core.json.encode(plugin_conf, true) ..
+ " for plugin [" .. name .. "]"
+ end
+
+ local plugin_obj = local_plugins_hash[name]
+ if not plugin_obj then
+ if skip_disabled_plugin then
+ return true
+ else
+ return false, "unknown plugin [" .. name .. "]"
end
+ end
- local plugin_obj = local_plugins_hash[name]
- if not plugin_obj then
- if skip_disabled_plugin then
- goto CONTINUE
- else
- return false, "unknown plugin [" .. name .. "]"
- end
+ if plugin_obj.check_schema then
+ local disable = plugin_conf.disable
+ plugin_conf.disable = nil
+
+ local ok, err = plugin_obj.check_schema(plugin_conf, schema_type)
+ if not ok then
+ return false, "failed to check the configuration of plugin "
+ .. name .. " err: " .. err
end
- if plugin_obj.check_schema then
- local disable = plugin_conf.disable
- plugin_conf.disable = nil
+ plugin_conf.disable = disable
+ end
- local ok, err = plugin_obj.check_schema(plugin_conf, schema_type)
- if not ok then
- return false, "failed to check the configuration of plugin "
- .. name .. " err: " .. err
- end
+ return true
+end
- plugin_conf.disable = disable
- end
- ::CONTINUE::
+check_plugin_metadata = function(item)
+ return check_single_plugin_schema(item.id, item,
+ core.schema.TYPE_METADATA, true)
+end
+
+
+local function check_schema(plugins_conf, schema_type, skip_disabled_plugin)
+ for name, plugin_conf in pairs(plugins_conf) do
+ local ok, err = check_single_plugin_schema(name, plugin_conf,
+ schema_type, skip_disabled_plugin)
+ if not ok then
+ return false, err
+ end
end
return true
@@ -754,6 +829,11 @@ function _M.run_plugin(phase, plugins, api_ctx)
phase = "rewrite"
end
local phase_func = plugins[i][phase]
+
+ if phase == "rewrite" and plugins[i + 1]._skip_rewrite_in_consumer then
+ goto CONTINUE
+ end
+
if phase_func then
plugin_run = true
local conf = plugins[i + 1]
@@ -781,6 +861,8 @@ function _M.run_plugin(phase, plugins, api_ctx)
end
end
end
+
+ ::CONTINUE::
end
return api_ctx, plugin_run
end
diff --git a/apisix/plugins/api-breaker.lua b/apisix/plugins/api-breaker.lua
index 5ccf4404082a..eabca140af11 100644
--- a/apisix/plugins/api-breaker.lua
+++ b/apisix/plugins/api-breaker.lua
@@ -53,7 +53,8 @@ local schema = {
type = "string",
minLength = 1
}
- }
+ },
+ required = {"key", "value"},
}
},
max_breaker_sec = {
diff --git a/apisix/plugins/grpc-transcode/util.lua b/apisix/plugins/grpc-transcode/util.lua
index de54cdb87984..dc4526195639 100644
--- a/apisix/plugins/grpc-transcode/util.lua
+++ b/apisix/plugins/grpc-transcode/util.lua
@@ -147,6 +147,22 @@ function _M.map_message(field, default_values, request_table)
if ty ~= "enum" and field_type:sub(1, 1) == "." then
if request_table[name] == nil then
sub = default_values and default_values[name]
+ elseif core.table.isarray(request_table[name]) then
+ local sub_array = core.table.new(#request_table[name], 0)
+ for i, value in ipairs(request_table[name]) do
+ local sub_array_obj
+ if type(value) == "table" then
+ sub_array_obj, err = _M.map_message(field_type,
+ default_values and default_values[name], value)
+ if err then
+ return nil, err
+ end
+ else
+ sub_array_obj = value
+ end
+ sub_array[i] = sub_array_obj
+ end
+ sub = sub_array
else
sub, err = _M.map_message(field_type, default_values and default_values[name],
request_table[name])
diff --git a/apisix/plugins/kafka-logger.lua b/apisix/plugins/kafka-logger.lua
index 2947d145e468..cb43ae3db24b 100644
--- a/apisix/plugins/kafka-logger.lua
+++ b/apisix/plugins/kafka-logger.lua
@@ -83,6 +83,11 @@ local schema = {
-- in lua-resty-kafka, cluster_name is defined as number
-- see https://github.com/doujiang24/lua-resty-kafka#new-1
cluster_name = {type = "integer", minimum = 1, default = 1},
+ -- config for lua-resty-kafka; default values are the same as in lua-resty-kafka
+ producer_batch_num = {type = "integer", minimum = 1, default = 200},
+ producer_batch_size = {type = "integer", minimum = 0, default = 1048576},
+ producer_max_buffering = {type = "integer", minimum = 1, default = 50000},
+ producer_time_linger = {type = "integer", minimum = 1, default = 1}
},
required = {"broker_list", "kafka_topic"}
}
@@ -208,6 +213,10 @@ function _M.log(conf, ctx)
broker_config["request_timeout"] = conf.timeout * 1000
broker_config["producer_type"] = conf.producer_type
broker_config["required_acks"] = conf.required_acks
+ broker_config["batch_num"] = conf.producer_batch_num
+ broker_config["batch_size"] = conf.producer_batch_size
+ broker_config["max_buffering"] = conf.producer_max_buffering
+ broker_config["flush_time"] = conf.producer_time_linger * 1000
local prod, err = core.lrucache.plugin_ctx(lrucache, ctx, nil, create_producer,
broker_list, broker_config, conf.cluster_name)
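
A hedged example of tuning the new producer knobs, written as the Lua table form of a plugin conf (values illustrative; the defaults match lua-resty-kafka).

```lua
local conf = {
    broker_list = { ["127.0.0.1"] = 9092 },
    kafka_topic = "test",
    producer_batch_num = 400,        -- messages per batch (default 200)
    producer_batch_size = 2097152,   -- bytes per batch (default 1048576)
    producer_max_buffering = 100000, -- buffered messages (default 50000)
    producer_time_linger = 5,        -- seconds; passed on as flush_time in ms
}
```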
diff --git a/apisix/plugins/openid-connect.lua b/apisix/plugins/openid-connect.lua
index 73427bc3311c..4a6dbda1ccec 100644
--- a/apisix/plugins/openid-connect.lua
+++ b/apisix/plugins/openid-connect.lua
@@ -96,6 +96,12 @@ local schema = {
"header to the request for downstream.",
type = "boolean",
default = true
+ },
+ set_refresh_token_header = {
+ description = "Whether the refresh token should be added in the X-Refresh-Token " ..
+ "header to the request for downstream.",
+ type = "boolean",
+ default = false
}
},
required = {"client_id", "client_secret", "discovery"}
@@ -260,7 +266,7 @@ function _M.rewrite(plugin_conf, ctx)
conf.ssl_verify = "no"
end
- local response, err
+ local response, err, session, _
if conf.bearer_only or conf.introspection_endpoint or conf.public_key then
-- An introspection endpoint or a public key has been configured. Try to
@@ -298,7 +304,7 @@ function _M.rewrite(plugin_conf, ctx)
-- provider's authorization endpoint to initiate the Relying Party flow.
-- This code path also handles when the ID provider then redirects to
-- the configured redirect URI after successful authentication.
- response, err = openidc.authenticate(conf)
+ response, err, _, session = openidc.authenticate(conf)
if err then
core.log.error("OIDC authentication failed: ", err)
@@ -307,7 +313,8 @@ function _M.rewrite(plugin_conf, ctx)
if response then
-- If the openidc module has returned a response, it may contain,
- -- respectively, the access token, the ID token, and the userinfo.
+ -- respectively, the access token, the ID token, the refresh token,
+ -- and the userinfo.
-- Add respective headers to the request, if so configured.
-- Add configured access token header, maybe.
@@ -324,6 +331,11 @@ function _M.rewrite(plugin_conf, ctx)
core.request.set_header(ctx, "X-Userinfo",
ngx_encode_base64(core.json.encode(response.user)))
end
+
+ -- Add X-Refresh-Token header, maybe.
+ if session.data.refresh_token and conf.set_refresh_token_header then
+ core.request.set_header(ctx, "X-Refresh-Token", session.data.refresh_token)
+ end
end
end
end
diff --git a/apisix/plugins/request-id.lua b/apisix/plugins/request-id.lua
index 6f1ab7b0cc9e..353bd3f8322e 100644
--- a/apisix/plugins/request-id.lua
+++ b/apisix/plugins/request-id.lua
@@ -64,7 +64,7 @@ local attr_schema = {
local _M = {
version = 0.1,
- priority = 11010,
+ priority = 12015,
name = plugin_name,
schema = schema
}
diff --git a/apisix/plugins/response-rewrite.lua b/apisix/plugins/response-rewrite.lua
index b2c94f2aff1e..9a4015fb98bb 100644
--- a/apisix/plugins/response-rewrite.lua
+++ b/apisix/plugins/response-rewrite.lua
@@ -86,9 +86,15 @@ local schema = {
},
},
},
- oneOf = {"body", "filters"},
},
- minProperties = 1,
+ dependencies = {
+ body = {
+ ["not"] = {required = {"filters"}}
+ },
+ filters = {
+ ["not"] = {required = {"body"}}
+ }
+ }
}
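
A sketch of the mutual exclusion encoded above, assuming the `jsonschema` module handles `dependencies` with a `not` subschema: if `body` is present then `filters` must be absent, and vice versa.

```lua
local jsonschema = require("jsonschema")

local schema = {
    type = "object",
    dependencies = {
        body = { ["not"] = { required = {"filters"} } },
        filters = { ["not"] = { required = {"body"} } },
    },
}

local validator = jsonschema.generate_validator(schema)
assert(validator({ body = "hello" }))                     -- ok
assert(not validator({ body = "hello", filters = {} }))   -- rejected
```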
diff --git a/apisix/plugins/server-info.lua b/apisix/plugins/server-info.lua
index 055bafa2858c..b7cd67793d75 100644
--- a/apisix/plugins/server-info.lua
+++ b/apisix/plugins/server-info.lua
@@ -261,6 +261,15 @@ function _M.init()
return
end
+
+ local local_conf = core.config.local_conf()
+ local deployment_role = core.table.try_read_attr(
+ local_conf, "deployment", "role")
+ if deployment_role == "data_plane" then
+ -- data_plane should not write to etcd
+ return
+ end
+
local attr = plugin.plugin_attr(plugin_name)
local ok, err = core.schema.check(attr_schema, attr)
if not ok then
diff --git a/apisix/plugins/sls-logger.lua b/apisix/plugins/sls-logger.lua
index ed34c847ebe2..290bf11917bb 100644
--- a/apisix/plugins/sls-logger.lua
+++ b/apisix/plugins/sls-logger.lua
@@ -17,6 +17,9 @@
local core = require("apisix.core")
local log_util = require("apisix.utils.log-util")
local bp_manager_mod = require("apisix.utils.batch-processor-manager")
+local plugin = require("apisix.plugin")
+
+
local plugin_name = "sls-logger"
local ngx = ngx
local rf5424 = require("apisix.plugins.slslog.rfc5424")
@@ -127,10 +130,15 @@ end
-- log phase in APISIX
function _M.log(conf, ctx)
- local entry = log_util.get_full_log(ngx, conf)
- if not entry.route_id then
- core.log.error("failed to obtain the route id for sys logger")
- return
+ local metadata = plugin.plugin_metadata(plugin_name)
+ local entry
+
+ if metadata and metadata.value.log_format
+ and core.table.nkeys(metadata.value.log_format) > 0
+ then
+ entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+ else
+ entry = log_util.get_full_log(ngx, conf)
end
local json_str, err = core.json.encode(entry)
diff --git a/apisix/plugins/traffic-split.lua b/apisix/plugins/traffic-split.lua
index 9ba0997f6f08..38e272b7be66 100644
--- a/apisix/plugins/traffic-split.lua
+++ b/apisix/plugins/traffic-split.lua
@@ -172,11 +172,7 @@ local function set_upstream(upstream_info, ctx)
upstream_host = upstream_info.upstream_host,
key = upstream_info.key,
nodes = new_nodes,
- timeout = {
- send = upstream_info.timeout and upstream_info.timeout.send or 15,
- read = upstream_info.timeout and upstream_info.timeout.read or 15,
- connect = upstream_info.timeout and upstream_info.timeout.connect or 15
- }
+ timeout = upstream_info.timeout,
}
local ok, err = upstream.check_schema(up_conf)
diff --git a/apisix/schema_def.lua b/apisix/schema_def.lua
index 767c2fa63503..16dccc6d8fa2 100644
--- a/apisix/schema_def.lua
+++ b/apisix/schema_def.lua
@@ -404,6 +404,7 @@ local upstream_schema = {
tls = {
type = "object",
properties = {
+ client_cert_id = id_schema,
client_cert = certificate_scheme,
client_key = private_key_schema,
verify = {
@@ -414,8 +415,17 @@ local upstream_schema = {
},
},
dependencies = {
- client_cert = {"client_key"},
- client_key = {"client_cert"},
+ client_cert = {
+ required = {"client_key"},
+ ["not"] = {required = {"client_cert_id"}}
+ },
+ client_key = {
+ required = {"client_cert"},
+ ["not"] = {required = {"client_cert_id"}}
+ },
+ client_cert_id = {
+                ["not"] = {required = {"client_cert", "client_key"}}
+ }
}
},
keepalive_pool = {
@@ -504,7 +514,7 @@ local upstream_schema = {
oneOf = {
{required = {"type", "nodes"}},
{required = {"type", "service_name", "discovery_type"}},
- },
+ }
}
-- TODO: add more nginx variable support
@@ -722,6 +732,14 @@ _M.ssl = {
type = "object",
properties = {
id = id_schema,
+ type = {
+ description = "ssl certificate type, " ..
+ "server to server certificate, " ..
+ "client to client certificate for upstream",
+ type = "string",
+ default = "server",
+ enum = {"server", "client"}
+ },
cert = certificate_scheme,
key = private_key_schema,
sni = {
@@ -772,10 +790,20 @@ _M.ssl = {
create_time = timestamp_def,
update_time = timestamp_def
},
- oneOf = {
- {required = {"sni", "key", "cert"}},
- {required = {"snis", "key", "cert"}}
+ ["if"] = {
+ properties = {
+ type = {
+ enum = {"server"},
+ },
+ },
+ },
+ ["then"] = {
+ oneOf = {
+ {required = {"sni", "key", "cert"}},
+ {required = {"snis", "key", "cert"}}
+ }
},
+ ["else"] = {required = {"key", "cert"}}
}
@@ -924,6 +952,10 @@ _M.plugin_injected_schema = {
{ type = "object" },
}
},
+ priority = {
+ description = "priority of plugins by customized order",
+ type = "integer",
+ },
}
}
}
diff --git a/apisix/ssl.lua b/apisix/ssl.lua
index c0c47aec07fd..7d48f308502e 100644
--- a/apisix/ssl.lua
+++ b/apisix/ssl.lua
@@ -197,6 +197,10 @@ function _M.check_ssl_conf(in_dp, conf)
return nil, err
end
+ if conf.type == "client" then
+ return true
+ end
+
local numcerts = conf.certs and #conf.certs or 0
local numkeys = conf.keys and #conf.keys or 0
if numcerts ~= numkeys then
diff --git a/apisix/ssl/router/radixtree_sni.lua b/apisix/ssl/router/radixtree_sni.lua
index 70ac0faa32d1..891d8d21dd4c 100644
--- a/apisix/ssl/router/radixtree_sni.lua
+++ b/apisix/ssl/router/radixtree_sni.lua
@@ -26,6 +26,7 @@ local error = error
local str_find = core.string.find
local str_gsub = string.gsub
local str_lower = string.lower
+local tostring = tostring
local ssl_certificates
local radixtree_router
local radixtree_router_ver
@@ -44,7 +45,7 @@ local function create_router(ssl_items)
local idx = 0
for _, ssl in config_util.iterate_values(ssl_items) do
- if ssl.value ~= nil and
+ if ssl.value ~= nil and ssl.value.type == "server" and
(ssl.value.status == nil or ssl.value.status == 1) then -- compatible with old version
local j = 0
@@ -261,4 +262,19 @@ function _M.init_worker()
end
+function _M.get_by_id(ssl_id)
+ local ssl
+ local ssls = core.config.fetch_created_obj("/ssl")
+ if ssls then
+ ssl = ssls:get(tostring(ssl_id))
+ end
+
+ if not ssl then
+ return nil
+ end
+
+ return ssl.value
+end
+
+
return _M
diff --git a/apisix/upstream.lua b/apisix/upstream.lua
index a0e963b44f8a..0162ad8137ed 100644
--- a/apisix/upstream.lua
+++ b/apisix/upstream.lua
@@ -286,8 +286,11 @@ function _M.set_by_route(route, api_ctx)
end
end
- set_directly(api_ctx, up_conf.type .. "#upstream_" .. tostring(up_conf),
- tostring(up_conf), up_conf)
+ local id = up_conf.parent.value.id
+ local conf_version = up_conf.parent.modifiedIndex
+    -- include the upstream object as part of the version, because the upstream may be
+    -- changed by service discovery or the DNS resolver
+ set_directly(api_ctx, id, conf_version .. "#" .. tostring(up_conf), up_conf)
local nodes_count = up_conf.nodes and #up_conf.nodes or 0
if nodes_count == 0 then
@@ -330,14 +333,24 @@ function _M.set_by_route(route, api_ctx)
local scheme = up_conf.scheme
if (scheme == "https" or scheme == "grpcs") and up_conf.tls then
+
+ local client_cert, client_key
+ if up_conf.tls.client_cert_id then
+ client_cert = api_ctx.upstream_ssl.cert
+ client_key = api_ctx.upstream_ssl.key
+ else
+ client_cert = up_conf.tls.client_cert
+ client_key = up_conf.tls.client_key
+ end
+
-- the sni here is just for logging
local sni = api_ctx.var.upstream_host
- local cert, err = apisix_ssl.fetch_cert(sni, up_conf.tls.client_cert)
+ local cert, err = apisix_ssl.fetch_cert(sni, client_cert)
-            if not ok then
+            if not cert then
return 503, err
end
- local key, err = apisix_ssl.fetch_pkey(sni, up_conf.tls.client_key)
+ local key, err = apisix_ssl.fetch_pkey(sni, client_key)
-            if not ok then
+            if not key then
return 503, err
end
@@ -415,6 +428,29 @@ local function check_upstream_conf(in_dp, conf)
return false, "invalid configuration: " .. err
end
+ local ssl_id = conf.tls and conf.tls.client_cert_id
+ if ssl_id then
+ local key = "/ssl/" .. ssl_id
+ local res, err = core.etcd.get(key)
+ if not res then
+ return nil, "failed to fetch ssl info by "
+ .. "ssl id [" .. ssl_id .. "]: " .. err
+ end
+
+ if res.status ~= 200 then
+ return nil, "failed to fetch ssl info by "
+ .. "ssl id [" .. ssl_id .. "], "
+ .. "response code: " .. res.status
+ end
+ if res.body and res.body.node and
+ res.body.node.value and res.body.node.value.type ~= "client" then
+
+ return nil, "failed to fetch ssl info by "
+ .. "ssl id [" .. ssl_id .. "], "
+ .. "wrong ssl type"
+ end
+ end
+
-- encrypt the key in the admin
if conf.tls and conf.tls.client_key then
conf.tls.client_key = apisix_ssl.aes_encrypt_pkey(conf.tls.client_key)
diff --git a/benchmark/run.sh b/benchmark/run.sh
index 7d1f06a67d7b..8bb1047fba17 100755
--- a/benchmark/run.sh
+++ b/benchmark/run.sh
@@ -35,12 +35,15 @@ mkdir -p benchmark/fake-apisix/logs
make init
+fake_apisix_cmd="openresty -p $PWD/benchmark/fake-apisix -c $PWD/benchmark/fake-apisix/conf/nginx.conf"
+server_cmd="openresty -p $PWD/benchmark/server -c $PWD/benchmark/server/conf/nginx.conf"
+
trap 'onCtrlC' INT
function onCtrlC () {
sudo killall wrk
sudo killall openresty
- sudo openresty -p $PWD/benchmark/fake-apisix -s stop || exit 1
- sudo openresty -p $PWD/benchmark/server -s stop || exit 1
+ sudo ${fake_apisix_cmd} -s stop || exit 1
+ sudo ${server_cmd} -s stop || exit 1
}
for up_cnt in $(seq 1 $upstream_cnt);
@@ -55,14 +58,26 @@ do
done
if [[ "$(uname)" == "Darwin" ]]; then
- sed -i "" "s/worker_processes .*/worker_processes $worker_cnt;/g" conf/nginx.conf
+ sed -i "" "s/\- proxy-mirror .*/#\- proxy-mirror/g" conf/config-default.yaml
+ sed -i "" "s/\- proxy-cache .*/#\- proxy-cache/g" conf/config-default.yaml
sed -i "" "s/listen .*;/$nginx_listen/g" benchmark/server/conf/nginx.conf
else
- sed -i "s/worker_processes .*/worker_processes $worker_cnt;/g" conf/nginx.conf
+ sed -i "s/\- proxy-mirror/#\- proxy-mirror/g" conf/config-default.yaml
+ sed -i "s/\- proxy-cache/#\- proxy-cache/g" conf/config-default.yaml
sed -i "s/listen .*;/$nginx_listen/g" benchmark/server/conf/nginx.conf
fi
-sudo openresty -p $PWD/benchmark/server || exit 1
+echo "
+apisix:
+ admin_key:
+ - name: admin
+ key: edd1c9f034335f136f87ad84b625c8f1
+ role: admin
+nginx_config:
+ worker_processes: ${worker_cnt}
+" > conf/config.yaml
+
+sudo ${server_cmd} || exit 1
make run
@@ -140,7 +155,7 @@ else
sed -i "s/worker_processes [0-9]*/worker_processes $worker_cnt/g" benchmark/fake-apisix/conf/nginx.conf
fi
-sudo openresty -p $PWD/benchmark/fake-apisix || exit 1
+sudo ${fake_apisix_cmd} || exit 1
sleep 1
@@ -150,6 +165,6 @@ sleep 1
wrk -d 5 -c 16 http://127.0.0.1:9080/hello
-sudo openresty -p $PWD/benchmark/fake-apisix -s stop || exit 1
+sudo ${fake_apisix_cmd} -s stop || exit 1
-sudo openresty -p $PWD/benchmark/server -s stop || exit 1
+sudo ${server_cmd} -s stop || exit 1
diff --git a/ci/centos7-ci.sh b/ci/centos7-ci.sh
index 0f066e6c1520..543e54514be5 100755
--- a/ci/centos7-ci.sh
+++ b/ci/centos7-ci.sh
@@ -35,10 +35,7 @@ install_dependencies() {
./utils/linux-install-luarocks.sh
# install etcdctl
- wget https://github.com/etcd-io/etcd/releases/download/v3.4.18/etcd-v3.4.18-linux-amd64.tar.gz
- tar xf etcd-v3.4.18-linux-amd64.tar.gz
- cp ./etcd-v3.4.18-linux-amd64/etcdctl /usr/local/bin/
- rm -rf etcd-v3.4.18-linux-amd64
+ ./utils/linux-install-etcd-client.sh
# install vault cli capabilities
install_vault_cli
diff --git a/ci/init-last-test-service.sh b/ci/init-last-test-service.sh
new file mode 100755
index 000000000000..f49d4a747528
--- /dev/null
+++ b/ci/init-last-test-service.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+docker exec -i apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 1 --topic test2
+docker exec -i apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 3 --topic test3
+docker exec -i apache-apisix_kafka-server2_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server2:2181 --replication-factor 1 --partitions 1 --topic test4
+docker exec -i apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 1 --topic test-consumer
+
+# create messages for test-consumer
+for i in `seq 30`
+do
+    docker exec -i apache-apisix_kafka-server1_1 bash -c "echo \"testmsg$i\" | /opt/bitnami/kafka/bin/kafka-console-producer.sh --bootstrap-server 127.0.0.1:9092 --topic test-consumer"
+    echo "Produced message to the test-consumer topic, msg: testmsg$i"
+done
+echo "Kafka service initialization completed"
diff --git a/ci/linux-ci-init-service.sh b/ci/init-plugin-test-service.sh
similarity index 85%
rename from ci/linux-ci-init-service.sh
rename to ci/init-plugin-test-service.sh
index 73477a5febca..5f468502304d 100755
--- a/ci/linux-ci-init-service.sh
+++ b/ci/init-plugin-test-service.sh
@@ -19,15 +19,6 @@
docker exec -i apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 1 --topic test2
docker exec -i apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 3 --topic test3
docker exec -i apache-apisix_kafka-server2_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server2:2181 --replication-factor 1 --partitions 1 --topic test4
-docker exec -i apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 1 --topic test-consumer
-
-# create messages for test-consumer
-for i in `seq 30`
-do
- docker exec -i apache-apisix_kafka-server1_1 bash -c "echo "testmsg$i" | /opt/bitnami/kafka/bin/kafka-console-producer.sh --bootstrap-server 127.0.0.1:9092 --topic test-consumer"
- echo "Produces messages to the test-consumer topic, msg: testmsg$i"
-done
-echo "Kafka service initialization completed"
# prepare openwhisk env
docker pull openwhisk/action-nodejs-v14:nightly
diff --git a/ci/pod/docker-compose.common.yml b/ci/pod/docker-compose.common.yml
index ecbdfcaf0a47..9e0394a48bd2 100644
--- a/ci/pod/docker-compose.common.yml
+++ b/ci/pod/docker-compose.common.yml
@@ -31,7 +31,7 @@ services:
- "3380:2380"
etcd:
- image: bitnami/etcd:3.4.18
+ image: bitnami/etcd:3.5.4
restart: unless-stopped
env_file:
- ci/pod/etcd/env/common.env
@@ -42,7 +42,7 @@ services:
- "2380:2380"
etcd_tls:
- image: bitnami/etcd:3.4.18
+ image: bitnami/etcd:3.5.4
restart: unless-stopped
env_file:
- ci/pod/etcd/env/common.env
@@ -58,7 +58,7 @@ services:
- ./t/certs:/certs
etcd_mtls:
- image: bitnami/etcd:3.4.18
+ image: bitnami/etcd:3.5.4
restart: unless-stopped
env_file:
- ci/pod/etcd/env/common.env
diff --git a/ci/pod/docker-compose.yml b/ci/pod/docker-compose.first.yml
similarity index 55%
rename from ci/pod/docker-compose.yml
rename to ci/pod/docker-compose.first.yml
index 68dab85c539b..a13ad3cf1586 100644
--- a/ci/pod/docker-compose.yml
+++ b/ci/pod/docker-compose.first.yml
@@ -18,95 +18,6 @@
version: "3.8"
services:
- ## Redis
- apisix_redis:
- # The latest image is the latest stable version
- image: redis:latest
- restart: unless-stopped
- ports:
- - "6379:6379"
- networks:
- apisix_net:
-
-
- ## keycloak
- apisix_keycloak:
- image: sshniro/keycloak-apisix:1.0.0
- environment:
- KEYCLOAK_USER: admin
- KEYCLOAK_PASSWORD: 123456
- restart: unless-stopped
- ports:
- - "8090:8080"
- - "8443:8443"
- networks:
- apisix_net:
-
-
- ## kafka-cluster
- zookeeper-server1:
- image: bitnami/zookeeper:3.6.0
- env_file:
- - ci/pod/kafka/zookeeper-server/env/common.env
- restart: unless-stopped
- ports:
- - "2181:2181"
- networks:
- kafka_net:
-
- zookeeper-server2:
- image: bitnami/zookeeper:3.6.0
- env_file:
- - ci/pod/kafka/zookeeper-server/env/common.env
- restart: unless-stopped
- ports:
- - "12181:12181"
- networks:
- kafka_net:
-
- kafka-server1:
- image: bitnami/kafka:2.8.1
- env_file:
- - ci/pod/kafka/kafka-server/env/common.env
- environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server1:2181
- restart: unless-stopped
- ports:
- - "9092:9092"
- - "9093:9093"
- - "9094:9094"
- depends_on:
- - zookeeper-server1
- - zookeeper-server2
- networks:
- kafka_net:
- volumes:
- - ./ci/pod/kafka/kafka-server/kafka_jaas.conf:/opt/bitnami/kafka/config/kafka_jaas.conf:ro
- - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.keystore.jks:ro
- - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.truststore.jks:ro
-
- kafka-server2:
- image: bitnami/kafka:2.8.1
- env_file:
- - ci/pod/kafka/kafka-server/env/common.env
- environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server2:2181
- restart: unless-stopped
- ports:
- - "19092:9092"
- - "19093:9093"
- - "19094:9094"
- depends_on:
- - zookeeper-server1
- - zookeeper-server2
- networks:
- kafka_net:
- volumes:
- - ./ci/pod/kafka/kafka-server/kafka_jaas.conf:/opt/bitnami/kafka/config/kafka_jaas.conf:ro
- - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.keystore.jks:ro
- - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.truststore.jks:ro
-
-
## Eureka
eureka:
image: bitinit/eureka
@@ -116,19 +27,6 @@ services:
ports:
- "8761:8761"
-
- ## SkyWalking
- skywalking:
- image: apache/skywalking-oap-server:8.7.0-es6
- restart: unless-stopped
- ports:
- - "1234:1234"
- - "11800:11800"
- - "12800:12800"
- networks:
- skywalk_net:
-
-
## Consul
consul_1:
image: consul:1.7
@@ -148,37 +46,6 @@ services:
networks:
consul_net:
-
- ## HashiCorp Vault
- vault:
- image: vault:1.9.0
- container_name: vault
- restart: unless-stopped
- ports:
- - "8200:8200"
- cap_add:
- - IPC_LOCK
- environment:
- VAULT_DEV_ROOT_TOKEN_ID: root
- VAULT_DEV_LISTEN_ADDRESS: 0.0.0.0:8200
- command: [ "vault", "server", "-dev" ]
- networks:
- vault_net:
-
-
- ## OpenLDAP
- openldap:
- image: bitnami/openldap:2.5.8
- environment:
- LDAP_ADMIN_USERNAME: amdin
- LDAP_ADMIN_PASSWORD: adminpassword
- LDAP_USERS: user01,user02
- LDAP_PASSWORDS: password1,password2
- ports:
- - "1389:1389"
- - "1636:1636"
-
-
## Nacos cluster
nacos_auth:
hostname: nacos1
@@ -368,69 +235,7 @@ services:
networks:
nacos_net:
- rocketmq_namesrv:
- image: apacherocketmq/rocketmq:4.6.0
- container_name: rmqnamesrv
- restart: unless-stopped
- ports:
- - "9876:9876"
- command: sh mqnamesrv
- networks:
- rocketmq_net:
-
- rocketmq_broker:
- image: apacherocketmq/rocketmq:4.6.0
- container_name: rmqbroker
- restart: unless-stopped
- ports:
- - "10909:10909"
- - "10911:10911"
- - "10912:10912"
- depends_on:
- - rocketmq_namesrv
- command: sh mqbroker -n rocketmq_namesrv:9876 -c ../conf/broker.conf
- networks:
- rocketmq_net:
-
- # Open Policy Agent
- opa:
- image: openpolicyagent/opa:0.35.0
- restart: unless-stopped
- ports:
- - 8181:8181
- command: run -s /example.rego /echo.rego /data.json
- volumes:
- - type: bind
- source: ./ci/pod/opa/example.rego
- target: /example.rego
- - type: bind
- source: ./ci/pod/opa/echo.rego
- target: /echo.rego
- - type: bind
- source: ./ci/pod/opa/data.json
- target: /data.json
- networks:
- opa_net:
-
- # Splunk HEC Logging Service
- splunk:
- image: splunk/splunk:8.2.3
- restart: unless-stopped
- ports:
- - "18088:8088"
- environment:
- SPLUNK_PASSWORD: "ApacheAPISIX@666"
- SPLUNK_START_ARGS: "--accept-license"
- SPLUNK_HEC_TOKEN: "BD274822-96AA-4DA6-90EC-18940FB2414C"
- SPLUNK_HEC_SSL: "False"
-
networks:
- apisix_net:
consul_net:
- kafka_net:
nacos_net:
- skywalk_net:
- rocketmq_net:
- vault_net:
- opa_net:
diff --git a/ci/pod/docker-compose.last.yml b/ci/pod/docker-compose.last.yml
new file mode 100644
index 000000000000..dbc835fdeaf7
--- /dev/null
+++ b/ci/pod/docker-compose.last.yml
@@ -0,0 +1,97 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+version: "3.8"
+
+services:
+ ## Redis
+ apisix_redis:
+ # The latest image is the latest stable version
+ image: redis:latest
+ restart: unless-stopped
+ ports:
+ - "6379:6379"
+ networks:
+ apisix_net:
+
+ ## kafka-cluster
+ zookeeper-server1:
+ image: bitnami/zookeeper:3.6.0
+ env_file:
+ - ci/pod/kafka/zookeeper-server/env/common.env
+ restart: unless-stopped
+ ports:
+ - "2181:2181"
+ networks:
+ kafka_net:
+
+ zookeeper-server2:
+ image: bitnami/zookeeper:3.6.0
+ env_file:
+ - ci/pod/kafka/zookeeper-server/env/common.env
+ restart: unless-stopped
+ ports:
+ - "12181:12181"
+ networks:
+ kafka_net:
+
+ kafka-server1:
+ image: bitnami/kafka:2.8.1
+ env_file:
+ - ci/pod/kafka/kafka-server/env/last.env
+ environment:
+ KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server1:2181
+ restart: unless-stopped
+ ports:
+ - "9092:9092"
+ - "9093:9093"
+ - "9094:9094"
+ depends_on:
+ - zookeeper-server1
+ - zookeeper-server2
+ networks:
+ kafka_net:
+ volumes:
+ - ./ci/pod/kafka/kafka-server/kafka_jaas.conf:/opt/bitnami/kafka/config/kafka_jaas.conf:ro
+ - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.keystore.jks:ro
+ - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.truststore.jks:ro
+
+ kafka-server2:
+ image: bitnami/kafka:2.8.1
+ env_file:
+ - ci/pod/kafka/kafka-server/env/last.env
+ environment:
+ KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server2:2181
+ restart: unless-stopped
+ ports:
+ - "19092:9092"
+ - "19093:9093"
+ - "19094:9094"
+ depends_on:
+ - zookeeper-server1
+ - zookeeper-server2
+ networks:
+ kafka_net:
+ volumes:
+ - ./ci/pod/kafka/kafka-server/kafka_jaas.conf:/opt/bitnami/kafka/config/kafka_jaas.conf:ro
+ - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.keystore.jks:ro
+ - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.truststore.jks:ro
+
+
+networks:
+ apisix_net:
+ kafka_net:
diff --git a/ci/pod/docker-compose.plugin.yml b/ci/pod/docker-compose.plugin.yml
new file mode 100644
index 000000000000..d0350860096b
--- /dev/null
+++ b/ci/pod/docker-compose.plugin.yml
@@ -0,0 +1,201 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+version: "3.8"
+
+services:
+ ## Redis
+ apisix_redis:
+ # The latest image is the latest stable version
+ image: redis:latest
+ restart: unless-stopped
+ ports:
+ - "6379:6379"
+ networks:
+ apisix_net:
+
+
+ ## keycloak
+ apisix_keycloak:
+ image: sshniro/keycloak-apisix:1.0.0
+ environment:
+ KEYCLOAK_USER: admin
+ KEYCLOAK_PASSWORD: 123456
+ restart: unless-stopped
+ ports:
+ - "8090:8080"
+ - "8443:8443"
+ networks:
+ apisix_net:
+
+
+ ## kafka-cluster
+ zookeeper-server1:
+ image: bitnami/zookeeper:3.6.0
+ env_file:
+ - ci/pod/kafka/zookeeper-server/env/common.env
+ restart: unless-stopped
+ ports:
+ - "2181:2181"
+ networks:
+ kafka_net:
+
+ zookeeper-server2:
+ image: bitnami/zookeeper:3.6.0
+ env_file:
+ - ci/pod/kafka/zookeeper-server/env/common.env
+ restart: unless-stopped
+ ports:
+ - "12181:12181"
+ networks:
+ kafka_net:
+
+ kafka-server1:
+ image: bitnami/kafka:2.8.1
+ env_file:
+ - ci/pod/kafka/kafka-server/env/common.env
+ environment:
+ KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server1:2181
+ restart: unless-stopped
+ ports:
+ - "9092:9092"
+ depends_on:
+ - zookeeper-server1
+ - zookeeper-server2
+ networks:
+ kafka_net:
+
+ kafka-server2:
+ image: bitnami/kafka:2.8.1
+ env_file:
+ - ci/pod/kafka/kafka-server/env/common.env
+ environment:
+ KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server2:2181
+ restart: unless-stopped
+ ports:
+ - "19092:9092"
+ depends_on:
+ - zookeeper-server1
+ - zookeeper-server2
+ networks:
+ kafka_net:
+
+ ## SkyWalking
+ skywalking:
+ image: apache/skywalking-oap-server:8.7.0-es6
+ restart: unless-stopped
+ ports:
+ - "1234:1234"
+ - "11800:11800"
+ - "12800:12800"
+ networks:
+ skywalk_net:
+
+ ## HashiCorp Vault
+ vault:
+ image: vault:1.9.0
+ container_name: vault
+ restart: unless-stopped
+ ports:
+ - "8200:8200"
+ cap_add:
+ - IPC_LOCK
+ environment:
+ VAULT_DEV_ROOT_TOKEN_ID: root
+ VAULT_DEV_LISTEN_ADDRESS: 0.0.0.0:8200
+ command: [ "vault", "server", "-dev" ]
+ networks:
+ vault_net:
+
+
+ ## OpenLDAP
+ openldap:
+ image: bitnami/openldap:2.5.8
+ environment:
+ LDAP_ADMIN_USERNAME: amdin
+ LDAP_ADMIN_PASSWORD: adminpassword
+ LDAP_USERS: user01,user02
+ LDAP_PASSWORDS: password1,password2
+ ports:
+ - "1389:1389"
+ - "1636:1636"
+
+
+ rocketmq_namesrv:
+ image: apacherocketmq/rocketmq:4.6.0
+ container_name: rmqnamesrv
+ restart: unless-stopped
+ ports:
+ - "9876:9876"
+ command: sh mqnamesrv
+ networks:
+ rocketmq_net:
+
+ rocketmq_broker:
+ image: apacherocketmq/rocketmq:4.6.0
+ container_name: rmqbroker
+ restart: unless-stopped
+ ports:
+ - "10909:10909"
+ - "10911:10911"
+ - "10912:10912"
+ depends_on:
+ - rocketmq_namesrv
+ command: sh mqbroker -n rocketmq_namesrv:9876 -c ../conf/broker.conf
+ networks:
+ rocketmq_net:
+
+ # Open Policy Agent
+ opa:
+ image: openpolicyagent/opa:0.35.0
+ restart: unless-stopped
+ ports:
+ - 8181:8181
+ command: run -s /example.rego /echo.rego /data.json
+ volumes:
+ - type: bind
+ source: ./ci/pod/opa/example.rego
+ target: /example.rego
+ - type: bind
+ source: ./ci/pod/opa/echo.rego
+ target: /echo.rego
+ - type: bind
+ source: ./ci/pod/opa/data.json
+ target: /data.json
+ networks:
+ opa_net:
+
+ # Splunk HEC Logging Service
+ splunk:
+ image: splunk/splunk:8.2.3
+ restart: unless-stopped
+ ports:
+ - "18088:8088"
+ environment:
+ SPLUNK_PASSWORD: "ApacheAPISIX@666"
+ SPLUNK_START_ARGS: "--accept-license"
+ SPLUNK_HEC_TOKEN: "BD274822-96AA-4DA6-90EC-18940FB2414C"
+ SPLUNK_HEC_SSL: "False"
+
+
+networks:
+ apisix_net:
+ kafka_net:
+ skywalk_net:
+ rocketmq_net:
+ vault_net:
+ opa_net:
diff --git a/ci/pod/kafka/kafka-server/env/common.env b/ci/pod/kafka/kafka-server/env/common.env
index adc9d7cad1f8..06200b9b0042 100644
--- a/ci/pod/kafka/kafka-server/env/common.env
+++ b/ci/pod/kafka/kafka-server/env/common.env
@@ -1,8 +1,3 @@
ALLOW_PLAINTEXT_LISTENER=yes
-KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false
-KAFKA_CFG_LISTENERS=PLAINTEXT://0.0.0.0:9092,SSL://0.0.0.0:9093,SASL_PLAINTEXT://0.0.0.0:9094
-KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092,SSL://127.0.0.1:9093,SASL_PLAINTEXT://127.0.0.1:9094
-KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM=
-KAFKA_CFG_SSL_KEYSTORE_LOCATION=/opt/bitnami/kafka/config/certs/kafka.keystore.jks
-KAFKA_CFG_SSL_KEYSTORE_PASSWORD=changeit
-KAFKA_CFG_SSL_KEY_PASSWORD=changeit
+KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
+KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092
diff --git a/ci/pod/kafka/kafka-server/env/last.env b/ci/pod/kafka/kafka-server/env/last.env
new file mode 100644
index 000000000000..adc9d7cad1f8
--- /dev/null
+++ b/ci/pod/kafka/kafka-server/env/last.env
@@ -0,0 +1,8 @@
+ALLOW_PLAINTEXT_LISTENER=yes
+KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false
+KAFKA_CFG_LISTENERS=PLAINTEXT://0.0.0.0:9092,SSL://0.0.0.0:9093,SASL_PLAINTEXT://0.0.0.0:9094
+KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092,SSL://127.0.0.1:9093,SASL_PLAINTEXT://127.0.0.1:9094
+KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM=
+KAFKA_CFG_SSL_KEYSTORE_LOCATION=/opt/bitnami/kafka/config/certs/kafka.keystore.jks
+KAFKA_CFG_SSL_KEYSTORE_PASSWORD=changeit
+KAFKA_CFG_SSL_KEY_PASSWORD=changeit
diff --git a/conf/config-default.yaml b/conf/config-default.yaml
index 8f9e58e5df17..f03d31baca3a 100644
--- a/conf/config-default.yaml
+++ b/conf/config-default.yaml
@@ -284,12 +284,12 @@ etcd:
timeout: 30 # 30 seconds
#resync_delay: 5 # when sync failed and a rest is needed, resync after the configured seconds plus 50% random jitter
#health_check_timeout: 10 # etcd retry the unhealthy nodes after the configured seconds
- health_check_retry: 2 # etcd retry time that only affects the health check, default 2
+  startup_retry: 2 # the number of retries to etcd during startup, default 2
#user: root # root username for etcd
#password: 5tHkHhYkjr6cQY # root password for etcd
tls:
# To enable etcd client certificate you need to build APISIX-Base, see
- # https://apisix.apache.org/docs/apisix/FAQ#how-do-i-build-the-apisix-base-environment?
+ # https://apisix.apache.org/docs/apisix/FAQ#how-do-i-build-the-apisix-base-environment
#cert: /path/to/cert # path of certificate used by the etcd client
#key: /path/to/key # path of key used by the etcd client
@@ -335,11 +335,11 @@ plugins: # plugin list (sorted by priority)
- real-ip # priority: 23000
- client-control # priority: 22000
- proxy-control # priority: 21990
+ - request-id # priority: 12015
- zipkin # priority: 12011
#- skywalking # priority: 12010
#- opentelemetry # priority: 12009
- ext-plugin-pre-req # priority: 12000
- - request-id # priority: 11010
- fault-injection # priority: 11000
- mocking # priority: 10900
- serverless-pre-function # priority: 10000
@@ -477,3 +477,14 @@ plugin_attr:
send: 60s
# redirect:
# https_port: 8443 # the default port for use by HTTP redirects to HTTPS
+
+#deployment:
+# role: traditional
+# role_traditional:
+# config_provider: etcd
+# etcd:
+#    host: # it's possible to define multiple etcd host addresses of the same etcd cluster.
+#      - "http://127.0.0.1:2379" # multiple etcd addresses; if your etcd cluster enables TLS, please use the https scheme,
+# # e.g. https://127.0.0.1:2379.
+# prefix: /apisix # configuration prefix in etcd
+# timeout: 30 # 30 seconds
diff --git a/docs/assets/images/control-plane-service-discovery.png b/docs/assets/images/control-plane-service-discovery.png
new file mode 100644
index 000000000000..034f81c76803
Binary files /dev/null and b/docs/assets/images/control-plane-service-discovery.png differ
diff --git a/docs/assets/images/deployment-cp_and_dp.png b/docs/assets/images/deployment-cp_and_dp.png
new file mode 100644
index 000000000000..6445cb3fd2c5
Binary files /dev/null and b/docs/assets/images/deployment-cp_and_dp.png differ
diff --git a/docs/assets/images/deployment-traditional.png b/docs/assets/images/deployment-traditional.png
new file mode 100644
index 000000000000..f2dc7d617881
Binary files /dev/null and b/docs/assets/images/deployment-traditional.png differ
diff --git a/docs/assets/images/external-plugin.png b/docs/assets/images/external-plugin.png
index a0b3d94c1100..38c3cdebc47a 100644
Binary files a/docs/assets/images/external-plugin.png and b/docs/assets/images/external-plugin.png differ
diff --git a/docs/en/latest/admin-api.md b/docs/en/latest/admin-api.md
index 9c7bec756f64..deaf1c4c7ce1 100644
--- a/docs/en/latest/admin-api.md
+++ b/docs/en/latest/admin-api.md
@@ -103,7 +103,7 @@ Example configuration:
"send": 3,
"read": 3
},
- "filter_func": "", # User-defined filtering function
+ "filter_func": "" # User-defined filtering function
}
```
@@ -458,7 +458,7 @@ Example Configuration:
{
"plugins": {}, # Bound plugin
"username": "name", # Consumer name
- "desc": "hello world", # Consumer desc
+ "desc": "hello world" # Consumer desc
}
```
@@ -541,8 +541,9 @@ In addition to the equalization algorithm selections, Upstream also supports pas
| labels | optional | Attributes of the Upstream specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} |
| create_time | optional | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
| update_time | optional | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
-| tls.client_cert | optional | Sets the client certificate while connecting to a TLS Upstream. | |
-| tls.client_key | optional | Sets the client private key while connecting to a TLS Upstream. | |
+| tls.client_cert | optional, can't be used with `tls.client_cert_id` | Sets the client certificate while connecting to a TLS Upstream. | |
+| tls.client_key | optional, can't be used with `tls.client_cert_id` | Sets the client private key while connecting to a TLS Upstream. | |
+| tls.client_cert_id | optional, can't be used with `tls.client_cert` and `tls.client_key` | Sets the referenced [SSL](#ssl) id. | |
| keepalive_pool.size | optional | Sets `keepalive` directive dynamically. | |
| keepalive_pool.idle_timeout | optional | Sets `keepalive_timeout` directive dynamically. | |
| keepalive_pool.requests | optional | Sets `keepalive_requests` directive dynamically. | |
@@ -564,12 +565,14 @@ The following should be considered when setting the `hash_on` value:
- When set to `vars_combinations`, the `key` is required. The value of the key can be a combination of any of the [Nginx variables](http://nginx.org/en/docs/varindex.html) like `$request_uri$remote_addr`.
- When no value is set for either `hash_on` or `key`, the key defaults to `remote_addr`.
-The features described below requires APISIX to be run on [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment?):
+The features described below require APISIX to be run on [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment):
You can set the `scheme` to `tls`, which means "TLS over TCP".
To use mTLS to communicate with Upstream, you can use the `tls.client_cert/key` in the same format as SSL's `cert` and `key` fields.
+Alternatively, you can reference an SSL object via `tls.client_cert_id` to set the SSL cert and key. The SSL object can be referenced only if its `type` field is `client`; otherwise, the request will be rejected by APISIX. In addition, only the `cert` and `key` fields of the SSL object will be used.
+
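+A minimal sketch of this flow (the IDs and the placeholder certificate material below are illustrative):
+
+```shell
+# create an SSL object with type `client`
+curl http://127.0.0.1:9080/apisix/admin/ssl/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "type": "client",
+    "cert": "<client certificate in PEM format>",
+    "key": "<client private key in PEM format>"
+}'
+
+# reference it from an Upstream via `tls.client_cert_id`
+curl http://127.0.0.1:9080/apisix/admin/upstreams/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "type": "roundrobin",
+    "scheme": "https",
+    "nodes": {"127.0.0.1:443": 1},
+    "tls": {"client_cert_id": 1}
+}'
+```
+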
To allow Upstream to have a separate connection pool, use `keepalive_pool`. It can be configured by modifying its child fields.
Example Configuration:
@@ -581,7 +584,7 @@ Example Configuration:
"timeout": { # Set the timeout for connecting, sending and receiving messages, each is 15 seconds.
"connect":15,
"send":15,
- "read":15,
+ "read":15
},
"nodes": {"host:80": 100}, # Upstream machine address list, the format is `Address + Port`
# is the same as "nodes": [ {"host": "host", "port": 80, "weight": 100} ],
@@ -591,7 +594,7 @@ Example Configuration:
"key": "",
"name": "upstream-for-test",
"desc": "hello world",
- "scheme": "http", # The scheme used when communicating with upstream, the default is `http`
+ "scheme": "http" # The scheme used when communicating with upstream, the default is `http`
}
```
@@ -789,6 +792,7 @@ Currently, the response is returned from etcd.
| labels | False | Match Rules | Attributes of the resource specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} |
| create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
| update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
+| type | False | Auxiliary | Identifies the type of certificate, default `server`. | `client` indicates that the certificate is a client certificate, which is used when APISIX accesses the upstream; `server` indicates that the certificate is a server-side certificate, which is used by APISIX when verifying client requests. |
| status | False | Auxiliary | Enables the current SSL. Set to `1` (enabled) by default. | `1` to enable, `0` to disable |
Example Configuration:
diff --git a/docs/en/latest/architecture-design/deployment-role.md b/docs/en/latest/architecture-design/deployment-role.md
new file mode 100644
index 000000000000..5e750e7f17dd
--- /dev/null
+++ b/docs/en/latest/architecture-design/deployment-role.md
@@ -0,0 +1,137 @@
+---
+title: Deployment Role
+---
+
+
+
+## Concept
+
+Previously, the DP (Data Plane) and the CP (Control Plane) were not explicitly separated.
+
+Although the documentation clearly distinguishes the different responsibilities of the DP and the CP, not everyone has deployed APISIX correctly in production environments.
+
+Therefore, we introduce new concepts called deployment modes/roles to help users deploy APISIX easily and safely.
+
+APISIX behaves differently under each deployment mode.
+
+The table below shows the relationship among deployment modes and roles:
+
+| Deployment Modes | Role | Description |
+|------------------|----------------------------|------------------------------------------------------------------------------------------|
+| traditional      | traditional                | DP and CP are deployed together by default. Users need to disable `enable_admin` manually if they want a DP-only instance. |
+| decoupled        | data_plane / control_plane | DP and CP are deployed independently.                                                      |
+| standalone       | data_plane                 | DP only; loads all configurations from a local YAML file.                                  |
+
+## Deployment Modes
+
+### Traditional
+
+![traditional](../../../assets/images/deployment-traditional.png)
+
+In the traditional deployment mode, one instance can be both DP & CP.
+
+A `conf server` listens on a UNIX socket and acts as a proxy between APISIX and etcd.
+
+Both the DP part and the CP part of the instance connect to the `conf server` over HTTP.
+
+Here is an example configuration:
+
+```yaml title="conf/config.yaml"
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ host:
+ - http://xxxx
+ prefix: /apisix
+ timeout: 30
+```
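+
+As the table above notes, a traditional-mode instance serves the Admin API as well; to run it as a pure DP, disable the Admin API manually. A minimal sketch (assuming the `enable_admin` switch under the `apisix` section of `conf/config.yaml`):
+
+```yaml title="conf/config.yaml"
+apisix:
+  enable_admin: false
+```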
+
+### Decoupled
+
+![decoupled](../../../assets/images/deployment-cp_and_dp.png)
+
+The instance deployed as data_plane will:
+
+1. Fetch configurations from the CP. The default port is 9280.
+2. Before the DP service starts, it will perform a health check on all CP addresses
+   - If all CP addresses are unavailable, the startup fails and an exception message is output to the screen.
+   - If at least one CP address is available, log the unhealthy CP check results and then start the APISIX service.
+ - If all CP addresses are normal, start the APISIX service normally.
+3. Handle user requests.
+
+Here is an example configuration:
+
+```yaml title="conf/config.yaml"
+deployment:
+ role: data_plane
+ role_data_plane:
+ config_provider: control_plane
+ control_plane:
+ host:
+ - xxxx:9280
+ timeout: 30
+ certs:
+ cert: /path/to/ca-cert
+ cert_key: /path/to/ca-cert
+ trusted_ca_cert: /path/to/ca-cert
+```
+
+The instance deployed as control_plane will:
+
+1. Listen on port 9180 by default and provide the Admin API for the admin user
+2. Provide the `conf server`, which listens on port 9280 by default. Both the DP instances and this CP instance will connect to the `conf server` via HTTPS enforced by mTLS.
+
+Here is an example configuration:
+
+```yaml title="conf/config.yaml"
+deployment:
+ role: control_plane
+  role_control_plane:
+ config_provider: etcd
+ conf_server:
+ listen: 0.0.0.0:9280
+ cert: /path/to/ca-cert
+ cert_key: /path/to/ca-cert
+ client_ca_cert: /path/to/ca-cert
+ etcd:
+ host:
+ - https://xxxx
+ prefix: /apisix
+ timeout: 30
+ certs:
+ cert: /path/to/ca-cert
+ cert_key: /path/to/ca-cert
+ trusted_ca_cert: /path/to/ca-cert
+```
+
+### Standalone
+
+In this mode, APISIX is deployed as a DP and reads configurations from a YAML file in the local file system.
+
+Here is an example configuration:
+
+```yaml title="conf/config.yaml"
+deployment:
+ role: data_plane
+ role_data_plane:
+ config_provider: yaml
+```
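+
+With `config_provider: yaml`, APISIX reads its rules from `conf/apisix.yaml` and reloads them when the file changes. A minimal sketch (the route and the upstream address are illustrative); note that the file must end with the `#END` marker:
+
+```yaml title="conf/apisix.yaml"
+routes:
+  - uri: /hello
+    upstream:
+      nodes:
+        "127.0.0.1:1980": 1
+      type: roundrobin
+#END
+```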
diff --git a/docs/en/latest/batch-processor.md b/docs/en/latest/batch-processor.md
index 2f1a0e878d42..a790dbcd2139 100644
--- a/docs/en/latest/batch-processor.md
+++ b/docs/en/latest/batch-processor.md
@@ -22,7 +22,7 @@ title: Batch Processor
-->
The batch processor can be used to aggregate entries(logs/any data) and process them in a batch.
-When the batch_max_size is set to zero the processor will execute each entry immediately. Setting the batch max size more
+When the batch_max_size is set to 1, the processor will execute each entry immediately. Setting the batch max size more
than 1 will start aggregating the entries until it reaches the max size or the timeout expires.
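+
+As an illustration, a plugin that uses the batch processor might carry settings like the following (the attribute names come from the table below; the values shown are the defaults):
+
+```json
+{
+    "batch_max_size": 1000,
+    "inactive_timeout": 5,
+    "buffer_duration": 60,
+    "max_retry_count": 0,
+    "retry_delay": 1
+}
+```
+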
## Configurations
diff --git a/docs/en/latest/building-apisix.md b/docs/en/latest/building-apisix.md
new file mode 100644
index 000000000000..1fd7246e6d1b
--- /dev/null
+++ b/docs/en/latest/building-apisix.md
@@ -0,0 +1,282 @@
+---
+id: building-apisix
+title: Building APISIX from source
+keywords:
+ - API gateway
+ - Apache APISIX
+ - Code Contribution
+ - Building APISIX
+description: Guide for building and running APISIX locally for development.
+---
+
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+If you are looking to contribute to APISIX or set up a development environment, this guide is for you.
+
+If you are looking to install and run APISIX, check out the [Installation](./installation-guide.md) docs.
+
+:::note
+
+If you want to build and package APISIX for a specific platform, see [apisix-build-tools](https://github.com/api7/apisix-build-tools).
+
+:::
+
+## Building APISIX from source
+
+To start, you have to install some dependencies. APISIX provides a handy script to get these installed:
+
+```shell
+curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-dependencies.sh -sL | bash -
+```
+
+Then, create a directory and set the environment variable `APISIX_VERSION`:
+
+```shell
+APISIX_VERSION='2.14.1'
+mkdir apisix-${APISIX_VERSION}
+```
+
+You can now download the APISIX source code by running the command below:
+
+```shell
+wget https://downloads.apache.org/apisix/${APISIX_VERSION}/apache-apisix-${APISIX_VERSION}-src.tgz
+```
+
+You can also download the source package from the [Downloads page](https://apisix.apache.org/downloads/). You will also find source packages for APISIX Dashboard and APISIX Ingress Controller.
+
+After you have downloaded the package, you can extract the files to the folder created previously:
+
+```shell
+tar zxvf apache-apisix-${APISIX_VERSION}-src.tgz -C apisix-${APISIX_VERSION}
+```
+
+Now, navigate to the directory, install the dependencies, and then install APISIX as shown below:
+
+```shell
+cd apisix-${APISIX_VERSION}
+make deps
+make install
+```
+
+This will install APISIX's runtime Lua library dependencies and the `apisix` command.
+
+:::note
+
+If you get an error message like `Could not find header file for LDAP/PCRE/openssl` while running `make deps`, use this solution.
+
+`luarocks` supports custom compile-time dependencies (See: [Config file format](https://github.com/luarocks/luarocks/wiki/Config-file-format)). You can use a third-party tool to install the missing packages and add their installation directories to the `variables` table of `luarocks`. This method works on macOS, Ubuntu, CentOS, and other similar operating systems.
+
+The solution below is for macOS but it works similarly for other operating systems:
+
+1. Install `openldap` by running:
+
+ ```shell
+ brew install openldap
+ ```
+
+2. Locate the installation directory by running:
+
+ ```shell
+ brew --prefix openldap
+ ```
+
+3. Add this path to the project configuration file by any of the two methods shown below:
+ 1. You can use the `luarocks config` command to set `LDAP_DIR`:
+
+ ```shell
+ luarocks config variables.LDAP_DIR /opt/homebrew/cellar/openldap/2.6.1
+ ```
+
+   2. You can also change the default configuration file of `luarocks`. Open the file `~/.luarocks/config-5.1.lua` and add the following:
+
+      ```lua
+ variables = { LDAP_DIR = "/opt/homebrew/cellar/openldap/2.6.1", LDAP_INCDIR = "/opt/homebrew/cellar/openldap/2.6.1/include", }
+ ```
+
+   `/opt/homebrew/cellar/openldap/` is the default installation path of `openldap` on Apple Silicon macOS machines. On Intel machines, the default path is `/usr/local/opt/openldap/`.
+
+:::
+
+To uninstall the APISIX runtime, run:
+
+```shell
+make uninstall
+make undeps
+```
+
+:::danger
+
+This operation will remove the files completely.
+
+:::
+
+## Installing etcd
+
+APISIX uses [etcd](https://github.com/etcd-io/etcd) to save and synchronize configuration. Before running APISIX, you need to install etcd on your machine. Installation methods based on your operating system are mentioned below.
+
+
+
+
+```shell
+ETCD_VERSION='3.4.18'
+wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz
+tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \
+ cd etcd-v${ETCD_VERSION}-linux-amd64 && \
+ sudo cp -a etcd etcdctl /usr/bin/
+nohup etcd >/tmp/etcd.log 2>&1 &
+```
+
+
+
+
+
+```shell
+brew install etcd
+brew services start etcd
+```
+
+
+
+
+## Running and managing APISIX server
+
+To initialize the configuration file, within the APISIX directory, run:
+
+```shell
+apisix init
+```
+
+:::tip
+
+You can run `apisix help` to see a list of available commands.
+
+:::
+
+You can then test the created configuration file by running:
+
+```shell
+apisix test
+```
+
+Finally, you can run the command below to start APISIX:
+
+```shell
+apisix start
+```
+
+To stop APISIX, you can use either the `quit` or the `stop` subcommand.
+
+`apisix quit` will gracefully shut down APISIX. It ensures that all received requests are completed before stopping.
+
+```shell
+apisix quit
+```
+
+In contrast, the `apisix stop` command forces a shutdown and discards all pending requests.
+
+```shell
+apisix stop
+```
+
+## Building runtime for APISIX
+
+Some features of APISIX require additional Nginx modules to be introduced into OpenResty.
+
+To use these features, you need to build a custom distribution of OpenResty (apisix-base). See [apisix-build-tools](https://github.com/api7/apisix-build-tools) for setting up your build environment and building it.
+
+## Running tests
+
+The steps below show how to run the test cases for APISIX:
+
+1. Install [cpanminus](https://metacpan.org/pod/App::cpanminus#INSTALLATION), the package manager for Perl.
+2. Install the [test-nginx](https://github.com/openresty/test-nginx) dependencies with `cpanm`:
+
+ ```shell
+ sudo cpanm --notest Test::Nginx IPC::Run > build.log 2>&1 || (cat build.log && exit 1)
+ ```
+
+3. Clone the test-nginx source code locally:
+
+ ```shell
+ git clone https://github.com/openresty/test-nginx.git
+ ```
+
+4. Append the current directory to Perl's module directory by running:
+
+ ```shell
+ export PERL5LIB=.:$PERL5LIB
+ ```
+
+ You can specify the Nginx binary path by running:
+
+ ```shell
+ TEST_NGINX_BINARY=/usr/local/bin/openresty prove -Itest-nginx/lib -r t
+ ```
+
+5. Run the tests by running:
+
+ ```shell
+ make test
+ ```
+
+:::note
+
+Some tests rely on external services and system configuration modification. See [ci/linux_openresty_common_runner.sh](https://github.com/apache/apisix/blob/master/ci/linux_openresty_common_runner.sh) for a complete test environment build.
+
+:::
+
+### Troubleshooting
+
+These are some common troubleshooting steps for running APISIX test cases.
+
+#### Configuring Nginx path
+
+For the error `Error unknown directive "lua_package_path" in /API_ASPIX/apisix/t/servroot/conf/nginx.conf`, ensure that OpenResty is set to the default Nginx and export the path as follows:
+
+- Linux default installation path:
+
+ ```shell
+ export PATH=/usr/local/openresty/nginx/sbin:$PATH
+ ```
+
+- macOS default installation path (via Homebrew):
+
+ ```shell
+ export PATH=/usr/local/opt/openresty/nginx/sbin:$PATH
+ ```
+
+#### Running a specific test case
+
+To run a specific test case, use the command below:
+
+```shell
+prove -Itest-nginx/lib -r t/plugin/openid-connect.t
+```
+
+See [testing framework](https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md) for more details.
diff --git a/docs/en/latest/config.json b/docs/en/latest/config.json
index b9ad7bc68448..46c6ab4e9ce6 100644
--- a/docs/en/latest/config.json
+++ b/docs/en/latest/config.json
@@ -7,7 +7,8 @@
"items": [
"architecture-design/apisix",
"architecture-design/plugin-config",
- "architecture-design/debug-mode"
+ "architecture-design/debug-mode",
+ "architecture-design/deployment-role"
]
},
{
@@ -48,7 +49,8 @@
"plugins/real-ip",
"plugins/server-info",
"plugins/ext-plugin-pre-req",
- "plugins/ext-plugin-post-req"
+ "plugins/ext-plugin-post-req",
+ "plugins/ext-plugin-post-resp"
]
},
{
@@ -169,7 +171,7 @@
},
{
"type": "category",
- "label": "Other Protocols",
+ "label": "Other protocols",
"items": [
"plugins/dubbo-proxy",
"plugins/mqtt-proxy",
@@ -192,6 +194,16 @@
}
]
},
+ {
+ "type": "category",
+ "label": "Development",
+ "items": [
+ {
+ "type": "doc",
+ "id": "building-apisix"
+ }
+ ]
+ },
{
"type": "doc",
"id": "FAQ"
@@ -209,7 +221,7 @@
"discovery/consul_kv",
"discovery/nacos",
"discovery/eureka",
- "discovery/zookeeper",
+ "discovery/control-plane-service-discovery",
"discovery/kubernetes"
]
},
diff --git a/docs/en/latest/discovery/control-plane-service-discovery.md b/docs/en/latest/discovery/control-plane-service-discovery.md
new file mode 100644
index 000000000000..a18bdc63d38d
--- /dev/null
+++ b/docs/en/latest/discovery/control-plane-service-discovery.md
@@ -0,0 +1,72 @@
+---
+title: Control Plane Service Discovery
+keywords:
+  - API Gateway
+ - APISIX
+ - ZooKeeper
+ - Nacos
+ - APISIX-Seed
+description: This document describes how to implement service discovery with Nacos and ZooKeeper on the API Gateway APISIX Control Plane.
+---
+
+
+
+This document describes how to implement service discovery with Nacos and ZooKeeper on the APISIX Control Plane.
+
+## APISIX-Seed Architecture
+
+Apache APISIX has supported Data Plane service discovery since its early days, and it now also supports Control Plane service discovery through the [APISIX-Seed](https://github.com/api7/apisix-seed) project. The following figure shows the APISIX-Seed architecture diagram.
+
+![control-plane-service-discovery](../../../assets/images/control-plane-service-discovery.png)
+
+The numbered steps in the figure above are as follows:
+
+1. Register an upstream with APISIX and specify the service discovery type (see the sketch after the note below). APISIX-Seed will watch APISIX resource changes in etcd, filter discovery types, and obtain service names.
+2. APISIX-Seed subscribes to the specified service in the service registry to obtain changes to the corresponding service.
+3. After the client registers the service with the service registry, APISIX-Seed will obtain the new service information and write the updated service nodes into etcd.
+4. When the corresponding resources in etcd change, the APISIX workers will refresh the latest service node information into memory.
+
+:::note
+
+It should be noted that after the introduction of APISIX-Seed, if the services in the registry change frequently, the data in etcd will also change frequently. It is therefore best to set the `--auto-compaction` option when starting etcd to compact the history periodically, so that etcd does not eventually exhaust its storage space. Please refer to [revisions](https://etcd.io/docs/v3.5/learning/api/#revisions).
+
+:::
+
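+As a sketch of step 1 above, registering an upstream that names the discovery type looks like this (the URI, service name, and admin key are illustrative):
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+    "uri": "/zk/*",
+    "upstream": {
+        "service_name": "APISIX-ZK",
+        "type": "roundrobin",
+        "discovery_type": "zookeeper"
+    }
+}'
+```
+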
+## Why APISIX-Seed
+
+- Network topology becomes simpler
+
+  APISIX does not need to maintain a network connection to each registry; it only needs to watch the configuration information in etcd. This greatly simplifies the network topology.
+
+- Total data volume about upstream services becomes smaller
+
+  Due to the characteristics of some registries, such as `consul_kv`, APISIX may store the full set of registry service data in each worker. By introducing APISIX-Seed, each APISIX process no longer needs to cache additional upstream service-related information.
+
+- Easier to manage
+
+  Service discovery needs to be configured on every APISIX instance. By introducing APISIX-Seed, Apache APISIX becomes indifferent to the configuration changes of the service registry.
+
+## Supported service registries
+
+ZooKeeper and Nacos are currently supported, and more service registries will be supported in the future. For more information, please refer to: [APISIX Seed](https://github.com/api7/apisix-seed#apisix-seed-for-apache-apisix).
+
+- If you want to enable control plane ZooKeeper service discovery, please refer to: [ZooKeeper Deployment Tutorial](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/zookeeper.md).
+
+- If you want to enable control plane Nacos service discovery, please refer to: [Nacos Deployment Tutorial](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/nacos.md).
diff --git a/docs/en/latest/discovery/zookeeper.md b/docs/en/latest/discovery/zookeeper.md
deleted file mode 100644
index 3adf52dc9735..000000000000
--- a/docs/en/latest/discovery/zookeeper.md
+++ /dev/null
@@ -1,144 +0,0 @@
----
-title: zookeeper
----
-
-
-
-## Service Discovery Via Zookeeper
-
-`Zookeeper` service discovery needs to rely on the [apisix-seed](https://github.com/api7/apisix-seed) project.
-
-### How `apisix-seed` Works
-
-![APISIX-SEED](../../../assets/images/apisix-seed.svg)
-
-`apisix-seed` completes data exchange by watching the changes of `etcd` and `zookeeper` at the same time.
-
-The process is as follows:
-
-1. `APISIX` registers an upstream and specifies the service discovery type as `zookeeper` to `etcd`.
-2. `apisix-seed` watches the resource changes of `APISIX` in `etcd` and filters the discovery type and obtains the service name.
-3. `apisix-seed` binds the service to the `etcd` resource and starts watching the service in zookeeper.
-4. The client registers the service with `zookeeper`.
-5. `apisix-seed` gets the service changes in `zookeeper`.
-6. `apisix-seed` queries the bound `etcd` resource information through the service name, and writes the updated service node to `etcd`.
-7. The `APISIX` worker watches `etcd` changes and refreshes the service node information to the memory.
-
-### Setting `apisix-seed` and Zookeeper
-
-The configuration steps are as follows:
-
-1. Start the Zookeeper service
-
-```bash
-docker run -itd --rm --name=dev-zookeeper -p 2181:2181 zookeeper:3.7.0
-```
-
-2. Download and compile the `apisix-seed` project.
-
-```bash
-git clone https://github.com/api7/apisix-seed.git
-cd apisix-seed
-go build
-```
-
-3. Modify the `apisix-seed` configuration file, config path `conf/conf.yaml`.
-
-```bash
-etcd: # APISIX ETCD Configure
- host:
- - "http://127.0.0.1:2379"
- prefix: /apisix
- timeout: 30
-
-discovery:
- zookeeper: # Zookeeper Service Discovery
- hosts:
- - "127.0.0.1:2181" # Zookeeper service address
- prefix: /zookeeper
- weight: 100 # default weight for node
- timeout: 10 # default 10s
-```
-
-4. Start `apisix-seed` to monitor service changes
-
-```bash
-./apisix-seed
-```
-
-### Setting `APISIX` Route and Upstream
-
-Set a route, the request path is `/zk/*`, the upstream uses zookeeper as service discovery, and the service name
-is `APISIX-ZK`.
-
-```shell
-curl http://127.0.0.1:9080/apisix/admin/routes/1 \
--H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
-{
- "uri": "/zk/*",
- "upstream": {
- "service_name": "APISIX-ZK",
- "type": "roundrobin",
- "discovery_type": "zookeeper"
- }
-}'
-```
-
-### Register Service and verify Request
-
-1. Service registration using Zookeeper CLI
-
-- Register Service
-
-```bash
-# Login Container
-docker exec -it ${CONTAINERID} /bin/bash
-# Login Zookeeper Client
-oot@ae2f093337c1:/apache-zookeeper-3.7.0-bin# ./bin/zkCli.sh
-# Register Service
-[zk: localhost:2181(CONNECTED) 0] create /zookeeper/APISIX-ZK '{"host":"127.0.0.1:1980","weight":100}'
-```
-
-- Successful Response
-
-```bash
-Created /zookeeper/APISIX-ZK
-```
-
-2. Verify Request
-
-- Request
-
-```bash
-curl -i http://127.0.0.1:9080/zk/hello
-```
-
-- Response
-
-```bash
-HTTP/1.1 200 OK
-Connection: keep-alive
-Content-Type: text/html; charset=utf-8
-Date: Tue, 29 Mar 2022 08:51:28 GMT
-Server: APISIX/2.12.0
-Transfer-Encoding: chunked
-
-hello
-```
diff --git a/docs/en/latest/health-check.md b/docs/en/latest/health-check.md
index 850a96e60e2d..3f16d09309bf 100644
--- a/docs/en/latest/health-check.md
+++ b/docs/en/latest/health-check.md
@@ -23,7 +23,7 @@ title: Health Check
## Health Checks for Upstream
-Health Check of Apache APISIX is based on [lua-resty-healthcheck](https://github.com/Kong/lua-resty-healthcheck).
+Health Check of Apache APISIX is based on [lua-resty-healthcheck](https://github.com/api7/lua-resty-healthcheck).
Note:
diff --git a/docs/en/latest/installation-guide.md b/docs/en/latest/installation-guide.md
index 2384ba31b191..40d9e44e472d 100644
--- a/docs/en/latest/installation-guide.md
+++ b/docs/en/latest/installation-guide.md
@@ -43,6 +43,7 @@ APISIX can be installed by the different methods listed below:
{label: 'Docker', value: 'docker'},
{label: 'Helm', value: 'helm'},
{label: 'RPM', value: 'rpm'},
+ {label: 'Source Code', value: 'source code'},
]}>
@@ -166,6 +167,12 @@ Run `apisix help` to get a list of all available operations.
:::
+
+
+
+
+If you want to build APISIX from source, please refer to [Building APISIX from source](./building-apisix.md).
+
@@ -185,7 +192,7 @@ It would be installed automatically if you choose the Docker or Helm install met
```shell
-ETCD_VERSION='3.4.18'
+ETCD_VERSION='3.5.4'
wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz
tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \
cd etcd-v${ETCD_VERSION}-linux-amd64 && \
diff --git a/docs/en/latest/mtls.md b/docs/en/latest/mtls.md
index b46e7d7b81e2..294d4b162fbf 100644
--- a/docs/en/latest/mtls.md
+++ b/docs/en/latest/mtls.md
@@ -66,7 +66,7 @@ curl --cacert /data/certs/mtls_ca.crt --key /data/certs/mtls_client.key --cert /
### How to configure
-You need to build [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment?) and configure `etcd.tls` section if you want APISIX to work on an etcd cluster with mTLS enabled.
+You need to build [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment) and configure `etcd.tls` section if you want APISIX to work on an etcd cluster with mTLS enabled.
```yaml
etcd:
diff --git a/docs/en/latest/plugins/api-breaker.md b/docs/en/latest/plugins/api-breaker.md
index 87c1f1d58cf5..4469b5a31d40 100644
--- a/docs/en/latest/plugins/api-breaker.md
+++ b/docs/en/latest/plugins/api-breaker.md
@@ -43,7 +43,7 @@ In an unhealthy state, if the Upstream service responds with a status code from
|-------------------------|----------------|----------|---------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| break_response_code | integer | True | | [200, ..., 599] | HTTP error code to return when Upstream is unhealthy. |
| break_response_body | string | False | | | Body of the response message to return when Upstream is unhealthy. |
-| break_response_headers | array[object] | False | | | Headers of the response message to return when Upstream is unhealthy. Can only be configured when the `break_response_body` attribute is configured. The values can contain Nginx variables. For example, `$remote_addr` and `$balancer_ip`. |
+| break_response_headers | array[object] | False | | [{"key":"header_name","value":"can contain Nginx $var"}] | Headers of the response message to return when Upstream is unhealthy. Can only be configured when the `break_response_body` attribute is configured. The values can contain APISIX variables. For example, we can use `{"key":"X-Client-Addr","value":"$remote_addr:$remote_port"}`. |
| max_breaker_sec | integer | False | 300 | >=3 | Maximum time in seconds for circuit breaking. |
| unhealthy.http_statuses | array[integer] | False | [500] | [500, ..., 599] | Status codes of Upstream to be considered unhealthy. |
| unhealthy.failures | integer | False | 3 | >=1 | Number of consecutive failures for the Upstream service to be considered unhealthy. |
diff --git a/docs/en/latest/plugins/clickhouse-logger.md b/docs/en/latest/plugins/clickhouse-logger.md
index 2f3fd17446f3..505a26cd3160 100644
--- a/docs/en/latest/plugins/clickhouse-logger.md
+++ b/docs/en/latest/plugins/clickhouse-logger.md
@@ -1,5 +1,11 @@
---
title: clickhouse-logger
+keywords:
+ - APISIX
+ - API Gateway
+ - Plugin
+ - ClickHouse Logger
+description: This document contains information about the Apache APISIX clickhouse-logger Plugin.
---
diff --git a/docs/en/latest/plugins/ext-plugin-post-resp.md b/docs/en/latest/plugins/ext-plugin-post-resp.md
new file mode 100644
--- /dev/null
+++ b/docs/en/latest/plugins/ext-plugin-post-resp.md
+
+## Description
+
+The `ext-plugin-post-resp` Plugin is for running specific External Plugins in the Plugin Runner before executing the built-in Lua Plugins.
+
+The `ext-plugin-post-resp` Plugin is executed after the request gets a response from the Upstream.
+
+After enabling this plugin, APISIX will use the [lua-resty-http](https://github.com/api7/lua-resty-http) library to make requests to the upstream, this results in:
+
+- [proxy-control](./proxy-control.md) plugin is not available
+- [proxy-mirror](./proxy-mirror.md) plugin is not available
+- [proxy-cache](./proxy-cache.md) plugin is not available
+- [mTLS Between APISIX and Upstream](../mtls.md#mtls-between-apisix-and-upstream) function is not available yet
+
+See [External Plugin](../external-plugin.md) to learn more.
+
+:::note
+
+Execution of External Plugins will affect the response of the current request.
+
+External Plugin does not yet support getting request context information.
+
+External Plugin does not yet support getting the response body of an upstream response.
+
+:::
+
+## Attributes
+
+| Name | Type | Required | Default | Valid values | Description |
+|-------------------|---------|----------|---------|-----------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|
+| conf | array | False | | [{"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"}] | List of Plugins and their configurations to be executed on the Plugin Runner. |
+| allow_degradation | boolean | False | false | | Sets Plugin degradation when the Plugin Runner is not available. When set to `true`, requests are allowed to continue. |
+
+## Enabling the Plugin
+
+The example below enables the `ext-plugin-post-resp` Plugin on a specific Route:
+
+```shell
+curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/index.html",
+ "plugins": {
+ "ext-plugin-post-resp": {
+ "conf" : [
+ {"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"}
+ ]
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ }
+}'
+```
+
+## Example usage
+
+Once you have configured the External Plugin as shown above, you can make a request to execute the Plugin:
+
+```shell
+curl -i http://127.0.0.1:9080/index.html
+```
+
+This will reach the configured Plugin Runner and the `ext-plugin-A` will be executed.
+
+## Disable Plugin
+
+To disable the `ext-plugin-post-resp` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect.
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/index.html",
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ }
+}'
+```
diff --git a/docs/en/latest/plugins/file-logger.md b/docs/en/latest/plugins/file-logger.md
index 27bc93d69089..8ad5cc1dea0c 100644
--- a/docs/en/latest/plugins/file-logger.md
+++ b/docs/en/latest/plugins/file-logger.md
@@ -1,5 +1,11 @@
---
title: file-logger
+keywords:
+ - APISIX
+ - API Gateway
+ - Plugin
+ - File Logger
+description: This document contains information about the Apache APISIX file-logger Plugin.
---
diff --git a/docs/en/latest/plugins/prometheus.md b/docs/en/latest/plugins/prometheus.md
-## How to fetch the metric data
+## Fetching metrics
-We fetch the metric data from the specified url `/apisix/prometheus/metrics`.
+You can fetch the metrics from the specified export URI (default: `/apisix/prometheus/metrics`):
-```
+```shell
curl -i http://127.0.0.1:9091/apisix/prometheus/metrics
```
-Puts this URL address into prometheus, and it will automatically fetch
-these metric data.
-
-For example like this:
+You can add this address to Prometheus to fetch the data:
```yaml
scrape_configs:
- - job_name: 'apisix'
+ - job_name: "apisix"
scrape_interval: 15s # This value will be related to the time range of the rate function in Prometheus QL. The time range in the rate function should be at least twice this value.
- metrics_path: '/apisix/prometheus/metrics'
+ metrics_path: "/apisix/prometheus/metrics"
static_configs:
- - targets: ['127.0.0.1:9091']
+ - targets: ["127.0.0.1:9091"]
```
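+
+For example, with the 15s `scrape_interval` configured above, a `rate()` query should use a window of at least 30s. A minimal PromQL sketch using the `apisix_bandwidth` counter listed later on this page:
+
+```
+rate(apisix_bandwidth[30s])
+```
+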
-And we can check the status at prometheus console:
+Now, you will be able to check the status in your Prometheus console:
![checking status on prometheus dashboard](../../../assets/images/plugin/prometheus01.png)
![prometheus apisix in-depth metric view](../../../assets/images/plugin/prometheus02.png)
-## How to specify export uri
-
-We can change the default export uri in the `plugin_attr` section of `conf/config.yaml`.
-
-| Name | Type | Default | Description |
-| ---------- | ------ | ---------------------------- | --------------------------------- |
-| export_uri | string | "/apisix/prometheus/metrics" | uri to get the prometheus metrics |
-
-Here is an example:
+## Using Grafana to graph the metrics
-```yaml
-plugin_attr:
- prometheus:
- export_uri: /apisix/metrics
-```
+Metrics exported by the `prometheus` Plugin can be graphed in Grafana using a drop-in dashboard.
-## Grafana dashboard
-
-Metrics exported by the plugin can be graphed in Grafana using a drop in dashboard.
-
-Downloads [Grafana dashboard meta](https://github.com/apache/apisix/blob/master/docs/assets/other/json/apisix-grafana-dashboard.json) and imports it to Grafana。
-
-Or you can goto [Grafana official](https://grafana.com/grafana/dashboards/11719) for `Grafana` meta data.
+To set it up, download the [Grafana dashboard metadata](https://github.com/apache/apisix/blob/master/docs/assets/other/json/apisix-grafana-dashboard.json) and import it into Grafana. Alternatively, you can get the metadata from the [official Grafana dashboard](https://grafana.com/grafana/dashboards/11719).
![Grafana chart-1](../../../assets/images/plugin/grafana-1.png)
@@ -153,52 +158,57 @@ Or you can goto [Grafana official](https://grafana.com/grafana/dashboards/11719)
## Available HTTP metrics
-* `Status codes`: HTTP status code returned from upstream services. These status code available per service and across all services.
+The following metrics are exported by the `prometheus` Plugin:
- Attributes:
+- Status codes: HTTP status codes returned from Upstream services. These are available for a single service and across all services.
- | Name | Description |
- | -------------| --------------------|
- | code | The HTTP status code returned by the upstream service. |
- | route | The `route_id` of the matched route is request. If it does not match, the default value is an empty string. |
- | matched_uri | The `uri` of the route matching the request, if it does not match, the default value is an empty string. |
- | matched_host | The `host` of the route that matches the request. If it does not match, the default value is an empty string. |
- | service | The `service_id` of the route matched by the request. When the route lacks service_id, the default is `$host`. |
- | consumer | The `consumer_name` of the consumer that matches the request. If it does not match, the default value is an empty string. |
- | node | The `ip` of the upstream node. |
+ The available attributes are:
-* `Bandwidth`: Total Bandwidth (egress/ingress) flowing through APISIX. The total bandwidth of per service can be counted.
+ | Name | Description |
+ |--------------|-------------------------------------------------------------------------------------------------------------------------------|
+ | code | HTTP status code returned by the upstream service. |
+ | route | `route_id` of the Route matching the request. Defaults to an empty string if no Route matches. |
+ | matched_uri | `uri` of the Route matching the request. Defaults to an empty string if no Route matches. |
+ | matched_host | `host` of the Route matching the request. Defaults to an empty string if no Route matches. |
+ | service | `service_id` of the Route matching the request. If the Route does not have a `service_id` configured, it defaults to `$host`. |
+ | consumer | `consumer_name` of the Consumer matching the request. Defaults to an empty string if it does not match. |
+ | node | IP address of the Upstream node. |
- Attributes:
+- Bandwidth: Total amount of traffic (ingress and egress) flowing through APISIX. Total bandwidth of a service can also be obtained.
- | Name | Description |
- | -------------| ------------- |
- | type | The type of bandwidth(egress/ingress). |
- | route | The `route_id` of the matched route is request. If it does not match, the default value is an empty string.. |
- | service | The `service_id` of the route matched by the request. When the route lacks service_id, the default is `$host`. |
- | consumer | The `consumer_name` of the consumer that matches the request. If it does not match, the default value is an empty string. |
- | node | The `ip` of the upstream node. |
+ The available attributes are:
-* `etcd reachability`: A gauge type with a value of 0 or 1, representing if etcd can be reached by a APISIX or not, where `1` is available, and `0` is unavailable.
-* `Connections`: Various Nginx connection metrics like active, reading, writing, and number of accepted connections.
-* `Batch process entries`: A gauge type, when we use plugins and the plugin used batch process to send data, such as: sys logger, http logger, sls logger, tcp logger, udp logger and zipkin, then the entries which hasn't been sent in batch process will be counted in the metrics.
-* `Latency`: The per service histogram of request time in different dimensions.
+ | Name | Description |
+ |----------|-------------------------------------------------------------------------------------------------------------------------------|
+ | type | Type of traffic (egress/ingress). |
+ | route | `route_id` of the Route matching the request. Defaults to an empty string if no Route matches. |
+ | service | `service_id` of the Route matching the request. If the Route does not have a `service_id` configured, it defaults to `$host`. |
+ | consumer | `consumer_name` of the Consumer matching the request. Defaults to an empty string if it does not match. |
+ | node | IP address of the Upstream node. |
+
+- etcd reachability: A gauge type representing whether etcd can be reached by APISIX. A value of `1` represents reachable and `0` represents unreachable.
+- Connections: Nginx connection metrics like active, reading, writing, and number of accepted connections.
+- Batch process entries: A gauge type useful when Plugins like [syslog](./syslog.md), [http-logger](./http-logger.md), [tcp-logger](./tcp-logger.md), [udp-logger](./udp-logger.md), and [zipkin](./zipkin.md) use a batch processor to send data. Entries that haven't been sent yet by the batch processor are counted in these metrics.
+- Latency: Histogram of the request time per service in different dimensions.
- Attributes:
+ The available attributes are:
- | Name | Description |
- | ----------| ------------- |
- | type | The value can be `apisix`, `upstream` or `request`, which means http latency caused by apisix, upstream, or their sum. |
- | service | The `service_id` of the route matched by the request. When the route lacks service_id, the default is `$host`. |
- | consumer | The `consumer_name` of the consumer that matches the request. If it does not match, the default value is an empty string. |
- | node | The `ip` of the upstream node. |
+ | Name | Description |
+ |----------|-------------------------------------------------------------------------------------------------------------------------------------|
+ | type | Value can be one of `apisix`, `upstream`, or `request`. This translates to latency caused by APISIX, Upstream, or both (their sum). |
+ | service | `service_id` of the Route matching the request. If the Route does not have a `service_id` configured, it defaults to `$host`. |
+ | consumer | `consumer_name` of the Consumer matching the request. Defaults to an empty string if it does not match. |
+ | node | IP address of the Upstream node. |
-* `Info`: the information of APISIX node.
+- Info: Information about the APISIX node.
-Here is the original metric data of APISIX:
+Here are the original metrics from APISIX:
+
+```shell
+curl http://127.0.0.1:9091/apisix/prometheus/metrics
+```
```shell
-$ curl http://127.0.0.1:9091/apisix/prometheus/metrics
# HELP apisix_bandwidth Total bandwidth in bytes consumed per service in Apisix
# TYPE apisix_bandwidth counter
apisix_bandwidth{type="egress",route="",service="",consumer="",node=""} 8417
@@ -266,8 +276,7 @@ apisix_node_info{hostname="desktop-2022q8f-wsl"} 1
## Disable Plugin
-Remove the corresponding json configuration in the plugin configuration to disable `prometheus`.
-APISIX plugins are hot-reloaded, therefore no need to restart APISIX.
+To disable the `prometheus` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect.
```shell
curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
diff --git a/docs/en/latest/plugins/real-ip.md b/docs/en/latest/plugins/real-ip.md
index f1f59559c8e5..88d7783f9423 100644
--- a/docs/en/latest/plugins/real-ip.md
+++ b/docs/en/latest/plugins/real-ip.md
@@ -35,7 +35,7 @@ This is more flexible but functions similarly to Nginx's [ngx_http_realip_module
:::info IMPORTANT
-This Plugin requires APISIX to run on [APISIX-Base](../FAQ.md#how-do-i-build-the-apisix-base-environment?).
+This Plugin requires APISIX to run on [APISIX-Base](../FAQ.md#how-do-i-build-the-apisix-base-environment).
:::
diff --git a/docs/en/latest/plugins/request-id.md b/docs/en/latest/plugins/request-id.md
index 05505ac8dc97..cc18b75eef0b 100644
--- a/docs/en/latest/plugins/request-id.md
+++ b/docs/en/latest/plugins/request-id.md
@@ -47,6 +47,12 @@ The Plugin will not add a unique ID if the request already has a header with the
| include_in_response | boolean | False | true | | When set to `true`, adds the unique request ID in the response header. |
| algorithm | string | False | "uuid" | ["uuid", "snowflake", "nanoid"] | Algorithm to use for generating the unique request ID. |
+:::warning
+
+When you need to use the `snowflake` algorithm, make sure APISIX has permission to write to etcd.
+
+:::
+
### Using snowflake algorithm to generate unique ID
To use the snowflake algorithm, you have to enable it first on your configuration file (`conf/config.yaml`):
diff --git a/docs/en/latest/plugins/response-rewrite.md b/docs/en/latest/plugins/response-rewrite.md
index 80ce66e6ec68..91739d39ff82 100644
--- a/docs/en/latest/plugins/response-rewrite.md
+++ b/docs/en/latest/plugins/response-rewrite.md
@@ -126,10 +126,80 @@ However, if `ngx.exit` is executed during an access phase, it will only interrup
-So, if you have configured the `response-rewrite` Plugin, it do a force overwrite of the response.
+So, if you have configured the `response-rewrite` Plugin, it will do a force overwrite of the response.
-![ngx.edit tabular overview](https://cdn.jsdelivr.net/gh/Miss-you/img/picgo/20201113010623.png)
+| Phase | rewrite | access | header_filter | body_filter |
+|---------------|----------|----------|---------------|-------------|
+| rewrite | ngx.exit | √ | √ | √ |
+| access | × | ngx.exit | √ | √ |
+| header_filter | √ | √ | ngx.exit | √ |
+| body_filter | √ | √ | × | ngx.exit |
:::
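+
+As a hypothetical sketch of this interplay (using the `serverless-pre-function` Plugin to call `ngx.exit` in the access phase), the `response-rewrite` configuration below still overwrites the response even though the request was terminated early:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "uri": "/test",
+    "plugins": {
+        "serverless-pre-function": {
+            "phase": "access",
+            "functions": ["return function() ngx.exit(403) end"]
+        },
+        "response-rewrite": {
+            "status_code": 200,
+            "body": "rewritten by response-rewrite\n"
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```
+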
+The example below shows how you can replace a key in the response body. Here, the key `X-Amzn-Trace-Id` is replaced with `X-Amzn-Trace-Id-Replace` by configuring the `filters` attribute using regex:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "plugins":{
+ "response-rewrite":{
+ "headers":{
+ "X-Server-id":3,
+ "X-Server-status":"on",
+ "X-Server-balancer_addr":"$balancer_ip:$balancer_port"
+ },
+ "filters":[
+ {
+ "regex":"X-Amzn-Trace-Id",
+ "scope":"global",
+ "replace":"X-Amzn-Trace-Id-Replace"
+ }
+ ],
+ "vars":[
+ [
+ "status",
+ "==",
+ 200
+ ]
+ ]
+ }
+ },
+ "upstream":{
+ "type":"roundrobin",
+ "scheme":"https",
+ "nodes":{
+ "httpbin.org:443":1
+ }
+ },
+ "uri":"/*"
+}'
+```
+
+```shell
+curl -X GET -i http://127.0.0.1:9080/get
+```
+
+```shell
+HTTP/1.1 200 OK
+Transfer-Encoding: chunked
+X-Server-status: on
+X-Server-balancer-addr: 34.206.80.189:443
+X-Server-id: 3
+
+{
+ "args": {},
+ "headers": {
+ "Accept": "*/*",
+ "Host": "127.0.0.1",
+ "User-Agent": "curl/7.29.0",
+ "X-Amzn-Trace-Id-Replace": "Root=1-629e0b89-1e274fdd7c23ca6e64145aa2",
+ "X-Forwarded-Host": "127.0.0.1"
+ },
+ "origin": "127.0.0.1, 117.136.46.203",
+ "url": "https://127.0.0.1/get"
+}
+
+```
+
## Disable Plugin
To disable the `response-rewrite` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect.
diff --git a/docs/en/latest/plugins/rocketmq-logger.md b/docs/en/latest/plugins/rocketmq-logger.md
index 60137bc6a15c..f08397001744 100644
--- a/docs/en/latest/plugins/rocketmq-logger.md
+++ b/docs/en/latest/plugins/rocketmq-logger.md
@@ -1,7 +1,12 @@
---
title: rocketmq-logger
+keywords:
+ - APISIX
+ - API Gateway
+ - Plugin
+ - RocketMQ Logger
+description: This document contains information about the Apache APISIX rocketmq-logger Plugin.
---
diff --git a/docs/zh/latest/batch-processor.md b/docs/zh/latest/batch-processor.md
-
批处理器可用于聚合条目(日志/任何数据)并进行批处理。
-当 `batch_max_size` 设置为零时,处理器将立即执行每个条目。将批处理的最大值设置为大于 1 将开始聚合条目,直到达到最大值或超时。
+当 `batch_max_size` 设置为 1 时,处理器将立即执行每个条目。将批处理的最大值设置为大于 1 将开始聚合条目,直到达到最大值或超时。
## 配置
diff --git a/docs/zh/latest/building-apisix.md b/docs/zh/latest/building-apisix.md
new file mode 100644
index 000000000000..4d2aadba2fea
--- /dev/null
+++ b/docs/zh/latest/building-apisix.md
@@ -0,0 +1,280 @@
+---
+id: building-apisix
+title: 源码安装 APISIX
+keywords:
+ - API 网关
+ - Apache APISIX
+ - 贡献代码
+ - 构建 APISIX
+ - 源码安装 APISIX
+description: 本文介绍了如何在本地使用源码安装 API 网关 Apache APISIX 来构建开发环境。
+---
+
+
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+如果你希望为 APISIX 做出贡献或配置开发环境,你可以参考本教程。
+
+如果你想通过其他方式安装 APISIX,你可以参考[安装指南](./installation-guide.md)。
+
+:::note
+
+如果你想为特定的环境或打包 APISIX,请参考 [apisix-build-tools](https://github.com/api7/apisix-build-tools)。
+
+:::
+
+## 源码安装 APISIX
+
+首先,你可以通过以下命令安装依赖项:
+
+```shell
+curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-dependencies.sh -sL | bash -
+```
+
+然后,创建一个目录并设置环境变量 `APISIX_VERSION`:
+
+```shell
+APISIX_VERSION='2.14.1'
+mkdir apisix-${APISIX_VERSION}
+```
+
+现在,你可以运行以下命令来下载 APISIX 源码包:
+
+```shell
+wget https://downloads.apache.org/apisix/${APISIX_VERSION}/apache-apisix-${APISIX_VERSION}-src.tgz
+```
+
+你可以从[下载页面](https://apisix.apache.org/downloads/)下载源码包。你也可以在该页面找到 APISIX Dashboard 和 APISIX Ingress Controller 的源码包。
+
+下载源码包后,你可以将文件解压到之前创建的文件夹中:
+
+```shell
+tar zxvf apache-apisix-${APISIX_VERSION}-src.tgz -C apisix-${APISIX_VERSION}
+```
+
+然后切换到解压的目录,创建依赖项并安装 APISIX,如下所示:
+
+```shell
+cd apisix-${APISIX_VERSION}
+make deps
+make install
+```
+
+该命令将安装 APISIX 运行时依赖的 Lua 库和 `apisix` 命令。
+
+:::note
+
+如果你在运行 `make deps` 时收到类似 `Could not find header file for LDAP/PCRE/openssl` 的错误消息,请参考以下解决方案。
+
+`luarocks` 支持自定义编译时依赖项(请参考:[配置文件格式](https://github.com/luarocks/luarocks/wiki/Config-file-format))。你可以使用第三方工具安装缺少的软件包并将其安装目录添加到 `luarocks` 变量表中。此方法适用于 macOS、Ubuntu、CentOS 和其他类似操作系统。
+
+此处仅给出 macOS 的具体解决步骤,其他操作系统的解决方案类似:
+
+1. 安装 `openldap`:
+
+ ```shell
+ brew install openldap
+ ```
+
+2. 使用以下命令找到本地安装目录:
+
+ ```shell
+ brew --prefix openldap
+ ```
+
+3. 将路径添加到项目配置文件中(选择两种方法中的一种即可):
+ 1. 你可以使用 `luarocks config` 命令设置 `LDAP_DIR`:
+
+ ```shell
+ luarocks config variables.LDAP_DIR /opt/homebrew/cellar/openldap/2.6.1
+ ```
+
+ 2. 你还可以更改 `luarocks` 的默认配置文件。打开 `~/.luarocks/config-5.1.lua` 文件并添加以下内容:
+
+ ```lua
+ variables = { LDAP_DIR = "/opt/homebrew/cellar/openldap/2.6.1", LDAP_INCDIR = "/opt/homebrew/cellar/openldap/2.6.1/include", }
+ ```
+
+ `/opt/homebrew/cellar/openldap/` 是 `brew` 在 macOS(Apple Silicon) 上安装 `openldap` 的默认位置。`/usr/local/opt/openldap/` 是 brew 在 macOS(Intel) 上安装 openldap 的默认位置。
+
+:::
+
+如果你不再需要 APISIX,可以执行以下命令卸载:
+
+```shell
+make uninstall && make undeps
+```
+
+:::danger
+
+该操作将删除所有相关文件。
+
+:::
+
+## 安装 etcd
+
+APISIX 默认使用 [etcd](https://github.com/etcd-io/etcd) 来保存和同步配置。在运行 APISIX 之前,你需要在你的机器上安装 etcd。
+
+
+
+
+```shell
+ETCD_VERSION='3.4.18'
+wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz
+tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \
+ cd etcd-v${ETCD_VERSION}-linux-amd64 && \
+ sudo cp -a etcd etcdctl /usr/bin/
+nohup etcd >/tmp/etcd.log 2>&1 &
+```
+
+
+
+
+
+```shell
+brew install etcd
+brew services start etcd
+```
+
+
+
+
+## 管理 APISIX 服务
+
+运行以下命令初始化 NGINX 配置文件和 etcd。
+
+```shell
+apisix init
+```
+
+:::tip
+
+你可以运行 `apisix help` 命令,查看返回结果,获取其他操作命令及其描述。
+
+:::
+
+运行以下命令测试配置文件,APISIX 将根据 `config.yaml` 生成 `nginx.conf`,并检查 `nginx.conf` 的语法是否正确。
+
+```shell
+apisix test
+```
+
+最后,你可以使用以下命令运行 APISIX。
+
+```shell
+apisix start
+```
+
+如果需要停止 APISIX,你可以使用 `apisix quit` 或者 `apisix stop` 命令。
+
+`apisix quit` 将正常关闭 APISIX,该指令确保在停止之前完成所有收到的请求。
+
+```shell
+apisix quit
+```
+
+`apisix stop` 命令会强制关闭 APISIX 并丢弃所有请求。
+
+```shell
+apisix stop
+```
+
+## 为 APISIX 构建 APISIX-Base
+
+APISIX 的一些特性需要在 OpenResty 中引入额外的 NGINX 模块。
+
+如果要使用这些功能,你需要构建一个自定义的 OpenResty 发行版(APISIX-Base)。请参考 [apisix-build-tools](https://github.com/api7/apisix-build-tools) 配置你的构建环境并进行构建。
+
+## 运行测试用例
+
+以下步骤展示了如何运行 APISIX 的测试用例:
+
+1. 安装 `perl` 的包管理器 [cpanminus](https://metacpan.org/pod/App::cpanminus#INSTALLATION)。
+2. 通过 `cpanm` 来安装 [test-nginx](https://github.com/openresty/test-nginx) 的依赖:
+
+ ```shell
+ sudo cpanm --notest Test::Nginx IPC::Run > build.log 2>&1 || (cat build.log && exit 1)
+ ```
+
+3. 将 `test-nginx` 源码克隆到本地:
+
+ ```shell
+ git clone https://github.com/openresty/test-nginx.git
+ ```
+
+4. 运行以下命令将当前目录添加到 Perl 的模块目录:
+
+ ```shell
+ export PERL5LIB=.:$PERL5LIB
+ ```
+
+ 你可以通过运行以下命令指定 NGINX 二进制路径:
+
+ ```shell
+ TEST_NGINX_BINARY=/usr/local/bin/openresty prove -Itest-nginx/lib -r t
+ ```
+
+5. 运行测试:
+
+ ```shell
+ make test
+ ```
+
+:::note
+
+部分测试需要依赖外部服务和修改系统配置。如果想要完整地构建测试环境,请参考 [ci/linux_openresty_common_runner.sh](https://github.com/apache/apisix/blob/master/ci/linux_openresty_common_runner.sh)。
+
+:::
+
+### 故障排查
+
+以下是运行 APISIX 测试用例的常见故障排除步骤。
+
+如果出现 `Error unknown directive "lua_package_path" in /API_ASPIX/apisix/t/servroot/conf/nginx.conf` 报错,是因为没有找到 OpenResty 自带的 NGINX,需要将其所在目录添加到 `PATH` 中,解决方法如下:
+
+- Linux 默认安装路径:
+
+ ```shell
+ export PATH=/usr/local/openresty/nginx/sbin:$PATH
+ ```
+
+- macOS 通过 `homebrew` 的默认安装路径:
+
+ ```shell
+ export PATH=/usr/local/opt/openresty/nginx/sbin:$PATH
+ ```
+
+### 运行指定的测试用例
+
+使用以下命令运行指定的测试用例:
+
+```shell
+prove -Itest-nginx/lib -r t/plugin/openid-connect.t
+```
+
+如果你想要了解更多信息,请参考 [testing framework](https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md)。
diff --git a/docs/zh/latest/config.json b/docs/zh/latest/config.json
index 940f16015df1..0832c0429924 100644
--- a/docs/zh/latest/config.json
+++ b/docs/zh/latest/config.json
@@ -48,7 +48,8 @@
"plugins/real-ip",
"plugins/server-info",
"plugins/ext-plugin-post-req",
- "plugins/ext-plugin-pre-req"
+ "plugins/ext-plugin-pre-req",
+ "plugins/ext-plugin-post-resp"
]
},
{
@@ -189,6 +190,16 @@
}
]
},
+ {
+ "type": "category",
+ "label": "Development",
+ "items": [
+ {
+ "type": "doc",
+ "id": "building-apisix"
+ }
+ ]
+ },
{
"type": "doc",
"id": "FAQ"
@@ -205,7 +216,7 @@
"discovery/dns",
"discovery/nacos",
"discovery/eureka",
- "discovery/zookeeper",
+ "discovery/control-plane-service-discovery",
"discovery/kubernetes"
]
},
diff --git a/docs/zh/latest/discovery/control-plane-service-discovery.md b/docs/zh/latest/discovery/control-plane-service-discovery.md
new file mode 100644
index 000000000000..b6bcb7450901
--- /dev/null
+++ b/docs/zh/latest/discovery/control-plane-service-discovery.md
@@ -0,0 +1,72 @@
+---
+title: 控制面服务发现
+keywords:
+ - API 网关
+ - APISIX
+ - ZooKeeper
+ - Nacos
+ - APISIX-Seed
+description: 本文档介绍了如何在 API 网关 Apache APISIX 控制面通过 Nacos 和 Zookeeper 实现服务发现。
+---
+
+
+
+本文档介绍了如何在 APISIX 控制面通过 Nacos 和 Zookeeper 实现服务发现。
+
+## APISIX-Seed 架构
+
+Apache APISIX 在早期已经支持了数据面服务发现,现在 APISIX 也通过 [APISIX-Seed](https://github.com/api7/apisix-seed) 项目实现了控制面服务发现,下图为 APISIX-Seed 架构图。
+
+![control-plane-service-discovery](../../../assets/images/control-plane-service-discovery.png)
+
+图中的数字代表的具体信息如下:
+
+1. 通过 Admin API 向 APISIX 注册上游并指定服务发现类型(配置示例见下文)。APISIX-Seed 将监听 etcd 中的 APISIX 资源变化,过滤服务发现类型并获取服务名称(如 ZooKeeper);
+2. APISIX-Seed 将在服务注册中心(如 ZooKeeper)订阅指定的服务名称,以监控和更新对应的服务信息;
+3. 客户端向服务注册中心注册服务后,APISIX-Seed 会获取新的服务信息,并将更新后的服务节点写入 etcd;
+4. 当 APISIX-Seed 在 etcd 中更新相应的服务节点信息时,APISIX 会将最新的服务节点信息同步到内存中。
+
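+下面是第 1 步的一个示例,通过 Admin API 注册一个使用 ZooKeeper 服务发现的上游(示例沿用原 ZooKeeper 文档,服务名 `APISIX-ZK` 仅作演示):
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
+{
+    "uri": "/zk/*",
+    "upstream": {
+        "service_name": "APISIX-ZK",
+        "type": "roundrobin",
+        "discovery_type": "zookeeper"
+    }
+}'
+```
+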
+:::note
+
+引入 APISIX-Seed 后,如果注册中心的服务变化频繁,etcd 中的数据也会频繁变化。因此,需要在启动 etcd 时设置 `--auto-compaction` 选项,用来定期压缩历史记录,避免耗尽 etcd 存储空间。详细信息请参考 [revisions](https://etcd.io/docs/v3.5/learning/api/#revisions)。
+
+:::
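+
+例如,可以在启动 etcd 时开启周期性自动压缩(以下参数取值仅作示意):
+
+```shell
+etcd --auto-compaction-mode=periodic --auto-compaction-retention=1h
+```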
+
+## 为什么需要 APISIX-Seed?
+
+- 网络拓扑变得更简单
+
+ APISIX 不需要与每个注册中心保持网络连接,只需要关注 etcd 中的配置信息即可。这将大大简化网络拓扑。
+
+- 上游服务总数据量变小
+
+ 由于某些注册中心(例如 `consul_kv`)的特性,APISIX 可能会在每个 Worker 中存储全量的注册中心服务数据。引入 APISIX-Seed 后,APISIX 的各个进程将不再需要额外缓存上游服务的相关信息。
+
+- 更容易管理
+
+ 服务发现配置需要为每个 APISIX 实例配置一次。通过引入 APISIX-Seed,APISIX 将对服务注册中心的配置变化无感知。
+
+## 支持的服务发现类型
+
+目前已经支持了 ZooKeeper 和 Nacos,后续还将支持更多的服务注册中心,更多信息请参考:[APISIX Seed](https://github.com/api7/apisix-seed#apisix-seed-for-apache-apisix)。
+
+- 如果你想启用控制面 ZooKeeper 服务发现,请参考:[ZooKeeper 部署教程](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/zookeeper.md)。
+
+- 如果你想启用控制面 Nacos 服务发现,请参考:[Nacos 部署教程](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/nacos.md)。
diff --git a/docs/zh/latest/discovery/zookeeper.md b/docs/zh/latest/discovery/zookeeper.md
deleted file mode 100644
index db2bec30103c..000000000000
--- a/docs/zh/latest/discovery/zookeeper.md
+++ /dev/null
@@ -1,142 +0,0 @@
----
-title: zookeeper
-keywords:
- - APISIX
- - ZooKeeper
- - apisix-seed
-description: 本篇文档介绍了如何使用 ZooKeeper 做服务发现
----
-
-
-
-目前,如果你想在 APISIX 控制面使用 ZooKeeper 实现服务发现功能,需要依赖 [apisix-seed](https://github.com/api7/apisix-seed) 项目。
-
-## `apisix-seed` 工作原理
-
-![APISIX-SEED](../../../assets/images/apisix-seed.svg)
-
-`apisix-seed` 通过同时监听 etcd 和 ZooKeeper 的变化来完成数据交换。
-
-流程如下:
-
-1. 使用 APISIX 注册一个上游服务,并将服务类型设置为 `zookeeper` 并保存到 etcd;
-2. `apisix-seed` 监听 etcd 中 APISIX 的资源变更,并过滤服务发现类型获得服务名称;
-3. `apisix-seed` 将服务绑定到 etcd 资源,并开始在 ZooKeeper 中监控此服务;
-4. 客户端向 ZooKeeper 注册该服务;
-5. `apisix-seed` 获取 ZooKeeper 中的服务变更;
-6. `apisix-seed` 通过服务名称查询绑定的 etcd 资源,并将更新后的服务节点写入 etcd;
-7. APISIX Worker 监控 etcd 资源变更,并在内存中刷新服务节点信息。
-
-## 如何使用
-
-### 环境准备:配置 `apisix-seed` 和 ZooKeeper
-
-1. 启动 ZooKeeper
-
-```bash
-docker run -itd --rm --name=dev-zookeeper -p 2181:2181 zookeeper:3.7.0
-```
-
-2. 下载并编译 `apisix-seed` 项目
-
-```bash
-git clone https://github.com/api7/apisix-seed.git
-cd apisix-seed
-go build
-```
-
-3. 参考以下信息修改 `apisix-seed` 配置文件,路径为 `conf/conf.yaml`
-
-```bash
-etcd: # APISIX etcd 配置
- host:
- - "http://127.0.0.1:2379"
- prefix: /apisix
- timeout: 30
-
-discovery:
- zookeeper: # 配置 ZooKeeper 进行服务发现
- hosts:
- - "127.0.0.1:2181" # ZooKeeper 服务器地址
- prefix: /zookeeper
- weight: 100 # ZooKeeper 节点默认权重设为 100
- timeout: 10 # ZooKeeper 会话超时时间默认设为 10 秒
-```
-
-4. 启动 `apisix-seed` 以监听服务变更
-
-```bash
-./apisix-seed
-```
-
-### 设置 APISIX 路由和上游
-
-通过以下命令设置路由,请求路径设置为 `/zk/*`,上游使用 ZooKeeper 作为服务发现,服务名称为 `APISIX-ZK`。
-
-```shell
-curl http://127.0.0.1:9080/apisix/admin/routes/1 \
--H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
-{
- "uri": "/zk/*",
- "upstream": {
- "service_name": "APISIX-ZK",
- "type": "roundrobin",
- "discovery_type": "zookeeper"
- }
-}'
-```
-
-### 注册服务
-
-使用 ZooKeeper-cli 注册服务
-
-登录 ZooKeeper 容器,使用 CLI 程序进行服务注册。具体命令如下:
-
-```bash
-# 登陆容器
-docker exec -it ${CONTAINERID} /bin/bash
-# 登陆 ZooKeeper 客户端
-oot@ae2f093337c1:/apache-zookeeper-3.7.0-bin# ./bin/zkCli.sh
-# 注册服务
-[zk: localhost:2181(CONNECTED) 0] create /zookeeper/APISIX-ZK '{"host":"127.0.0.1","port":1980,"weight":100}'
-```
-
-返回结果如下:
-
-```bash
-Created /zookeeper/APISIX-ZK
-```
-
-### 请求验证
-
-通过以下命令请求路由:
-
-```bash
-curl -i http://127.0.0.1:9080/zk/hello
-```
-
-正常返回结果:
-
-```bash
-HTTP/1.1 200 OK
-Connection: keep-alive
-...
-hello
-```
diff --git a/docs/zh/latest/external-plugin.md b/docs/zh/latest/external-plugin.md
index 3e8049f8631d..07b8fbaa35ab 100644
--- a/docs/zh/latest/external-plugin.md
+++ b/docs/zh/latest/external-plugin.md
@@ -32,6 +32,7 @@ APISIX 支持使用 Lua 语言编写插件,这种类型的插件在 APISIX 内
![external-plugin](../../assets/images/external-plugin.png)
当你在 APISIX 中配置了一个 Plugin Runner ,APISIX 将以子进程的方式运行该 Plugin Runner 。
+
该子进程与 APISIX 进程从属相同用户。当重启或者重新加载 APISIX 时,该 Plugin Runner 也将被重启。
一旦你为指定路由配置了 `ext-plugin-*` 插件,
diff --git a/docs/zh/latest/health-check.md b/docs/zh/latest/health-check.md
index 592218386fb1..3cd1e7789615 100644
--- a/docs/zh/latest/health-check.md
+++ b/docs/zh/latest/health-check.md
@@ -23,7 +23,7 @@ title: 健康检查
## Upstream 的健康检查
-Apache APISIX 的健康检查使用 [lua-resty-healthcheck](https://github.com/Kong/lua-resty-healthcheck) 实现。
+Apache APISIX 的健康检查使用 [lua-resty-healthcheck](https://github.com/api7/lua-resty-healthcheck) 实现。
注意:
diff --git a/docs/zh/latest/installation-guide.md b/docs/zh/latest/installation-guide.md
index 8fb3d7f80fc8..f563179cb22c 100644
--- a/docs/zh/latest/installation-guide.md
+++ b/docs/zh/latest/installation-guide.md
@@ -44,6 +44,7 @@ import TabItem from '@theme/TabItem';
{label: 'Docker', value: 'docker'},
{label: 'Helm', value: 'helm'},
{label: 'RPM', value: 'rpm'},
+ {label: 'Source Code', value: 'source code'},
]}>
@@ -169,6 +170,12 @@ apisix start
:::
+
+
+
+
+如果你想要使用源码构建 APISIX,请参考[源码安装 APISIX](./building-apisix.md)。
+
@@ -188,7 +195,7 @@ APISIX 使用 [etcd](https://github.com/etcd-io/etcd) 作为配置中心进行
```shell
-ETCD_VERSION='3.4.18'
+ETCD_VERSION='3.5.4'
wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz
tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \
cd etcd-v${ETCD_VERSION}-linux-amd64 && \
diff --git a/docs/zh/latest/mtls.md b/docs/zh/latest/mtls.md
index 07ab50e3183f..8996f2b5fef4 100644
--- a/docs/zh/latest/mtls.md
+++ b/docs/zh/latest/mtls.md
@@ -154,7 +154,7 @@ curl --resolve 'mtls.test.com::' "https://
diff --git a/docs/zh/latest/plugins/api-breaker.md b/docs/zh/latest/plugins/api-breaker.md
| max_breaker_sec | integer | 可选 | 300 | >=3 | 最大熔断持续时间 |
| unhealthy.http_statuses | array[integer] | 可选 | {500} | [500, ..., 599] | 不健康时候的状态码 |
| unhealthy.failures | integer | 可选 | 3 | >=1 | 触发不健康状态的连续错误请求次数 |
diff --git a/docs/zh/latest/plugins/basic-auth.md b/docs/zh/latest/plugins/basic-auth.md
index 69ee41526de7..39ca5ff2f2d9 100644
--- a/docs/zh/latest/plugins/basic-auth.md
+++ b/docs/zh/latest/plugins/basic-auth.md
@@ -99,7 +99,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 \
通过上述命令启用插件后,可以通过以下方法测试插件。
```shell
-curl -i -ubar:bar http://127.0.0.1:9080/hello
+curl -i -ufoo:bar http://127.0.0.1:9080/hello
```
如果配置成功则返回如下结果:
diff --git a/docs/zh/latest/plugins/client-control.md b/docs/zh/latest/plugins/client-control.md
index 2e7247cf8524..ae98f58dd94d 100644
--- a/docs/zh/latest/plugins/client-control.md
+++ b/docs/zh/latest/plugins/client-control.md
@@ -25,7 +25,7 @@ title: client-control
`client-control` 插件能够动态地控制 Nginx 处理客户端的请求的行为。
-**这个插件需要 APISIX 在 [APISIX-Base](../FAQ.md#如何构建-APISIX-Base-环境?) 上运行。**
+**这个插件需要 APISIX 在 [APISIX-Base](../FAQ.md#如何构建-apisix-base-环境) 上运行。**
## 属性
diff --git a/docs/zh/latest/plugins/ext-plugin-post-resp.md b/docs/zh/latest/plugins/ext-plugin-post-resp.md
new file mode 100644
index 000000000000..2027e3e831c7
--- /dev/null
+++ b/docs/zh/latest/plugins/ext-plugin-post-resp.md
@@ -0,0 +1,111 @@
+---
+title: ext-plugin-post-resp
+keywords:
+ - APISIX
+ - Plugin
+ - ext-plugin-post-resp
+description: 本文介绍了关于 Apache APISIX `ext-plugin-post-resp` 插件的基本信息及使用方法。
+---
+
+
+
+## 描述
+
+`ext-plugin-post-resp` 插件用于在执行内置 Lua 插件之前,在 Plugin Runner 内运行特定的 External Plugin。
+
+`ext-plugin-post-resp` 插件将在请求获取到上游的响应之后执行。
+
+启用本插件之后,APISIX 将使用 [lua-resty-http](https://github.com/api7/lua-resty-http) 库向上游发起请求,这会导致:
+
+- [proxy-control](./proxy-control.md) 插件不可用
+- [proxy-mirror](./proxy-mirror.md) 插件不可用
+- [proxy-cache](./proxy-cache.md) 插件不可用
+- [APISIX 与上游间的双向认证](../mtls.md#apisix-与上游间的双向认证) 功能尚不可用
+
+如果你想了解更多关于 External Plugin 的信息,请参考 [External Plugin](../external-plugin.md)。
+
+:::note
+
+External Plugin 执行的结果会影响当前请求的响应。
+
+External Plugin 尚不支持获取请求的上下文信息。
+
+External Plugin 尚不支持获取上游响应的响应体。
+
+:::
+
+## 属性
+
+| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 |
+| ----------------- | ------ | ------ | ------- | --------------------------------------------------------------- | -------------------------------------------------------------------------------- |
+| conf | array | 否 | | [{"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"}] | 在 Plugin Runner 内执行的插件列表的配置。 |
+| allow_degradation | boolean| 否 | false | [false, true] | 当 Plugin Runner 临时不可用时是否允许请求继续,当值设置为 `true` 时则自动允许请求继续。 |
+
+## 启用插件
+
+以下示例展示了如何在指定路由中启用 `ext-plugin-post-resp` 插件:
+
+```shell
+curl -i http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/index.html",
+ "plugins": {
+ "ext-plugin-post-resp": {
+ "conf" : [
+ {"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"}
+ ]
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ }
+}'
+```
+
+## 测试插件
+
+通过上述命令启用插件后,可以使用如下命令测试插件是否启用成功:
+
+```shell
+curl -i http://127.0.0.1:9080/index.html
+```
+
+在返回结果中可以看到刚刚配置的 Plugin Runner 已经被触发,同时 `ext-plugin-A` 插件也已经被执行。
+
+## 禁用插件
+
+当你需要禁用 `ext-plugin-post-resp` 插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/index.html",
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ }
+}'
+```
diff --git a/docs/zh/latest/plugins/grpc-transcode.md b/docs/zh/latest/plugins/grpc-transcode.md
index fb97dd576f8b..759d90065274 100644
--- a/docs/zh/latest/plugins/grpc-transcode.md
+++ b/docs/zh/latest/plugins/grpc-transcode.md
@@ -135,7 +135,7 @@ print(resp.text)
运行脚本:
```shell
-chmod +x ./upload_pb.pb
+chmod +x ./upload_pb.py
./upload_pb.py proto.pb 1
```
diff --git a/docs/zh/latest/plugins/gzip.md b/docs/zh/latest/plugins/gzip.md
index 9d1de6c4088d..75493abdc7a7 100644
--- a/docs/zh/latest/plugins/gzip.md
+++ b/docs/zh/latest/plugins/gzip.md
@@ -32,7 +32,7 @@ description: 本文介绍了关于 Apache APISIX `gzip` 插件的基本信息及
:::info IMPORTANT
-该插件要求 Apache APISIX 运行在 [APISIX-Base](../FAQ.md#如何构建-APISIX-Base-环境?) 上。
+该插件要求 Apache APISIX 运行在 [APISIX-Base](../FAQ.md#如何构建-apisix-base-环境) 上。
:::
diff --git a/docs/zh/latest/plugins/kafka-logger.md b/docs/zh/latest/plugins/kafka-logger.md
index 9257be4f0cbd..302b273f37a8 100644
--- a/docs/zh/latest/plugins/kafka-logger.md
+++ b/docs/zh/latest/plugins/kafka-logger.md
@@ -47,6 +47,10 @@ title: kafka-logger
| include_resp_body| boolean | 可选 | false | [false, true] | 是否包括响应体。包含响应体,当为`true`。 |
-| include_resp_body_expr | array | 可选 | | | 是否采集响体,基于 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 该选项需要开启 `include_resp_body`|
+| include_resp_body_expr | array | 可选 | | | 是否采集响应体,基于 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。该选项需要开启 `include_resp_body`。|
| cluster_name | integer | 可选 | 1 | [0,...] | kafka 集群的名称。当有两个或多个 kafka 集群时,可以指定不同的名称。只适用于 producer_type 是 async 模式。|
+| producer_batch_num | integer | 可选 | 200 | [1,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的`batch_num`参数,聚合消息批量提交,单位为消息条数 |
+| producer_batch_size | integer | 可选 | 1048576 | [0,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的`batch_size`参数,单位为字节 |
+| producer_max_buffering | integer | 可选 | 50000 | [1,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的`max_buffering`参数,最大缓冲区,单位为条 |
+| producer_time_linger | integer | 可选 | 1 | [1,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的`flush_time`参数,单位为秒 |
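+
+以下是一个包含上述 producer 参数的 `kafka-logger` 插件配置片段(`broker_list`、`kafka_topic` 等取值仅作演示):
+
+```json
+{
+    "kafka-logger": {
+        "broker_list": { "127.0.0.1": 9092 },
+        "kafka_topic": "test",
+        "producer_batch_num": 200,
+        "producer_batch_size": 1048576,
+        "producer_max_buffering": 50000,
+        "producer_time_linger": 1
+    }
+}
+```
+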
本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
diff --git a/docs/zh/latest/plugins/opentelemetry.md b/docs/zh/latest/plugins/opentelemetry.md
index aa6899df3eaf..bcbe04ede805 100644
--- a/docs/zh/latest/plugins/opentelemetry.md
+++ b/docs/zh/latest/plugins/opentelemetry.md
@@ -23,42 +23,80 @@ title: opentelemetry
## 描述
-[OpenTelemetry](https://opentelemetry.io/) 提供符合 [OpenTelemetry specification](https://opentelemetry.io/docs/reference/specification/) 协议规范的 Tracing 数据上报。
+`opentelemetry` 插件可用于根据 [OpenTelemetry specification](https://opentelemetry.io/docs/reference/specification/) 协议规范上报 Tracing 数据。
-只支持 `HTTP` 协议,且请求类型为 `application/x-protobuf` 的数据上报,相关协议标准:[OTLP/HTTP Request](https://opentelemetry.io/docs/reference/specification/protocol/otlp/#otlphttp-request).
+该插件仅支持二进制编码的 [OTLP over HTTP](https://opentelemetry.io/docs/reference/specification/protocol/otlp/#otlphttp),即请求类型为 `application/x-protobuf` 的数据上报。
## 属性
-| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 |
-| ------------ | ------ | ------ | -------- | ------------ | ----------------------------------------------------- |
-| sampler | object | 可选 | | | 采样配置
-| sampler.name | string | 可选 | always_off | ["always_on", "always_off", "trace_id_ratio", "parent_base"] | 采样算法,always_on:全采样;always_off:不采样;trace_id_ratio:基于 trace id 的百分比采样;parent_base:如果存在 tracing 上游,则使用上游的采样决定,否则使用配置的采样算法决策
-| sampler.options | object | 可选 | | {fraction = 0, root = {name = "always_off"}} | 采样算法参数
-| sampler.options.fraction | number | 可选 | 0 | [0, 1] | trace_id_ratio 采样算法的百分比
-| sampler.options.root | object | 可选 | {name = "always_off", options = {fraction = 0}} | | parent_base 采样算法在没有上游 tracing 时,会使用 root 采样算法做决策
-| sampler.options.root.name | string | 可选 | always_off | ["always_on", "always_off", "trace_id_ratio"] | 采样算法
-| sampler.options.root.options | object | 可选 | {fraction = 0} | | 采样算法参数
-| sampler.options.root.options.fraction | number | 可选 | 0 | [0, 1] | trace_id_ratio 采样算法的百分比
-| additional_attributes | array[string] | optional | | | 追加到 trace span 的额外属性(变量名为 key,变量值为 value)
-| additional_attributes[0] | string | required | | | APISIX or Nginx 变量,例如 `http_header` or `route_id`
+| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 |
+| ------------------------------------- | ------------- | ------ | ----------------------------------------------- | ------------------------------------------------------------ | ----------------------------------------------------- |
+| sampler | object | 否 | | | 采样策略。 |
+| sampler.name | string | 否 | always_off | ["always_on", "always_off", "trace_id_ratio", "parent_base"] | 采样策略。`always_on`:全采样;`always_off`:不采样;`trace_id_ratio`:基于 trace id 的百分比采样;`parent_base`:如果存在 tracing 上游,则使用上游的采样决定,否则使用配置的采样策略决策。 |
+| sampler.options | object | 否 | | {fraction = 0, root = {name = "always_off"}} | 采样策略参数。 |
+| sampler.options.fraction | number | 否 | 0 | [0, 1] | `trace_id_ratio` 采样策略的百分比。 |
+| sampler.options.root | object | 否 | {name = "always_off", options = {fraction = 0}} | | `parent_base` 采样策略在没有上游 tracing 时,会使用 root 采样策略做决策。 |
+| sampler.options.root.name | string | 否 | always_off | ["always_on", "always_off", "trace_id_ratio"] | root 采样策略。 |
+| sampler.options.root.options | object | 否 | {fraction = 0} | | root 采样策略参数。 |
+| sampler.options.root.options.fraction | number | 否 | 0 | [0, 1] | `trace_id_ratio` root 采样策略的百分比 |
+| additional_attributes | array[string] | 否 | | | 追加到 trace span 的额外属性(变量名为 `key`,变量值为 `value`)。 |
+| additional_attributes[0] | string | 是 | | | APISIX 或 NGINX 变量,例如:`http_header` 或者 `route_id`。 |
+
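+例如,下面的插件配置片段使用 `parent_base` 采样策略,并以 `trace_id_ratio`(采样比例 0.1)作为 root 采样策略,取值仅作示意:
+
+```json
+{
+    "opentelemetry": {
+        "sampler": {
+            "name": "parent_base",
+            "options": {
+                "root": {
+                    "name": "trace_id_ratio",
+                    "options": { "fraction": 0.1 }
+                }
+            }
+        }
+    }
+}
+```
+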
+## 如何设置数据上报
+
+你可以通过在 `conf/config.yaml` 中指定配置来设置数据上报:
+
+| 名称 | 类型 | 默认值 | 描述 |
+| ------------------------------------------ | ------- | ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
+| trace_id_source | enum | random | trace ID 的来源。有效值为:`random` 或 `x-request-id`。当设置为 `x-request-id` 时,`x-request-id` 头的值将用作跟踪 ID。请确保当前请求 ID 是符合 TraceID 规范的:`[0-9a-f]{32}`。 |
+| resource | object | | 追加到 trace 的额外 [resource](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md)。 |
+| collector | object | {address = "127.0.0.1:4318", request_timeout = 3} | OpenTelemetry Collector 配置。 |
+| collector.address | string | 127.0.0.1:4318 | 数据采集服务的地址。 |
+| collector.request_timeout | integer | 3 | 数据采集服务上报请求超时时长,单位为秒。 |
+| collector.request_headers | object | | 数据采集服务上报请求附加的 HTTP 请求头。 |
+| batch_span_processor | object | | trace span 处理器参数配置。 |
+| batch_span_processor.drop_on_queue_full | boolean | true | 如果设置为 `true`,则在队列排满时丢弃 span;否则,强制处理批次。|
+| batch_span_processor.max_queue_size | integer | 2048 | 处理器缓存队列容量的最大值。 |
+| batch_span_processor.batch_timeout | number | 5 | 构造一批 span 超时时间,单位为秒。 |
+| batch_span_processor.max_export_batch_size | integer | 256 | 单个批次中要处理的 span 数量。 |
+| batch_span_processor.inactive_timeout | number | 2 | 两个处理批次之间的时间间隔,单位为秒。 |
+
+你可以参考以下示例进行配置:
+
+```yaml title="./conf/config.yaml"
+plugin_attr:
+ opentelemetry:
+ resource:
+ service.name: APISIX
+ tenant.id: business_id
+ collector:
+ address: 192.168.8.211:4318
+ request_timeout: 3
+ request_headers:
+ foo: bar
+ batch_span_processor:
+ drop_on_queue_full: false
+ max_queue_size: 6
+ batch_timeout: 2
+ inactive_timeout: 1
+ max_export_batch_size: 2
+```
## 如何启用
-首先,你需要在 `config.yaml` 里面启用 opentelemetry 插件:
+`opentelemetry` 插件默认为禁用状态,你需要在配置文件(`./conf/config.yaml`)中开启该插件:
-```yaml
-# 加到 config.yaml
+```yaml title="./conf/config.yaml"
plugins:
- ... # plugin you need
- opentelemetry
```
-然后重载 APISIX。
-
-下面是一个示例,在指定的 route 上开启了 opentelemetry 插件:
+开启成功后,可以通过如下命令在指定路由上启用 `opentelemetry` 插件:
```shell
-curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"methods": ["GET"],
"uris": [
@@ -74,58 +112,19 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1
"upstream": {
"type": "roundrobin",
"nodes": {
- "10.110.149.175:8089": 1
+ "127.0.0.1:1980": 1
}
}
}'
```
-## 如何设置数据上报
-
-我们可以通过指定 `conf/config.yaml` 中的配置来设置数据上报:
-
-| 名称 | 类型 | 默认值 | 描述 |
-| ------------ | ------ | -------- | ----------------------------------------------------- |
-| trace_id_source | enum | random | 合法的取值:`random` 或 `x-request-id`,允许使用当前请求 ID 代替随机 ID 作为新的 TraceID,必须确保当前请求 ID 是符合 TraceID 规范的:`[0-9a-f]{32}` |
-| resource | object | | 追加到 trace 的额外 [resource](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md) |
-| collector | object | {address = "127.0.0.1:4318", request_timeout = 3} | 数据采集服务 |
-| collector.address | string | 127.0.0.1:4318 | 数据采集服务地址 |
-| collector.request_timeout | integer | 3 | 数据采集服务上报请求超时时长,单位秒 |
-| collector.request_headers | object | | 数据采集服务上报请求附加的 HTTP 请求头 |
-| batch_span_processor | object | | trace span 处理器参数配置 |
-| batch_span_processor.drop_on_queue_full | boolean | true | 当处理器缓存队列慢试,丢弃新到来的 span |
-| batch_span_processor.max_queue_size | integer | 2048 | 处理器缓存队列容量最大值 |
-| batch_span_processor.batch_timeout | number | 5 | 构造一批 span 超时时长,单位秒 |
-| batch_span_processor.max_export_batch_size | integer | 256 | 一批 span 的数量,每次上报的 span 数量 |
-| batch_span_processor.inactive_timeout | number | 2 | 每隔多长时间检查是否有一批 span 可以上报,单位秒 |
-
-配置示例:
-
-```yaml
-plugin_attr:
- opentelemetry:
- resource:
- service.name: APISIX
- tenant.id: business_id
- collector:
- address: 192.168.8.211:4318
- request_timeout: 3
- request_headers:
- foo: bar
- batch_span_processor:
- drop_on_queue_full: false
- max_queue_size: 6
- batch_timeout: 2
- inactive_timeout: 1
- max_export_batch_size: 2
-```
-
## 禁用插件
-当你想禁用一条路由/服务上的 opentelemetry 插件的时候,很简单,在插件的配置中把对应的 JSON 配置删除即可,无须重启服务,即刻生效:
+当你需要禁用 `opentelemetry` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务:
-```console
-$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"methods": ["GET"],
"uris": [
@@ -136,7 +135,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335
"upstream": {
"type": "roundrobin",
"nodes": {
- "10.110.149.175:8089": 1
+ "127.0.0.1:1980": 1
}
}
}'
diff --git a/docs/zh/latest/plugins/prometheus.md b/docs/zh/latest/plugins/prometheus.md
index cdfa1f8f464d..191ca5387a4c 100644
--- a/docs/zh/latest/plugins/prometheus.md
+++ b/docs/zh/latest/plugins/prometheus.md
@@ -1,5 +1,11 @@
---
title: prometheus
+keywords:
+ - APISIX
+ - API Gateway
+ - Plugin
+ - Prometheus
+description: 本文将介绍 API 网关 Apache APISIX 如何通过 prometheus 插件将 metrics 上报到开源的监控软件 Prometheus。
---
-## 如何提取指标数据
+## 提取指标
-我们可以从指定的 url 中提取指标数据 `/apisix/prometheus/metrics`:
+你可以从指定的 URL(默认:`/apisix/prometheus/metrics`)中提取指标数据:
-```
+```shell
curl -i http://127.0.0.1:9091/apisix/prometheus/metrics
```
-把该 uri 地址配置到 prometheus 中去,就会自动完成指标数据提取。
+你可以将该 URI 地址添加到 Prometheus 中来提取指标数据,配置示例如下:
-例子如下:
-
-```yaml
+```yaml title="./prometheus.yml"
scrape_configs:
- job_name: "apisix"
- scrape_interval: 15s # 这个值会跟 Prometheus QL 中 rate 函数的时间范围有关系,rate 函数中的时间范围应该至少两倍于该值。
+ scrape_interval: 15s # 该值会跟 Prometheus QL 中 rate 函数的时间范围有关系,rate 函数中的时间范围应该至少两倍于该值。
metrics_path: "/apisix/prometheus/metrics"
static_configs:
- targets: ["127.0.0.1:9091"]
```
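+
+例如,按照上面 15 秒的 `scrape_interval`,在 Prometheus QL 中使用 `rate` 函数时,时间范围至少应为 30 秒(以下查询使用文中列出的 `apisix_bandwidth` 指标,仅作示意):
+
+```
+rate(apisix_bandwidth[30s])
+```
+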
-我们也可以在 prometheus 控制台中去检查状态:
+现在你可以在 Prometheus 控制台中检查状态:
![checking status on prometheus dashboard](../../../assets/images/plugin/prometheus01.png)
![prometheus apisix in-depth metric view](../../../assets/images/plugin/prometheus02.png)
-## 如何修改暴露指标的 uri
+## 使用 Grafana 绘制指标
-我们可以在 `conf/config.yaml` 的 `plugin_attr` 修改默认的 uri
+`prometheus` 插件导出的指标可以在 Grafana 进行图形化绘制显示。
-| 名称 | 类型 | 默认值 | 描述 |
-| ---------- | ------ | ---------------------------- | -------------- |
-| export_uri | string | "/apisix/prometheus/metrics" | 暴露指标的 uri |
-
-配置示例:
-
-```yaml
-plugin_attr:
- prometheus:
- export_uri: /apisix/metrics
-```
-
-## Grafana 面板
-
-插件导出的指标可以在 Grafana 进行图形化绘制显示。
-
-下载 [Grafana dashboard 元数据](https://github.com/apache/apisix/blob/master/docs/assets/other/json/apisix-grafana-dashboard.json) 并导入到 Grafana 中。
+如果需要进行设置,请下载 [APISIX 的 Grafana dashboard 元数据](https://github.com/apache/apisix/blob/master/docs/assets/other/json/apisix-grafana-dashboard.json) 并导入到 Grafana 中。
你可以到 [Grafana 官方](https://grafana.com/grafana/dashboards/11719) 下载 `Grafana` 元数据。
@@ -152,46 +161,51 @@ plugin_attr:
## 可用的 HTTP 指标
-* `Status codes`: upstream 服务返回的 HTTP 状态码,可以统计到每个服务或所有服务的响应状态码的次数总和。具有的维度:
+`prometheus` 插件可以导出以下指标:
+
+- Status codes: 上游服务返回的 HTTP 状态码,可以统计到每个服务或所有服务的响应状态码的次数总和。属性如下所示:
- | 名称 | 描述 |
- | -------------| --------------------|
- | code | upstream 服务返回的 HTTP 状态码。 |
- | route | 请求匹配的 route 的 `route_id`,未匹配,则默认为空字符串。 |
- | matched_uri | 请求匹配的 route 的 `uri`,未匹配,则默认为空字符串。 |
- | matched_host | 请求匹配的 route 的 `host`,未匹配,则默认为空字符串。 |
- | service | 与请求匹配的 route 的 `service_id`。当路由缺少 service_id 时,则默认为 `$host`。 |
- | consumer | 与请求匹配的 consumer 的 `consumer_name`。未匹配,则默认为空字符串。 |
- | node | 命中的 upstream 节点 `ip`。|
+ | 名称 | 描述 |
+ | -------------| ----------------------------------------------------------------------------- |
+ | code | 上游服务返回的 HTTP 状态码。 |
+ | route | 与请求匹配的路由的 `route_id`,如果未匹配,则默认为空字符串。 |
+ | matched_uri | 与请求匹配的路由的 `uri`,如果未匹配,则默认为空字符串。 |
+ | matched_host | 与请求匹配的路由的 `host`,如果未匹配,则默认为空字符串。 |
+ | service | 与请求匹配的路由的 `service_id`。当路由缺少 `service_id` 时,则默认为 `$host`。 |
+ | consumer | 与请求匹配的消费者的 `consumer_name`。如果未匹配,则默认为空字符串。 |
+ | node | 上游节点 IP 地址。 |
-* `Bandwidth`: 流经 APISIX 的总带宽(可分出口带宽和入口带宽),可以统计到每个服务的带宽总和。具有的维度:
+- Bandwidth: 经过 APISIX 的总带宽(出口带宽和入口带宽),可以统计到每个服务的带宽总和。属性如下所示:
| 名称 | 描述 |
| -------------| ------------- |
| type | 带宽的类型 (`ingress` 或 `egress`)。 |
- | route | 请求匹配的 route 的 `route_id`,未匹配,则默认为空字符串。 |
- | service | 与请求匹配的 route 的 `service_id`。当路由缺少 service_id 时,则默认为 `$host`。 |
- | consumer | 与请求匹配的 consumer 的 `consumer_name`。未匹配,则默认为空字符串。 |
- | node | 命中的 upstream 节点 `ip`。 |
+ | route | 与请求匹配的路由的 `route_id`,如果未匹配,则默认为空字符串。 |
+ | service | 与请求匹配的路由的 `service_id`。当路由缺少 `service_id` 时,则默认为 `$host`。 |
+ | consumer | 与请求匹配的消费者的 `consumer_name`。如果未匹配,则默认为空字符串。 |
+ | node | 上游节点 IP 地址。 |
-* `etcd reachability`: APISIX 连接 etcd 的可用性,用 0 和 1 来表示,`1` 表示可用,`0` 表示不可用。
-* `Connections`: 各种的 Nginx 连接指标,如 active(正处理的活动连接数),reading(nginx 读取到客户端的 Header 信息数),writing(nginx 返回给客户端的 Header 信息数),已建立的连接数。
-* `Batch process entries`: 批处理未发送数据计数器,当你使用了批处理发送插件,比如:sys logger, http logger, sls logger, tcp logger, udp logger and zipkin,那么你将会在此指标中看到批处理当前尚未发送的数据的数量。
-* `Latency`: 每个服务的请求用时和 APISIX 处理耗时的直方图。具有的维度:
+- etcd reachability: APISIX 连接 etcd 的可用性,用 0 和 1 来表示,`1` 表示可用,`0` 表示不可用。
+- Connections: 各种 NGINX 连接指标,如 `active`(正处理的活动连接数)、`reading`(NGINX 读取到客户端的 Header 信息数)、`writing`(NGINX 返回给客户端的 Header 信息数)以及已建立的连接数。
+- Batch process entries: 批处理未发送数据计数器。当你使用了批处理发送插件,比如 [syslog](./syslog.md)、[http-logger](./http-logger.md)、[tcp-logger](./tcp-logger.md)、[udp-logger](./udp-logger.md) 和 [zipkin](./zipkin.md) 时,你将会在此指标中看到批处理当前尚未发送的数据条数。
+- Latency: 每个服务的请求用时和 APISIX 处理耗时的直方图。属性如下所示:
- | 名称 | 描述 |
- | -------------| ------------- |
- | type | 该值可以为 `apisix`、`upstream` 和 `request`,分别表示耗时的来源为 APISIX、上游及其总和。 |
- | service | 与请求匹配的 route 的 `service_id`。当路由缺少 service_id 时,则默认为 `$host`。 |
- | consumer | 与请求匹配的 consumer 的 `consumer_name`。未匹配,则默认为空字符串。 |
- | node | 命中的 upstream 节点 `ip`。 |
+ | 名称 | 描述 |
+ | -------------| --------------------------------------------------------------------------------------- |
+ | type | 该值可以是 `apisix`、`upstream` 和 `request`,分别表示耗时的来源是 APISIX、上游以及两者总和。 |
+ | service | 与请求匹配的路由的 `service_id`。当路由缺少 `service_id` 时,则默认为 `$host`。 |
+ | consumer | 与请求匹配的消费者的 `consumer_name`。如果未匹配,则默认为空字符串。 |
+ | node | 上游节点的 IP 地址。 |
-* `Info`: 当前 APISIX 节点信息。
+- Info: 当前 APISIX 节点信息。
-这里是 APISIX 的原始的指标数据集:
+以下是 APISIX 的原始的指标数据集:
```shell
-$ curl http://127.0.0.1:9091/apisix/prometheus/metrics
+curl http://127.0.0.1:9091/apisix/prometheus/metrics
+```
+
+```
# HELP apisix_bandwidth Total bandwidth in bytes consumed per service in Apisix
# TYPE apisix_bandwidth counter
apisix_bandwidth{type="egress",route="",service="",consumer="",node=""} 8417
@@ -254,12 +268,12 @@ apisix_http_latency_bucket{type="upstream",route="1",service="",consumer="",node
...
# HELP apisix_node_info Info of APISIX node
# TYPE apisix_node_info gauge
-apisix_node_info{hostname="desktop-2022q8f-wsl"} 1
+apisix_node_info{hostname="APISIX"} 1
```
## 禁用插件
-在插件设置页面中删除相应的 json 配置即可禁用 `prometheus` 插件。APISIX 的插件是热加载的,因此无需重启 APISIX 服务。
+当你需要禁用 `prometheus` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务:
```shell
curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
@@ -279,13 +293,13 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1
:::info IMPORTANT
-该功能要求 Apache APISIX 运行在 [APISIX-Base](../FAQ.md#如何构建-APISIX-Base-环境?) 上。
+该功能要求 APISIX 运行在 [APISIX-Base](../FAQ.md#如何构建-apisix-base-环境) 上。
:::
我们也可以通过 `prometheus` 插件采集 TCP/UDP 指标。
-首先,确保 `prometheus` 插件已经在你的配置文件(`conf/config.yaml`)中启用:
+首先,确保 `prometheus` 插件已经在你的配置文件(`./conf/config.yaml`)中启用:
```yaml title="conf/config.yaml"
stream_plugins:
@@ -293,7 +307,7 @@ stream_plugins:
- prometheus
```
-接着你需要在 stream route 中配置该插件:
+接着你需要在 stream 路由中配置该插件:
```shell
curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
@@ -312,20 +326,20 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03
## 可用的 TCP/UDP 指标
-以下是把 APISIX 作为 L4 代理时可用的指标:
+以下是将 APISIX 作为 L4 代理时可用的指标:
-* `Stream Connections`: 路由级别的已处理连接数。具有的维度:
+* Stream Connections: 路由级别的已处理连接数。具有的维度:
- | 名称 | 描述 |
- | -------------| --------------------|
- | route | 匹配的 stream route ID|
-* `Connections`: 各种的 Nginx 连接指标,如 active,reading,writing,已建立的连接数。
-* `Info`: 当前 APISIX 节点信息。
+ | 名称 | 描述 |
+ | ------------- | ---------------------- |
+ | route | 匹配的 stream 路由 ID。 |
+* Connections: 各种 NGINX 连接指标,如 `active`、`reading`、`writing` 以及已建立的连接数。
+* Info: 当前 APISIX 节点信息。
-这里是 APISIX 指标的范例:
+以下是 APISIX 指标的示例:
```shell
-$ curl http://127.0.0.1:9091/apisix/prometheus/metrics
+curl http://127.0.0.1:9091/apisix/prometheus/metrics
```
```
diff --git a/docs/zh/latest/plugins/proxy-control.md b/docs/zh/latest/plugins/proxy-control.md
index 7b15ee5dbc75..6c309a257d65 100644
--- a/docs/zh/latest/plugins/proxy-control.md
+++ b/docs/zh/latest/plugins/proxy-control.md
@@ -25,7 +25,7 @@ title: proxy-control
`proxy-control` 能够动态地控制 Nginx 代理的行为。
-**这个插件需要 APISIX 在 [APISIX-Base](../FAQ.md#如何构建-APISIX-Base-环境?) 上运行。**
+**这个插件需要 APISIX 在 [APISIX-Base](../FAQ.md#如何构建-apisix-base-环境) 上运行。**
## 属性
diff --git a/docs/zh/latest/plugins/proxy-rewrite.md b/docs/zh/latest/plugins/proxy-rewrite.md
index 52c5e07cca22..4ef8e81eb725 100644
--- a/docs/zh/latest/plugins/proxy-rewrite.md
+++ b/docs/zh/latest/plugins/proxy-rewrite.md
@@ -34,7 +34,7 @@ description: 本文介绍了关于 Apache APISIX `proxy-rewrite` 插件的基本
## 属性
| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 |
-| --------- | ------------- | ----- | ------- | ---------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------ ------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| --------- | ------------- | ----- | ------- | ---------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| scheme | string | 否 | "http" | ["http", "https"] | 不推荐使用。应该在 Upstream 的 `scheme` 字段设置上游的 `scheme`。|
| uri | string | 否 | | | 转发到上游的新 `uri` 地址。支持 [NGINX variables](https://nginx.org/en/docs/http/ngx_http_core_module.html) 变量,例如:`$arg_name`。 |
| method | string | 否 | | ["GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS","MKCOL", "COPY", "MOVE", "PROPFIND", "PROPFIND","LOCK", "UNLOCK", "PATCH", "TRACE"] | 将路由的请求方法代理为该请求方法。 |
diff --git a/docs/zh/latest/plugins/real-ip.md b/docs/zh/latest/plugins/real-ip.md
index f931c21f8ca3..3804cdcdac11 100644
--- a/docs/zh/latest/plugins/real-ip.md
+++ b/docs/zh/latest/plugins/real-ip.md
@@ -35,7 +35,7 @@ description: 本文介绍了关于 Apache APISIX `real-ip` 插件的基本信息
:::info IMPORTANT
-该插件要求 APISIX 运行在 [APISIX-Base](../FAQ.md#如何构建-APISIX-Base-环境?) 上。
+该插件要求 APISIX 运行在 [APISIX-Base](../FAQ.md#如何构建-apisix-base-环境) 上。
:::
diff --git a/docs/zh/latest/plugins/request-id.md b/docs/zh/latest/plugins/request-id.md
index 5482672ea224..03490f197a7a 100644
--- a/docs/zh/latest/plugins/request-id.md
+++ b/docs/zh/latest/plugins/request-id.md
@@ -33,6 +33,12 @@ title: request-id
| include_in_response | boolean | 可选 | true | | 是否需要在返回头中包含该唯一 ID |
| algorithm | string | 可选 | "uuid" | ["uuid", "snowflake", "nanoid"] | ID 生成算法 |
+:::warning
+
+当使用 `snowflake` 算法时,请确保 APISIX 有权限写入 etcd。
+
+:::
+
## 如何启用
创建一条路由并在该路由上启用 `request-id` 插件:
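+
+例如,以下命令创建了一条使用 `snowflake` 算法的路由(示例,URI 与上游地址仅作演示):
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "uri": "/hello",
+    "plugins": {
+        "request-id": {
+            "algorithm": "snowflake"
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```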
diff --git a/docs/zh/latest/plugins/response-rewrite.md b/docs/zh/latest/plugins/response-rewrite.md
index a3701c8afc90..c89b0256b4c8 100644
--- a/docs/zh/latest/plugins/response-rewrite.md
+++ b/docs/zh/latest/plugins/response-rewrite.md
@@ -125,10 +125,80 @@ X-Server-balancer_addr: 127.0.0.1:80
如果你在 `access` 阶段执行了 `ngx.exit`,该操作只是中断了请求处理阶段,响应阶段仍然会处理。如果你配置了 `response-rewrite` 插件,它会强制覆盖你的响应信息(如响应代码)。
-![ngx.edit tabular overview](https://cdn.jsdelivr.net/gh/Miss-you/img/picgo/20201113010623.png)
+| Phase | rewrite | access | header_filter | body_filter |
+|---------------|----------|----------|---------------|-------------|
+| rewrite | ngx.exit | √ | √ | √ |
+| access | × | ngx.exit | √ | √ |
+| header_filter | √ | √ | ngx.exit | √ |
+| body_filter | √ | √ | × | ngx.exit |
:::
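+
+以下是演示该行为的最小示例(路由与上游仅作演示):`serverless-pre-function` 在 `access` 阶段调用 `ngx.exit` 返回 403,而 `response-rewrite` 仍会在响应阶段把状态码改写为 200:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "uri": "/hello",
+    "plugins": {
+        "serverless-pre-function": {
+            "phase": "access",
+            "functions": ["return function() ngx.exit(403) end"]
+        },
+        "response-rewrite": {
+            "status_code": 200
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```
+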
+以下示例使用 `filters` 正则匹配,将返回 body 中的 `X-Amzn-Trace-Id` 替换为 `X-Amzn-Trace-Id-Replace`:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "plugins":{
+ "response-rewrite":{
+ "headers":{
+ "X-Server-id":3,
+ "X-Server-status":"on",
+ "X-Server-balancer_addr":"$balancer_ip:$balancer_port"
+ },
+ "filters":[
+ {
+ "regex":"X-Amzn-Trace-Id",
+ "scope":"global",
+ "replace":"X-Amzn-Trace-Id-Replace"
+ }
+ ],
+ "vars":[
+ [
+ "status",
+ "==",
+ 200
+ ]
+ ]
+ }
+ },
+ "upstream":{
+ "type":"roundrobin",
+ "scheme":"https",
+ "nodes":{
+ "httpbin.org:443":1
+ }
+ },
+ "uri":"/*"
+}'
+```
+
+```shell
+curl -X GET -i http://127.0.0.1:9080/get
+```
+
+```shell
+HTTP/1.1 200 OK
+Transfer-Encoding: chunked
+X-Server-status: on
+X-Server-balancer_addr: 34.206.80.189:443
+X-Server-id: 3
+
+{
+ "args": {},
+ "headers": {
+ "Accept": "*/*",
+ "Host": "127.0.0.1",
+ "User-Agent": "curl/7.29.0",
+ "X-Amzn-Trace-Id-Replace": "Root=1-629e0b89-1e274fdd7c23ca6e64145aa2",
+ "X-Forwarded-Host": "127.0.0.1"
+ },
+ "origin": "127.0.0.1, 117.136.46.203",
+ "url": "https://127.0.0.1/get"
+}
+
+```
+
## 禁用插件
当你需要禁用 `response-rewrite` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务:
diff --git a/docs/zh/latest/plugins/skywalking.md b/docs/zh/latest/plugins/skywalking.md
index ffacde3813a4..b8381e8cc29a 100644
--- a/docs/zh/latest/plugins/skywalking.md
+++ b/docs/zh/latest/plugins/skywalking.md
@@ -1,5 +1,10 @@
---
title: skywalking
+keywords:
+ - APISIX
+ - Plugin
+ - SkyWalking
+description: 本文将介绍 API 网关 Apache APISIX 如何通过 skywalking 插件将 tracing 数据上报到 Apache SkyWalking(一个开源的 APM)。
---
## 测试插件
-### 运行 SkyWalking 实例
-
-#### 例子:
-
-1. 启动 SkyWalking OAP 服务:
- - SkyWalking 默认使用 H2 存储,可直接启动
-
- ```shell
- sudo docker run --name skywalking -d -p 1234:1234 -p 11800:11800 -p 12800:12800 --restart always apache/skywalking-oap-server:8.7.0-es6
- ```
+首先你可以通过 [Docker Compose](https://docs.docker.com/compose/install/) 启动 SkyWalking OAP 和 SkyWalking UI:
- - 也许你会更倾向于使用 Elasticsearch 存储
- 1. 则需要先安装 Elasticsearch:
+  - 在 `/usr/local` 目录中创建 `skywalking.yaml` 文件。
- ```shell
- sudo docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 --restart always -e "discovery.type=single-node" elasticsearch:6.7.2
- ```
+ ```yaml
+ version: "3"
+ services:
+ oap:
+ image: apache/skywalking-oap-server:8.9.1
+ restart: always
+ ports:
+ - "12800:12800/tcp"
- 2.【可选】安装 ElasticSearch 管理界面 elasticsearch-hq
+ ui:
+ image: apache/skywalking-ui:8.9.1
+ restart: always
+ ports:
+ - "8080:8080/tcp"
+ environment:
+ SW_OAP_ADDRESS: http://oap:12800
+ ```
- ```shell
- sudo docker run -d --name elastic-hq -p 5000:5000 --restart always elastichq/elasticsearch-hq
- ```
+ - 使用以下命令启动上述创建的文件:
- 3. 启动 SkyWalking OAP 服务:
+ ```shell
+ docker-compose -f skywalking.yaml up -d
+ ```
- ```shell
- sudo docker run --name skywalking -d -p 1234:1234 -p 11800:11800 -p 12800:12800 --restart always --link elasticsearch:elasticsearch -e SW_STORAGE=elasticsearch -e SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200 apache/skywalking-oap-server:8.7.0-es6
- ```
+ 完成上述操作后,就已经启动了 SkyWalking 以及 SkyWalking Web UI。你可以使用以下命令确认容器是否正常运行:
-2. SkyWalking Web UI:
- 1. 启动管理系统:
+ ```shell
+ docker ps
+ ```
- ```shell
- sudo docker run --name skywalking-ui -d -p 8080:8080 --link skywalking:skywalking -e SW_OAP_ADDRESS=skywalking:12800 --restart always apache/skywalking-ui
- ```
+接下来你可以通过以下命令访问 APISIX:
- 2. 打开 Web UI 页面
- 在浏览器里面输入 http://10.110.149.175:8080 如出现如下界面,则表示安装成功
-
- ![plugin_skywalking](../../../assets/images/plugin/skywalking-3.png)
+```shell
+curl -v http://10.110.149.192:9080/uid/12
+```
-3. 测试示例:
- - 通过访问 APISIX,访问上游服务
+```
+HTTP/1.1 200 OK
+OK
+...
+```
- ```bash
- $ curl -v http://10.110.149.192:9080/uid/12
- HTTP/1.1 200 OK
- OK
- ...
- ```
+完成上述步骤后,打开浏览器,访问 SkyWalking 的 UI 页面,你可以看到如下服务拓扑图:
- - 打开浏览器,访问 SkyWalking 的 UI 页面:
+![plugin_skywalking](../../../assets/images/plugin/skywalking-4.png)
- ```bash
- http://10.110.149.175:8080/
- ```
+并且可以看到服务追踪列表:
- 可以看到服务拓扑图\
- ![plugin_skywalking](../../../assets/images/plugin/skywalking-4.png)\
- 可以看到服务追踪列表\
- ![plugin_skywalking](../../../assets/images/plugin/skywalking-5.png)
+![plugin_skywalking](../../../assets/images/plugin/skywalking-5.png)
## 禁用插件
-当你想禁用一条路由/服务上的 SkyWalking 插件的时候,很简单,在插件的配置中把对应的 JSON 配置删除即可,无须重启服务,即刻生效:
+当你需要禁用 `skywalking` 插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务:
```shell
-$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"methods": ["GET"],
"uris": [
@@ -181,58 +220,10 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335
}'
```
-现在就已经移除了 SkyWalking 插件了。其他插件的开启和移除也是同样的方法。
-
-如果你想完全禁用 SkyWalking 插件,比如停掉后台上报数据的定时器,需要在 `config.yaml` 里把插件注释掉:
+如果你想完全禁用 `skywalking` 插件,即停掉后台上报数据的定时器,就需要在配置文件(`./conf/config.yaml`)中注释掉该插件:
-```yaml
+```yaml title="./conf/config.yaml"
plugins:
- - ... # plugin you need
+ - ...
#- skywalking
```
-
-然后重载 APISIX 即可。
-
-## 上游服务为 SpringBoot 的示例代码
-
-```java
-package com.lenovo.ai.controller;
-
-import org.springframework.web.bind.annotation.PathVariable;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.RestController;
-import javax.servlet.http.HttpServletRequest;
-
-/**
- * @author cyxinda
- * @create 2020-05-29 14:02
- * @desc skywalking test controller
- **/
-@RestController
-public class TestController {
- @RequestMapping("/uid/{count}")
- public String getUidList(@PathVariable("count") String countStr, HttpServletRequest request) {
- System.out.println("counter:::::"+countStr);
- return "OK";
- }
-}
-
-```
-
-启动服务的时候,需要配置 SkyWalking agent。
-
-通过 `agent/config/agent.config` 修改配置
-
-```shell
-agent.service_name=yourservername
-collector.backend_service=10.110.149.175:11800
-```
-
-启动服务脚本:
-
-```shell
-nohup java -javaagent:/root/skywalking/app/agent/skywalking-agent.jar \
--jar /root/skywalking/app/app.jar \
---server.port=8089 \
-2>&1 > /root/skywalking/app/logs/nohup.log &
-```
diff --git a/docs/zh/latest/plugins/sls-logger.md b/docs/zh/latest/plugins/sls-logger.md
index 3ac708f9443a..d89914b06a26 100644
--- a/docs/zh/latest/plugins/sls-logger.md
+++ b/docs/zh/latest/plugins/sls-logger.md
@@ -46,6 +46,32 @@ title: sls-logger
本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
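+
+如需调整提交频率,可在插件配置中覆盖批处理器的相关参数,例如(示例,各字段取值均为占位符,仅作演示):
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "uri": "/hello",
+    "plugins": {
+        "sls-logger": {
+            "host": "100.100.99.135",
+            "port": 10009,
+            "project": "your_project",
+            "logstore": "your_logstore",
+            "access_key_id": "your_access_key_id",
+            "access_key_secret": "your_access_key_secret",
+            "batch_max_size": 500,
+            "inactive_timeout": 10
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```
+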
+## 插件元数据设置
+
+| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 |
+| ---------------- | ------- | ------ | ------------- | ------- | ------------------------------------------------ |
+| log_format | object | 可选 | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [Nginx 内置变量](http://nginx.org/en/docs/varindex.html)。特别的,**该设置是全局生效的**,意味着指定 log_format 后,将对所有绑定 sls-logger 的 Route 或 Service 生效。 |
+
+### 设置日志格式示例
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/sls-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "log_format": {
+ "host": "$host",
+ "@timestamp": "$time_iso8601",
+ "client_ip": "$remote_addr"
+ }
+}'
+```
+
+在日志收集处,将得到类似下面的日志:
+
+```shell
+{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"}
+{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"}
+```
+
## 如何开启
1. 下面例子展示了如何为指定路由开启 `sls-logger` 插件的。
diff --git a/docs/zh/latest/terminology/plugin.md b/docs/zh/latest/terminology/plugin.md
index 8883ef3744af..86bed6442982 100644
--- a/docs/zh/latest/terminology/plugin.md
+++ b/docs/zh/latest/terminology/plugin.md
@@ -89,6 +89,43 @@ local _M = {
| 名称 | 类型 | 描述 |
|--------------|------|----------------|
| error_response | string/object | 自定义错误响应 |
+| priority | integer | 自定义插件优先级 |
+
+### 自定义插件优先级
+
+所有插件都有默认优先级,但是可以自定义插件优先级来改变插件执行顺序。
+
+```json
+ {
+ "serverless-post-function": {
+ "_meta": {
+ "priority": 10000
+ },
+ "phase": "rewrite",
+ "functions" : ["return function(conf, ctx)
+ ngx.say(\"serverless-post-function\");
+ end"]
+ },
+ "serverless-pre-function": {
+ "_meta": {
+ "priority": -2000
+ },
+ "phase": "rewrite",
+ "functions": ["return function(conf, ctx)
+ ngx.say(\"serverless-pre-function\");
+ end"]
+ }
+}
+```
+
+serverless-pre-function 的默认优先级是 10000,serverless-post-function 的默认优先级是 -2000。默认情况下会先执行 serverless-pre-function 插件,再执行 serverless-post-function 插件。
+
+上面的配置意味着将 serverless-pre-function 插件的优先级设置为 -2000,serverless-post-function 插件的优先级设置为 10000。serverless-post-function 插件会先执行,再执行 serverless-pre-function 插件。
+
+注意:
+
+- 自定义插件优先级只会影响插件实例绑定的主体,不会影响该插件的所有实例。比如上面的插件配置属于路由 A,路由 B 上的 serverless-pre-function 和 serverless-post-function 插件的执行顺序不会受到影响,仍会使用默认优先级。
+- 自定义插件优先级不适用于 consumer 上配置的插件的 rewrite 阶段。路由上配置的插件的 rewrite 阶段将会优先运行,然后才会运行 consumer 上除 auth 插件之外的其他插件的 rewrite 阶段。
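+
+作为参考,可以按如下方式把上述插件配置绑定到一条路由上验证执行顺序(示例,URI 与上游仅作演示):
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "uri": "/hello",
+    "plugins": {
+        "serverless-post-function": {
+            "_meta": {"priority": 10000},
+            "phase": "rewrite",
+            "functions": ["return function(conf, ctx) ngx.say(\"serverless-post-function\"); end"]
+        },
+        "serverless-pre-function": {
+            "_meta": {"priority": -2000},
+            "phase": "rewrite",
+            "functions": ["return function(conf, ctx) ngx.say(\"serverless-pre-function\"); end"]
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```
+
+访问 `/hello` 时,应先看到 `serverless-post-function` 的输出,再看到 `serverless-pre-function` 的输出。
+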
## 热加载
diff --git a/rockspec/apisix-2.13.2-0.rockspec b/rockspec/apisix-2.13.2-0.rockspec
new file mode 100644
index 000000000000..e807d0e4d752
--- /dev/null
+++ b/rockspec/apisix-2.13.2-0.rockspec
@@ -0,0 +1,100 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+package = "apisix"
+version = "2.13.2-0"
+supported_platforms = {"linux", "macosx"}
+
+source = {
+ url = "git://github.com/apache/apisix",
+ branch = "2.13.2",
+}
+
+description = {
+ summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
+ homepage = "https://github.com/apache/apisix",
+ license = "Apache License 2.0",
+}
+
+dependencies = {
+ "lua-resty-ctxdump = 0.1-0",
+ "lua-resty-dns-client = 6.0.2",
+ "lua-resty-template = 2.0",
+ "lua-resty-etcd = 1.6.0",
+ "api7-lua-resty-http = 0.2.0",
+ "lua-resty-balancer = 0.04",
+ "lua-resty-ngxvar = 0.5.2",
+ "lua-resty-jit-uuid = 0.0.7",
+ "lua-resty-healthcheck-api7 = 2.2.0",
+ "api7-lua-resty-jwt = 0.2.4",
+ "lua-resty-hmac-ffi = 0.05",
+ "lua-resty-cookie = 0.1.0",
+ "lua-resty-session = 2.24",
+ "opentracing-openresty = 0.1",
+ "lua-resty-radixtree = 2.8.1",
+ "lua-protobuf = 0.3.4",
+ "lua-resty-openidc = 1.7.2-1",
+ "luafilesystem = 1.7.0-2",
+ "api7-lua-tinyyaml = 0.4.2",
+ "nginx-lua-prometheus = 0.20220127",
+ "jsonschema = 0.9.8",
+ "lua-resty-ipmatcher = 0.6.1",
+ "lua-resty-kafka = 0.07",
+ "lua-resty-logger-socket = 2.0-0",
+ "skywalking-nginx-lua = 0.6.0",
+ "base64 = 1.5-2",
+ "binaryheap = 0.4",
+ "api7-dkjson = 0.1.1",
+ "resty-redis-cluster = 1.02-4",
+ "lua-resty-expr = 1.3.1",
+ "graphql = 0.0.2",
+ "argparse = 0.7.1-1",
+ "luasocket = 3.0rc1-2",
+ "luasec = 0.9-1",
+ "lua-resty-consul = 0.3-2",
+ "penlight = 1.9.2-1",
+ "ext-plugin-proto = 0.4.0",
+ "casbin = 1.41.1",
+ "api7-snowflake = 2.0-1",
+ "inspect == 3.1.1",
+ "lualdap = 1.2.6-1",
+ "lua-resty-rocketmq = 0.3.0-0",
+ "opentelemetry-lua = 0.1-3",
+ "net-url = 0.9-1",
+ "xml2lua = 1.5-2",
+}
+
+build = {
+ type = "make",
+ build_variables = {
+ CFLAGS="$(CFLAGS)",
+ LIBFLAG="$(LIBFLAG)",
+ LUA_LIBDIR="$(LUA_LIBDIR)",
+ LUA_BINDIR="$(LUA_BINDIR)",
+ LUA_INCDIR="$(LUA_INCDIR)",
+ LUA="$(LUA)",
+ OPENSSL_INCDIR="$(OPENSSL_INCDIR)",
+ OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)",
+ },
+ install_variables = {
+ ENV_INST_PREFIX="$(PREFIX)",
+ ENV_INST_BINDIR="$(BINDIR)",
+ ENV_INST_LIBDIR="$(LIBDIR)",
+ ENV_INST_LUADIR="$(LUADIR)",
+ ENV_INST_CONFDIR="$(CONFDIR)",
+ },
+}
diff --git a/rockspec/apisix-master-0.rockspec b/rockspec/apisix-master-0.rockspec
index 9fc43aaa07bf..88b4886a6a97 100644
--- a/rockspec/apisix-master-0.rockspec
+++ b/rockspec/apisix-master-0.rockspec
@@ -34,7 +34,7 @@ dependencies = {
"lua-resty-ctxdump = 0.1-0",
"lua-resty-dns-client = 6.0.2",
"lua-resty-template = 2.0",
- "lua-resty-etcd = 1.6.2",
+ "lua-resty-etcd = 1.8.0",
"api7-lua-resty-http = 0.2.0",
"lua-resty-balancer = 0.04",
"lua-resty-ngxvar = 0.5.2",
diff --git a/t/admin/plugin-configs.t b/t/admin/plugin-configs.t
index a9822037683f..1f0da8a2a463 100644
--- a/t/admin/plugin-configs.t
+++ b/t/admin/plugin-configs.t
@@ -286,7 +286,6 @@ passed
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, body = t('/apisix/admin/plugin_configs/1',
ngx.HTTP_PUT,
[[{
@@ -413,7 +412,6 @@ passed
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, body = t('/apisix/admin/plugin_configs/1',
ngx.HTTP_PUT,
[[{
diff --git a/t/admin/plugins.t b/t/admin/plugins.t
index 2bfb5fee30ce..d7881249d40e 100644
--- a/t/admin/plugins.t
+++ b/t/admin/plugins.t
@@ -64,9 +64,9 @@ __DATA__
real-ip
client-control
proxy-control
+request-id
zipkin
ext-plugin-pre-req
-request-id
fault-injection
mocking
serverless-pre-function
@@ -265,7 +265,7 @@ plugins:
}
}
--- response_body eval
-qr/\{"metadata_schema":\{"properties":\{"ikey":\{"minimum":0,"type":"number"\},"skey":\{"type":"string"\}\},"required":\["ikey","skey"\],"type":"object"\},"priority":0,"schema":\{"\$comment":"this is a mark for our injected plugin schema","properties":\{"_meta":\{"properties":\{"error_response":\{"oneOf":\[\{"type":"string"\},\{"type":"object"\}\]\}\},"type":"object"\},"disable":\{"type":"boolean"\},"i":\{"minimum":0,"type":"number"\},"ip":\{"type":"string"\},"port":\{"type":"integer"\},"s":\{"type":"string"\},"t":\{"minItems":1,"type":"array"\}\},"required":\["i"\],"type":"object"\},"version":0.1\}/
+qr/\{"metadata_schema":\{"properties":\{"ikey":\{"minimum":0,"type":"number"\},"skey":\{"type":"string"\}\},"required":\["ikey","skey"\],"type":"object"\},"priority":0,"schema":\{"\$comment":"this is a mark for our injected plugin schema","properties":\{"_meta":\{"properties":\{"error_response":\{"oneOf":\[\{"type":"string"\},\{"type":"object"\}\]\},"priority":\{"description":"priority of plugins by customized order","type":"integer"\}\},"type":"object"\},"disable":\{"type":"boolean"\},"i":\{"minimum":0,"type":"number"\},"ip":\{"type":"string"\},"port":\{"type":"integer"\},"s":\{"type":"string"\},"t":\{"minItems":1,"type":"array"\}\},"required":\["i"\],"type":"object"\},"version":0.1\}/
@@ -366,7 +366,7 @@ qr/\{"properties":\{"password":\{"type":"string"\},"username":\{"type":"string"\
}
}
--- response_body
-{"priority":1003,"schema":{"$comment":"this is a mark for our injected plugin schema","properties":{"_meta":{"properties":{"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]}},"type":"object"},"burst":{"minimum":0,"type":"integer"},"conn":{"exclusiveMinimum":0,"type":"integer"},"default_conn_delay":{"exclusiveMinimum":0,"type":"number"},"disable":{"type":"boolean"},"key":{"type":"string"},"key_type":{"default":"var","enum":["var","var_combination"],"type":"string"},"only_use_default_delay":{"default":false,"type":"boolean"}},"required":["conn","burst","default_conn_delay","key"],"type":"object"},"version":0.1}
+{"priority":1003,"schema":{"$comment":"this is a mark for our injected plugin schema","properties":{"_meta":{"properties":{"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]},"priority":{"description":"priority of plugins by customized order","type":"integer"}},"type":"object"},"burst":{"minimum":0,"type":"integer"},"conn":{"exclusiveMinimum":0,"type":"integer"},"default_conn_delay":{"exclusiveMinimum":0,"type":"number"},"disable":{"type":"boolean"},"key":{"type":"string"},"key_type":{"default":"var","enum":["var","var_combination"],"type":"string"},"only_use_default_delay":{"default":false,"type":"boolean"}},"required":["conn","burst","default_conn_delay","key"],"type":"object"},"version":0.1}
diff --git a/t/admin/proto.t b/t/admin/proto.t
index bab49c933b6c..3a05a26df9a8 100644
--- a/t/admin/proto.t
+++ b/t/admin/proto.t
@@ -43,7 +43,6 @@ __DATA__
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, message = t('/apisix/admin/proto/1',
ngx.HTTP_PUT,
[[{
@@ -89,7 +88,6 @@ __DATA__
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, message = t('/apisix/admin/proto/1',
ngx.HTTP_DELETE,
nil,
@@ -117,7 +115,6 @@ __DATA__
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, message = t('/apisix/admin/proto/2',
ngx.HTTP_PUT,
[[{
@@ -210,7 +207,6 @@ __DATA__
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, message = t('/apisix/admin/proto/1',
ngx.HTTP_PUT,
[[{
diff --git a/t/admin/ssl.t b/t/admin/ssl.t
index 49d3ec8cd146..0232d21102fe 100644
--- a/t/admin/ssl.t
+++ b/t/admin/ssl.t
@@ -236,7 +236,7 @@ GET /t
GET /t
--- error_code: 400
--- response_body
-{"error_msg":"invalid configuration: value should match only one schema, but matches none"}
+{"error_msg":"invalid configuration: then clause did not match"}
--- no_error_log
[error]
@@ -535,7 +535,7 @@ passed
GET /t
--- error_code: 400
--- response_body
-{"error_msg":"invalid configuration: value should match only one schema, but matches none"}
+{"error_msg":"invalid configuration: then clause did not match"}
--- no_error_log
[error]
@@ -771,3 +771,72 @@ GET /t
passed
--- no_error_log
[error]
+
+
+
+=== TEST 20: missing sni information
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local ssl_cert = t.read_file("t/certs/apisix.crt")
+ local ssl_key = t.read_file("t/certs/apisix.key")
+ local data = {cert = ssl_cert, key = ssl_key}
+
+ local code, body = t.test('/apisix/admin/ssl/1',
+ ngx.HTTP_PUT,
+ core.json.encode(data),
+ [[{
+ "node": {
+ "key": "/apisix/ssl/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: then clause did not match"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 21: type client, missing sni information
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local ssl_cert = t.read_file("t/certs/apisix.crt")
+ local ssl_key = t.read_file("t/certs/apisix.key")
+ local data = {type = "client", cert = ssl_cert, key = ssl_key}
+
+ local code, body = t.test('/apisix/admin/ssl/1',
+ ngx.HTTP_PUT,
+ core.json.encode(data),
+ [[{
+ "node": {
+ "key": "/apisix/ssl/1"
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- response_body chomp
+passed
diff --git a/t/admin/ssl2.t b/t/admin/ssl2.t
index 752e25cd7ba4..865652ce2e89 100644
--- a/t/admin/ssl2.t
+++ b/t/admin/ssl2.t
@@ -71,7 +71,7 @@ __DATA__
}
}
--- response_body
-{"action":"create","node":{"value":{"cert":"","key":"","sni":"not-unwanted-post.com","status":1}}}
+{"action":"create","node":{"value":{"cert":"","key":"","sni":"not-unwanted-post.com","status":1,"type":"server"}}}
@@ -104,7 +104,7 @@ __DATA__
}
}
--- response_body
-{"action":"set","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","key":"","sni":"test.com","status":1}}}
+{"action":"set","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","key":"","sni":"test.com","status":1,"type":"server"}}}
@@ -137,7 +137,7 @@ __DATA__
}
}
--- response_body
-{"action":"compareAndSwap","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","key":"","sni":"t.com","status":1}}}
+{"action":"compareAndSwap","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","key":"","sni":"t.com","status":1,"type":"server"}}}
@@ -172,7 +172,7 @@ __DATA__
}
}
--- response_body
-{"action":"get","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","sni":"t.com","status":1}}}
+{"action":"get","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","sni":"t.com","status":1,"type":"server"}}}
diff --git a/t/admin/stream-routes.t b/t/admin/stream-routes.t
index 6552165b5221..01062fbc84f8 100644
--- a/t/admin/stream-routes.t
+++ b/t/admin/stream-routes.t
@@ -605,7 +605,6 @@ xrpc:
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
for _, case in ipairs({
{input = {
name = "xxx",
diff --git a/t/admin/upstream.t b/t/admin/upstream.t
index 96dcef3c482f..16bfb5157b7b 100644
--- a/t/admin/upstream.t
+++ b/t/admin/upstream.t
@@ -627,3 +627,136 @@ GET /t
{"error_msg":"wrong upstream id, do not need it"}
--- no_error_log
[error]
+
+
+
+=== TEST 19: client_cert/client_key and client_cert_id cannot appear at the same time
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local ssl_cert = t.read_file("t/certs/apisix.crt")
+ local ssl_key = t.read_file("t/certs/apisix.key")
+ local data = {
+ nodes = {
+ ["127.0.0.1:8080"] = 1
+ },
+ type = "roundrobin",
+ tls = {
+ client_cert_id = 1,
+ client_cert = ssl_cert,
+ client_key = ssl_key
+ }
+ }
+ local code, body = t.test('/apisix/admin/upstreams',
+ ngx.HTTP_POST,
+ core.json.encode(data)
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body eval
+qr/{"error_msg":"invalid configuration: property \\\"tls\\\" validation failed: failed to validate dependent schema for \\\"client_cert|client_key\\\": value wasn't supposed to match schema"}/
+--- no_error_log
+[error]
+
+
+
+=== TEST 20: tls.client_cert_id does not exist
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local data = {
+ nodes = {
+ ["127.0.0.1:8080"] = 1
+ },
+ type = "roundrobin",
+ tls = {
+ client_cert_id = 9999999
+ }
+ }
+ local code, body = t.test('/apisix/admin/upstreams',
+ ngx.HTTP_POST,
+ core.json.encode(data)
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"failed to fetch ssl info by ssl id [9999999], response code: 404"}
+--- no_error_log
+[error]
+
+
+
+=== TEST 21: tls.client_cert_id exist with wrong ssl type
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin")
+ local json = require("toolkit.json")
+ local ssl_cert = t.read_file("t/certs/mtls_client.crt")
+ local ssl_key = t.read_file("t/certs/mtls_client.key")
+ local data = {
+ sni = "test.com",
+ cert = ssl_cert,
+ key = ssl_key
+ }
+ local code, body = t.test('/apisix/admin/ssl/1',
+ ngx.HTTP_PUT,
+ json.encode(data)
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.print(body)
+ return
+ end
+
+ local data = {
+ upstream = {
+ scheme = "https",
+ type = "roundrobin",
+ nodes = {
+ ["127.0.0.1:1983"] = 1
+ },
+ tls = {
+ client_cert_id = 1
+ }
+ },
+ uri = "/hello"
+ }
+ local code, body = t.test('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ json.encode(data)
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.print(body)
+ return
+ end
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"failed to fetch ssl info by ssl id [1], wrong ssl type"}
+--- no_error_log
+[error]
diff --git a/t/bin/gen_snippet.lua b/t/bin/gen_snippet.lua
new file mode 100755
index 000000000000..085409b6b5ae
--- /dev/null
+++ b/t/bin/gen_snippet.lua
@@ -0,0 +1,51 @@
+#!/usr/bin/env luajit
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- this script generates Nginx configuration in the test
+-- so we can test some features with test-nginx
+local pkg_cpath_org = package.cpath
+local pkg_path_org = package.path
+local pkg_cpath = "deps/lib64/lua/5.1/?.so;deps/lib/lua/5.1/?.so;"
+local pkg_path = "deps/share/lua/5.1/?.lua;"
+-- modify the load path to load our dependencies
+package.cpath = pkg_cpath .. pkg_cpath_org
+package.path = pkg_path .. pkg_path_org
+
+
+local file = require("apisix.cli.file")
+local schema = require("apisix.cli.schema")
+local snippet = require("apisix.cli.snippet")
+local yaml_conf, err = file.read_yaml_conf("t/servroot")
+if not yaml_conf then
+ error(err)
+end
+local ok, err = schema.validate(yaml_conf)
+if not ok then
+ error(err)
+end
+
+local res, err
+if arg[1] == "conf_server" then
+ res, err = snippet.generate_conf_server(
+ {apisix_home = "t/servroot/"},
+ yaml_conf)
+end
+
+if not res then
+ error(err or "none")
+end
+print(res)
diff --git a/t/chaos/utils/Dockerfile b/t/chaos/utils/Dockerfile
new file mode 100644
index 000000000000..700108283799
--- /dev/null
+++ b/t/chaos/utils/Dockerfile
@@ -0,0 +1,75 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ARG ENABLE_PROXY=false
+
+FROM openresty/openresty:1.19.3.2-alpine-fat AS production-stage
+
+ARG ENABLE_PROXY
+ARG APISIX_PATH
+COPY $APISIX_PATH ./apisix
+RUN set -x \
+ && (test "${ENABLE_PROXY}" != "true" || /bin/sed -i 's,http://dl-cdn.alpinelinux.org,https://mirrors.aliyun.com,g' /etc/apk/repositories) \
+ && apk add --no-cache --virtual .builddeps \
+ automake \
+ autoconf \
+ libtool \
+ pkgconfig \
+ cmake \
+ git \
+ openldap-dev \
+ pcre-dev \
+ && cd apisix \
+ && git config --global url.https://github.com/.insteadOf git://github.com/ \
+ && make deps \
+ && cp -v bin/apisix /usr/bin/ \
+ && mv ../apisix /usr/local/apisix \
+ && apk del .builddeps build-base make unzip
+
+FROM alpine:3.13 AS last-stage
+
+ARG ENABLE_PROXY
+# add runtime for Apache APISIX
+RUN set -x \
+ && (test "${ENABLE_PROXY}" != "true" || /bin/sed -i 's,http://dl-cdn.alpinelinux.org,https://mirrors.aliyun.com,g' /etc/apk/repositories) \
+ && apk add --no-cache \
+ bash \
+ curl \
+ libstdc++ \
+ openldap \
+ pcre \
+ tzdata
+
+WORKDIR /usr/local/apisix
+
+COPY --from=production-stage /usr/local/openresty/ /usr/local/openresty/
+COPY --from=production-stage /usr/local/apisix/ /usr/local/apisix/
+COPY --from=production-stage /usr/bin/apisix /usr/bin/apisix
+
+# forward request and error logs to docker log collector
+RUN mkdir -p logs && touch logs/access.log && touch logs/error.log \
+ && ln -sf /dev/stdout /usr/local/apisix/logs/access.log \
+ && ln -sf /dev/stderr /usr/local/apisix/logs/error.log
+
+ENV PATH=$PATH:/usr/local/openresty/luajit/bin:/usr/local/openresty/nginx/sbin:/usr/local/openresty/bin
+
+EXPOSE 9080 9443
+
+CMD ["sh", "-c", "/usr/bin/apisix init && /usr/bin/apisix init_etcd && /usr/local/openresty/bin/openresty -p /usr/local/apisix -g 'daemon off;'"]
+
+STOPSIGNAL SIGQUIT
+
diff --git a/t/cli/test_deployment_data_plane.sh b/t/cli/test_deployment_data_plane.sh
new file mode 100755
index 000000000000..379265319b1c
--- /dev/null
+++ b/t/cli/test_deployment_data_plane.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+. ./t/cli/common.sh
+
+# clean etcd data
+etcdctl del / --prefix
+
+# data_plane does not write data to etcd
+echo '
+deployment:
+ role: data_plane
+ role_data_plane:
+ config_provider: control_plane
+ control_plane:
+ host:
+ - http://127.0.0.1:2379
+ timeout: 30
+ certs:
+ cert: /path/to/ca-cert
+ cert_key: /path/to/ca-cert
+ trusted_ca_cert: /path/to/ca-cert
+' > conf/config.yaml
+
+make run
+
+sleep 1
+
+res=$(etcdctl get / --prefix | wc -l)
+
+if [ ! $res -eq 0 ]; then
+ echo "failed: data_plane should not write data to etcd"
+ exit 1
+fi
+
+echo "passed: data_plane does not write data to etcd"
diff --git a/t/cli/test_deployment_traditional.sh b/t/cli/test_deployment_traditional.sh
new file mode 100755
index 000000000000..6a89ca0a65f4
--- /dev/null
+++ b/t/cli/test_deployment_traditional.sh
@@ -0,0 +1,186 @@
+#!/usr/bin/env bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+. ./t/cli/common.sh
+
+echo '
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+' > conf/config.yaml
+
+out=$(make init 2>&1 || true)
+if ! echo "$out" | grep 'invalid deployment traditional configuration: property "etcd" is required'; then
+ echo "failed: should check deployment schema during init"
+ exit 1
+fi
+
+echo "passed: should check deployment schema during init"
+
+# HTTP
+echo '
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - http://127.0.0.1:2379
+' > conf/config.yaml
+
+make run
+sleep 1
+
+code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1')
+make stop
+
+if [ ! $code -eq 200 ]; then
+ echo "failed: could not connect to etcd with http enabled"
+ exit 1
+fi
+
+# Both HTTP and Stream
+echo '
+apisix:
+ enable_admin: true
+ stream_proxy:
+ tcp:
+ - addr: 9100
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - http://127.0.0.1:2379
+' > conf/config.yaml
+
+make run
+sleep 1
+
+code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1')
+make stop
+
+if [ ! $code -eq 200 ]; then
+ echo "failed: could not connect to etcd with http & stream enabled"
+ exit 1
+fi
+
+# Stream
+echo '
+apisix:
+ enable_admin: false
+ stream_proxy:
+ tcp:
+ - addr: 9100
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - http://127.0.0.1:2379
+' > conf/config.yaml
+
+make run
+sleep 1
+make stop
+
+if grep '\[error\]' logs/error.log; then
+ echo "failed: could not connect to etcd with stream enabled"
+ exit 1
+fi
+
+echo "passed: could connect to etcd"
+
+echo '
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - http://127.0.0.1:2379
+ - https://127.0.0.1:2379
+' > conf/config.yaml
+
+out=$(make init 2>&1 || true)
+if ! echo "$out" | grep 'all nodes in the etcd cluster should enable/disable TLS together'; then
+ echo "failed: should validate etcd host"
+ exit 1
+fi
+
+echo "passed: validate etcd host"
+
+# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns
+
+# etcd mTLS verify
+echo '
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ host:
+ - "https://admin.apisix.dev:22379"
+ prefix: "/apisix"
+ tls:
+ cert: t/certs/mtls_client.crt
+ key: t/certs/mtls_client.key
+ verify: false
+ ' > conf/config.yaml
+
+make run
+sleep 1
+
+code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1')
+make stop
+
+if [ ! $code -eq 200 ]; then
+ echo "failed: could not work when mTLS is enabled"
+ exit 1
+fi
+
+echo "passed: etcd enables mTLS successfully"
+
+echo '
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ host:
+ - "https://admin.apisix.dev:22379"
+ prefix: "/apisix"
+ tls:
+ verify: false
+ ' > conf/config.yaml
+
+out=$(make init 2>&1 || echo "ouch")
+if ! echo "$out" | grep "bad certificate"; then
+ echo "failed: apisix should echo \"bad certificate\""
+ exit 1
+fi
+
+echo "passed: certificate verify fail expectedly"
diff --git a/t/cli/test_main.sh b/t/cli/test_main.sh
index 73202a8f3ce0..ea54c53b8425 100755
--- a/t/cli/test_main.sh
+++ b/t/cli/test_main.sh
@@ -705,6 +705,7 @@ fi
./bin/apisix stop
sleep 0.5
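+# remove any stale nginx.pid left by the previous stop (presumably so the next start check isn't affected)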
+rm logs/nginx.pid || true
# check no corresponding process
make run
diff --git a/t/cli/test_validate_config.sh b/t/cli/test_validate_config.sh
index 164d530fe0a4..216f1d9fb14d 100755
--- a/t/cli/test_validate_config.sh
+++ b/t/cli/test_validate_config.sh
@@ -202,3 +202,30 @@ if echo "$out" | grep "missing loopback or unspecified in the nginx_config.http.
fi
echo "passed: check the realip configuration for batch-requests"
+
+echo '
+etcd:
+ host:
+ - 127.0.0.1
+' > conf/config.yaml
+
+out=$(make init 2>&1 || true)
+if ! echo "$out" | grep 'property "host" validation failed'; then
+ echo "failed: should check etcd schema during init"
+ exit 1
+fi
+
+echo '
+etcd:
+ prefix: "/apisix/"
+ host:
+ - https://127.0.0.1
+' > conf/config.yaml
+
+out=$(make init 2>&1 || true)
+if ! echo "$out" | grep 'property "prefix" validation failed'; then
+ echo "failed: should check etcd schema during init"
+ exit 1
+fi
+
+echo "passed: check etcd schema during init"
diff --git a/t/config-center-yaml/plugin-metadata.t b/t/config-center-yaml/plugin-metadata.t
index 0ad0c6c088e4..6e0a9971e879 100644
--- a/t/config-center-yaml/plugin-metadata.t
+++ b/t/config-center-yaml/plugin-metadata.t
@@ -33,7 +33,7 @@ _EOC_
$block->set_value("yaml_config", $yaml_config);
- if (!$block->no_error_log) {
+ if (!$block->no_error_log && !$block->error_log) {
$block->set_value("no_error_log", "[error]");
}
});
@@ -67,3 +67,25 @@ plugin_metadata:
GET /hello
--- error_log
"remote_addr":"127.0.0.1"
+
+
+
+=== TEST 2: sanity
+--- apisix_yaml
+upstreams:
+ - id: 1
+ nodes:
+ "127.0.0.1:1980": 1
+ type: roundrobin
+routes:
+ -
+ uri: /hello
+ upstream_id: 1
+plugin_metadata:
+ - id: authz-casbin
+ model: 123
+#END
+--- request
+GET /hello
+--- error_log
+failed to check item data of [plugin_metadata]
diff --git a/t/core/os.t b/t/core/os.t
index dff6c8b3c191..4c99b311af5d 100644
--- a/t/core/os.t
+++ b/t/core/os.t
@@ -70,3 +70,22 @@ A
false
false
false
+
+
+
+=== TEST 3: usleep, bad arguments
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+
+ for _, c in ipairs({
+ {us = 0.1},
+ }) do
+ local ok = pcall(core.os.usleep, c.us)
+ ngx.say(ok)
+ end
+ }
+ }
+--- response_body
+false
diff --git a/t/debug/debug-mode.t b/t/debug/debug-mode.t
index d2f629d8edff..0fe20a8bb922 100644
--- a/t/debug/debug-mode.t
+++ b/t/debug/debug-mode.t
@@ -43,9 +43,9 @@ done
--- error_log
loaded plugin and sort by priority: 23000 name: real-ip
loaded plugin and sort by priority: 22000 name: client-control
+loaded plugin and sort by priority: 12015 name: request-id
loaded plugin and sort by priority: 12011 name: zipkin
loaded plugin and sort by priority: 12000 name: ext-plugin-pre-req
-loaded plugin and sort by priority: 11010 name: request-id
loaded plugin and sort by priority: 11000 name: fault-injection
loaded plugin and sort by priority: 10000 name: serverless-pre-function
loaded plugin and sort by priority: 4000 name: cors
diff --git a/t/debug/dynamic-hook.t b/t/debug/dynamic-hook.t
index 95ac63bcc83e..692942d1f9e4 100644
--- a/t/debug/dynamic-hook.t
+++ b/t/debug/dynamic-hook.t
@@ -213,6 +213,8 @@ call require("apisix").http_log_phase() return:{}
=== TEST 4: plugin filter log
--- debug_config
+basic:
+ enable: true
http_filter:
enable: true # enable or disable this feature
enable_header_name: X-APISIX-Dynamic-Debug # the header name of dynamic enable
@@ -295,6 +297,8 @@ filter(): call require("apisix.plugin").filter() return:{
=== TEST 5: multiple requests, only output logs of the request with enable_header_name
--- debug_config
+basic:
+ enable: true
http_filter:
enable: true
enable_header_name: X-APISIX-Dynamic-Debug
@@ -374,6 +378,8 @@ qr/call\srequire\(\"apisix.plugin\"\).filter\(\)\sreturn.*GET\s\/mysleep\?second
=== TEST 6: hook function with ctx as param
--- debug_config
+basic:
+ enable: true
http_filter:
enable: true # enable or disable this feature
enable_header_name: X-APISIX-Dynamic-Debug # the header name of dynamic enable
diff --git a/t/debug/hook.t b/t/debug/hook.t
index 5afac49ce589..1a9ebc140437 100644
--- a/t/debug/hook.t
+++ b/t/debug/hook.t
@@ -104,6 +104,11 @@ call require("apisix").http_log_phase() return:{}
=== TEST 4: plugin filter log
--- debug_config
+basic:
+ enable: true
+http_filter:
+ enable: true # enable or disable this feature
+ enable_header_name: X-APISIX-Dynamic-Debug # the header name of dynamic enable
hook_conf:
enable: true # enable or disable this feature
name: hook_test # the name of module and function list
@@ -120,10 +125,35 @@ hook_test: # module and function list, name: hook_test
GET /hello
--- more_headers
Host: foo.com
+X-APISIX-Dynamic-Debug: true
--- response_body
hello world
---- no_error_log
-[error]
--- error_log
filter(): call require("apisix.plugin").filter() args:{
filter(): call require("apisix.plugin").filter() return:{
+
+
+
+=== TEST 5: missing hook_conf
+--- debug_config
+basic:
+ enable: true
+http_filter:
+ enable: true # enable or disable this feature
+ enable_header_name: X-APISIX-Dynamic-Debug # the header name of dynamic enable
+
+hook_test: # module and function list, name: hook_test
+ apisix.plugin: # required module name
+ - filter # function name
+
+#END
+--- request
+GET /hello
+--- more_headers
+Host: foo.com
+X-APISIX-Dynamic-Debug: true
+--- response_body
+hello world
+--- error_log
+read_debug_yaml(): failed to validate debug config property "hook_conf" is required
+--- wait: 3
diff --git a/t/deployment/conf_server.t b/t/deployment/conf_server.t
new file mode 100644
index 000000000000..c6a088b380bb
--- /dev/null
+++ b/t/deployment/conf_server.t
@@ -0,0 +1,429 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX;
+
+my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx';
+my $version = eval { `$nginx_binary -V 2>&1` };
+
+if ($version =~ m/\/1.17.8/) {
+ plan(skip_all => "require OpenResty 1.19+");
+} else {
+ plan('no_plan');
+}
+
+add_block_preprocessor(sub {
+ my ($block) = @_;
+
+ if (!$block->request) {
+ $block->set_value("request", "GET /t");
+ }
+
+ if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+ $block->set_value("no_error_log", "[error]");
+ }
+
+});
+
+Test::Nginx::Socket::set_http_config_filter(sub {
+ my $config = shift;
+ my $snippet = `./t/bin/gen_snippet.lua conf_server`;
+ $config .= $snippet;
+ return $config;
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sync in https
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin").test
+
+ local consumers, _ = core.config.new("/consumers", {
+ automatic = true,
+ item_schema = core.schema.consumer,
+ })
+
+ ngx.sleep(0.6)
+ local idx = consumers.prev_index
+
+ local code, body = t('/apisix/admin/consumers',
+ ngx.HTTP_PUT,
+ [[{
+ "username": "jobs",
+ "plugins": {
+ "basic-auth": {
+ "username": "jobs",
+ "password": "678901"
+ }
+ }
+ }]])
+
+ ngx.sleep(2)
+ local new_idx = consumers.prev_index
+ if new_idx > idx then
+ ngx.say("prev_index updated")
+ else
+ ngx.say("prev_index not update")
+ end
+ }
+ }
+--- response_body
+prev_index updated
+--- extra_yaml_config
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - https://127.0.0.1:12379
+ tls:
+ verify: false
+
+
+
+=== TEST 2: mix ip & domain
+--- config
+ location /t {
+ content_by_lua_block {
+ local etcd = require("apisix.core.etcd")
+ assert(etcd.set("/apisix/test", "foo"))
+ local res = assert(etcd.get("/apisix/test"))
+ ngx.say(res.body.node.value)
+ }
+ }
+--- extra_yaml_config
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - http://127.0.0.2:2379
+ - http://localhost:2379
+ - http://[::1]:2379
+--- error_log
+dns resolve localhost, result:
+--- no_error_log
+[error]
+--- response_body
+foo
+
+
+
+=== TEST 3: resolve domain, result changed
+--- extra_init_by_lua
+ local resolver = require("apisix.core.resolver")
+ local old_f = resolver.parse_domain
+ local counter = 0
+ resolver.parse_domain = function (domain)
+ if domain == "x.com" then
+ counter = counter + 1
+ if counter % 2 == 0 then
+ return "127.0.0.2"
+ else
+ return "127.0.0.3"
+ end
+ else
+ return old_f(domain)
+ end
+ end
+--- config
+ location /t {
+ content_by_lua_block {
+ local etcd = require("apisix.core.etcd")
+ assert(etcd.set("/apisix/test", "foo"))
+ local res = assert(etcd.get("/apisix/test"))
+ ngx.say(res.body.node.value)
+ }
+ }
+--- extra_yaml_config
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - http://x.com:2379
+--- response_body
+foo
+--- error_log
+x.com is resolved to: 127.0.0.3
+x.com is resolved to: 127.0.0.2
+--- no_error_log
+[error]
+
+
+
+=== TEST 4: update balancer if the DNS result changed
+--- extra_init_by_lua
+ local resolver = require("apisix.core.resolver")
+ local old_f = resolver.parse_domain
+ package.loaded.counter = 0
+ resolver.parse_domain = function (domain)
+ if domain == "x.com" then
+ local counter = package.loaded.counter
+ package.loaded.counter = counter + 1
+ if counter % 2 == 0 then
+ return "127.0.0.2"
+ else
+ return "127.0.0.3"
+ end
+ else
+ return old_f(domain)
+ end
+ end
+
+ local picker = require("apisix.balancer.least_conn")
+ package.loaded.n_picker = 0
+ local old_f = picker.new
+ picker.new = function (nodes, upstream)
+ package.loaded.n_picker = package.loaded.n_picker + 1
+ return old_f(nodes, upstream)
+ end
+--- config
+ location /t {
+ content_by_lua_block {
+ local etcd = require("apisix.core.etcd")
+ assert(etcd.set("/apisix/test", "foo"))
+ local res = assert(etcd.get("/apisix/test"))
+ ngx.say(res.body.node.value)
+ local counter = package.loaded.counter
+ local n_picker = package.loaded.n_picker
+ if counter == n_picker then
+ ngx.say("OK")
+ else
+ ngx.say(counter, " ", n_picker)
+ end
+ }
+ }
+--- extra_yaml_config
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - http://127.0.0.1:2379
+ - http://x.com:2379
+--- response_body
+foo
+OK
+--- error_log
+x.com is resolved to: 127.0.0.3
+x.com is resolved to: 127.0.0.2
+--- no_error_log
+[error]
+
+
+
+=== TEST 5: retry
+--- config
+ location /t {
+ content_by_lua_block {
+ local etcd = require("apisix.core.etcd")
+ assert(etcd.set("/apisix/test", "foo"))
+ local res = assert(etcd.get("/apisix/test"))
+ ngx.say(res.body.node.value)
+ }
+ }
+--- extra_yaml_config
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - http://127.0.0.1:1979
+ - http://[::1]:1979
+ - http://localhost:2379
+--- error_log
+connect() failed
+--- response_body
+foo
+
+
+
+=== TEST 6: check default SNI
+--- http_config
+server {
+ listen 12345 ssl;
+ ssl_certificate cert/apisix.crt;
+ ssl_certificate_key cert/apisix.key;
+
+ ssl_certificate_by_lua_block {
+ local ngx_ssl = require "ngx.ssl"
+ ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name())
+ }
+
+ location / {
+ proxy_pass http://127.0.0.1:2379;
+ }
+}
+--- config
+ location /t {
+ content_by_lua_block {
+ local etcd = require("apisix.core.etcd")
+ assert(etcd.set("/apisix/test", "foo"))
+ local res = assert(etcd.get("/apisix/test"))
+ ngx.say(res.body.node.value)
+ }
+ }
+--- response_body
+foo
+--- extra_yaml_config
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - https://localhost:12345
+--- error_log
+Receive SNI: localhost
+--- no_error_log
+[error]
+
+
+
+=== TEST 7: check configured SNI
+--- http_config
+server {
+ listen 12345 ssl;
+ ssl_certificate cert/apisix.crt;
+ ssl_certificate_key cert/apisix.key;
+
+ ssl_certificate_by_lua_block {
+ local ngx_ssl = require "ngx.ssl"
+ ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name())
+ }
+
+ location / {
+ proxy_pass http://127.0.0.1:2379;
+ }
+}
+--- config
+ location /t {
+ content_by_lua_block {
+ local etcd = require("apisix.core.etcd")
+ assert(etcd.set("/apisix/test", "foo"))
+ local res = assert(etcd.get("/apisix/test"))
+ ngx.say(res.body.node.value)
+ }
+ }
+--- response_body
+foo
+--- extra_yaml_config
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - https://127.0.0.1:12345
+ tls:
+ sni: "x.com"
+--- error_log
+Receive SNI: x.com
+--- no_error_log
+[error]
+
+
+
+=== TEST 8: check Host header
+--- http_config
+server {
+ listen 12345;
+ location / {
+ access_by_lua_block {
+ ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host)
+ }
+ proxy_pass http://127.0.0.1:2379;
+ }
+}
+--- config
+ location /t {
+ content_by_lua_block {
+ local etcd = require("apisix.core.etcd")
+ assert(etcd.set("/apisix/test", "foo"))
+ local res = assert(etcd.get("/apisix/test"))
+ ngx.say(res.body.node.value)
+ }
+ }
+--- response_body
+foo
+--- extra_yaml_config
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - http://127.0.0.1:12345
+ - http://localhost:12345
+--- error_log
+Receive Host: localhost
+Receive Host: 127.0.0.1
+
+
+
+=== TEST 9: check Host header after retry
+--- http_config
+server {
+ listen 12345;
+ location / {
+ access_by_lua_block {
+ ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host)
+ }
+ proxy_pass http://127.0.0.1:2379;
+ }
+}
+--- config
+ location /t {
+ content_by_lua_block {
+ local etcd = require("apisix.core.etcd")
+ assert(etcd.set("/apisix/test", "foo"))
+ local res = assert(etcd.get("/apisix/test"))
+ ngx.say(res.body.node.value)
+ }
+ }
+--- response_body
+foo
+--- extra_yaml_config
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ etcd:
+ prefix: "/apisix"
+ host:
+ - http://127.0.0.1:1979
+ - http://localhost:12345
+--- error_log
+Receive Host: localhost
diff --git a/t/grpc_server_example/main.go b/t/grpc_server_example/main.go
index 18bda0536d00..1b533582c464 100644
--- a/t/grpc_server_example/main.go
+++ b/t/grpc_server_example/main.go
@@ -172,6 +172,31 @@ func (s *server) SayHelloBidirectionalStream(stream pb.Greeter_SayHelloBidirecti
}
}
+// SayMultipleHello implements helloworld.GreeterServer
+func (s *server) SayMultipleHello(ctx context.Context, in *pb.MultipleHelloRequest) (*pb.MultipleHelloReply, error) {
+ log.Printf("Received: %v", in.Name)
+ log.Printf("Enum Gender: %v", in.GetGenders())
+ msg := "Hello " + in.Name
+
+ persons := in.GetPersons()
+ if persons != nil {
+ for _, person := range persons {
+ if person.GetName() != "" {
+ msg += fmt.Sprintf(", name: %v", person.GetName())
+ }
+ if person.GetAge() != 0 {
+ msg += fmt.Sprintf(", age: %v", person.GetAge())
+ }
+ }
+ }
+
+ return &pb.MultipleHelloReply{
+ Message: msg,
+ Items: in.GetItems(),
+ Genders: in.GetGenders(),
+ }, nil
+}
+
func (s *server) Run(ctx context.Context, in *pb.Request) (*pb.Response, error) {
return &pb.Response{Body: in.User.Name + " " + in.Body}, nil
}
diff --git a/t/grpc_server_example/proto/helloworld.pb.go b/t/grpc_server_example/proto/helloworld.pb.go
index 9cb209566825..71b16a3455c6 100644
--- a/t/grpc_server_example/proto/helloworld.pb.go
+++ b/t/grpc_server_example/proto/helloworld.pb.go
@@ -1,8 +1,10 @@
-// Copyright 2015 gRPC authors.
//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
@@ -11,11 +13,12 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.27.1
-// protoc v3.6.1
+// protoc-gen-go v1.25.0-devel
+// protoc v3.12.4
// source: proto/helloworld.proto
package proto
@@ -374,6 +377,140 @@ func (x *PlusReply) GetResult() int64 {
return 0
}
+type MultipleHelloRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Items []string `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"`
+ Genders []Gender `protobuf:"varint,3,rep,packed,name=genders,proto3,enum=helloworld.Gender" json:"genders,omitempty"`
+ Persons []*Person `protobuf:"bytes,4,rep,name=persons,proto3" json:"persons,omitempty"`
+}
+
+func (x *MultipleHelloRequest) Reset() {
+ *x = MultipleHelloRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_proto_helloworld_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MultipleHelloRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MultipleHelloRequest) ProtoMessage() {}
+
+func (x *MultipleHelloRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_helloworld_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MultipleHelloRequest.ProtoReflect.Descriptor instead.
+func (*MultipleHelloRequest) Descriptor() ([]byte, []int) {
+ return file_proto_helloworld_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *MultipleHelloRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *MultipleHelloRequest) GetItems() []string {
+ if x != nil {
+ return x.Items
+ }
+ return nil
+}
+
+func (x *MultipleHelloRequest) GetGenders() []Gender {
+ if x != nil {
+ return x.Genders
+ }
+ return nil
+}
+
+func (x *MultipleHelloRequest) GetPersons() []*Person {
+ if x != nil {
+ return x.Persons
+ }
+ return nil
+}
+
+type MultipleHelloReply struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
+ Items []string `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"`
+ Genders []Gender `protobuf:"varint,3,rep,packed,name=genders,proto3,enum=helloworld.Gender" json:"genders,omitempty"`
+}
+
+func (x *MultipleHelloReply) Reset() {
+ *x = MultipleHelloReply{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_proto_helloworld_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MultipleHelloReply) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MultipleHelloReply) ProtoMessage() {}
+
+func (x *MultipleHelloReply) ProtoReflect() protoreflect.Message {
+ mi := &file_proto_helloworld_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MultipleHelloReply.ProtoReflect.Descriptor instead.
+func (*MultipleHelloReply) Descriptor() ([]byte, []int) {
+ return file_proto_helloworld_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *MultipleHelloReply) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+func (x *MultipleHelloReply) GetItems() []string {
+ if x != nil {
+ return x.Items
+ }
+ return nil
+}
+
+func (x *MultipleHelloReply) GetGenders() []Gender {
+ if x != nil {
+ return x.Genders
+ }
+ return nil
+}
+
var File_proto_helloworld_proto protoreflect.FileDescriptor
var file_proto_helloworld_proto_rawDesc = []byte{
@@ -403,40 +540,63 @@ var file_proto_helloworld_proto_rawDesc = []byte{
0x0a, 0x01, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x01, 0x62, 0x22, 0x23, 0x0a, 0x09,
0x50, 0x6c, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73,
0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x2a, 0x40, 0x0a, 0x06, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x0e, 0x47,
- 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
- 0x0f, 0x0a, 0x0b, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4c, 0x45, 0x10, 0x01,
- 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x46, 0x45, 0x4d, 0x41, 0x4c,
- 0x45, 0x10, 0x02, 0x32, 0xc0, 0x03, 0x0a, 0x07, 0x47, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x12,
- 0x3e, 0x0a, 0x08, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x18, 0x2e, 0x68, 0x65,
- 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72,
- 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12,
- 0x38, 0x0a, 0x04, 0x50, 0x6c, 0x75, 0x73, 0x12, 0x17, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77,
- 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x50, 0x6c, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x15, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x50, 0x6c,
- 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x12, 0x53, 0x61, 0x79,
- 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x41, 0x66, 0x74, 0x65, 0x72, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12,
- 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c,
- 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c,
- 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c,
- 0x79, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x14, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x53,
- 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 0x65,
- 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72,
- 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x30,
- 0x01, 0x12, 0x4c, 0x0a, 0x14, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x43, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c,
- 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64,
- 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x28, 0x01, 0x12,
- 0x55, 0x0a, 0x1b, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x42, 0x69, 0x64, 0x69, 0x72,
- 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18,
+ 0x74, 0x22, 0x9c, 0x01, 0x0a, 0x14, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x48, 0x65,
+ 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14,
+ 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x69,
+ 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2c, 0x0a, 0x07, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72,
+ 0x6c, 0x64, 0x2e, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x07, 0x67, 0x65, 0x6e, 0x64, 0x65,
+ 0x72, 0x73, 0x12, 0x2c, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64,
+ 0x2e, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x52, 0x07, 0x70, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x73,
+ 0x22, 0x72, 0x0a, 0x12, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x48, 0x65, 0x6c, 0x6c,
+ 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2c, 0x0a, 0x07, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x07, 0x67, 0x65, 0x6e,
+ 0x64, 0x65, 0x72, 0x73, 0x2a, 0x40, 0x0a, 0x06, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x12,
+ 0x0a, 0x0e, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+ 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4c,
+ 0x45, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x46, 0x45,
+ 0x4d, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x32, 0x98, 0x04, 0x0a, 0x07, 0x47, 0x72, 0x65, 0x65, 0x74,
+ 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x08, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x18,
0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c,
0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f,
0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79,
- 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x2f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x22, 0x00, 0x12, 0x38, 0x0a, 0x04, 0x50, 0x6c, 0x75, 0x73, 0x12, 0x17, 0x2e, 0x68, 0x65, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x50, 0x6c, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64,
+ 0x2e, 0x50, 0x6c, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x12,
+ 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x41, 0x66, 0x74, 0x65, 0x72, 0x44, 0x65, 0x6c,
+ 0x61, 0x79, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e,
+ 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68,
+ 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52,
+ 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x10, 0x53, 0x61, 0x79, 0x4d, 0x75, 0x6c,
+ 0x74, 0x69, 0x70, 0x6c, 0x65, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65,
+ 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x68,
+ 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70,
+ 0x6c, 0x65, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x4c,
+ 0x0a, 0x14, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f,
+ 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65,
+ 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x14,
+ 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c,
+ 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16,
+ 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c,
+ 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x28, 0x01, 0x12, 0x55, 0x0a, 0x1b, 0x53, 0x61,
+ 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x42, 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c,
+ 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64,
+ 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x28, 0x01, 0x30,
+ 0x01, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -452,36 +612,43 @@ func file_proto_helloworld_proto_rawDescGZIP() []byte {
}
var file_proto_helloworld_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_proto_helloworld_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_proto_helloworld_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_proto_helloworld_proto_goTypes = []interface{}{
- (Gender)(0), // 0: helloworld.Gender
- (*Person)(nil), // 1: helloworld.Person
- (*HelloRequest)(nil), // 2: helloworld.HelloRequest
- (*HelloReply)(nil), // 3: helloworld.HelloReply
- (*PlusRequest)(nil), // 4: helloworld.PlusRequest
- (*PlusReply)(nil), // 5: helloworld.PlusReply
+ (Gender)(0), // 0: helloworld.Gender
+ (*Person)(nil), // 1: helloworld.Person
+ (*HelloRequest)(nil), // 2: helloworld.HelloRequest
+ (*HelloReply)(nil), // 3: helloworld.HelloReply
+ (*PlusRequest)(nil), // 4: helloworld.PlusRequest
+ (*PlusReply)(nil), // 5: helloworld.PlusReply
+ (*MultipleHelloRequest)(nil), // 6: helloworld.MultipleHelloRequest
+ (*MultipleHelloReply)(nil), // 7: helloworld.MultipleHelloReply
}
var file_proto_helloworld_proto_depIdxs = []int32{
- 0, // 0: helloworld.HelloRequest.gender:type_name -> helloworld.Gender
- 1, // 1: helloworld.HelloRequest.person:type_name -> helloworld.Person
- 0, // 2: helloworld.HelloReply.gender:type_name -> helloworld.Gender
- 2, // 3: helloworld.Greeter.SayHello:input_type -> helloworld.HelloRequest
- 4, // 4: helloworld.Greeter.Plus:input_type -> helloworld.PlusRequest
- 2, // 5: helloworld.Greeter.SayHelloAfterDelay:input_type -> helloworld.HelloRequest
- 2, // 6: helloworld.Greeter.SayHelloServerStream:input_type -> helloworld.HelloRequest
- 2, // 7: helloworld.Greeter.SayHelloClientStream:input_type -> helloworld.HelloRequest
- 2, // 8: helloworld.Greeter.SayHelloBidirectionalStream:input_type -> helloworld.HelloRequest
- 3, // 9: helloworld.Greeter.SayHello:output_type -> helloworld.HelloReply
- 5, // 10: helloworld.Greeter.Plus:output_type -> helloworld.PlusReply
- 3, // 11: helloworld.Greeter.SayHelloAfterDelay:output_type -> helloworld.HelloReply
- 3, // 12: helloworld.Greeter.SayHelloServerStream:output_type -> helloworld.HelloReply
- 3, // 13: helloworld.Greeter.SayHelloClientStream:output_type -> helloworld.HelloReply
- 3, // 14: helloworld.Greeter.SayHelloBidirectionalStream:output_type -> helloworld.HelloReply
- 9, // [9:15] is the sub-list for method output_type
- 3, // [3:9] is the sub-list for method input_type
- 3, // [3:3] is the sub-list for extension type_name
- 3, // [3:3] is the sub-list for extension extendee
- 0, // [0:3] is the sub-list for field type_name
+ 0, // 0: helloworld.HelloRequest.gender:type_name -> helloworld.Gender
+ 1, // 1: helloworld.HelloRequest.person:type_name -> helloworld.Person
+ 0, // 2: helloworld.HelloReply.gender:type_name -> helloworld.Gender
+ 0, // 3: helloworld.MultipleHelloRequest.genders:type_name -> helloworld.Gender
+ 1, // 4: helloworld.MultipleHelloRequest.persons:type_name -> helloworld.Person
+ 0, // 5: helloworld.MultipleHelloReply.genders:type_name -> helloworld.Gender
+ 2, // 6: helloworld.Greeter.SayHello:input_type -> helloworld.HelloRequest
+ 4, // 7: helloworld.Greeter.Plus:input_type -> helloworld.PlusRequest
+ 2, // 8: helloworld.Greeter.SayHelloAfterDelay:input_type -> helloworld.HelloRequest
+ 6, // 9: helloworld.Greeter.SayMultipleHello:input_type -> helloworld.MultipleHelloRequest
+ 2, // 10: helloworld.Greeter.SayHelloServerStream:input_type -> helloworld.HelloRequest
+ 2, // 11: helloworld.Greeter.SayHelloClientStream:input_type -> helloworld.HelloRequest
+ 2, // 12: helloworld.Greeter.SayHelloBidirectionalStream:input_type -> helloworld.HelloRequest
+ 3, // 13: helloworld.Greeter.SayHello:output_type -> helloworld.HelloReply
+ 5, // 14: helloworld.Greeter.Plus:output_type -> helloworld.PlusReply
+ 3, // 15: helloworld.Greeter.SayHelloAfterDelay:output_type -> helloworld.HelloReply
+ 7, // 16: helloworld.Greeter.SayMultipleHello:output_type -> helloworld.MultipleHelloReply
+ 3, // 17: helloworld.Greeter.SayHelloServerStream:output_type -> helloworld.HelloReply
+ 3, // 18: helloworld.Greeter.SayHelloClientStream:output_type -> helloworld.HelloReply
+ 3, // 19: helloworld.Greeter.SayHelloBidirectionalStream:output_type -> helloworld.HelloReply
+ 13, // [13:20] is the sub-list for method output_type
+ 6, // [6:13] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
}
func init() { file_proto_helloworld_proto_init() }
@@ -550,6 +717,30 @@ func file_proto_helloworld_proto_init() {
return nil
}
}
+ file_proto_helloworld_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MultipleHelloRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_proto_helloworld_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MultipleHelloReply); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -557,7 +748,7 @@ func file_proto_helloworld_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_helloworld_proto_rawDesc,
NumEnums: 1,
- NumMessages: 5,
+ NumMessages: 7,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/t/grpc_server_example/proto/helloworld.proto b/t/grpc_server_example/proto/helloworld.proto
index 2e18a467c822..db056fadec25 100644
--- a/t/grpc_server_example/proto/helloworld.proto
+++ b/t/grpc_server_example/proto/helloworld.proto
@@ -25,6 +25,7 @@ service Greeter {
rpc SayHello (HelloRequest) returns (HelloReply) {}
rpc Plus (PlusRequest) returns (PlusReply) {}
rpc SayHelloAfterDelay (HelloRequest) returns (HelloReply) {}
+ rpc SayMultipleHello(MultipleHelloRequest) returns (MultipleHelloReply) {}
// Server side streaming.
rpc SayHelloServerStream (HelloRequest) returns (stream HelloReply) {}
@@ -34,6 +35,7 @@ service Greeter {
// Bidirectional streaming.
rpc SayHelloBidirectionalStream (stream HelloRequest) returns (stream HelloReply) {}
+
}
enum Gender {
@@ -68,3 +70,16 @@ message PlusRequest {
message PlusReply {
int64 result = 1;
}
+
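+// MultipleHelloRequest exercises repeated scalar, enum and message fields,
+// which the grpc-transcode tests use to cover transcoding of repeated types.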
+message MultipleHelloRequest {
+ string name = 1;
+ repeated string items = 2;
+ repeated Gender genders = 3;
+ repeated Person persons = 4;
+}
+
+message MultipleHelloReply {
+ string message = 1;
+ repeated string items = 2;
+ repeated Gender genders = 3;
+}
diff --git a/t/grpc_server_example/proto/helloworld_grpc.pb.go b/t/grpc_server_example/proto/helloworld_grpc.pb.go
index 7d6d8ef8b7df..c0527d7542f8 100644
--- a/t/grpc_server_example/proto/helloworld_grpc.pb.go
+++ b/t/grpc_server_example/proto/helloworld_grpc.pb.go
@@ -1,4 +1,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.12.4
+// source: proto/helloworld.proto
package proto
@@ -22,6 +26,7 @@ type GreeterClient interface {
SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error)
Plus(ctx context.Context, in *PlusRequest, opts ...grpc.CallOption) (*PlusReply, error)
SayHelloAfterDelay(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error)
+ SayMultipleHello(ctx context.Context, in *MultipleHelloRequest, opts ...grpc.CallOption) (*MultipleHelloReply, error)
// Server side streaming.
SayHelloServerStream(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (Greeter_SayHelloServerStreamClient, error)
// Client side streaming.
@@ -65,6 +70,15 @@ func (c *greeterClient) SayHelloAfterDelay(ctx context.Context, in *HelloRequest
return out, nil
}
+func (c *greeterClient) SayMultipleHello(ctx context.Context, in *MultipleHelloRequest, opts ...grpc.CallOption) (*MultipleHelloReply, error) {
+ out := new(MultipleHelloReply)
+ err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayMultipleHello", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *greeterClient) SayHelloServerStream(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (Greeter_SayHelloServerStreamClient, error) {
stream, err := c.cc.NewStream(ctx, &Greeter_ServiceDesc.Streams[0], "/helloworld.Greeter/SayHelloServerStream", opts...)
if err != nil {
@@ -170,6 +184,7 @@ type GreeterServer interface {
SayHello(context.Context, *HelloRequest) (*HelloReply, error)
Plus(context.Context, *PlusRequest) (*PlusReply, error)
SayHelloAfterDelay(context.Context, *HelloRequest) (*HelloReply, error)
+ SayMultipleHello(context.Context, *MultipleHelloRequest) (*MultipleHelloReply, error)
// Server side streaming.
SayHelloServerStream(*HelloRequest, Greeter_SayHelloServerStreamServer) error
// Client side streaming.
@@ -192,6 +207,9 @@ func (UnimplementedGreeterServer) Plus(context.Context, *PlusRequest) (*PlusRepl
func (UnimplementedGreeterServer) SayHelloAfterDelay(context.Context, *HelloRequest) (*HelloReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method SayHelloAfterDelay not implemented")
}
+func (UnimplementedGreeterServer) SayMultipleHello(context.Context, *MultipleHelloRequest) (*MultipleHelloReply, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SayMultipleHello not implemented")
+}
func (UnimplementedGreeterServer) SayHelloServerStream(*HelloRequest, Greeter_SayHelloServerStreamServer) error {
return status.Errorf(codes.Unimplemented, "method SayHelloServerStream not implemented")
}
@@ -268,6 +286,24 @@ func _Greeter_SayHelloAfterDelay_Handler(srv interface{}, ctx context.Context, d
return interceptor(ctx, in, info, handler)
}
+func _Greeter_SayMultipleHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MultipleHelloRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GreeterServer).SayMultipleHello(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/helloworld.Greeter/SayMultipleHello",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GreeterServer).SayMultipleHello(ctx, req.(*MultipleHelloRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _Greeter_SayHelloServerStream_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(HelloRequest)
if err := stream.RecvMsg(m); err != nil {
@@ -360,6 +396,10 @@ var Greeter_ServiceDesc = grpc.ServiceDesc{
MethodName: "SayHelloAfterDelay",
Handler: _Greeter_SayHelloAfterDelay_Handler,
},
+ {
+ MethodName: "SayMultipleHello",
+ Handler: _Greeter_SayMultipleHello_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
diff --git a/t/grpc_server_example/proto/import.pb.go b/t/grpc_server_example/proto/import.pb.go
index 28fabf3f3726..a5575fdbd396 100644
--- a/t/grpc_server_example/proto/import.pb.go
+++ b/t/grpc_server_example/proto/import.pb.go
@@ -1,7 +1,24 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.27.1
-// protoc v3.6.1
+// protoc-gen-go v1.25.0-devel
+// protoc v3.12.4
// source: proto/import.proto
package proto
diff --git a/t/grpc_server_example/proto/src.pb.go b/t/grpc_server_example/proto/src.pb.go
index 8e6a32ae379b..74fa884d122e 100644
--- a/t/grpc_server_example/proto/src.pb.go
+++ b/t/grpc_server_example/proto/src.pb.go
@@ -1,7 +1,24 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.27.1
-// protoc v3.6.1
+// protoc-gen-go v1.25.0-devel
+// protoc v3.12.4
// source: proto/src.proto
package proto
diff --git a/t/grpc_server_example/proto/src_grpc.pb.go b/t/grpc_server_example/proto/src_grpc.pb.go
index 01fe1502d489..d4015ed99142 100644
--- a/t/grpc_server_example/proto/src_grpc.pb.go
+++ b/t/grpc_server_example/proto/src_grpc.pb.go
@@ -1,4 +1,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.12.4
+// source: proto/src.proto
package proto
diff --git a/t/node/upstream-mtls.t b/t/node/upstream-mtls.t
index 7af6d2e61785..c909dbc9a64f 100644
--- a/t/node/upstream-mtls.t
+++ b/t/node/upstream-mtls.t
@@ -77,7 +77,7 @@ __DATA__
GET /t
--- error_code: 400
--- response_body
-{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"tls\" validation failed: property \"client_key\" is required when \"client_cert\" is set"}
+{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"tls\" validation failed: failed to validate dependent schema for \"client_cert\": property \"client_key\" is required"}
@@ -545,3 +545,145 @@ GET /t
GET /hello_chunked
--- response_body
hello world
+
+
+
+=== TEST 13: get cert by tls.client_cert_id
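+# store the client certificate as an ssl object of type "client" and reference
+# it via upstream.tls.client_cert_id instead of inlining client_cert/client_key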
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin")
+ local json = require("toolkit.json")
+
+ local ssl_cert = t.read_file("t/certs/mtls_client.crt")
+ local ssl_key = t.read_file("t/certs/mtls_client.key")
+ local data = {
+ type = "client",
+ cert = ssl_cert,
+ key = ssl_key
+ }
+ local code, body = t.test('/apisix/admin/ssl/1',
+ ngx.HTTP_PUT,
+ json.encode(data)
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ return
+ end
+
+ local data = {
+ upstream = {
+ scheme = "https",
+ type = "roundrobin",
+ nodes = {
+ ["127.0.0.1:1983"] = 1,
+ },
+ tls = {
+ client_cert_id = 1
+ }
+ },
+ uri = "/hello"
+ }
+ local code, body = t.test('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ json.encode(data)
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ return
+ end
+ }
+ }
+--- request
+GET /t
+
+
+
+=== TEST 14: hit
+--- upstream_server_config
+ ssl_client_certificate ../../certs/mtls_ca.crt;
+ ssl_verify_client on;
+--- request
+GET /hello
+--- response_body
+hello world
+
+
+
+=== TEST 15: change ssl object type
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin")
+ local json = require("toolkit.json")
+
+ local ssl_cert = t.read_file("t/certs/mtls_client.crt")
+ local ssl_key = t.read_file("t/certs/mtls_client.key")
+ local data = {
+ type = "server",
+ sni = "test.com",
+ cert = ssl_cert,
+ key = ssl_key
+ }
+ local code, body = t.test('/apisix/admin/ssl/1',
+ ngx.HTTP_PUT,
+ json.encode(data)
+ )
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ return
+ end
+ }
+ }
+--- request
+GET /t
+
+
+
+=== TEST 16: hit, ssl object type mismatch
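+# the ssl object was changed to type "server" above, so looking it up as an
+# upstream client certificate fails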
+--- upstream_server_config
+ ssl_client_certificate ../../certs/mtls_ca.crt;
+ ssl_verify_client on;
+--- request
+GET /hello
+--- error_code: 502
+--- error_log
+failed to get ssl cert: ssl type should be 'client'
+
+
+
+=== TEST 17: delete ssl object
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin")
+ local json = require("toolkit.json")
+
+ local code, body = t.test('/apisix/admin/ssl/1', ngx.HTTP_DELETE)
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ return
+ end
+ }
+ }
+--- request
+GET /t
+
+
+
+=== TEST 18: hit, ssl object does not exist
+--- upstream_server_config
+ ssl_client_certificate ../../certs/mtls_ca.crt;
+ ssl_verify_client on;
+--- request
+GET /hello
+--- error_code: 502
+--- error_log
+failed to get ssl cert: ssl id [1] not exits
diff --git a/t/node/upstream.t b/t/node/upstream.t
index 704a0259da2d..70da36145b9b 100644
--- a/t/node/upstream.t
+++ b/t/node/upstream.t
@@ -624,3 +624,73 @@ passed
GET /uri
--- error_log
Host: 127.0.0.1:1979
+
+
+
+=== TEST 25: distinguish different upstreams even if they have the same addr
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/upstreams/1',
+ ngx.HTTP_PUT,
+ {
+ nodes = {["localhost:1980"] = 1},
+ type = "roundrobin"
+ }
+ )
+ assert(code < 300)
+
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream_id": "1",
+ "uri": "/server_port"
+ }]]
+ )
+ assert(code < 300)
+
+ local http = require "resty.http"
+ local uri = "http://127.0.0.1:" .. ngx.var.server_port
+ .. "/server_port"
+
+ local ports_count = {}
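+            -- send 24 requests, rotating the upstream node between ports
+            -- 1980/1981/1982 after each one, so each port should be hit 8 times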
+ for i = 1, 24 do
+ local httpc = http.new()
+ local res, err = httpc:request_uri(uri)
+ if not res then
+ ngx.say(err)
+ return
+ end
+ ports_count[res.body] = (ports_count[res.body] or 0) + 1
+
+ local code, body = t('/apisix/admin/upstreams/1',
+ ngx.HTTP_PUT,
+ {
+ nodes = {["localhost:" .. (1980 + i % 3)] = 1},
+ type = "roundrobin"
+ }
+ )
+ assert(code < 300)
+ end
+
+ local ports_arr = {}
+ for port, count in pairs(ports_count) do
+ table.insert(ports_arr, {port = port, count = count})
+ end
+
+            local function cmp(a, b)
+                return a.port > b.port
+            end
+            table.sort(ports_arr, cmp)
+
+ ngx.say(require("toolkit.json").encode(ports_arr))
+ }
+ }
+--- request
+GET /t
+--- timeout: 5
+--- response_body
+[{"count":8,"port":"1982"},{"count":8,"port":"1981"},{"count":8,"port":"1980"}]
+--- no_error_log
+[error]
diff --git a/t/plugin/api-breaker.t b/t/plugin/api-breaker.t
index e1eccfb2b6b1..c63d87dba072 100644
--- a/t/plugin/api-breaker.t
+++ b/t/plugin/api-breaker.t
@@ -655,3 +655,36 @@ phase_func(): breaker_time: 10
--- response_body
{"500":4,"502":16}
--- timeout: 25
+
+
+
+=== TEST 20: reject invalid schema
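+# break_response_headers must be an array of {"key": ..., "value": ...} objects,
+# so a plain map entry should be rejected by the schema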
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ for _, case in ipairs({
+ {input = {
+ break_response_code = 200,
+ break_response_headers = {{["content-type"] = "application/json"}}
+ }},
+ }) do
+ local code, body = t('/apisix/admin/global_rules/1',
+ ngx.HTTP_PUT,
+ {
+ id = "1",
+ plugins = {
+ ["api-breaker"] = case.input
+ }
+ }
+ )
+ ngx.print(require("toolkit.json").decode(body).error_msg)
+ end
+ }
+ }
+--- request
+GET /t
+--- response_body eval
+qr/failed to check the configuration of plugin api-breaker err: property \"break_response_headers\" validation failed: failed to validate item 1: property \"(key|value)\" is required/
+--- no_error_log
+[error]
diff --git a/t/plugin/custom_sort_plugins.t b/t/plugin/custom_sort_plugins.t
new file mode 100644
index 000000000000..41a23b9adbc8
--- /dev/null
+++ b/t/plugin/custom_sort_plugins.t
@@ -0,0 +1,633 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX 'no_plan';
+
+add_block_preprocessor(sub {
+ my ($block) = @_;
+
+ if (!$block->request) {
+ $block->set_value("request", "GET /t");
+ }
+
+ if (!$block->error_log && !$block->no_error_log) {
+ $block->set_value("no_error_log", "[error]\n[alert]");
+ }
+});
+
+no_long_string();
+no_root_location();
+log_level("info");
+run_tests;
+
+__DATA__
+
+=== TEST 1: custom priority and default priority on different routes
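+# route 1 overrides the order via _meta.priority: serverless-post-function (10000)
+# should run before serverless-pre-function (-2000); route 2 keeps the defaults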
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "serverless-post-function": {
+ "_meta": {
+ "priority": 10000
+ },
+ "phase": "rewrite",
+ "functions" : ["return function(conf, ctx)
+ ngx.say(\"serverless-post-function\");
+ end"]
+ },
+ "serverless-pre-function": {
+ "_meta": {
+ "priority": -2000
+ },
+ "phase": "rewrite",
+ "functions": ["return function(conf, ctx)
+ ngx.say(\"serverless-pre-function\");
+ end"]
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+
+ local code, body = t('/apisix/admin/routes/2',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "serverless-post-function": {
+ "phase": "rewrite",
+ "functions" : ["return function(conf, ctx)
+ ngx.say(\"serverless-post-function\");
+ end"]
+ },
+ "serverless-pre-function": {
+ "phase": "rewrite",
+ "functions": ["return function(conf, ctx)
+ ngx.say(\"serverless-pre-function\");
+ end"]
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello1"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 2: verify order
+--- request
+GET /hello
+--- response_body
+serverless-post-function
+serverless-pre-function
+
+
+
+=== TEST 3: route without custom plugin order is not affected
+--- request
+GET /hello1
+--- response_body
+serverless-pre-function
+serverless-post-function
+
+
+
+=== TEST 4: custom priority and default priority on same route
+# serverless-post-function keeps its default priority (-2000), which is higher than -2001, so it executes first
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "serverless-post-function": {
+ "phase": "rewrite",
+ "functions" : ["return function(conf, ctx)
+ ngx.say(\"serverless-post-function\");
+ end"]
+ },
+ "serverless-pre-function": {
+ "_meta": {
+ "priority": -2001
+ },
+ "phase": "rewrite",
+ "functions": ["return function(conf, ctx)
+ ngx.say(\"serverless-pre-function\");
+ end"]
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 5: verify order
+--- request
+GET /hello
+--- response_body
+serverless-post-function
+serverless-pre-function
+
+
+
+=== TEST 6: merge plugins from consumer and route, execute the rewrite phase
+# in the rewrite phase, the plugins on the route must be executed first,
+# and only then is the rewrite phase of the plugins on the consumer executed,
+# so the custom plugin order does not take effect in this case.
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/consumers',
+ ngx.HTTP_PUT,
+ [[{
+ "username": "jack",
+ "plugins": {
+ "key-auth": {
+ "key": "auth-one"
+ },
+ "serverless-post-function": {
+ "_meta": {
+ "priority": 10000
+ },
+ "phase": "rewrite",
+ "functions" : ["return function(conf, ctx)
+ ngx.say(\"serverless-post-function\");
+ end"]
+ }
+ }
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "key-auth": {},
+ "serverless-pre-function": {
+ "_meta": {
+ "priority": -2000
+ },
+ "phase": "rewrite",
+ "functions": ["return function(conf, ctx)
+ ngx.say(\"serverless-pre-function\");
+ end"]
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 7: verify order (more requests)
+--- config
+ location /t {
+ content_by_lua_block {
+ local http = require "resty.http"
+ local uri = "http://127.0.0.1:" .. ngx.var.server_port
+ .. "/hello"
+ local httpc = http.new()
+ local headers = {}
+ headers["apikey"] = "auth-one"
+ local res, err = httpc:request_uri(uri, {method = "GET", headers = headers})
+ if not res then
+ ngx.say(err)
+ return
+ end
+ ngx.print(res.body)
+
+ local res, err = httpc:request_uri(uri, {method = "GET", headers = headers})
+ if not res then
+ ngx.say(err)
+ return
+ end
+ ngx.print(res.body)
+ }
+ }
+--- response_body
+serverless-pre-function
+serverless-post-function
+serverless-pre-function
+serverless-post-function
+
+
+
+=== TEST 8: merge plugins from consumer and route, execute the access phase
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/consumers',
+ ngx.HTTP_PUT,
+ [[{
+ "username": "jack",
+ "plugins": {
+ "key-auth": {
+ "key": "auth-one"
+ },
+ "serverless-post-function": {
+ "_meta": {
+ "priority": 10000
+ },
+ "phase": "access",
+ "functions" : ["return function(conf, ctx)
+ ngx.say(\"serverless-post-function\");
+ end"]
+ }
+ }
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "key-auth": {},
+ "serverless-pre-function": {
+ "_meta": {
+ "priority": -2000
+ },
+ "phase": "access",
+ "functions": ["return function(conf, ctx)
+ ngx.say(\"serverless-pre-function\");
+ end"]
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 9: verify order
+--- request
+GET /hello
+--- more_headers
+apikey: auth-one
+--- response_body
+serverless-post-function
+serverless-pre-function
+
+
+
+=== TEST 10: merge plugins from service and route
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "serverless-post-function": {
+ "_meta": {
+ "priority": 10000
+ },
+ "phase": "rewrite",
+ "functions" : ["return function(conf, ctx)
+ ngx.say(\"serverless-post-function\");
+ end"]
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ }
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "serverless-pre-function": {
+ "_meta": {
+ "priority": -2000
+ },
+ "phase": "rewrite",
+ "functions": ["return function(conf, ctx)
+ ngx.say(\"serverless-pre-function\");
+ end"]
+ }
+ },
+ "service_id": "1",
+ "uri": "/hello"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 11: verify order
+--- request
+GET /hello
+--- response_body
+serverless-post-function
+serverless-pre-function
+
+
+
+=== TEST 12: custom plugin order is not affected by plugin reload
+--- config
+ location /t {
+ content_by_lua_block {
+ local http = require "resty.http"
+ local uri = "http://127.0.0.1:" .. ngx.var.server_port
+ .. "/hello"
+ local httpc = http.new()
+ local res, err = httpc:request_uri(uri)
+ if not res then
+ ngx.say(err)
+ return
+ end
+ ngx.print(res.body)
+
+ local t = require("lib.test_admin").test
+ local code, _, org_body = t('/apisix/admin/plugins/reload',
+ ngx.HTTP_PUT)
+
+ ngx.say(org_body)
+
+ ngx.sleep(0.2)
+
+ local res, err = httpc:request_uri(uri)
+ if not res then
+ ngx.say(err)
+ return
+ end
+ ngx.print(res.body)
+ }
+ }
+--- response_body
+serverless-post-function
+serverless-pre-function
+done
+serverless-post-function
+serverless-pre-function
+
+
+
+=== TEST 13: merge plugins from plugin_configs and route
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_configs/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "serverless-post-function": {
+ "_meta": {
+ "priority": 10000
+ },
+ "phase": "rewrite",
+ "functions" : ["return function(conf, ctx)
+ ngx.say(\"serverless-post-function\");
+ end"]
+ }
+ }
+ }]]
+ )
+            if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "serverless-pre-function": {
+ "_meta": {
+ "priority": -2000
+ },
+ "phase": "rewrite",
+ "functions": ["return function(conf, ctx)
+ ngx.say(\"serverless-pre-function\");
+ end"]
+ }
+ },
+ "plugin_config_id": 1,
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 14: verify order
+--- request
+GET /hello
+--- response_body
+serverless-post-function
+serverless-pre-function
+
+
+
+=== TEST 15: custom plugin order on global_rule
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/global_rules/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "serverless-post-function": {
+ "_meta": {
+ "priority": 10000
+ },
+ "phase": "rewrite",
+ "functions" : ["return function(conf, ctx)
+ ngx.say(\"serverless-post-function on global rule\");
+ end"]
+ },
+ "serverless-pre-function": {
+ "_meta": {
+ "priority": -2000
+ },
+ "phase": "rewrite",
+ "functions": ["return function(conf, ctx)
+ ngx.say(\"serverless-pre-function on global rule\");
+ end"]
+ }
+ }
+ }]]
+ )
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 16: verify order
+--- request
+GET /hello
+--- response_body
+serverless-post-function on global rule
+serverless-pre-function on global rule
+serverless-post-function
+serverless-pre-function
+
+
+
+=== TEST 17: delete global rule
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/global_rules/1',
+ ngx.HTTP_DELETE
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
diff --git a/t/plugin/grpc-transcode2.t b/t/plugin/grpc-transcode2.t
index e9c6c0396a7e..7c8286650f50 100644
--- a/t/plugin/grpc-transcode2.t
+++ b/t/plugin/grpc-transcode2.t
@@ -41,7 +41,6 @@ __DATA__
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, body = t('/apisix/admin/proto/1',
ngx.HTTP_PUT,
[[{
@@ -136,7 +135,6 @@ Content-Type: application/json
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, body = t('/apisix/admin/proto/2',
ngx.HTTP_PUT,
[[{
diff --git a/t/plugin/grpc-transcode3.t b/t/plugin/grpc-transcode3.t
new file mode 100644
index 000000000000..a027a84bd9bd
--- /dev/null
+++ b/t/plugin/grpc-transcode3.t
@@ -0,0 +1,124 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX 'no_plan';
+
+no_long_string();
+no_shuffle();
+no_root_location();
+
+add_block_preprocessor(sub {
+ my ($block) = @_;
+
+ if (!$block->request) {
+ $block->set_value("request", "GET /t");
+ }
+
+ if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+ $block->set_value("no_error_log", "[error]");
+ }
+});
+
+run_tests;
+
+__DATA__
+
+=== TEST 1: set rule
+--- config
+ location /t {
+ content_by_lua_block {
+ local http = require "resty.http"
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/proto/1',
+ ngx.HTTP_PUT,
+ [[{
+ "content" : "syntax = \"proto3\";
+ package helloworld;
+ service Greeter {
+ rpc SayMultipleHello(MultipleHelloRequest) returns (MultipleHelloReply) {}
+ }
+
+ enum Gender {
+ GENDER_UNKNOWN = 0;
+ GENDER_MALE = 1;
+ GENDER_FEMALE = 2;
+ }
+
+ message Person {
+ string name = 1;
+ int32 age = 2;
+ }
+
+ message MultipleHelloRequest {
+ string name = 1;
+ repeated string items = 2;
+ repeated Gender genders = 3;
+ repeated Person persons = 4;
+ }
+
+                          message MultipleHelloReply {
+ string message = 1;
+ }"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.say(body)
+ return
+ end
+
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "methods": ["POST"],
+ "uri": "/grpctest",
+ "plugins": {
+ "grpc-transcode": {
+ "proto_id": "1",
+ "service": "helloworld.Greeter",
+ "method": "SayMultipleHello"
+ }
+ },
+ "upstream": {
+ "scheme": "grpc",
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:50051": 1
+ }
+ }
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.say(body)
+ return
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 2: hit route
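+# the JSON body is transcoded into a MultipleHelloRequest (including the repeated
+# "persons" field) and the gRPC reply is transcoded back into JSON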
+--- request
+POST /grpctest
+{"name":"world","persons":[{"name":"Joe","age":1},{"name":"Jake","age":2}]}
+--- more_headers
+Content-Type: application/json
+--- response_body chomp
+{"message":"Hello world, name: Joe, age: 1, name: Jake, age: 2"}
diff --git a/t/plugin/openid-connect.t b/t/plugin/openid-connect.t
index a97898d6e8e5..22786eaea9f2 100644
--- a/t/plugin/openid-connect.t
+++ b/t/plugin/openid-connect.t
@@ -189,7 +189,8 @@ true
"set_access_token_header": true,
"access_token_in_authorization_header": false,
"set_id_token_header": true,
- "set_userinfo_header": true
+ "set_userinfo_header": true,
+ "set_refresh_token_header": true
}
},
"upstream": {
@@ -272,6 +273,7 @@ user-agent: .*
x-access-token: ey.*
x-id-token: ey.*
x-real-ip: 127.0.0.1
+x-refresh-token: ey.*
x-userinfo: ey.*
--- no_error_log
[error]
@@ -916,7 +918,7 @@ OIDC introspection failed: invalid token
--- request
GET /t
--- response_body
-{"access_token_in_authorization_header":false,"bearer_only":false,"client_id":"kbyuFDidLLm280LIwVFiazOqjO3ty8KH","client_secret":"60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa","discovery":"http://127.0.0.1:1980/.well-known/openid-configuration","introspection_endpoint_auth_method":"client_secret_basic","logout_path":"/logout","realm":"apisix","scope":"openid","set_access_token_header":true,"set_id_token_header":true,"set_userinfo_header":true,"ssl_verify":false,"timeout":3}
+{"access_token_in_authorization_header":false,"bearer_only":false,"client_id":"kbyuFDidLLm280LIwVFiazOqjO3ty8KH","client_secret":"60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa","discovery":"http://127.0.0.1:1980/.well-known/openid-configuration","introspection_endpoint_auth_method":"client_secret_basic","logout_path":"/logout","realm":"apisix","scope":"openid","set_access_token_header":true,"set_id_token_header":true,"set_refresh_token_header":false,"set_userinfo_header":true,"ssl_verify":false,"timeout":3}
--- no_error_log
[error]
diff --git a/t/plugin/proxy-rewrite2.t b/t/plugin/proxy-rewrite2.t
index 4fbfe55bab34..fcd4011bacec 100644
--- a/t/plugin/proxy-rewrite2.t
+++ b/t/plugin/proxy-rewrite2.t
@@ -208,3 +208,27 @@ X-Forwarded-Proto: grpc
X-Forwarded-Proto: https-rewrite
--- error_log
localhost
+
+
+
+=== TEST 7: pass duplicate X-Forwarded-Proto
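+# when the client sends X-Forwarded-Proto twice, only the first value should be
+# passed to the upstream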
+--- apisix_yaml
+routes:
+ -
+ id: 1
+ uri: /echo
+ upstream_id: 1
+upstreams:
+ -
+ id: 1
+ nodes:
+ "127.0.0.1:1980": 1
+ type: roundrobin
+#END
+--- request
+GET /echo
+--- more_headers
+X-Forwarded-Proto: http
+X-Forwarded-Proto: grpc
+--- response_headers
+X-Forwarded-Proto: http
diff --git a/t/plugin/response-rewrite2.t b/t/plugin/response-rewrite2.t
index 88712888a28b..48401f915308 100644
--- a/t/plugin/response-rewrite2.t
+++ b/t/plugin/response-rewrite2.t
@@ -30,11 +30,79 @@ repeat_each(1);
no_long_string();
no_shuffle();
no_root_location();
+
+add_block_preprocessor(sub {
+ my ($block) = @_;
+
+ if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+ $block->set_value("no_error_log", "[error]");
+ }
+
+ if (!defined $block->request) {
+ $block->set_value("request", "GET /t");
+ }
+});
+
run_tests;
__DATA__
-=== TEST 1: add plugin with valid filters
+=== TEST 1: sanity
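+# call check_schema directly: "body" and "filters" are mutually exclusive, and
+# each filter requires a non-empty regex, a replace value and a valid scope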
+--- config
+ location /t {
+ content_by_lua_block {
+ local test_cases = {
+ {body = "test"},
+ {filters = {
+ {
+ regex = "l",
+ replace = "m",
+ },
+ }},
+ {body = "test", filters = {
+ {
+ regex = "l",
+ replace = "m",
+ },
+ }},
+ {filters = {}},
+ {filters = {
+ {regex = "l"},
+ }},
+ {filters = {
+ {
+ regex = "",
+ replace = "m",
+ },
+ }},
+ {filters = {
+ {
+ regex = "l",
+ replace = "m",
+ scope = ""
+ },
+ }},
+ }
+ local plugin = require("apisix.plugins.response-rewrite")
+
+ for _, case in ipairs(test_cases) do
+ local ok, err = plugin.check_schema(case)
+ ngx.say(ok and "done" or err)
+ end
+ }
+ }
+--- response_body eval
+qr/done
+done
+failed to validate dependent schema for "filters|body": value wasn't supposed to match schema
+property "filters" validation failed: expect array to have at least 1 items
+property "filters" validation failed: failed to validate item 1: property "replace" is required
+property "filters" validation failed: failed to validate item 1: property "regex" validation failed: string too short, expected at least 1, got 0
+property "filters" validation failed: failed to validate item 1: property "scope" validation failed: matches none of the enum values/
+
+
+
+=== TEST 2: add plugin with valid filters
--- config
location /t {
content_by_lua_block {
@@ -56,16 +124,12 @@ __DATA__
ngx.say("done")
}
}
---- request
-GET /t
--- response_body
done
---- no_error_log
-[error]
-=== TEST 2: add plugin with invalid filter required filed
+=== TEST 3: add plugin with invalid filter required field
--- config
location /t {
content_by_lua_block {
@@ -84,16 +148,12 @@ done
end
}
}
---- request
-GET /t
--- response_body
property "filters" validation failed: failed to validate item 1: property "replace" is required
---- no_error_log
-[error]
-=== TEST 3: add plugin with invalid filter scope
+=== TEST 4: add plugin with invalid filter scope
--- config
location /t {
content_by_lua_block {
@@ -115,16 +175,12 @@ property "filters" validation failed: failed to validate item 1: property "repla
end
}
}
---- request
-GET /t
--- response_body
property "filters" validation failed: failed to validate item 1: property "scope" validation failed: matches none of the enum values
---- no_error_log
-[error]
-=== TEST 4: add plugin with invalid filter empty value
+=== TEST 5: add plugin with invalid filter empty value
--- config
location /t {
content_by_lua_block {
@@ -144,16 +200,12 @@ property "filters" validation failed: failed to validate item 1: property "scope
end
}
}
---- request
-GET /t
--- response_body
property "filters" validation failed: failed to validate item 1: property "regex" validation failed: string too short, expected at least 1, got 0
---- no_error_log
-[error]
-=== TEST 5: add plugin with invalid filter regex options
+=== TEST 6: add plugin with invalid filter regex options
--- config
location /t {
content_by_lua_block {
@@ -174,18 +226,14 @@ property "filters" validation failed: failed to validate item 1: property "regex
end
}
}
---- request
-GET /t
--- error_code eval
200
--- response_body
regex "hello" validation failed: unknown flag "h" (flags "h")
---- no_error_log
-[error]
-=== TEST 6: set route with filters and vars expr
+=== TEST 7: set route with filters and vars expr
--- config
location /t {
content_by_lua_block {
@@ -219,16 +267,12 @@ regex "hello" validation failed: unknown flag "h" (flags "h")
ngx.say(body)
}
}
---- request
-GET /t
--- response_body
passed
---- no_error_log
-[error]
-=== TEST 7: check http body that matches filters
+=== TEST 8: check http body that matches filters
--- request
GET /hello
--- response_body
@@ -236,7 +280,7 @@ test world
-=== TEST 8: filter substitute global
+=== TEST 9: filter substitute global
--- config
location /t {
content_by_lua_block {
@@ -271,16 +315,12 @@ test world
ngx.say(body)
}
}
---- request
-GET /t
--- response_body
passed
---- no_error_log
-[error]
-=== TEST 9: check http body that substitute global
+=== TEST 10: check http body that substitute global
--- request
GET /hello
--- response_body
@@ -288,7 +328,7 @@ hetto wortd
-=== TEST 10: filter replace with empty
+=== TEST 11: filter replace with empty
--- config
location /t {
content_by_lua_block {
@@ -322,16 +362,12 @@ hetto wortd
ngx.say(body)
}
}
---- request
-GET /t
--- response_body
passed
---- no_error_log
-[error]
-=== TEST 11: check http body that replace with empty
+=== TEST 12: check http body that replace with empty
--- request
GET /hello
--- response_body
@@ -339,7 +375,7 @@ GET /hello
-=== TEST 12: filter replace with words
+=== TEST 13: filter replace with words
--- config
location /t {
content_by_lua_block {
@@ -373,16 +409,12 @@ GET /hello
ngx.say(body)
}
}
---- request
-GET /t
--- response_body
passed
---- no_error_log
-[error]
-=== TEST 13: check http body that replace with words
+=== TEST 14: check http body that replace with words
--- request
GET /hello
--- response_body
@@ -390,59 +422,7 @@ hello *
-=== TEST 14: set body and filters(body no effect)
---- config
- location /t {
- content_by_lua_block {
- local t = require("lib.test_admin").test
- local code, body = t('/apisix/admin/routes/1',
- ngx.HTTP_PUT,
- [[{
- "plugins": {
- "response-rewrite": {
- "vars": [
- ["status","==",200]
- ],
- "body": "new body",
- "filters": [
- {
- "regex": "hello",
- "replace": "HELLO"
- }
- ]
- }
- },
- "upstream": {
- "nodes": {
- "127.0.0.1:1980": 1
- },
- "type": "roundrobin"
- },
- "uris": ["/hello"]
- }]]
- )
-
- ngx.say(body)
- }
- }
---- request
-GET /t
---- response_body
-passed
---- no_error_log
-[error]
-
-
-
-=== TEST 15: check http body that set body and filters
---- request
-GET /hello
---- response_body
-HELLO world
-
-
-
-=== TEST 16: set multiple filters
+=== TEST 15: set multiple filters
--- config
location /t {
content_by_lua_block {
@@ -480,16 +460,12 @@ HELLO world
ngx.say(body)
}
}
---- request
-GET /t
--- response_body
passed
---- no_error_log
-[error]
-=== TEST 17: check http body that set multiple filters
+=== TEST 16: check http body that set multiple filters
--- request
GET /hello
--- response_body
@@ -497,7 +473,7 @@ HETLO world
-=== TEST 18: filters no any match
+=== TEST 17: no filter matches
--- config
location /t {
content_by_lua_block {
@@ -531,16 +507,12 @@ HETLO world
ngx.say(body)
}
}
---- request
-GET /t
--- response_body
passed
---- no_error_log
-[error]
-=== TEST 19: check http body that filters no any match
+=== TEST 18: check http body when no filter matches
--- request
GET /hello
--- response_body
diff --git a/t/plugin/sls-logger.t b/t/plugin/sls-logger.t
index 11db664cd22b..1c36383fb3b1 100644
--- a/t/plugin/sls-logger.t
+++ b/t/plugin/sls-logger.t
@@ -198,3 +198,49 @@ hello world
--- response_body
passed
--- timeout: 5
+
+
+
+=== TEST 8: add log format
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/plugin_metadata/sls-logger',
+ ngx.HTTP_PUT,
+ [[{
+ "log_format": {
+ "host": "$host",
+ "client_ip": "$remote_addr"
+ }
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 9: access
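+# hook rfc5424.encode to assert that the entry built with the custom log_format
+# carries the expected host and client_ip fields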
+--- extra_init_by_lua
+ local json = require("toolkit.json")
+ local rfc5424 = require("apisix.plugins.slslog.rfc5424")
+ local old_f = rfc5424.encode
+ rfc5424.encode = function(facility, severity, hostname, appname, pid, project,
+ logstore, access_key_id, access_key_secret, msg)
+ local r = json.decode(msg)
+ assert(r.client_ip == "127.0.0.1", r.client_ip)
+ assert(r.host == "localhost", r.host)
+ return old_f(facility, severity, hostname, appname, pid, project,
+ logstore, access_key_id, access_key_secret, msg)
+ end
+--- request
+GET /hello
+--- response_body
+hello world
diff --git a/t/plugin/traffic-split5.t b/t/plugin/traffic-split5.t
index 5e2b80ac363e..1de76cea5d42 100644
--- a/t/plugin/traffic-split5.t
+++ b/t/plugin/traffic-split5.t
@@ -405,3 +405,62 @@ passed
}
--- response_body
1970, 1970, 1971, 1972
+
+
+
+=== TEST 7: set up traffic-split rule
+--- config
+ location /t {
+ content_by_lua_block {
+ local json = require("toolkit.json")
+ local t = require("lib.test_admin").test
+ local data = {
+ uri = "/server_port",
+ plugins = {
+ ["traffic-split"] = {
+ rules = { {
+ match = { {
+ vars = { { "arg_name", "==", "jack" } }
+ } },
+ weighted_upstreams = { {
+ upstream = {
+ type = "roundrobin",
+ nodes = {
+ ["127.0.0.1:1979"] = 1
+ },
+ },
+ } }
+ } }
+ }
+ },
+ upstream = {
+ type = "roundrobin",
+ nodes = {
+ ["127.0.0.1:1980"] = 1
+ }
+ }
+ }
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ json.encode(data)
+ )
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 8: hit and check default timeout
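+# the weighted upstream sets no timeout, so the connect timer should fall back
+# to nginx's proxy_connect_timeout (12345s == 12345000 ms in the timer log)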
+--- http_config
+proxy_connect_timeout 12345s;
+--- request
+GET /server_port?name=jack
+--- log_level: debug
+--- error_log eval
+qr/event timer add: \d+: 12345000:\d+/
+--- error_code: 502
diff --git a/t/xrpc/pingpong2.t b/t/xrpc/pingpong2.t
index cdcf367c2f09..fc77fa1482df 100644
--- a/t/xrpc/pingpong2.t
+++ b/t/xrpc/pingpong2.t
@@ -110,7 +110,6 @@ __DATA__
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, body = t('/apisix/admin/stream_routes/1',
ngx.HTTP_PUT,
{
diff --git a/t/xrpc/redis.t b/t/xrpc/redis.t
index 2d7c276c3eba..afb2f40e67ee 100644
--- a/t/xrpc/redis.t
+++ b/t/xrpc/redis.t
@@ -60,7 +60,6 @@ __DATA__
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, body = t('/apisix/admin/stream_routes/1',
ngx.HTTP_PUT,
{
@@ -265,7 +264,6 @@ hget animals: bark
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, body = t('/apisix/admin/stream_routes/1',
ngx.HTTP_PUT,
{
@@ -365,7 +363,6 @@ ok
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, body = t('/apisix/admin/stream_routes/1',
ngx.HTTP_PUT,
{
@@ -464,7 +461,6 @@ ok
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, body = t('/apisix/admin/stream_routes/1',
ngx.HTTP_PUT,
{
diff --git a/t/xrpc/redis2.t b/t/xrpc/redis2.t
index aad5c2fb8b05..65ca9829c616 100644
--- a/t/xrpc/redis2.t
+++ b/t/xrpc/redis2.t
@@ -103,7 +103,6 @@ passed
location /t {
content_by_lua_block {
local t = require("lib.test_admin").test
- local etcd = require("apisix.core.etcd")
local code, body = t('/apisix/admin/stream_routes/1',
ngx.HTTP_PUT,
{
diff --git a/utils/linux-install-etcd-client.sh b/utils/linux-install-etcd-client.sh
index ea323aea41f2..f760b6f1777f 100755
--- a/utils/linux-install-etcd-client.sh
+++ b/utils/linux-install-etcd-client.sh
@@ -18,14 +18,14 @@
#
ETCD_ARCH="amd64"
-ETCD_VERSION=${ETCD_VERSION:-'3.4.18'}
+ETCD_VERSION=${ETCD_VERSION:-'3.5.4'}
ARCH=${ARCH:-`(uname -m | tr '[:upper:]' '[:lower:]')`}
if [[ $ARCH == "arm64" ]] || [[ $ARCH == "aarch64" ]]; then
ETCD_ARCH="arm64"
fi
-wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v3.4.18-linux-${ETCD_ARCH}.tar.gz
+wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH}.tar.gz
tar xf etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH}.tar.gz
sudo cp etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH}/etcdctl /usr/local/bin/
rm -rf etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH}