From 297d1edf9a719ea356f6c9d89b42650b20baa957 Mon Sep 17 00:00:00 2001 From: itboon Date: Mon, 28 Aug 2023 16:09:26 +0800 Subject: [PATCH 01/14] update docs --- docs/compose.md | 6 +++--- docs/docker.md | 4 ++-- docs/env.md | 2 +- docs/index.md | 4 ++-- mkdocs.yml | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/compose.md b/docs/compose.md index b2cca4b..1c16516 100644 --- a/docs/compose.md +++ b/docs/compose.md @@ -41,11 +41,11 @@ services: ``` -传入 `KAFKA_BROKER_EXTERNAL_HOST` 变量后,启动脚本会修改相关网络配置,例如 `listeners`、`advertised.listeners`、`listener.security.protocol.map`,需要自定义 +传入 `KAFKA_BROKER_EXTERNAL_HOST` 变量后,启动脚本会修改相关网络配置,例如 `listeners`、`advertised.listeners`、`listener.security.protocol.map`,详情请参考[环境变量和配置](./env) -## 外部网络详细配置 +## 高级网络配置 -下面这个案例跟上文的案例是等效的。 +下面这个案例列出了详细的 Kafka 网络配置,部署后的效果与上文的案例是等效的。 ``` yaml version: "3" diff --git a/docs/docker.md b/docs/docker.md index d477cbe..bb75c59 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -42,5 +42,5 @@ docker run -d --name demo-kafka-server \ ## 下一步 -- [环境变量和配置](/env) -- [Docker Compose 启动 Kafka](/compose) \ No newline at end of file +- [环境变量和配置](./env) +- [Docker Compose 启动 Kafka](./compose) \ No newline at end of file diff --git a/docs/env.md b/docs/env.md index f4ed2a2..3af8d60 100644 --- a/docs/env.md +++ b/docs/env.md @@ -30,4 +30,4 @@ Variable examples: > `log.dir` 和 `log.dirs` 已经被锁定,无法使用环境变量进行覆盖。 -配置案例请参考 [Docker Compose 启动 Kafka](/compose) \ No newline at end of file +配置案例请参考 [Docker Compose 启动 Kafka](./compose) \ No newline at end of file diff --git a/docs/index.md b/docs/index.md index c51578c..a96c55f 100644 --- a/docs/index.md +++ b/docs/index.md @@ -21,5 +21,5 @@ ## 下一步 -- [Docker 启动 Kafka](/docker) -- [Helm 部署 kafka](/helm) \ No newline at end of file +- [Docker 启动 Kafka](./docker) +- [Helm 部署 kafka](./helm) \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index ef05e1d..e8cef65 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -74,8 +74,8 @@ extra: # Page tree nav: + - Welcome: index.md - Docker: - - Welcome: index.md - Docker: docker.md - Docker Compose: compose.md - Helm: helm.md From 196e699ae321193fa1e238a7157aa43c9e343b05 Mon Sep 17 00:00:00 2001 From: itboon Date: Mon, 28 Aug 2023 16:14:31 +0800 Subject: [PATCH 02/14] update docs --- docs/compose.md | 2 +- docs/docker.md | 4 ++-- docs/env.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/compose.md b/docs/compose.md index 1c16516..2681e0d 100644 --- a/docs/compose.md +++ b/docs/compose.md @@ -41,7 +41,7 @@ services: ``` -传入 `KAFKA_BROKER_EXTERNAL_HOST` 变量后,启动脚本会修改相关网络配置,例如 `listeners`、`advertised.listeners`、`listener.security.protocol.map`,详情请参考[环境变量和配置](./env) +传入 `KAFKA_BROKER_EXTERNAL_HOST` 变量后,启动脚本会修改相关网络配置,例如 `listeners`、`advertised.listeners`、`listener.security.protocol.map`,详情请参考[环境变量和配置](../env) ## 高级网络配置 diff --git a/docs/docker.md b/docs/docker.md index bb75c59..4ea7c32 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -42,5 +42,5 @@ docker run -d --name demo-kafka-server \ ## 下一步 -- [环境变量和配置](./env) -- [Docker Compose 启动 Kafka](./compose) \ No newline at end of file +- [环境变量和配置](../env) +- [Docker Compose 启动 Kafka](../compose) \ No newline at end of file diff --git a/docs/env.md b/docs/env.md index 3af8d60..9f5a7de 100644 --- a/docs/env.md +++ b/docs/env.md @@ -30,4 +30,4 @@ Variable examples: > `log.dir` 和 `log.dirs` 已经被锁定,无法使用环境变量进行覆盖。 -配置案例请参考 [Docker Compose 启动 Kafka](./compose) \ No newline at end of file +配置案例请参考 [Docker Compose 启动 Kafka](../compose) \ No newline at end of file From 
fca57b062861ea4b15ed47fc334f3580d1fd8853 Mon Sep 17 00:00:00 2001 From: itboon Date: Mon, 28 Aug 2023 16:19:30 +0800 Subject: [PATCH 03/14] update docs --- docs/env.md | 2 +- mkdocs.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/env.md b/docs/env.md index 9f5a7de..6125b12 100644 --- a/docs/env.md +++ b/docs/env.md @@ -30,4 +30,4 @@ Variable examples: > `log.dir` 和 `log.dirs` 已经被锁定,无法使用环境变量进行覆盖。 -配置案例请参考 [Docker Compose 启动 Kafka](../compose) \ No newline at end of file +具体配置案例请参考 [Docker Compose 启动 Kafka](../compose) \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index e8cef65..091a1e1 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,4 +1,4 @@ -site_name: Install Apache Kafka with Docker or Helm +site_name: Apache Kafka 容器化 # site_url: 'https://github.com/itboon/kafka-docker' ## Repository From c95e270063b023612bfb4c8b0b6ff11eeccfda19 Mon Sep 17 00:00:00 2001 From: itboon Date: Mon, 28 Aug 2023 16:35:17 +0800 Subject: [PATCH 04/14] update docs --- mkdocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mkdocs.yml b/mkdocs.yml index 091a1e1..a919606 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,4 +1,4 @@ -site_name: Apache Kafka 容器化 +site_name: Apache Kafka 容器化部署 # site_url: 'https://github.com/itboon/kafka-docker' ## Repository From 0ef0651e14f379a33d053d3e91052767f60a6fbf Mon Sep 17 00:00:00 2001 From: itboon Date: Mon, 28 Aug 2023 17:31:12 +0800 Subject: [PATCH 05/14] update examples --- examples/k8s/script/helm-template.sh | 16 + .../k8s/script/values/demo-without-pvc.yaml | 5 + examples/k8s/script/values/one-broker.yaml | 0 examples/k8s/script/values/single.yaml | 3 + .../k8s/statefulset/demo-without-pvc/all.yaml | 473 +++++++++++++++++ examples/k8s/statefulset/one-broker/all.yaml | 0 examples/k8s/statefulset/single/all.yaml | 480 ++++++++++++++++++ examples/values-combined.yml | 4 +- examples/values-production.yml | 4 +- 9 files changed, 981 insertions(+), 4 deletions(-) create mode 100755 examples/k8s/script/helm-template.sh create mode 100644 examples/k8s/script/values/demo-without-pvc.yaml create mode 100644 examples/k8s/script/values/one-broker.yaml create mode 100644 examples/k8s/script/values/single.yaml create mode 100644 examples/k8s/statefulset/demo-without-pvc/all.yaml create mode 100644 examples/k8s/statefulset/one-broker/all.yaml create mode 100644 examples/k8s/statefulset/single/all.yaml diff --git a/examples/k8s/script/helm-template.sh b/examples/k8s/script/helm-template.sh new file mode 100755 index 0000000..d56de9b --- /dev/null +++ b/examples/k8s/script/helm-template.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +export template_home="../statefulset" + +helm_template_output() { + local instance="$1" + local dir="$template_home/$instance" + mkdir -pv "$dir" + helm template kafka \ + --namespace kafka-demo \ + -f "values/${instance}.yaml" \ + ../../../charts/kafka/ > "$dir/all.yaml" +} + +helm_template_output "single" +helm_template_output "demo-without-pvc" diff --git a/examples/k8s/script/values/demo-without-pvc.yaml b/examples/k8s/script/values/demo-without-pvc.yaml new file mode 100644 index 0000000..88535ca --- /dev/null +++ b/examples/k8s/script/values/demo-without-pvc.yaml @@ -0,0 +1,5 @@ +broker: + combinedMode: + enabled: true + persistence: + enabled: false diff --git a/examples/k8s/script/values/one-broker.yaml b/examples/k8s/script/values/one-broker.yaml new file mode 100644 index 0000000..e69de29 diff --git a/examples/k8s/script/values/single.yaml b/examples/k8s/script/values/single.yaml new file mode 
100644 index 0000000..d754422 --- /dev/null +++ b/examples/k8s/script/values/single.yaml @@ -0,0 +1,3 @@ +broker: + combinedMode: + enabled: true diff --git a/examples/k8s/statefulset/demo-without-pvc/all.yaml b/examples/k8s/statefulset/demo-without-pvc/all.yaml new file mode 100644 index 0000000..7ab750d --- /dev/null +++ b/examples/k8s/statefulset/demo-without-pvc/all.yaml @@ -0,0 +1,473 @@ +--- +# Source: kafka/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm +--- +# Source: kafka/templates/broker/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-broker + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm + component: "broker_controller" +data: + entrypoint.sh: | + #!/bin/bash + + export KAFKA_CONF_FILE="/etc/kafka/server.properties" + export KAFKA_CFG_LOG_DIR="$KAFKA_HOME/data" + + if [[ -z "$KAFKA_HOME" ]]; then + export KAFKA_HOME="/opt/kafka" + export KAFKA_CFG_LOG_DIR="$KAFKA_HOME/data" + fi + if [[ ! -d "$KAFKA_HOME" ]]; then + mkdir -p "$KAFKA_HOME" + fi + + check_runtime() { + java -version + if [[ $? -ne 0 ]]; then + echo "[ERROR] Missing java" + exit "500" + fi + } + + run_as_other_user_if_needed() { + if [[ "$(id -u)" == "0" ]]; then + # If running as root, drop to specified UID and run command + exec chroot --userspec=1000:0 / "${@}" + else + # Either we are running in Openshift with random uid and are a member of the root group + # or with a custom --user + exec "${@}" + fi + } + + get_nodeid_from_suffix() { + local line="$1" + local index="${line##*-}" + if [[ "$index" =~ ^[0-9]+$ ]]; then + export KAFKA_CFG_NODE_ID="$index" + if [[ "$KAFKA_NODE_ID_OFFSET" =~ ^[0-9]+$ ]]; then + if [[ $KAFKA_NODE_ID_OFFSET -gt "0" ]]; then + export KAFKA_CFG_NODE_ID="$((index + KAFKA_NODE_ID_OFFSET))" + fi + fi + fi + } + + fix_external_advertised_listeners() { + if [[ -z "$KAFKA_EXTERNAL_SERVICE_TYPE" ]]; then + return + fi + if [[ -z "$KAFKA_EXTERNAL_ADVERTISED_LISTENERS" ]]; then + return + fi + local ext_listeners="$KAFKA_EXTERNAL_ADVERTISED_LISTENERS" + local i="${POD_NAME##*-}" + local listener=$(echo "$ext_listeners" | cut -d "," -f $((i+1)) | sed 's/ //g') + if [[ "$KAFKA_EXTERNAL_SERVICE_TYPE" == "NodePort" ]]; then + listener=$(echo "$listener" | sed -E "s%://[^:]*:%://${POD_HOST_IP}:%") + fi + if [[ "$listener" =~ ://[^:]*:[0-9]+$ ]]; then + export KAFKA_CFG_ADVERTISED_LISTENERS="${KAFKA_CFG_ADVERTISED_LISTENERS},${listener}" + echo "KAFKA_CFG_ADVERTISED_LISTENERS: $KAFKA_CFG_ADVERTISED_LISTENERS" + else + echo "[WARN] KAFKA_EXTERNAL_ADVERTISED_LISTENER invalid, value: [$listener]" + fi + } + + init_nodeid() { + if [[ "$KAFKA_NODE_ID" =~ ^[0-9]+$ ]]; then + export KAFKA_CFG_NODE_ID="$KAFKA_NODE_ID" + return + fi + if [[ "$KAFKA_NODE_ID" = hostname* ]]; then + get_nodeid_from_suffix "$HOSTNAME" + elif [[ "$KAFKA_NODE_ID" = pod* ]]; then + if [[ -n "$POD_NAME" ]]; then + get_nodeid_from_suffix "$POD_NAME" + fi + fi + if [[ -z "$KAFKA_CFG_NODE_ID" ]]; then + export KAFKA_CFG_NODE_ID="1" + fi + } + + take_file_ownership() { + if [[ "$(id -u)" == "0" ]]; then + chown -R 1000:0 "$KAFKA_HOME" + if [[ -d "$KAFKA_CFG_LOG_DIR" ]]; then + chown -R 1000:0 "$KAFKA_CFG_LOG_DIR" + fi + fi + } + + update_server_conf() { + 
local key=$1 + local value=$2 + local pattern="$(echo $key | sed 's/\./\\./')" + sed -i "/^${pattern} *=/d" "$KAFKA_CONF_FILE" + echo "${key}=${value}" >> "$KAFKA_CONF_FILE" + } + + set_kafka_cfg_default() { + if [[ -z "$KAFKA_CFG_NODE_ID" ]]; then + export KAFKA_CFG_NODE_ID="1" + fi + if [[ -z "$KAFKA_CFG_PROCESS_ROLES" ]]; then + export KAFKA_CFG_PROCESS_ROLES="broker,controller" + fi + if [[ -z "$KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP" ]]; then + export KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP="CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT" + fi + if [[ -z "$KAFKA_CFG_INTER_BROKER_LISTENER_NAME" ]]; then + export KAFKA_CFG_INTER_BROKER_LISTENER_NAME="PLAINTEXT" + fi + if [[ -z "$KAFKA_CFG_CONTROLLER_LISTENER_NAMES" ]]; then + export KAFKA_CFG_CONTROLLER_LISTENER_NAMES="CONTROLLER" + fi + if [[ -z "$KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR" ]]; then + export KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR="1" + fi + if [[ -z "$KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR" ]]; then + export KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR="1" + fi + if [[ -z "$KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR" ]]; then + export KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR="1" + fi + ## + ## KAFKA_CONTROLLER_LISTENER_PORT default value: 19091 + local ctl_port="${KAFKA_CONTROLLER_LISTENER_PORT-19091}" + ## KAFKA_BROKER_LISTENER_PORT default value: 9092 + local broker_port="${KAFKA_BROKER_LISTENER_PORT-9092}" + if [[ -z "$KAFKA_CFG_LISTENERS" ]]; then + export KAFKA_CFG_LISTENERS="CONTROLLER://:${ctl_port},PLAINTEXT://:${broker_port}" + fi + if [[ -z "$KAFKA_CFG_CONTROLLER_QUORUM_VOTERS" ]]; then + export KAFKA_CFG_CONTROLLER_QUORUM_VOTERS="${KAFKA_CFG_NODE_ID}@127.0.0.1:${ctl_port}" + fi + } + + init_server_conf() { + init_nodeid + set_kafka_cfg_default + fix_external_advertised_listeners + if [[ ! -f "$KAFKA_CONF_FILE" ]]; then + mkdir -p "$(dirname $KAFKA_CONF_FILE)" + # cat "${KAFKA_HOME}/config/kraft/server.properties" \ + # | grep -E '^[a-zA-Z]' > "$KAFKA_CONF_FILE" + touch "$KAFKA_CONF_FILE" + fi + for var in "${!KAFKA_CFG_@}"; do + # printf '%s=%s\n' "$var" "${!var}" + key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//' -e 's/_/./g' | tr 'A-Z' 'a-z')" + value="${!var}" + update_server_conf "$key" "$value" + done + } + + reset_log_dirs() { + ## protect log.dirs + sed -i "/^log.dir *=/d" "$KAFKA_CONF_FILE" + update_server_conf "log.dirs" "$KAFKA_CFG_LOG_DIR" + } + + start_server() { + check_runtime + reset_log_dirs + if [[ -n "$KAFKA_HEAP_OPTS" ]]; then + export JAVA_TOOL_OPTIONS="${JAVA_TOOL_OPTIONS} ${KAFKA_HEAP_OPTS}" + fi + if [[ ! 
-f "$KAFKA_CFG_LOG_DIR/meta.properties" ]]; then + echo ">>> Format Log Directories <<<" + if [[ -z "$KAFKA_CLUSTER_ID" ]]; then + echo "Generate a Cluster UUID" + export KAFKA_CLUSTER_ID="$(${KAFKA_HOME}/bin/kafka-storage.sh random-uuid)" + fi + cat "$KAFKA_CONF_FILE" + if [[ "$(id -u)" == "0" ]]; then + chroot --userspec=1000:0 / ${KAFKA_HOME}/bin/kafka-storage.sh format \ + -t $KAFKA_CLUSTER_ID -c "$KAFKA_CONF_FILE" + else + ${KAFKA_HOME}/bin/kafka-storage.sh format \ + -t $KAFKA_CLUSTER_ID -c "$KAFKA_CONF_FILE" + fi + fi + run_as_other_user_if_needed "${KAFKA_HOME}/bin/kafka-server-start.sh" "$KAFKA_CONF_FILE" + } + + init_server_conf + take_file_ownership + if [[ "$@" = "start" ]]; then + start_server + else + exec "$@" + fi +--- +# Source: kafka/templates/broker/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm + component: "broker_controller" +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: 9092 + targetPort: broker + protocol: TCP + name: broker + - port: 9090 + targetPort: controller + protocol: TCP + name: controller + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + component: "broker_controller" +--- +# Source: kafka/templates/broker/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm + component: "broker_controller" +spec: + type: "ClusterIP" + ports: + - name: broker + port: 9092 + targetPort: broker + protocol: TCP + - name: controller + port: 9090 + targetPort: controller + protocol: TCP + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + component: "broker_controller" +--- +# Source: kafka/templates/broker/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka-broker + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm + component: "broker_controller" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + component: "broker_controller" + serviceName: kafka-headless + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + component: "broker_controller" + spec: + serviceAccountName: kafka + securityContext: + null + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 5 + podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + component: "broker_controller" + topologyKey: "kubernetes.io/hostname" + containers: + - name: kafka + image: "kafkace/kafka:v3.5.1" + imagePullPolicy: "IfNotPresent" + env: + - name: POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_HEAP_OPTS + value: "-Xms1024m -Xmx1024m" + - name: KAFKA_CFG_PROCESS_ROLES + value: "broker,controller" + - name: KAFKA_CFG_LISTENERS + 
value: "BROKER://0.0.0.0:9092,EXTERNAL://0.0.0.0:9095,CONTROLLER://0.0.0.0:9090" + - name: KAFKA_CFG_ADVERTISED_LISTENERS + value: "BROKER://$(POD_IP):9092" + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + value: CONTROLLER:PLAINTEXT,BROKER:PLAINTEXT,EXTERNAL:PLAINTEXT + - name: KAFKA_CFG_INTER_BROKER_LISTENER_NAME + value: BROKER + - name: KAFKA_CFG_CONTROLLER_LISTENER_NAMES + value: CONTROLLER + - name: KAFKA_CFG_CONTROLLER_QUORUM_VOTERS + value: 0@kafka-broker-0.kafka-headless:9090 + - name: KAFKA_CLUSTER_ID + valueFrom: + secretKeyRef: + name: kafka-cluster-id + key: clusterId + - name: KAFKA_NODE_ID + value: "podnameSuffix" + #- name: KAFKA_CFG_ADVERTISED_LISTENERS + # value: "foo" + ports: + - containerPort: 9092 + name: broker + protocol: TCP + - containerPort: 9090 + name: controller + protocol: TCP + livenessProbe: + exec: + command: + - sh + - -c + - bin/kafka-broker-api-versions.sh --bootstrap-server=127.0.0.1:9092 + failureThreshold: 5 + initialDelaySeconds: 300 + periodSeconds: 30 + timeoutSeconds: 5 + readinessProbe: + exec: + command: + - sh + - -c + - bin/kafka-broker-api-versions.sh --bootstrap-server=127.0.0.1:9092 + failureThreshold: 3 + initialDelaySeconds: 25 + periodSeconds: 10 + timeoutSeconds: 5 + resources: + limits: + cpu: 4 + memory: 16Gi + requests: + cpu: 100m + memory: 2Gi + volumeMounts: + - mountPath: /opt/kafka/data + name: data + subPath: data + - mountPath: /opt/kafka/logs + name: data + subPath: logs + - mountPath: /entrypoint.sh + name: entrypoint-sh + subPath: entrypoint.sh + lifecycle: + preStop: + exec: + command: ["sh", "-c", "sleep 10; bin/kafka-server-stop.sh"] + initContainers: + - name: check-clusterid + image: "kafkace/kafka:v3.5.1" + imagePullPolicy: "IfNotPresent" + env: + - name: KAFKA_CLUSTER_ID + valueFrom: + secretKeyRef: + name: kafka-cluster-id + key: clusterId + command: ["/bin/bash"] + args: + - -c + - | + if [[ -f "$KAFKA_CFG_LOG_DIR/meta.properties" ]]; then + meta_clusterid=$(grep -E '^cluster\.id' meta.properties | awk -F '=' '{print $2}') + if [[ "$meta_clusterid" != "$KAFKA_CLUSTER_ID" ]]; then + cat "$KAFKA_CFG_LOG_DIR/meta.properties" + echo "[ERROR] CLUSTER_ID Exception, \ + The CLUSTER_ID currently deployed is $KAFKA_CLUSTER_ID, \ + and The stored CLUSTER_ID in KAFKA_CFG_LOG_DIR is $meta_clusterid" + echo "[ERROR] CLUSTER_ID Exception, \ + Use \"--set clusterId=$meta_clusterid\" to continue helm deploy, \ + Or clean up KAFKA_CFG_LOG_DIR and deploy a new cluster. 
\ + See https://github.com/sir5kong/kafka-docker" + exit "500" + fi + fi + volumeMounts: + - mountPath: /opt/kafka/data + name: data + readOnly: true + terminationGracePeriodSeconds: 60 + volumes: + - name: entrypoint-sh + configMap: + items: + - key: entrypoint.sh + path: entrypoint.sh + name: kafka-broker + defaultMode: 0744 + - name: data + emptyDir: {} +--- +# Source: kafka/templates/secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: kafka-cluster-id + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": "pre-install" + "helm.sh/hook-delete-policy": "before-hook-creation,hook-failed" +type: Opaque +data: + clusterId: "MDFlMjU1YzQzZmY4NGYzMDlkNWVmZA==" diff --git a/examples/k8s/statefulset/one-broker/all.yaml b/examples/k8s/statefulset/one-broker/all.yaml new file mode 100644 index 0000000..e69de29 diff --git a/examples/k8s/statefulset/single/all.yaml b/examples/k8s/statefulset/single/all.yaml new file mode 100644 index 0000000..e6faf5e --- /dev/null +++ b/examples/k8s/statefulset/single/all.yaml @@ -0,0 +1,480 @@ +--- +# Source: kafka/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm +--- +# Source: kafka/templates/broker/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-broker + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm + component: "broker_controller" +data: + entrypoint.sh: | + #!/bin/bash + + export KAFKA_CONF_FILE="/etc/kafka/server.properties" + export KAFKA_CFG_LOG_DIR="$KAFKA_HOME/data" + + if [[ -z "$KAFKA_HOME" ]]; then + export KAFKA_HOME="/opt/kafka" + export KAFKA_CFG_LOG_DIR="$KAFKA_HOME/data" + fi + if [[ ! -d "$KAFKA_HOME" ]]; then + mkdir -p "$KAFKA_HOME" + fi + + check_runtime() { + java -version + if [[ $? 
-ne 0 ]]; then + echo "[ERROR] Missing java" + exit "500" + fi + } + + run_as_other_user_if_needed() { + if [[ "$(id -u)" == "0" ]]; then + # If running as root, drop to specified UID and run command + exec chroot --userspec=1000:0 / "${@}" + else + # Either we are running in Openshift with random uid and are a member of the root group + # or with a custom --user + exec "${@}" + fi + } + + get_nodeid_from_suffix() { + local line="$1" + local index="${line##*-}" + if [[ "$index" =~ ^[0-9]+$ ]]; then + export KAFKA_CFG_NODE_ID="$index" + if [[ "$KAFKA_NODE_ID_OFFSET" =~ ^[0-9]+$ ]]; then + if [[ $KAFKA_NODE_ID_OFFSET -gt "0" ]]; then + export KAFKA_CFG_NODE_ID="$((index + KAFKA_NODE_ID_OFFSET))" + fi + fi + fi + } + + fix_external_advertised_listeners() { + if [[ -z "$KAFKA_EXTERNAL_SERVICE_TYPE" ]]; then + return + fi + if [[ -z "$KAFKA_EXTERNAL_ADVERTISED_LISTENERS" ]]; then + return + fi + local ext_listeners="$KAFKA_EXTERNAL_ADVERTISED_LISTENERS" + local i="${POD_NAME##*-}" + local listener=$(echo "$ext_listeners" | cut -d "," -f $((i+1)) | sed 's/ //g') + if [[ "$KAFKA_EXTERNAL_SERVICE_TYPE" == "NodePort" ]]; then + listener=$(echo "$listener" | sed -E "s%://[^:]*:%://${POD_HOST_IP}:%") + fi + if [[ "$listener" =~ ://[^:]*:[0-9]+$ ]]; then + export KAFKA_CFG_ADVERTISED_LISTENERS="${KAFKA_CFG_ADVERTISED_LISTENERS},${listener}" + echo "KAFKA_CFG_ADVERTISED_LISTENERS: $KAFKA_CFG_ADVERTISED_LISTENERS" + else + echo "[WARN] KAFKA_EXTERNAL_ADVERTISED_LISTENER invalid, value: [$listener]" + fi + } + + init_nodeid() { + if [[ "$KAFKA_NODE_ID" =~ ^[0-9]+$ ]]; then + export KAFKA_CFG_NODE_ID="$KAFKA_NODE_ID" + return + fi + if [[ "$KAFKA_NODE_ID" = hostname* ]]; then + get_nodeid_from_suffix "$HOSTNAME" + elif [[ "$KAFKA_NODE_ID" = pod* ]]; then + if [[ -n "$POD_NAME" ]]; then + get_nodeid_from_suffix "$POD_NAME" + fi + fi + if [[ -z "$KAFKA_CFG_NODE_ID" ]]; then + export KAFKA_CFG_NODE_ID="1" + fi + } + + take_file_ownership() { + if [[ "$(id -u)" == "0" ]]; then + chown -R 1000:0 "$KAFKA_HOME" + if [[ -d "$KAFKA_CFG_LOG_DIR" ]]; then + chown -R 1000:0 "$KAFKA_CFG_LOG_DIR" + fi + fi + } + + update_server_conf() { + local key=$1 + local value=$2 + local pattern="$(echo $key | sed 's/\./\\./')" + sed -i "/^${pattern} *=/d" "$KAFKA_CONF_FILE" + echo "${key}=${value}" >> "$KAFKA_CONF_FILE" + } + + set_kafka_cfg_default() { + if [[ -z "$KAFKA_CFG_NODE_ID" ]]; then + export KAFKA_CFG_NODE_ID="1" + fi + if [[ -z "$KAFKA_CFG_PROCESS_ROLES" ]]; then + export KAFKA_CFG_PROCESS_ROLES="broker,controller" + fi + if [[ -z "$KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP" ]]; then + export KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP="CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT" + fi + if [[ -z "$KAFKA_CFG_INTER_BROKER_LISTENER_NAME" ]]; then + export KAFKA_CFG_INTER_BROKER_LISTENER_NAME="PLAINTEXT" + fi + if [[ -z "$KAFKA_CFG_CONTROLLER_LISTENER_NAMES" ]]; then + export KAFKA_CFG_CONTROLLER_LISTENER_NAMES="CONTROLLER" + fi + if [[ -z "$KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR" ]]; then + export KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR="1" + fi + if [[ -z "$KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR" ]]; then + export KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR="1" + fi + if [[ -z "$KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR" ]]; then + export KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR="1" + fi + ## + ## KAFKA_CONTROLLER_LISTENER_PORT default value: 19091 + local ctl_port="${KAFKA_CONTROLLER_LISTENER_PORT-19091}" + ## KAFKA_BROKER_LISTENER_PORT default value: 9092 + local 
broker_port="${KAFKA_BROKER_LISTENER_PORT-9092}" + if [[ -z "$KAFKA_CFG_LISTENERS" ]]; then + export KAFKA_CFG_LISTENERS="CONTROLLER://:${ctl_port},PLAINTEXT://:${broker_port}" + fi + if [[ -z "$KAFKA_CFG_CONTROLLER_QUORUM_VOTERS" ]]; then + export KAFKA_CFG_CONTROLLER_QUORUM_VOTERS="${KAFKA_CFG_NODE_ID}@127.0.0.1:${ctl_port}" + fi + } + + init_server_conf() { + init_nodeid + set_kafka_cfg_default + fix_external_advertised_listeners + if [[ ! -f "$KAFKA_CONF_FILE" ]]; then + mkdir -p "$(dirname $KAFKA_CONF_FILE)" + # cat "${KAFKA_HOME}/config/kraft/server.properties" \ + # | grep -E '^[a-zA-Z]' > "$KAFKA_CONF_FILE" + touch "$KAFKA_CONF_FILE" + fi + for var in "${!KAFKA_CFG_@}"; do + # printf '%s=%s\n' "$var" "${!var}" + key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//' -e 's/_/./g' | tr 'A-Z' 'a-z')" + value="${!var}" + update_server_conf "$key" "$value" + done + } + + reset_log_dirs() { + ## protect log.dirs + sed -i "/^log.dir *=/d" "$KAFKA_CONF_FILE" + update_server_conf "log.dirs" "$KAFKA_CFG_LOG_DIR" + } + + start_server() { + check_runtime + reset_log_dirs + if [[ -n "$KAFKA_HEAP_OPTS" ]]; then + export JAVA_TOOL_OPTIONS="${JAVA_TOOL_OPTIONS} ${KAFKA_HEAP_OPTS}" + fi + if [[ ! -f "$KAFKA_CFG_LOG_DIR/meta.properties" ]]; then + echo ">>> Format Log Directories <<<" + if [[ -z "$KAFKA_CLUSTER_ID" ]]; then + echo "Generate a Cluster UUID" + export KAFKA_CLUSTER_ID="$(${KAFKA_HOME}/bin/kafka-storage.sh random-uuid)" + fi + cat "$KAFKA_CONF_FILE" + if [[ "$(id -u)" == "0" ]]; then + chroot --userspec=1000:0 / ${KAFKA_HOME}/bin/kafka-storage.sh format \ + -t $KAFKA_CLUSTER_ID -c "$KAFKA_CONF_FILE" + else + ${KAFKA_HOME}/bin/kafka-storage.sh format \ + -t $KAFKA_CLUSTER_ID -c "$KAFKA_CONF_FILE" + fi + fi + run_as_other_user_if_needed "${KAFKA_HOME}/bin/kafka-server-start.sh" "$KAFKA_CONF_FILE" + } + + init_server_conf + take_file_ownership + if [[ "$@" = "start" ]]; then + start_server + else + exec "$@" + fi +--- +# Source: kafka/templates/broker/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm + component: "broker_controller" +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: 9092 + targetPort: broker + protocol: TCP + name: broker + - port: 9090 + targetPort: controller + protocol: TCP + name: controller + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + component: "broker_controller" +--- +# Source: kafka/templates/broker/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-broker + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm + component: "broker_controller" +spec: + type: "ClusterIP" + ports: + - name: broker + port: 9092 + targetPort: broker + protocol: TCP + - name: controller + port: 9090 + targetPort: controller + protocol: TCP + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + component: "broker_controller" +--- +# Source: kafka/templates/broker/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka-broker + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm + 
component: "broker_controller" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + component: "broker_controller" + serviceName: kafka-headless + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + component: "broker_controller" + spec: + serviceAccountName: kafka + securityContext: + null + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 5 + podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + component: "broker_controller" + topologyKey: "kubernetes.io/hostname" + containers: + - name: kafka + image: "kafkace/kafka:v3.5.1" + imagePullPolicy: "IfNotPresent" + env: + - name: POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_HEAP_OPTS + value: "-Xms1024m -Xmx1024m" + - name: KAFKA_CFG_PROCESS_ROLES + value: "broker,controller" + - name: KAFKA_CFG_LISTENERS + value: "BROKER://0.0.0.0:9092,EXTERNAL://0.0.0.0:9095,CONTROLLER://0.0.0.0:9090" + - name: KAFKA_CFG_ADVERTISED_LISTENERS + value: "BROKER://$(POD_IP):9092" + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + value: CONTROLLER:PLAINTEXT,BROKER:PLAINTEXT,EXTERNAL:PLAINTEXT + - name: KAFKA_CFG_INTER_BROKER_LISTENER_NAME + value: BROKER + - name: KAFKA_CFG_CONTROLLER_LISTENER_NAMES + value: CONTROLLER + - name: KAFKA_CFG_CONTROLLER_QUORUM_VOTERS + value: 0@kafka-broker-0.kafka-headless:9090 + - name: KAFKA_CLUSTER_ID + valueFrom: + secretKeyRef: + name: kafka-cluster-id + key: clusterId + - name: KAFKA_NODE_ID + value: "podnameSuffix" + #- name: KAFKA_CFG_ADVERTISED_LISTENERS + # value: "foo" + ports: + - containerPort: 9092 + name: broker + protocol: TCP + - containerPort: 9090 + name: controller + protocol: TCP + livenessProbe: + exec: + command: + - sh + - -c + - bin/kafka-broker-api-versions.sh --bootstrap-server=127.0.0.1:9092 + failureThreshold: 5 + initialDelaySeconds: 300 + periodSeconds: 30 + timeoutSeconds: 5 + readinessProbe: + exec: + command: + - sh + - -c + - bin/kafka-broker-api-versions.sh --bootstrap-server=127.0.0.1:9092 + failureThreshold: 3 + initialDelaySeconds: 25 + periodSeconds: 10 + timeoutSeconds: 5 + resources: + limits: + cpu: 4 + memory: 16Gi + requests: + cpu: 100m + memory: 2Gi + volumeMounts: + - mountPath: /opt/kafka/data + name: data + subPath: data + - mountPath: /opt/kafka/logs + name: data + subPath: logs + - mountPath: /entrypoint.sh + name: entrypoint-sh + subPath: entrypoint.sh + lifecycle: + preStop: + exec: + command: ["sh", "-c", "sleep 10; bin/kafka-server-stop.sh"] + initContainers: + - name: check-clusterid + image: "kafkace/kafka:v3.5.1" + imagePullPolicy: "IfNotPresent" + env: + - name: KAFKA_CLUSTER_ID + valueFrom: + secretKeyRef: + name: kafka-cluster-id + key: clusterId + command: ["/bin/bash"] + args: + - -c + - | + if [[ -f "$KAFKA_CFG_LOG_DIR/meta.properties" ]]; then + meta_clusterid=$(grep -E '^cluster\.id' meta.properties | awk -F '=' '{print $2}') + if [[ "$meta_clusterid" != "$KAFKA_CLUSTER_ID" ]]; then + cat "$KAFKA_CFG_LOG_DIR/meta.properties" + echo "[ERROR] CLUSTER_ID Exception, \ + The CLUSTER_ID currently deployed is $KAFKA_CLUSTER_ID, \ + and The stored CLUSTER_ID in KAFKA_CFG_LOG_DIR is $meta_clusterid" + echo "[ERROR] CLUSTER_ID Exception, \ + 
Use \"--set clusterId=$meta_clusterid\" to continue helm deploy, \ + Or clean up KAFKA_CFG_LOG_DIR and deploy a new cluster. \ + See https://github.com/sir5kong/kafka-docker" + exit "500" + fi + fi + volumeMounts: + - mountPath: /opt/kafka/data + name: data + readOnly: true + terminationGracePeriodSeconds: 60 + volumes: + - name: entrypoint-sh + configMap: + items: + - key: entrypoint.sh + path: entrypoint.sh + name: kafka-broker + defaultMode: 0744 + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "30Gi" +--- +# Source: kafka/templates/secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: kafka-cluster-id + labels: + helm.sh/chart: kafka-13.1.2 + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/version: "v3.5.1" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": "pre-install" + "helm.sh/hook-delete-policy": "before-hook-creation,hook-failed" +type: Opaque +data: + clusterId: "YjhhNWI2ZGRmZDQ3NDkxMWJlNjBjMw==" diff --git a/examples/values-combined.yml b/examples/values-combined.yml index 70bafb6..65c7505 100644 --- a/examples/values-combined.yml +++ b/examples/values-combined.yml @@ -1,7 +1,7 @@ image: - repository: sir5kong/kafka + repository: kafkace/kafka pullPolicy: IfNotPresent - tag: v3.5.0 + tag: v3.5.1 broker: ## broker.combinedMode, the server acts as both a broker and a controller. diff --git a/examples/values-production.yml b/examples/values-production.yml index 169914c..7ac21a7 100644 --- a/examples/values-production.yml +++ b/examples/values-production.yml @@ -1,7 +1,7 @@ image: - repository: sir5kong/kafka + repository: kafkace/kafka pullPolicy: IfNotPresent - tag: v3.5.0 + tag: v3.5.1 controller: replicaCount: 3 From f2e8a2919ad5b5afa7de0b4f5bbd23e107604e0e Mon Sep 17 00:00:00 2001 From: itboon Date: Mon, 28 Aug 2023 17:53:37 +0800 Subject: [PATCH 06/14] kafka chart 13.2.0 --- charts/kafka/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/kafka/Chart.yaml b/charts/kafka/Chart.yaml index 4d5f6d7..b33ae04 100644 --- a/charts/kafka/Chart.yaml +++ b/charts/kafka/Chart.yaml @@ -3,7 +3,7 @@ name: kafka description: Helm chart for Apache Kafka. 
type: application -version: 13.1.2 +version: 13.2.0 appVersion: v3.5.1 maintainers: From bdc55bad04569fa4cbebc79e9efada19a2712197 Mon Sep 17 00:00:00 2001 From: itboon Date: Mon, 28 Aug 2023 18:42:55 +0800 Subject: [PATCH 07/14] ci test --- .github/workflows/oss-pages-sync.yml | 34 ++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 .github/workflows/oss-pages-sync.yml diff --git a/.github/workflows/oss-pages-sync.yml b/.github/workflows/oss-pages-sync.yml new file mode 100644 index 0000000..a3d56e0 --- /dev/null +++ b/.github/workflows/oss-pages-sync.yml @@ -0,0 +1,34 @@ +name: Sync gh-pages to OSS + +on: + workflow_dispatch: ## on button click + +jobs: + release: + runs-on: ubuntu-latest + steps: + - + name: Checkout + uses: actions/checkout@v3 + with: + # fetch-depth: 0 + ref: 'gh-pages' + - + name: Upload to OSS + if: ${{ vars.OSS_MIRROR_ENABLED == 'true' }} + shell: bash + env: + DL_URL_OSS: "https://gosspublic.alicdn.com/ossutil/1.7.1/ossutil64" + OSS_KEY_ID: "${{ secrets.OSS_KEY_ID }}" + OSS_KEY_SECRET: "${{ secrets.OSS_KEY_SECRET }}" + OSS_EP: "${{ vars.OSS_EP }}" + OSS_BUCKET: "${{ vars.OSS_BUCKET }}" + CHART_INDEX_URL: "${{ vars.CHART_INDEX_URL }}" + MIRROR_HOST: "${{ vars.MIRROR_HOST }}" + run: | + export ALIOSS="${HOME}/bin/ossutil" + mkdir -p $(dirname $ALIOSS) + curl -o $ALIOSS -L $DL_URL_OSS + chmod a+rx $ALIOSS + $ALIOSS config -e $OSS_EP -i $OSS_KEY_ID -k $OSS_KEY_SECRET + $ALIOSS cp -rf ./ "oss://${OSS_BUCKET}/kafka/" \ No newline at end of file From faa8d57c704f58c3c9808424442e8d8fc0983b52 Mon Sep 17 00:00:00 2001 From: itboon Date: Mon, 28 Aug 2023 18:43:31 +0800 Subject: [PATCH 08/14] ci test --- .github/workflows/oss-pages-sync.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/oss-pages-sync.yml b/.github/workflows/oss-pages-sync.yml index a3d56e0..354673e 100644 --- a/.github/workflows/oss-pages-sync.yml +++ b/.github/workflows/oss-pages-sync.yml @@ -31,4 +31,4 @@ jobs: curl -o $ALIOSS -L $DL_URL_OSS chmod a+rx $ALIOSS $ALIOSS config -e $OSS_EP -i $OSS_KEY_ID -k $OSS_KEY_SECRET - $ALIOSS cp -rf ./ "oss://${OSS_BUCKET}/kafka/" \ No newline at end of file + $ALIOSS cp -rf ./ "oss://${OSS_BUCKET}/p/kafka/" \ No newline at end of file From 816f621b837b2dc66e119916ab7c6d935a4a668f Mon Sep 17 00:00:00 2001 From: Darren Date: Tue, 29 Aug 2023 11:27:35 +0800 Subject: [PATCH 09/14] ci test --- .../workflows/{oss-pages-sync.yml => pages-oss-sync.yml} | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename .github/workflows/{oss-pages-sync.yml => pages-oss-sync.yml} (84%) diff --git a/.github/workflows/oss-pages-sync.yml b/.github/workflows/pages-oss-sync.yml similarity index 84% rename from .github/workflows/oss-pages-sync.yml rename to .github/workflows/pages-oss-sync.yml index 354673e..8631e19 100644 --- a/.github/workflows/oss-pages-sync.yml +++ b/.github/workflows/pages-oss-sync.yml @@ -22,8 +22,7 @@ jobs: OSS_KEY_ID: "${{ secrets.OSS_KEY_ID }}" OSS_KEY_SECRET: "${{ secrets.OSS_KEY_SECRET }}" OSS_EP: "${{ vars.OSS_EP }}" - OSS_BUCKET: "${{ vars.OSS_BUCKET }}" - CHART_INDEX_URL: "${{ vars.CHART_INDEX_URL }}" + OSS_BUCKET: "${{ vars.PAGES_OSS_BUCKET }}" MIRROR_HOST: "${{ vars.MIRROR_HOST }}" run: | export ALIOSS="${HOME}/bin/ossutil" @@ -31,4 +30,5 @@ jobs: curl -o $ALIOSS -L $DL_URL_OSS chmod a+rx $ALIOSS $ALIOSS config -e $OSS_EP -i $OSS_KEY_ID -k $OSS_KEY_SECRET - $ALIOSS cp -rf ./ "oss://${OSS_BUCKET}/p/kafka/" \ No newline at end of file + repo_name="${GITHUB_REPOSITORY#*/}" + $ALIOSS cp -rf ./ 
"oss://${OSS_BUCKET}/${repo_name}/" \ No newline at end of file From ccbfc253b2fcbf9c1d7523f834d32c76e02178b1 Mon Sep 17 00:00:00 2001 From: Darren Date: Tue, 29 Aug 2023 13:07:28 +0800 Subject: [PATCH 10/14] ci test --- .github/workflows/publish-pages.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish-pages.yml b/.github/workflows/publish-pages.yml index fd99a0d..15b3f37 100644 --- a/.github/workflows/publish-pages.yml +++ b/.github/workflows/publish-pages.yml @@ -22,4 +22,4 @@ jobs: # Or use mhausenblas/mkdocs-deploy-gh-pages@nomaterial to build without the mkdocs-material theme env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GOOGLE_ANALYTICS_KEY: ${{ secrets.GOOGLE_ANALYTICS_KEY }} + GOOGLE_ANALYTICS_KEY: ${{ vars.GOOGLE_ANALYTICS_KEY }} From d45780f3921027acbe5b087911dea8fccc56bda4 Mon Sep 17 00:00:00 2001 From: Darren Date: Tue, 29 Aug 2023 13:07:38 +0800 Subject: [PATCH 11/14] update docs --- docs/index.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/index.md b/docs/index.md index a96c55f..f21980b 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,10 +1,5 @@ # Welcome -[![CI](https://github.com/itboon/kafka-docker/actions/workflows/docker-publish.yml/badge.svg)](https://github.com/itboon/kafka-docker/actions/workflows/docker-publish.yml) -[![Docker pulls](https://img.shields.io/docker/pulls/kafkace/kafka)](https://hub.docker.com/r/kafkace/kafka) -![Docker Iamge](https://img.shields.io/docker/image-size/kafkace/kafka) - -- [Dockerfile](https://github.com/itboon/kafka-docker/blob/main/Dockerfile) - [GitHub](https://github.com/itboon/kafka-docker) - [Docker Hub](https://hub.docker.com/r/kafkace/kafka) From 5ca992e2d975780ef1a3a32a1646e944a25489e1 Mon Sep 17 00:00:00 2001 From: Darren Date: Tue, 29 Aug 2023 13:23:23 +0800 Subject: [PATCH 12/14] update docs --- docs/docker.md | 6 +++--- docs/helm.md | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/docker.md b/docs/docker.md index 4ea7c32..a650992 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -26,7 +26,7 @@ docker run -d --name demo-kafka-server \ ## 持久化 -数据存储路径 `/opt/kafka/data`,像下面这个案例一样挂载一下: +数据存储路径 `/opt/kafka/data`,挂载数据卷: ``` shell docker volume create demo-kafka-data @@ -42,5 +42,5 @@ docker run -d --name demo-kafka-server \ ## 下一步 -- [环境变量和配置](../env) -- [Docker Compose 启动 Kafka](../compose) \ No newline at end of file +- [Docker Compose 启动 Kafka](../compose) +- [环境变量和配置](../env) \ No newline at end of file diff --git a/docs/helm.md b/docs/helm.md index 756bce5..d4d1ce2 100644 --- a/docs/helm.md +++ b/docs/helm.md @@ -1,4 +1,4 @@ -# helm chart 部署 kafka +# Helm 部署 Kafka ## Prerequisites From 0d0b7bd36c76acf4fe641daaa019657480800509 Mon Sep 17 00:00:00 2001 From: Darren Date: Tue, 29 Aug 2023 13:36:33 +0800 Subject: [PATCH 13/14] update docs --- docs/compose.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/compose.md b/docs/compose.md index 2681e0d..28f8600 100644 --- a/docs/compose.md +++ b/docs/compose.md @@ -16,7 +16,7 @@ services: - kafka-data:/opt/kafka/data environment: - KAFKA_HEAP_OPTS=-Xmx512m -Xms512m - ## 将下面 ${KAFKA_BROKER_EXTERNAL_HOST} 替换成你自己的外部主机名,可以是域名或端口 + ## 将下面 ${KAFKA_BROKER_EXTERNAL_HOST} 替换成你自己的外部主机名,可以是域名或IP地址 ## - KAFKA_BROKER_EXTERNAL_HOST=kafka.example.com - KAFKA_BROKER_EXTERNAL_HOST=${KAFKA_BROKER_EXTERNAL_HOST} - KAFKA_BROKER_EXTERNAL_PORT=29092 @@ -66,7 +66,7 @@ services: - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL - 
KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT - KAFKA_CFG_LISTENERS=CONTROLLER://:9091,INTERNAL://:9092,EXTERNAL://:29092 - ## 将下面 ${KAFKA_BROKER_EXTERNAL_HOST} 替换成你自己的外部主机名,可以是域名或端口 + ## 将下面 ${KAFKA_BROKER_EXTERNAL_HOST} 替换成你自己的外部主机名,可以是域名或IP地址 - KAFKA_CFG_ADVERTISED_LISTENERS=INTERNAL://:9092,EXTERNAL://${KAFKA_BROKER_EXTERNAL_HOST}:29092 - KAFKA_CFG_NODE_ID=1 - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@kafka:9091 From 4adbf5b95be673d0e6616937b1a593e4dc68edec Mon Sep 17 00:00:00 2001 From: Darren Date: Tue, 29 Aug 2023 16:22:15 +0800 Subject: [PATCH 14/14] ci test --- .github/workflows/docker-publish.yml | 2 ++ .github/workflows/pages-oss-sync.yml | 34 ---------------------------- .github/workflows/publish-pages.yml | 29 ++++++++++++++++++++++++ 3 files changed, 31 insertions(+), 34 deletions(-) delete mode 100644 .github/workflows/pages-oss-sync.yml diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml index a487ced..95c320d 100644 --- a/.github/workflows/docker-publish.yml +++ b/.github/workflows/docker-publish.yml @@ -2,6 +2,8 @@ name: Docker Publish on: workflow_dispatch: ## on button click + schedule: + - cron: '0 1/11 * * *' push: paths: - Dockerfile diff --git a/.github/workflows/pages-oss-sync.yml b/.github/workflows/pages-oss-sync.yml deleted file mode 100644 index 8631e19..0000000 --- a/.github/workflows/pages-oss-sync.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: Sync gh-pages to OSS - -on: - workflow_dispatch: ## on button click - -jobs: - release: - runs-on: ubuntu-latest - steps: - - - name: Checkout - uses: actions/checkout@v3 - with: - # fetch-depth: 0 - ref: 'gh-pages' - - - name: Upload to OSS - if: ${{ vars.OSS_MIRROR_ENABLED == 'true' }} - shell: bash - env: - DL_URL_OSS: "https://gosspublic.alicdn.com/ossutil/1.7.1/ossutil64" - OSS_KEY_ID: "${{ secrets.OSS_KEY_ID }}" - OSS_KEY_SECRET: "${{ secrets.OSS_KEY_SECRET }}" - OSS_EP: "${{ vars.OSS_EP }}" - OSS_BUCKET: "${{ vars.PAGES_OSS_BUCKET }}" - MIRROR_HOST: "${{ vars.MIRROR_HOST }}" - run: | - export ALIOSS="${HOME}/bin/ossutil" - mkdir -p $(dirname $ALIOSS) - curl -o $ALIOSS -L $DL_URL_OSS - chmod a+rx $ALIOSS - $ALIOSS config -e $OSS_EP -i $OSS_KEY_ID -k $OSS_KEY_SECRET - repo_name="${GITHUB_REPOSITORY#*/}" - $ALIOSS cp -rf ./ "oss://${OSS_BUCKET}/${repo_name}/" \ No newline at end of file diff --git a/.github/workflows/publish-pages.yml b/.github/workflows/publish-pages.yml index 15b3f37..fa58d8c 100644 --- a/.github/workflows/publish-pages.yml +++ b/.github/workflows/publish-pages.yml @@ -23,3 +23,32 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GOOGLE_ANALYTICS_KEY: ${{ vars.GOOGLE_ANALYTICS_KEY }} + + oss-sync: + runs-on: ubuntu-latest + steps: + - + name: Checkout + uses: actions/checkout@v3 + with: + # fetch-depth: 0 + ref: 'gh-pages' + - + name: Upload to OSS + if: ${{ vars.OSS_MIRROR_ENABLED == 'true' }} + shell: bash + env: + DL_URL_OSS: "https://gosspublic.alicdn.com/ossutil/1.7.1/ossutil64" + OSS_KEY_ID: "${{ secrets.OSS_KEY_ID }}" + OSS_KEY_SECRET: "${{ secrets.OSS_KEY_SECRET }}" + OSS_EP: "${{ vars.OSS_EP }}" + OSS_BUCKET: "${{ vars.PAGES_OSS_BUCKET }}" + MIRROR_HOST: "${{ vars.MIRROR_HOST }}" + run: | + export ALIOSS="${HOME}/bin/ossutil" + mkdir -p $(dirname $ALIOSS) + curl -o $ALIOSS -L $DL_URL_OSS + chmod a+rx $ALIOSS + $ALIOSS config -e $OSS_EP -i $OSS_KEY_ID -k $OSS_KEY_SECRET + repo_name="${GITHUB_REPOSITORY#*/}" + $ALIOSS cp -rf ./ "oss://${OSS_BUCKET}/${repo_name}/" \ No newline at end of file
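
The series above can be applied and verified end-to-end. A minimal sketch, assuming the 14 patches have been saved as a single mbox file (`kafka-docker.patch` is a hypothetical name) at the root of a `kafka-docker` checkout; the `helm template` invocation mirrors the one added in `examples/k8s/script/helm-template.sh` (PATCH 05):

``` shell
# Apply all 14 patches from the mbox onto the current branch,
# preserving the authors and commit messages shown above.
git am kafka-docker.patch

# Re-render the example StatefulSet manifests the same way
# examples/k8s/script/helm-template.sh does.
cd examples/k8s/script
./helm-template.sh

# Or render a single combined-mode broker directly, without the script:
helm template kafka \
  --namespace kafka-demo \
  -f values/single.yaml \
  ../../../charts/kafka/
```

`helm template` only renders manifests locally and needs no cluster access, so the output can be diffed against `examples/k8s/statefulset/single/all.yaml` from PATCH 05 to confirm the chart changes behave as intended.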