diff --git a/bitnami/opensearch/1/debian-11/Dockerfile b/bitnami/opensearch/1/debian-11/Dockerfile new file mode 100644 index 0000000000000..284c2803c32c1 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/Dockerfile @@ -0,0 +1,61 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +FROM docker.io/bitnami/minideb:bullseye + +ARG JAVA_EXTRA_SECURITY_DIR="/bitnami/java/extra-security" +ARG OPENSEARCH_PLUGINS +ARG TARGETARCH + +LABEL org.opencontainers.image.base.name="docker.io/bitnami/minideb:bullseye" \ + org.opencontainers.image.created="2023-07-12T21:05:10Z" \ + org.opencontainers.image.description="Application packaged by VMware, Inc" \ + org.opencontainers.image.licenses="Apache-2.0" \ + org.opencontainers.image.ref.name="1.3.10-debian-11-r0" \ + org.opencontainers.image.title="opensearch" \ + org.opencontainers.image.vendor="VMware, Inc." \ + org.opencontainers.image.version="1.3.10" + +ENV HOME="/" \ + OS_ARCH="${TARGETARCH:-amd64}" \ + OS_FLAVOUR="debian-11" \ + OS_NAME="linux" \ + PATH="/opt/bitnami/common/bin:/opt/bitnami/java/bin:/opt/bitnami/opensearch/bin:$PATH" + +COPY prebuildfs / +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +# Install required system packages and dependencies +RUN install_packages ca-certificates curl libgcc-s1 libgomp1 libstdc++6 procps zlib1g +RUN mkdir -p /tmp/bitnami/pkg/cache/ && cd /tmp/bitnami/pkg/cache/ && \ + COMPONENTS=( \ + "yq-4.34.2-0-linux-${OS_ARCH}-debian-11" \ + "java-17.0.7-7-2-linux-${OS_ARCH}-debian-11" \ + "opensearch-1.3.10-0-linux-${OS_ARCH}-debian-11" \ + ) && \ + for COMPONENT in "${COMPONENTS[@]}"; do \ + if [ ! 
-f "${COMPONENT}.tar.gz" ]; then \ + curl -SsLf "https://downloads.bitnami.com/files/stacksmith/${COMPONENT}.tar.gz" -O ; \ + curl -SsLf "https://downloads.bitnami.com/files/stacksmith/${COMPONENT}.tar.gz.sha256" -O ; \ + fi && \ + sha256sum -c "${COMPONENT}.tar.gz.sha256" && \ + tar -zxf "${COMPONENT}.tar.gz" -C /opt/bitnami --strip-components=2 --no-same-owner --wildcards '*/files' && \ + rm -rf "${COMPONENT}".tar.gz{,.sha256} ; \ + done +RUN apt-get update && apt-get upgrade -y && \ + apt-get clean && rm -rf /var/lib/apt/lists /var/cache/apt/archives +RUN chmod g+rwX /opt/bitnami + +COPY rootfs / +RUN /opt/bitnami/scripts/opensearch/postunpack.sh +RUN /opt/bitnami/scripts/java/postunpack.sh +ENV APP_VERSION="1.3.10" \ + BITNAMI_APP_NAME="opensearch" \ + JAVA_HOME="/opt/bitnami/java" \ + LD_LIBRARY_PATH="/opt/bitnami/opensearch/jdk/lib:/opt/bitnami/opensearch/jdk/lib/server:$LD_LIBRARY_PATH" \ + OPENSEARCH_JAVA_HOME="/opt/bitnami/java" + +EXPOSE 9200 9300 + +USER 1001 +ENTRYPOINT [ "/opt/bitnami/scripts/opensearch/entrypoint.sh" ] +CMD [ "/opt/bitnami/scripts/opensearch/run.sh" ] diff --git a/bitnami/opensearch/1/debian-11/docker-compose.yml b/bitnami/opensearch/1/debian-11/docker-compose.yml new file mode 100644 index 0000000000000..d0c4988f407af --- /dev/null +++ b/bitnami/opensearch/1/debian-11/docker-compose.yml @@ -0,0 +1,13 @@ +version: '2' + +services: + opensearch: + image: docker.io/bitnami/opensearch:1 + ports: + - '9200:9200' + - '9300:9300' + volumes: + - 'opensearch_data:/bitnami/opensearch/data' +volumes: + opensearch_data: + driver: local diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/.bitnami_components.json b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/.bitnami_components.json new file mode 100644 index 0000000000000..14f1021f42419 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/.bitnami_components.json @@ -0,0 +1,20 @@ +{ + "java": { + "arch": "amd64", + "distro": "debian-11", + "type": 
"NAMI", + "version": "17.0.7-7-2" + }, + "opensearch": { + "arch": "amd64", + "distro": "debian-11", + "type": "NAMI", + "version": "1.3.10-0" + }, + "yq": { + "arch": "amd64", + "distro": "debian-11", + "type": "NAMI", + "version": "4.34.2-0" + } +} \ No newline at end of file diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/licenses/licenses.txt b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/licenses/licenses.txt new file mode 100644 index 0000000000000..76956b38e82c6 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/licenses/licenses.txt @@ -0,0 +1,2 @@ +Bitnami containers ship with software bundles. You can find the licenses under: +/opt/bitnami/[name-of-bundle]/licenses/[bundle-version].txt diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libbitnami.sh b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libbitnami.sh new file mode 100644 index 0000000000000..184de8a117e28 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libbitnami.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Bitnami custom library + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. 
/opt/bitnami/scripts/liblog.sh + +# Constants +BOLD='\033[1m' + +# Functions + +######################## +# Print the welcome page +# Globals: +# DISABLE_WELCOME_MESSAGE +# BITNAMI_APP_NAME +# Arguments: +# None +# Returns: +# None +######################### +print_welcome_page() { + if [[ -z "${DISABLE_WELCOME_MESSAGE:-}" ]]; then + if [[ -n "$BITNAMI_APP_NAME" ]]; then + print_image_welcome_page + fi + fi +} + +######################## +# Print the welcome page for a Bitnami Docker image +# Globals: +# BITNAMI_APP_NAME +# Arguments: +# None +# Returns: +# None +######################### +print_image_welcome_page() { + local github_url="https://github.com/bitnami/containers" + + log "" + log "${BOLD}Welcome to the Bitnami ${BITNAMI_APP_NAME} container${RESET}" + log "Subscribe to project updates by watching ${BOLD}${github_url}${RESET}" + log "Submit issues and feature requests at ${BOLD}${github_url}/issues${RESET}" + log "" +} + diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libfile.sh b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libfile.sh new file mode 100644 index 0000000000000..63759c777f3ba --- /dev/null +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libfile.sh @@ -0,0 +1,141 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for managing files + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/libos.sh + +# Functions + +######################## +# Replace a regex-matching string in a file +# Arguments: +# $1 - filename +# $2 - match regex +# $3 - substitute regex +# $4 - use POSIX regex. 
Default: true +# Returns: +# None +######################### +replace_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + local posix_regex=${4:-true} + + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + if [[ $posix_regex = true ]]; then + result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + else + result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + fi + echo "$result" > "$filename" +} + +######################## +# Replace a regex-matching multiline string in a file +# Arguments: +# $1 - filename +# $2 - match regex +# $3 - substitute regex +# Returns: +# None +######################### +replace_in_file_multiline() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + + local result + local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + result="$(perl -pe "BEGIN{undef $/;} s${del}${match_regex}${del}${substitute_regex}${del}sg" "$filename")" + echo "$result" > "$filename" +} + +######################## +# Remove a line in a file based on a regex +# Arguments: +# $1 - filename +# $2 - match regex +# $3 - use POSIX regex. 
Default: true +# Returns: +# None +######################### +remove_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local posix_regex=${3:-true} + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + if [[ $posix_regex = true ]]; then + result="$(sed -E "/$match_regex/d" "$filename")" + else + result="$(sed "/$match_regex/d" "$filename")" + fi + echo "$result" > "$filename" +} + +######################## +# Appends text after the last line matching a pattern +# Arguments: +# $1 - file +# $2 - match regex +# $3 - contents to add +# Returns: +# None +######################### +append_file_after_last_match() { + local file="${1:?missing file}" + local match_regex="${2:?missing pattern}" + local value="${3:?missing value}" + + # We read the file in reverse, replace the first match (0,/pattern/s) and then reverse the results again + result="$(tac "$file" | sed -E "0,/($match_regex)/s||${value}\n\1|" | tac)" + echo "$result" > "$file" +} + +######################## +# Wait until certain entry is present in a log file +# Arguments: +# $1 - entry to look for +# $2 - log file +# $3 - max retries. Default: 12 +# $4 - sleep between retries (in seconds). Default: 5 +# Returns: +# Boolean +######################### +wait_for_log_entry() { + local -r entry="${1:-missing entry}" + local -r log_file="${2:-missing log file}" + local -r retries="${3:-12}" + local -r interval_time="${4:-5}" + local attempt=0 + + check_log_file_for_entry() { + if ! 
grep -qE "$entry" "$log_file"; then + debug "Entry \"${entry}\" still not present in ${log_file} (attempt $((++attempt))/${retries})" + return 1 + fi + } + debug "Checking that ${log_file} log file contains entry \"${entry}\"" + if retry_while check_log_file_for_entry "$retries" "$interval_time"; then + debug "Found entry \"${entry}\" in ${log_file}" + true + else + error "Could not find entry \"${entry}\" in ${log_file} after ${retries} retries" + debug_execute cat "$log_file" + return 1 + fi +} diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libfs.sh b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libfs.sh new file mode 100644 index 0000000000000..96b22f99710c7 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libfs.sh @@ -0,0 +1,193 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for file system actions + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. 
/opt/bitnami/scripts/liblog.sh + +# Functions + +######################## +# Ensure a file/directory is owned (user and group) but the given user +# Arguments: +# $1 - filepath +# $2 - owner +# Returns: +# None +######################### +owned_by() { + local path="${1:?path is missing}" + local owner="${2:?owner is missing}" + local group="${3:-}" + + if [[ -n $group ]]; then + chown "$owner":"$group" "$path" + else + chown "$owner":"$owner" "$path" + fi +} + +######################## +# Ensure a directory exists and, optionally, is owned by the given user +# Arguments: +# $1 - directory +# $2 - owner +# Returns: +# None +######################### +ensure_dir_exists() { + local dir="${1:?directory is missing}" + local owner_user="${2:-}" + local owner_group="${3:-}" + + [ -d "${dir}" ] || mkdir -p "${dir}" + if [[ -n $owner_user ]]; then + owned_by "$dir" "$owner_user" "$owner_group" + fi +} + +######################## +# Checks whether a directory is empty or not +# arguments: +# $1 - directory +# returns: +# boolean +######################### +is_dir_empty() { + local -r path="${1:?missing directory}" + # Calculate real path in order to avoid issues with symlinks + local -r dir="$(realpath "$path")" + if [[ ! -e "$dir" ]] || [[ -z "$(ls -A "$dir")" ]]; then + true + else + false + fi +} + +######################## +# Checks whether a mounted directory is empty or not +# arguments: +# $1 - directory +# returns: +# boolean +######################### +is_mounted_dir_empty() { + local dir="${1:?missing directory}" + + if is_dir_empty "$dir" || find "$dir" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" -exec false {} +; then + true + else + false + fi +} + +######################## +# Checks whether a file can be written to or not +# arguments: +# $1 - file +# returns: +# boolean +######################### +is_file_writable() { + local file="${1:?missing file}" + local dir + dir="$(dirname "$file")" + + if [[ (-f "$file" && -w "$file") || (! 
-f "$file" && -d "$dir" && -w "$dir") ]]; then + true + else + false + fi +} + +######################## +# Relativize a path +# arguments: +# $1 - path +# $2 - base +# returns: +# None +######################### +relativize() { + local -r path="${1:?missing path}" + local -r base="${2:?missing base}" + pushd "$base" >/dev/null || exit + realpath -q --no-symlinks --relative-base="$base" "$path" | sed -e 's|^/$|.|' -e 's|^/||' + popd >/dev/null || exit +} + +######################## +# Configure permisions and ownership recursively +# Globals: +# None +# Arguments: +# $1 - paths (as a string). +# Flags: +# -f|--file-mode - mode for directories. +# -d|--dir-mode - mode for files. +# -u|--user - user +# -g|--group - group +# Returns: +# None +######################### +configure_permissions_ownership() { + local -r paths="${1:?paths is missing}" + local dir_mode="" + local file_mode="" + local user="" + local group="" + + # Validate arguments + shift 1 + while [ "$#" -gt 0 ]; do + case "$1" in + -f | --file-mode) + shift + file_mode="${1:?missing mode for files}" + ;; + -d | --dir-mode) + shift + dir_mode="${1:?missing mode for directories}" + ;; + -u | --user) + shift + user="${1:?missing user}" + ;; + -g | --group) + shift + group="${1:?missing group}" + ;; + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + + read -r -a filepaths <<<"$paths" + for p in "${filepaths[@]}"; do + if [[ -e "$p" ]]; then + find -L "$p" -printf "" + if [[ -n $dir_mode ]]; then + find -L "$p" -type d ! -perm "$dir_mode" -print0 | xargs -r -0 chmod "$dir_mode" + fi + if [[ -n $file_mode ]]; then + find -L "$p" -type f ! 
-perm "$file_mode" -print0 | xargs -r -0 chmod "$file_mode" + fi + if [[ -n $user ]] && [[ -n $group ]]; then + find -L "$p" -print0 | xargs -r -0 chown "${user}:${group}" + elif [[ -n $user ]] && [[ -z $group ]]; then + find -L "$p" -print0 | xargs -r -0 chown "${user}" + elif [[ -z $user ]] && [[ -n $group ]]; then + find -L "$p" -print0 | xargs -r -0 chgrp "${group}" + fi + else + stderr_print "$p does not exist" + fi + done +} diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libhook.sh b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libhook.sh new file mode 100644 index 0000000000000..dadd06149e00e --- /dev/null +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libhook.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library to use for scripts expected to be used as Kubernetes lifecycle hooks + +# shellcheck disable=SC1091 + +# Load generic libraries +. /opt/bitnami/scripts/liblog.sh +. /opt/bitnami/scripts/libos.sh + +# Override functions that log to stdout/stderr of the current process, so they print to process 1 +for function_to_override in stderr_print debug_execute; do + # Output is sent to output of process 1 and thus end up in the container log + # The hook output in general isn't saved + eval "$(declare -f "$function_to_override") >/proc/1/fd/1 2>/proc/1/fd/2" +done diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/liblog.sh b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/liblog.sh new file mode 100644 index 0000000000000..2a9e76a4d7256 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/liblog.sh @@ -0,0 +1,114 @@ +#!/bin/bash +# Copyright VMware, Inc. 
+# SPDX-License-Identifier: APACHE-2.0 +# +# Library for logging functions + +# Constants +RESET='\033[0m' +RED='\033[38;5;1m' +GREEN='\033[38;5;2m' +YELLOW='\033[38;5;3m' +MAGENTA='\033[38;5;5m' +CYAN='\033[38;5;6m' + +# Functions + +######################## +# Print to STDERR +# Arguments: +# Message to print +# Returns: +# None +######################### +stderr_print() { + # 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it + local bool="${BITNAMI_QUIET:-false}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if ! [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + printf "%b\\n" "${*}" >&2 + fi +} + +######################## +# Log message +# Arguments: +# Message to log +# Returns: +# None +######################### +log() { + stderr_print "${CYAN}${MODULE:-} ${MAGENTA}$(date "+%T.%2N ")${RESET}${*}" +} +######################## +# Log an 'info' message +# Arguments: +# Message to log +# Returns: +# None +######################### +info() { + log "${GREEN}INFO ${RESET} ==> ${*}" +} +######################## +# Log message +# Arguments: +# Message to log +# Returns: +# None +######################### +warn() { + log "${YELLOW}WARN ${RESET} ==> ${*}" +} +######################## +# Log an 'error' message +# Arguments: +# Message to log +# Returns: +# None +######################### +error() { + log "${RED}ERROR${RESET} ==> ${*}" +} +######################## +# Log a 'debug' message +# Globals: +# BITNAMI_DEBUG +# Arguments: +# None +# Returns: +# None +######################### +debug() { + # 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it + local bool="${BITNAMI_DEBUG:-false}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + log "${MAGENTA}DEBUG${RESET} ==> ${*}" + fi +} + 
+######################## +# Indent a string +# Arguments: +# $1 - string +# $2 - number of indentation characters (default: 4) +# $3 - indentation character (default: " ") +# Returns: +# None +######################### +indent() { + local string="${1:-}" + local num="${2:?missing num}" + local char="${3:-" "}" + # Build the indentation unit string + local indent_unit="" + for ((i = 0; i < num; i++)); do + indent_unit="${indent_unit}${char}" + done + # shellcheck disable=SC2001 + # Complex regex, see https://github.com/koalaman/shellcheck/wiki/SC2001#exceptions + echo "$string" | sed "s/^/${indent_unit}/" +} diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libnet.sh b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libnet.sh new file mode 100644 index 0000000000000..b47c69a568250 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libnet.sh @@ -0,0 +1,165 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for network functions + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/liblog.sh + +# Functions + +######################## +# Resolve IP address for a host/domain (i.e. 
DNS lookup) +# Arguments: +# $1 - Hostname to resolve +# $2 - IP address version (v4, v6), leave empty for resolving to any version +# Returns: +# IP +######################### +dns_lookup() { + local host="${1:?host is missing}" + local ip_version="${2:-}" + getent "ahosts${ip_version}" "$host" | awk '/STREAM/ {print $1 }' | head -n 1 +} + +######################### +# Wait for a hostname and return the IP +# Arguments: +# $1 - hostname +# $2 - number of retries +# $3 - seconds to wait between retries +# Returns: +# - IP address that corresponds to the hostname +######################### +wait_for_dns_lookup() { + local hostname="${1:?hostname is missing}" + local retries="${2:-5}" + local seconds="${3:-1}" + check_host() { + if [[ $(dns_lookup "$hostname") == "" ]]; then + false + else + true + fi + } + # Wait for the host to be ready + retry_while "check_host ${hostname}" "$retries" "$seconds" + dns_lookup "$hostname" +} + +######################## +# Get machine's IP +# Arguments: +# None +# Returns: +# Machine IP +######################### +get_machine_ip() { + local -a ip_addresses + local hostname + hostname="$(hostname)" + read -r -a ip_addresses <<< "$(dns_lookup "$hostname" | xargs echo)" + if [[ "${#ip_addresses[@]}" -gt 1 ]]; then + warn "Found more than one IP address associated to hostname ${hostname}: ${ip_addresses[*]}, will use ${ip_addresses[0]}" + elif [[ "${#ip_addresses[@]}" -lt 1 ]]; then + error "Could not find any IP address associated to hostname ${hostname}" + exit 1 + fi + echo "${ip_addresses[0]}" +} + +######################## +# Check if the provided argument is a resolved hostname +# Arguments: +# $1 - Value to check +# Returns: +# Boolean +######################### +is_hostname_resolved() { + local -r host="${1:?missing value}" + if [[ -n "$(dns_lookup "$host")" ]]; then + true + else + false + fi +} + +######################## +# Parse URL +# Globals: +# None +# Arguments: +# $1 - uri - String +# $2 - component to obtain. 
Valid options (scheme, authority, userinfo, host, port, path, query or fragment) - String +# Returns: +# String +parse_uri() { + local uri="${1:?uri is missing}" + local component="${2:?component is missing}" + + # Solution based on https://tools.ietf.org/html/rfc3986#appendix-B with + # additional sub-expressions to split authority into userinfo, host and port + # Credits to Patryk Obara (see https://stackoverflow.com/a/45977232/6694969) + local -r URI_REGEX='^(([^:/?#]+):)?(//((([^@/?#]+)@)?([^:/?#]+)(:([0-9]+))?))?(/([^?#]*))?(\?([^#]*))?(#(.*))?' + # || | ||| | | | | | | | | | + # |2 scheme | ||6 userinfo 7 host | 9 port | 11 rpath | 13 query | 15 fragment + # 1 scheme: | |5 userinfo@ 8 :... 10 path 12 ?... 14 #... + # | 4 authority + # 3 //... + local index=0 + case "$component" in + scheme) + index=2 + ;; + authority) + index=4 + ;; + userinfo) + index=6 + ;; + host) + index=7 + ;; + port) + index=9 + ;; + path) + index=10 + ;; + query) + index=13 + ;; + fragment) + index=14 + ;; + *) + stderr_print "unrecognized component $component" + return 1 + ;; + esac + [[ "$uri" =~ $URI_REGEX ]] && echo "${BASH_REMATCH[${index}]}" +} + +######################## +# Wait for a HTTP connection to succeed +# Globals: +# * +# Arguments: +# $1 - URL to wait for +# $2 - Maximum amount of retries (optional) +# $3 - Time between retries (optional) +# Returns: +# true if the HTTP connection succeeded, false otherwise +######################### +wait_for_http_connection() { + local url="${1:?missing url}" + local retries="${2:-}" + local sleep_time="${3:-}" + if ! 
retry_while "debug_execute curl --silent ${url}" "$retries" "$sleep_time"; then + error "Could not connect to ${url}" + return 1 + fi +} diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libos.sh b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libos.sh new file mode 100644 index 0000000000000..c0500acee78d9 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libos.sh @@ -0,0 +1,657 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for operating system actions + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/liblog.sh +. /opt/bitnami/scripts/libfs.sh +. /opt/bitnami/scripts/libvalidations.sh + +# Functions + +######################## +# Check if an user exists in the system +# Arguments: +# $1 - user +# Returns: +# Boolean +######################### +user_exists() { + local user="${1:?user is missing}" + id "$user" >/dev/null 2>&1 +} + +######################## +# Check if a group exists in the system +# Arguments: +# $1 - group +# Returns: +# Boolean +######################### +group_exists() { + local group="${1:?group is missing}" + getent group "$group" >/dev/null 2>&1 +} + +######################## +# Create a group in the system if it does not exist already +# Arguments: +# $1 - group +# Flags: +# -i|--gid - the ID for the new group +# -s|--system - Whether to create new user as system user (uid <= 999) +# Returns: +# None +######################### +ensure_group_exists() { + local group="${1:?group is missing}" + local gid="" + local is_system_user=false + + # Validate arguments + shift 1 + while [ "$#" -gt 0 ]; do + case "$1" in + -i | --gid) + shift + gid="${1:?missing gid}" + ;; + -s | --system) + is_system_user=true + ;; + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + + if ! 
group_exists "$group"; then + local -a args=("$group") + if [[ -n "$gid" ]]; then + if group_exists "$gid"; then + error "The GID $gid is already in use." >&2 + return 1 + fi + args+=("--gid" "$gid") + fi + $is_system_user && args+=("--system") + groupadd "${args[@]}" >/dev/null 2>&1 + fi +} + +######################## +# Create an user in the system if it does not exist already +# Arguments: +# $1 - user +# Flags: +# -i|--uid - the ID for the new user +# -g|--group - the group the new user should belong to +# -a|--append-groups - comma-separated list of supplemental groups to append to the new user +# -h|--home - the home directory for the new user +# -s|--system - whether to create new user as system user (uid <= 999) +# Returns: +# None +######################### +ensure_user_exists() { + local user="${1:?user is missing}" + local uid="" + local group="" + local append_groups="" + local home="" + local is_system_user=false + + # Validate arguments + shift 1 + while [ "$#" -gt 0 ]; do + case "$1" in + -i | --uid) + shift + uid="${1:?missing uid}" + ;; + -g | --group) + shift + group="${1:?missing group}" + ;; + -a | --append-groups) + shift + append_groups="${1:?missing append_groups}" + ;; + -h | --home) + shift + home="${1:?missing home directory}" + ;; + -s | --system) + is_system_user=true + ;; + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + + if ! user_exists "$user"; then + local -a user_args=("-N" "$user") + if [[ -n "$uid" ]]; then + if user_exists "$uid"; then + error "The UID $uid is already in use." 
+ return 1 + fi + user_args+=("--uid" "$uid") + else + $is_system_user && user_args+=("--system") + fi + useradd "${user_args[@]}" >/dev/null 2>&1 + fi + + if [[ -n "$group" ]]; then + local -a group_args=("$group") + $is_system_user && group_args+=("--system") + ensure_group_exists "${group_args[@]}" + usermod -g "$group" "$user" >/dev/null 2>&1 + fi + + if [[ -n "$append_groups" ]]; then + local -a groups + read -ra groups <<<"$(tr ',;' ' ' <<<"$append_groups")" + for group in "${groups[@]}"; do + ensure_group_exists "$group" + usermod -aG "$group" "$user" >/dev/null 2>&1 + done + fi + + if [[ -n "$home" ]]; then + mkdir -p "$home" + usermod -d "$home" "$user" >/dev/null 2>&1 + configure_permissions_ownership "$home" -d "775" -f "664" -u "$user" -g "$group" + fi +} + +######################## +# Check if the script is currently running as root +# Arguments: +# $1 - user +# $2 - group +# Returns: +# Boolean +######################### +am_i_root() { + if [[ "$(id -u)" = "0" ]]; then + true + else + false + fi +} + +######################## +# Print OS metadata +# Arguments: +# $1 - Flag name +# Flags: +# --id - Distro ID +# --version - Distro version +# --branch - Distro branch +# --codename - Distro codename +# --name - Distro name +# --pretty-name - Distro pretty name +# Returns: +# String +######################### +get_os_metadata() { + local -r flag_name="${1:?missing flag}" + # Helper function + get_os_release_metadata() { + local -r env_name="${1:?missing environment variable name}" + ( + . 
/etc/os-release + echo "${!env_name}" + ) + } + case "$flag_name" in + --id) + get_os_release_metadata ID + ;; + --version) + get_os_release_metadata VERSION_ID + ;; + --branch) + get_os_release_metadata VERSION_ID | sed 's/\..*//' + ;; + --codename) + get_os_release_metadata VERSION_CODENAME + ;; + --name) + get_os_release_metadata NAME + ;; + --pretty-name) + get_os_release_metadata PRETTY_NAME + ;; + *) + error "Unknown flag ${flag_name}" + return 1 + ;; + esac +} + +######################## +# Get total memory available +# Arguments: +# None +# Returns: +# Memory in bytes +######################### +get_total_memory() { + echo $(($(grep MemTotal /proc/meminfo | awk '{print $2}') / 1024)) +} + +######################## +# Get machine size depending on specified memory +# Globals: +# None +# Arguments: +# None +# Flags: +# --memory - memory size (optional) +# Returns: +# Detected instance size +######################### +get_machine_size() { + local memory="" + # Validate arguments + while [[ "$#" -gt 0 ]]; do + case "$1" in + --memory) + shift + memory="${1:?missing memory}" + ;; + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + if [[ -z "$memory" ]]; then + debug "Memory was not specified, detecting available memory automatically" + memory="$(get_total_memory)" + fi + sanitized_memory=$(convert_to_mb "$memory") + if [[ "$sanitized_memory" -gt 26000 ]]; then + echo 2xlarge + elif [[ "$sanitized_memory" -gt 13000 ]]; then + echo xlarge + elif [[ "$sanitized_memory" -gt 6000 ]]; then + echo large + elif [[ "$sanitized_memory" -gt 3000 ]]; then + echo medium + elif [[ "$sanitized_memory" -gt 1500 ]]; then + echo small + else + echo micro + fi +} + +######################## +# Get machine size depending on specified memory +# Globals: +# None +# Arguments: +# $1 - memory size (optional) +# Returns: +# Detected instance size +######################### +get_supported_machine_sizes() { + echo micro small medium large xlarge 2xlarge 
+} + +######################## +# Convert memory size from string to amount of megabytes (i.e. 2G -> 2048) +# Globals: +# None +# Arguments: +# $1 - memory size +# Returns: +# Result of the conversion +######################### +convert_to_mb() { + local amount="${1:-}" + if [[ $amount =~ ^([0-9]+)(m|M|g|G) ]]; then + size="${BASH_REMATCH[1]}" + unit="${BASH_REMATCH[2]}" + if [[ "$unit" = "g" || "$unit" = "G" ]]; then + amount="$((size * 1024))" + else + amount="$size" + fi + fi + echo "$amount" +} + +######################### +# Redirects output to /dev/null if debug mode is disabled +# Globals: +# BITNAMI_DEBUG +# Arguments: +# $@ - Command to execute +# Returns: +# None +######################### +debug_execute() { + if is_boolean_yes "${BITNAMI_DEBUG:-false}"; then + "$@" + else + "$@" >/dev/null 2>&1 + fi +} + +######################## +# Retries a command a given number of times +# Arguments: +# $1 - cmd (as a string) +# $2 - max retries. Default: 12 +# $3 - sleep between retries (in seconds). 
Default: 5 +# Returns: +# Boolean +######################### +retry_while() { + local cmd="${1:?cmd is missing}" + local retries="${2:-12}" + local sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<<"$cmd" + for ((i = 1; i <= retries; i += 1)); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value +} + +######################## +# Generate a random string +# Arguments: +# -t|--type - String type (ascii, alphanumeric, numeric), defaults to ascii +# -c|--count - Number of characters, defaults to 32 +# Arguments: +# None +# Returns: +# None +# Returns: +# String +######################### +generate_random_string() { + local type="ascii" + local count="32" + local filter + local result + # Validate arguments + while [[ "$#" -gt 0 ]]; do + case "$1" in + -t | --type) + shift + type="$1" + ;; + -c | --count) + shift + count="$1" + ;; + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + # Validate type + case "$type" in + ascii) + filter="[:print:]" + ;; + numeric) + filter="0-9" + ;; + alphanumeric) + filter="a-zA-Z0-9" + ;; + alphanumeric+special|special+alphanumeric) + # Limit variety of special characters, so there is a higher chance of containing more alphanumeric characters + # Special characters are harder to write, and it could impact the overall UX if most passwords are too complex + filter='a-zA-Z0-9:@.,/+!=' + ;; + *) + echo "Invalid type ${type}" >&2 + return 1 + ;; + esac + # Obtain count + 10 lines from /dev/urandom to ensure that the resulting string has the expected size + # Note there is a very small chance of strings starting with EOL character + # Therefore, the higher amount of lines read, this will happen less frequently + result="$(head -n "$((count + 10))" /dev/urandom | tr -dc "$filter" | head -c "$count")" + echo "$result" +} + +######################## +# Create md5 hash from a string +# Arguments: +# $1 - string +# Returns: +# md5 hash - string 
#########################
# Create md5 hash from a string
# Arguments:
#   $1 - string
# Returns:
#   md5 hash - string
#########################
generate_md5_hash() {
    local -r str="${1:?missing input string}"
    printf '%s' "$str" | md5sum | awk '{print $1}'
}

########################
# Create sha hash from a string
# Arguments:
#   $1 - string
#   $2 - algorithm - 1 (default), 224, 256, 384, 512
# Returns:
#   sha hash - string
#########################
generate_sha_hash() {
    local -r str="${1:?missing input string}"
    local -r algorithm="${2:-1}"
    printf '%s' "$str" | "sha${algorithm}sum" | awk '{print $1}'
}

########################
# Converts a string to its hexadecimal representation
# Arguments:
#   $1 - string
# Returns:
#   hexadecimal representation of the string
#########################
convert_to_hex() {
    local -r str=${1:?missing input string}
    local -i pos
    local char
    for ((pos = 0; pos < ${#str}; pos++)); do
        char=${str:pos:1}
        # "'c" makes printf emit the character's numeric code, rendered as hex
        printf '%x' "'${char}"
    done
}

########################
# Get boot time
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   Boot time metadata (mtime of /proc)
#########################
get_boot_time() {
    stat /proc --format=%Y
}

########################
# Get machine ID
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   Machine ID
#########################
get_machine_id() {
    local machine_id=""
    [[ -f /etc/machine-id ]] && machine_id="$(< /etc/machine-id)"
    # Fallback to the boot-time, which will at least ensure a unique ID in the current session
    [[ -n "$machine_id" ]] || machine_id="$(get_boot_time)"
    echo "$machine_id"
}

########################
# Get the root partition's disk device ID (e.g. /dev/sda1)
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   Root partition disk ID
#########################
get_disk_device_id() {
    local device_id=""
    if grep -q ^/dev /proc/mounts; then
        device_id="$(grep ^/dev /proc/mounts | awk '$2 == "/" { print $1 }' | tail -1)"
    fi
    # If it could not be autodetected, fallback to /dev/sda1 as a default
    if [[ -z "$device_id" || ! -b "$device_id" ]]; then
        device_id="/dev/sda1"
    fi
    echo "$device_id"
}

########################
# Get the root disk device ID (e.g. /dev/sda)
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   Root disk ID (partition suffix stripped from the partition device)
#########################
get_root_disk_device_id() {
    get_disk_device_id | sed -E 's/p?[0-9]+$//'
}

########################
# Get the root disk size in bytes
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   Root disk size in bytes (empty on failure; errors are swallowed)
#########################
get_root_disk_size() {
    fdisk -l "$(get_root_disk_device_id)" | grep 'Disk.*bytes' | sed -E 's/.*, ([0-9]+) bytes,.*/\1/' || true
}

########################
# Run command as a specific user and group (optional)
# Arguments:
#   $1 - USER(:GROUP) to switch to
#   $2..$n - command to execute
# Returns:
#   Exit code of the specified command
#########################
run_as_user() {
    run_chroot "$@"
}

########################
# Execute command as a specific user and group (optional),
# replacing the current process image
# Arguments:
#   $1 - USER(:GROUP) to switch to
#   $2..$n - command to execute
# Returns:
#   Exit code of the specified command
#########################
exec_as_user() {
    run_chroot --replace-process "$@"
}

########################
# Run a command using chroot
# Arguments:
#   $1 - USER(:GROUP) to switch to
#   $2..$n - command to execute
# Flags:
#   -r | --replace-process - Replace the current process image (optional)
# Returns:
#   Exit code of the specified command
#########################
run_chroot() {
    local userspec
    local user
    local homedir
    local replace=false
    local -r cwd="$(pwd)"

    # Parse and validate flags
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            -r | --replace-process)
                replace=true
                ;;
            --)
                shift
                break
                ;;
            -*)
                stderr_print "unrecognized flag $1"
                return 1
                ;;
            *)
                break
                ;;
        esac
        shift
    done

    # Parse and validate arguments
    if [[ "$#" -lt 2 ]]; then
        echo "expected at least 2 arguments"
        return 1
    else
        userspec=$1
        shift

        # userspec can optionally include the group, so we parse the user
        user=$(echo "$userspec" | cut -d':' -f1)
    fi

    if ! am_i_root; then
        error "Could not switch to '${userspec}': Operation not permitted"
        return 1
    fi

    # Get the HOME directory for the user to switch, as chroot does
    # not properly update this env and some scripts rely on it
    homedir=$(eval echo "~${user}")
    if [[ ! -d $homedir ]]; then
        homedir="${HOME:-/}"
    fi

    # Obtaining value for "$@" indirectly in order to properly support shell parameter expansion
    if [[ "$replace" = true ]]; then
        exec chroot --userspec="$userspec" / bash -c "cd ${cwd}; export HOME=${homedir}; exec \"\$@\"" -- "$@"
    else
        chroot --userspec="$userspec" / bash -c "cd ${cwd}; export HOME=${homedir}; exec \"\$@\"" -- "$@"
    fi
}
# =============================================================================
# diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libpersistence.sh b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libpersistence.sh
# new file mode 100644
# index 0000000000000..af6af64d6dd08
# --- /dev/null
# +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libpersistence.sh
# @@ -0,0 +1,124 @@
# =============================================================================
#!/bin/bash
# Copyright VMware, Inc.
# SPDX-License-Identifier: APACHE-2.0
#
# Bitnami persistence library
# Used for bringing persistence capabilities to applications that don't have clear separation of data and logic

# shellcheck disable=SC1091

# Load Generic Libraries
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libversion.sh
# Functions

########################
# Persist an application directory
# Globals:
#   BITNAMI_ROOT_DIR
#   BITNAMI_VOLUME_DIR
# Arguments:
#   $1 - App folder name
#   $2 - List of app files to persist (separated by ',', ';' or ':')
# Returns:
#   true if all steps succeeded, false otherwise
#########################
persist_app() {
    local -r app="${1:?missing app}"
    # Fix: this was declared as 'local -a files_to_restore' (copy-paste from
    # restore_persisted_app), which left 'files_to_persist' as a global
    local -a files_to_persist
    read -r -a files_to_persist <<< "$(tr ',;:' ' ' <<< "$2")"
    local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
    local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
    # Persist the individual files
    if [[ "${#files_to_persist[@]}" -le 0 ]]; then
        warn "No files are configured to be persisted"
        return
    fi
    pushd "$install_dir" >/dev/null || exit
    local file_to_persist_relative file_to_persist_destination file_to_persist_destination_folder
    local -r tmp_file="/tmp/perms.acl"
    for file_to_persist in "${files_to_persist[@]}"; do
        if [[ ! -f "$file_to_persist" && ! -d "$file_to_persist" ]]; then
            error "Cannot persist '${file_to_persist}' because it does not exist"
            return 1
        fi
        file_to_persist_relative="$(relativize "$file_to_persist" "$install_dir")"
        file_to_persist_destination="${persist_dir}/${file_to_persist_relative}"
        file_to_persist_destination_folder="$(dirname "$file_to_persist_destination")"
        # Get original permissions for existing files, which will be applied later
        # Exclude the root directory with 'sed', to avoid issues when copying the entirety of it to a volume
        getfacl -R "$file_to_persist_relative" | sed -E '/# file: (\..+|[^.])/,$!d' > "$tmp_file"
        # Copy directories to the volume
        ensure_dir_exists "$file_to_persist_destination_folder"
        cp -Lr --preserve=links "$file_to_persist_relative" "$file_to_persist_destination_folder"
        # Restore permissions
        pushd "$persist_dir" >/dev/null || exit
        if am_i_root; then
            setfacl --restore="$tmp_file"
        else
            # When running as non-root, don't change ownership
            setfacl --restore=<(grep -E -v '^# (owner|group):' "$tmp_file")
        fi
        popd >/dev/null || exit
    done
    popd >/dev/null || exit
    rm -f "$tmp_file"
    # Install the persisted files into the installation directory, via symlinks
    restore_persisted_app "$@"
}

########################
# Restore a persisted application directory
# Globals:
#   BITNAMI_ROOT_DIR
#   BITNAMI_VOLUME_DIR
#   FORCE_MAJOR_UPGRADE
# Arguments:
#   $1 - App folder name
#   $2 - List of app files to restore (separated by ',', ';' or ':')
# Returns:
#   true if all steps succeeded, false otherwise
#########################
restore_persisted_app() {
    local -r app="${1:?missing app}"
    local -a files_to_restore
    read -r -a files_to_restore <<< "$(tr ',;:' ' ' <<< "$2")"
    local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
    local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
    # Restore the individual persisted files
    if [[ "${#files_to_restore[@]}" -le 0 ]]; then
        warn "No persisted files are configured to be restored"
        return
    fi
    local file_to_restore_relative file_to_restore_origin file_to_restore_destination
    for file_to_restore in "${files_to_restore[@]}"; do
        file_to_restore_relative="$(relativize "$file_to_restore" "$install_dir")"
        # We use 'realpath --no-symlinks' to ensure that the case of '.' is covered and the directory is removed
        file_to_restore_origin="$(realpath --no-symlinks "${install_dir}/${file_to_restore_relative}")"
        file_to_restore_destination="$(realpath --no-symlinks "${persist_dir}/${file_to_restore_relative}")"
        rm -rf "$file_to_restore_origin"
        ln -sfn "$file_to_restore_destination" "$file_to_restore_origin"
    done
}

########################
# Check if an application directory was already persisted
# Globals:
#   BITNAMI_VOLUME_DIR
# Arguments:
#   $1 - App folder name
# Returns:
#   true if the persisted directory is non-empty, false otherwise
#########################
is_app_initialized() {
    local -r app="${1:?missing app}"
    local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
    # Initialized == the mounted persistence directory already has content
    ! is_mounted_dir_empty "$persist_dir"
}
# =============================================================================
# diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libservice.sh b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libservice.sh
# new file mode 100644
# index 0000000000000..107f54e6b5c91
# --- /dev/null
# +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libservice.sh
# @@ -0,0 +1,496 @@
# =============================================================================
#!/bin/bash
# Copyright VMware, Inc.
# SPDX-License-Identifier: APACHE-2.0
#
# Library for managing services

# shellcheck disable=SC1091

# Load Generic Libraries
. /opt/bitnami/scripts/libvalidations.sh
. /opt/bitnami/scripts/liblog.sh

# Functions

########################
# Read the provided pid file and returns a PID
# Arguments:
#   $1 - Pid file
# Returns:
#   PID (echoed only when the file exists and holds a positive number)
#########################
get_pid_from_file() {
    local pid_file="${1:?pid file is missing}"

    if [[ -f "$pid_file" ]]; then
        if [[ -n "$(< "$pid_file")" ]] && [[ "$(< "$pid_file")" -gt 0 ]]; then
            echo "$(< "$pid_file")"
        fi
    fi
}

########################
# Check if a provided PID corresponds to a running service
# Arguments:
#   $1 - PID
# Returns:
#   Boolean
#########################
is_service_running() {
    local pid="${1:?pid is missing}"

    # Signal 0 only checks for process existence/permission, it sends nothing
    kill -0 "$pid" 2>/dev/null
}
########################
# Stop a service by sending a termination signal to its pid
# Arguments:
#   $1 - Pid file
#   $2 - Signal number (optional)
# Returns:
#   None
#########################
stop_service_using_pid() {
    local pid_file="${1:?pid file is missing}"
    local signal="${2:-}"
    local pid

    pid="$(get_pid_from_file "$pid_file")"
    # Nothing to do when there is no PID or the process is already gone
    [[ -z "$pid" ]] || ! is_service_running "$pid" && return

    if [[ -n "$signal" ]]; then
        kill "-${signal}" "$pid"
    else
        kill "$pid"
    fi

    # Wait up to 10 seconds for the process to exit
    local counter=10
    while [[ "$counter" -ne 0 ]] && is_service_running "$pid"; do
        sleep 1
        counter=$((counter - 1))
    done
}

########################
# Start cron daemon
# Arguments:
#   None
# Returns:
#   true if started correctly, false otherwise
#########################
cron_start() {
    if [[ -x "/usr/sbin/cron" ]]; then
        /usr/sbin/cron
    elif [[ -x "/usr/sbin/crond" ]]; then
        /usr/sbin/crond
    else
        false
    fi
}

########################
# Generate a cron configuration file for a given service
# Arguments:
#   $1 - Service name
#   $2 - Command
# Flags:
#   --run-as - User to run as (default: root)
#   --schedule - Cron schedule configuration (default: * * * * *)
#   --no-clean - Append to the existing file instead of overwriting it
# Returns:
#   None
#########################
generate_cron_conf() {
    local service_name="${1:?service name is missing}"
    local cmd="${2:?command is missing}"
    local run_as="root"
    local schedule="* * * * *"
    local clean="true"

    # Parse optional CLI flags
    shift 2
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            --run-as)
                shift
                run_as="$1"
                ;;
            --schedule)
                shift
                schedule="$1"
                ;;
            --no-clean)
                clean="false"
                ;;
            *)
                echo "Invalid command line flag ${1}" >&2
                return 1
                ;;
        esac
        shift
    done

    mkdir -p /etc/cron.d
    if "$clean"; then
        # NOTE(review): the heredoc body below was destroyed by whitespace
        # mangling in this chunk; only 'cat > ... <' and '>> /etc/cron.d/...'
        # survive. Reconstructed from the surviving fragments -- verify
        # against the upstream Bitnami libservice.sh before merging.
        cat > "/etc/cron.d/${service_name}" <<EOF
# Copyright VMware, Inc.
# SPDX-License-Identifier: APACHE-2.0

${schedule} ${run_as} ${cmd}
EOF
    else
        echo "${schedule} ${run_as} ${cmd}" >> /etc/cron.d/"$service_name"
    fi
}

########################
# Remove a cron configuration file for a given service
# Arguments:
#   $1 - Service name
# Returns:
#   None
#########################
remove_cron_conf() {
    local service_name="${1:?service name is missing}"
    # Fix: generate_cron_conf writes under /etc/cron.d, but this function
    # previously removed from /etc/monit/conf.d (copy-paste slip from the
    # monit helpers), so cron entries were never actually removed
    local cron_conf_dir="/etc/cron.d"
    rm -f "${cron_conf_dir}/${service_name}"
}
########################
# Generate a monit configuration file for a given service
# Arguments:
#   $1 - Service name
#   $2 - Pid file
#   $3 - Start command
#   $4 - Stop command
# Flags:
#   --disable - Whether to disable the monit configuration
# Returns:
#   None
#########################
generate_monit_conf() {
    local service_name="${1:?service name is missing}"
    local pid_file="${2:?pid file is missing}"
    local start_command="${3:?start command is missing}"
    local stop_command="${4:?stop command is missing}"
    local monit_conf_dir="/etc/monit/conf.d"
    local disabled="no"

    # Parse optional CLI flags
    shift 4
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            --disable)
                disabled="yes"
                ;;
            *)
                echo "Invalid command line flag ${1}" >&2
                return 1
                ;;
        esac
        shift
    done

    # A ".disabled" suffix keeps the file out of monit's include glob
    is_boolean_yes "$disabled" && conf_suffix=".disabled"
    mkdir -p "$monit_conf_dir"
    # NOTE(review): the heredoc body below was destroyed by whitespace
    # mangling in this chunk; reconstructed from the upstream Bitnami
    # libservice.sh template -- verify against upstream before merging.
    cat > "${monit_conf_dir}/${service_name}.conf${conf_suffix:-}" <<EOF
check process ${service_name}
  with pidfile "${pid_file}"
  start program = "${start_command}"
  stop program = "${stop_command}"
EOF
}

########################
# Generate a logrotate configuration file
# Arguments:
#   $1 - Service name
#   $2 - Path to log file(s)
# Flags:
#   --period - Rotation period (default: weekly)
#   --rotations - Number of rotations to keep (default: 150)
#   --extra - Extra directives to append inside the block (default: empty)
# Returns:
#   None
#########################
generate_logrotate_conf() {
    local service_name="${1:?service name is missing}"
    local log_path="${2:?log path is missing}"
    # NOTE(review): this function's signature, locals and flag parsing were
    # destroyed by whitespace mangling (only the error echo, the mkdir and
    # the heredoc body survive); reconstructed from the surviving fragments
    # and the upstream Bitnami libservice.sh -- verify before merging.
    local period="weekly"
    local rotations="150"
    local extra=""
    local logrotate_conf_dir="/etc/logrotate.d"
    local var_name
    # Parse optional CLI flags
    shift 2
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            --period | --rotations | --extra)
                var_name="$(echo "$1" | sed -e "s/^--//" -e "s/-/_/g")"
                shift
                declare "$var_name"="${1:?"${var_name} is missing"}"
                ;;
            *)
                echo "Invalid command line flag ${1}" >&2
                return 1
                ;;
        esac
        shift
    done

    mkdir -p "$logrotate_conf_dir"
    cat <<EOF > "${logrotate_conf_dir}/${service_name}"
# Copyright VMware, Inc.
# SPDX-License-Identifier: APACHE-2.0

${log_path} {
  ${period}
  rotate ${rotations}
  dateext
  compress
  copytruncate
  missingok
$(indent "$extra" 2)
}
EOF
}

########################
# Remove a logrotate configuration file
# Arguments:
#   $1 - Service name
# Returns:
#   None
#########################
remove_logrotate_conf() {
    local service_name="${1:?service name is missing}"
    local logrotate_conf_dir="/etc/logrotate.d"
    rm -f "${logrotate_conf_dir}/${service_name}"
}
########################
# Generate a Systemd configuration file
# Arguments:
#   $1 - Service name
# Flags:
#   --custom-service-content - Custom content to add to the [service] block
#   --environment - Environment variable to define (multiple --environment options may be passed)
#   --environment-file - Text file with environment variables (multiple --environment-file options may be passed)
#   --exec-start - Start command (required)
#   --exec-start-pre - Pre-start command (optional)
#   --exec-start-post - Post-start command (optional)
#   --exec-stop - Stop command (optional)
#   --exec-reload - Reload command (optional)
#   --group - System group to start the service with
#   --name - Service full name (e.g. Apache HTTP Server, defaults to $1)
#   --restart - When to restart the Systemd service after being stopped (defaults to always)
#   --pid-file - Service PID file
#   --standard-output - File where to print stdout output
#   --standard-error - File where to print stderr output
#   --success-exit-status - Exit code that indicates a successful shutdown
#   --type - Systemd unit type (defaults to forking)
#   --user - System user to start the service with
#   --working-directory - Working directory at which to start the service
#   --limit-<RESOURCE> - Resource limit (rendered as Limit<RESOURCE>=value)
# Returns:
#   None
#########################
generate_systemd_conf() {
    local -r service_name="${1:?service name is missing}"
    local -r systemd_units_dir="/etc/systemd/system"
    local -r service_file="${systemd_units_dir}/bitnami.${service_name}.service"
    # Default values
    local name="$service_name"
    local type="forking"
    local user=""
    local group=""
    local environment=""
    local environment_file=""
    local exec_start=""
    local exec_start_pre=""
    local exec_start_post=""
    local exec_stop=""
    local exec_reload=""
    local restart="always"
    local pid_file=""
    local standard_output="journal"
    local standard_error=""
    local limits_content=""
    local success_exit_status=""
    local custom_service_content=""
    local working_directory=""
    # Fix: 'var_name' was an implicit global; declare it local
    local var_name
    # Parse CLI flags
    shift
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            --name \
            | --type \
            | --user \
            | --group \
            | --exec-start \
            | --exec-stop \
            | --exec-reload \
            | --restart \
            | --pid-file \
            | --standard-output \
            | --standard-error \
            | --success-exit-status \
            | --custom-service-content \
            | --working-directory \
            )
                # Map the flag name onto the matching local variable
                var_name="$(echo "$1" | sed -e "s/^--//" -e "s/-/_/g")"
                shift
                declare "$var_name"="${1:?"${var_name} value is missing"}"
                ;;
            --limit-*)
                [[ -n "$limits_content" ]] && limits_content+=$'\n'
                var_name="${1//--limit-}"
                shift
                limits_content+="Limit${var_name^^}=${1:?"--limit-${var_name} value is missing"}"
                ;;
            --exec-start-pre)
                shift
                [[ -n "$exec_start_pre" ]] && exec_start_pre+=$'\n'
                exec_start_pre+="ExecStartPre=${1:?"--exec-start-pre value is missing"}"
                ;;
            --exec-start-post)
                shift
                [[ -n "$exec_start_post" ]] && exec_start_post+=$'\n'
                exec_start_post+="ExecStartPost=${1:?"--exec-start-post value is missing"}"
                ;;
            --environment)
                shift
                # It is possible to add multiple environment lines
                [[ -n "$environment" ]] && environment+=$'\n'
                environment+="Environment=${1:?"--environment value is missing"}"
                ;;
            --environment-file)
                shift
                # It is possible to add multiple environment-file lines
                [[ -n "$environment_file" ]] && environment_file+=$'\n'
                environment_file+="EnvironmentFile=${1:?"--environment-file value is missing"}"
                ;;
            *)
                echo "Invalid command line flag ${1}" >&2
                return 1
                ;;
        esac
        shift
    done
    # Validate inputs
    local error="no"
    if [[ -z "$exec_start" ]]; then
        error "The --exec-start option is required"
        error="yes"
    fi
    if [[ "$error" != "no" ]]; then
        return 1
    fi
    # Generate the Systemd unit
    # NOTE(review): the heredoc bodies in the remainder of this function were
    # destroyed by whitespace mangling; the unit-file skeleton below is
    # reconstructed from the surviving variable references -- verify against
    # the upstream Bitnami libservice.sh before merging.
    cat > "$service_file" <<EOF
[Unit]
Description=Bitnami service for ${name}
# Starting/stopping the main bitnami service should cause the same effect for this service
PartOf=bitnami.service

[Service]
Type=${type}
EOF
    if [[ -n "$working_directory" ]]; then
        cat >> "$service_file" <<< "WorkingDirectory=${working_directory}"
    fi
    if [[ -n "$exec_start_pre" ]]; then
        # This variable may contain multiple ExecStartPre= directives
        cat >> "$service_file" <<< "$exec_start_pre"
    fi
    if [[ -n "$exec_start" ]]; then
        cat >> "$service_file" <<< "ExecStart=${exec_start}"
    fi
    if [[ -n "$exec_start_post" ]]; then
        # This variable may contain multiple ExecStartPost= directives
        cat >> "$service_file" <<< "$exec_start_post"
    fi
    # Optional stop and reload commands
    if [[ -n "$exec_stop" ]]; then
        cat >> "$service_file" <<< "ExecStop=${exec_stop}"
    fi
    if [[ -n "$exec_reload" ]]; then
        cat >> "$service_file" <<< "ExecReload=${exec_reload}"
    fi
    # User and group
    if [[ -n "$user" ]]; then
        cat >> "$service_file" <<< "User=${user}"
    fi
    if [[ -n "$group" ]]; then
        cat >> "$service_file" <<< "Group=${group}"
    fi
    # PID file allows to determine if the main process is running properly (for Restart=always)
    if [[ -n "$pid_file" ]]; then
        cat >> "$service_file" <<< "PIDFile=${pid_file}"
    fi
    if [[ -n "$restart" ]]; then
        cat >> "$service_file" <<< "Restart=${restart}"
    fi
    # Environment flags
    if [[ -n "$environment" ]]; then
        # This variable may contain multiple Environment= directives
        cat >> "$service_file" <<< "$environment"
    fi
    if [[ -n "$environment_file" ]]; then
        # This variable may contain multiple EnvironmentFile= directives
        cat >> "$service_file" <<< "$environment_file"
    fi
    # Logging
    if [[ -n "$standard_output" ]]; then
        cat >> "$service_file" <<< "StandardOutput=${standard_output}"
    fi
    if [[ -n "$standard_error" ]]; then
        cat >> "$service_file" <<< "StandardError=${standard_error}"
    fi
    if [[ -n "$custom_service_content" ]]; then
        # This variable may contain multiple miscellaneous directives
        cat >> "$service_file" <<< "$custom_service_content"
    fi
    if [[ -n "$success_exit_status" ]]; then
        cat >> "$service_file" <<EOF
SuccessExitStatus=${success_exit_status}
EOF
    fi
    if [[ -n "$limits_content" ]]; then
        cat >> "$service_file" <<EOF
${limits_content}
EOF
    fi
    cat >> "$service_file" <<EOF

[Install]
WantedBy=multi-user.target
EOF
}
# =============================================================================
# NOTE(review): a chunk was destroyed by whitespace mangling here. It contained
# the diff header for .../prebuildfs/opt/bitnami/scripts/libvalidations.sh,
# that file's license header / library loads, and the is_int / is_positive_int
# validators (only the tail "...0 )); then true else false fi }" survives).
# The two validators below are reconstructed from the surviving fragment and
# the shape of the sibling validators -- verify against upstream before merging.
# =============================================================================

########################
# Check if the provided argument is an integer
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_int() {
    local -r int="${1:-}"
    if [[ "$int" =~ ^-?[0-9]+$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a positive integer
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_positive_int() {
    local -r int="${1:-}"
    if is_int "$int" && (( int >= 0 )); then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a boolean or is the string 'yes/true'
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_boolean_yes() {
    local -r bool="${1:-}"
    # comparison is performed without regard to the case of alphabetic characters
    # NOTE(review): nocasematch is enabled and never restored, so it leaks into
    # the caller's shell; kept as-is to preserve existing behavior
    shopt -s nocasematch
    if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a boolean yes/no value
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_yes_no_value() {
    local -r bool="${1:-}"
    if [[ "$bool" =~ ^(yes|no)$ ]]; then
        true
    else
        false
    fi
}
########################
# Check if the provided argument is a boolean true/false value
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_true_false_value() {
    local -r bool="${1:-}"
    if [[ "$bool" =~ ^(true|false)$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a boolean 1/0 value
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_1_0_value() {
    local -r bool="${1:-}"
    if [[ "$bool" =~ ^[10]$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is an empty string or not defined
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_empty_value() {
    local -r val="${1:-}"
    if [[ -z "$val" ]]; then
        true
    else
        false
    fi
}

########################
# Validate if the provided argument is a valid port
# Arguments:
#   $1 - Port to validate
# Flags:
#   -unprivileged - Reject privileged ports (< 1024)
# Returns:
#   Boolean and error message (return codes: 1 usage error, 2 invalid value,
#   3 privileged port requested)
#########################
validate_port() {
    local value
    local unprivileged=0

    # Parse flags
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            -unprivileged)
                unprivileged=1
                ;;
            --)
                shift
                break
                ;;
            -*)
                stderr_print "unrecognized flag $1"
                return 1
                ;;
            *)
                break
                ;;
        esac
        shift
    done

    if [[ "$#" -gt 1 ]]; then
        echo "too many arguments provided"
        return 2
    elif [[ "$#" -eq 0 ]]; then
        stderr_print "missing port argument"
        return 1
    else
        value=$1
    fi

    if [[ -z "$value" ]]; then
        echo "the value is empty"
        return 1
    else
        if ! is_int "$value"; then
            echo "value is not an integer"
            return 2
        elif [[ "$value" -lt 0 ]]; then
            echo "negative value provided"
            return 2
        elif [[ "$value" -gt 65535 ]]; then
            echo "requested port is greater than 65535"
            return 2
        elif [[ "$unprivileged" = 1 && "$value" -lt 1024 ]]; then
            echo "privileged port requested"
            return 3
        fi
    fi
}

########################
# Validate if the provided argument is a valid IPv4 address
# Arguments:
#   $1 - IP to validate
# Returns:
#   Boolean
#########################
validate_ipv4() {
    local ip="${1:?ip is missing}"
    local stat=1

    if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
        read -r -a ip_array <<< "$(tr '.' ' ' <<< "$ip")"
        # Each octet must be in the 0-255 range
        [[ ${ip_array[0]} -le 255 && ${ip_array[1]} -le 255 \
            && ${ip_array[2]} -le 255 && ${ip_array[3]} -le 255 ]]
        stat=$?
    fi
    return $stat
}

########################
# Validate a string format
# Arguments:
#   $1 - String to validate
# Flags:
#   -min-length - Minimum allowed length (default: unchecked)
#   -max-length - Maximum allowed length (default: unchecked)
# Returns:
#   Boolean and error message
#########################
validate_string() {
    local string
    local min_length=-1
    local max_length=-1

    # Parse flags
    while [ "$#" -gt 0 ]; do
        case "$1" in
            -min-length)
                shift
                min_length=${1:-}
                ;;
            -max-length)
                shift
                max_length=${1:-}
                ;;
            --)
                shift
                break
                ;;
            -*)
                stderr_print "unrecognized flag $1"
                return 1
                ;;
            *)
                break
                ;;
        esac
        shift
    done

    if [ "$#" -gt 1 ]; then
        stderr_print "too many arguments provided"
        return 2
    elif [ "$#" -eq 0 ]; then
        stderr_print "missing string"
        return 1
    else
        string=$1
    fi

    if [[ "$min_length" -ge 0 ]] && [[ "${#string}" -lt "$min_length" ]]; then
        echo "string length is less than $min_length"
        return 1
    fi
    if [[ "$max_length" -ge 0 ]] && [[ "${#string}" -gt "$max_length" ]]; then
        # Fix: error message previously read "great than"
        echo "string length is greater than $max_length"
        return 1
    fi
}
# =============================================================================
# diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libversion.sh
# (diff header, continued) b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libversion.sh
# new file mode 100644
# index 0000000000000..6ca71ac7bdbb7
# --- /dev/null
# +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libversion.sh
# @@ -0,0 +1,51 @@
# =============================================================================
#!/bin/bash
# Copyright VMware, Inc.
# SPDX-License-Identifier: APACHE-2.0
#
# Library for managing versions strings

# shellcheck disable=SC1091

# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh

# Functions
########################
# Gets semantic version
# Arguments:
#   $1 - version: string to extract major.minor.patch
#   $2 - section: 1 to extract major, 2 to extract minor, 3 to extract patch
# Returns:
#   The requested version section (empty when absent from the version string)
#########################
get_sematic_version () {
    local version="${1:?version is required}"
    local section="${2:?section is required}"
    local -a version_sections

    # Regex to parse versions: x.y.z (minor and patch groups are optional)
    local -r regex='([0-9]+)(\.([0-9]+)(\.([0-9]+))?)?'

    if [[ "$version" =~ $regex ]]; then
        local match_idx=1
        local out_idx=1
        local -r total=${#BASH_REMATCH[*]}

        # Keep only the purely numeric capture groups; the composite groups
        # produced by the nested parentheses start with '.' and are skipped
        while [[ $match_idx -lt $total ]]; do
            if [[ -n "${BASH_REMATCH[$match_idx]}" ]] && [[ "${BASH_REMATCH[$match_idx]:0:1}" != '.' ]]; then
                version_sections[out_idx]="${BASH_REMATCH[$match_idx]}"
                ((out_idx++))
            fi
            ((match_idx++))
        done

        local number_regex='^[0-9]+$'
        if [[ "$section" =~ $number_regex ]] && (( section > 0 )) && (( section <= 3 )); then
            echo "${version_sections[$section]}"
            return
        else
            stderr_print "Section allowed values are: 1, 2, and 3"
            return 1
        fi
    fi
}
# =============================================================================
# diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libwebserver.sh b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libwebserver.sh
# new file mode 100644
# index 0000000000000..8023f9b0549a0
# --- /dev/null
# +++ b/bitnami/opensearch/1/debian-11/prebuildfs/opt/bitnami/scripts/libwebserver.sh
# @@ -0,0 +1,476 @@
# =============================================================================
#!/bin/bash
# Copyright VMware, Inc.
# SPDX-License-Identifier: APACHE-2.0
#
# Bitnami web server handler library

# shellcheck disable=SC1090,SC1091

# Load generic libraries
. /opt/bitnami/scripts/liblog.sh

########################
# Execute a command (or list of commands) with the web server environment and library loaded
# Globals:
#   *
# Arguments:
#   $1 - web server name
#   $2..$n - command to execute
# Returns:
#   None
#########################
web_server_execute() {
    local -r server="${1:?missing web server}"
    shift
    # Run program in sub-shell to avoid web server environment getting loaded when not necessary
    (
        . "/opt/bitnami/scripts/lib${server}.sh"
        . "/opt/bitnami/scripts/${server}-env.sh"
        "$@"
    )
}

########################
# Prints the list of enabled web servers
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   None
#########################
web_server_list() {
    # A web server counts as "enabled" when its env script is installed
    local -r -a supported_web_servers=(apache nginx)
    local -a existing_web_servers=()
    for web_server in "${supported_web_servers[@]}"; do
        [[ -f "/opt/bitnami/scripts/${web_server}-env.sh" ]] && existing_web_servers+=("$web_server")
    done
    echo "${existing_web_servers[@]:-}"
}

########################
# Prints the currently-enabled web server type (only one, in order of preference)
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   None
#########################
web_server_type() {
    local -a web_servers
    read -r -a web_servers <<< "$(web_server_list)"
    echo "${web_servers[0]:-}"
}

########################
# Validate that a supported web server is configured
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   None
#########################
web_server_validate() {
    local error_code=0
    local supported_web_servers=("apache" "nginx")

    # Auxiliary functions
    print_validation_error() {
        error "$1"
        error_code=1
    }

    if [[ -z "$(web_server_type)" || ! " ${supported_web_servers[*]} " == *" $(web_server_type) "* ]]; then
        print_validation_error "Could not detect any supported web servers. It must be one of: ${supported_web_servers[*]}"
    elif ! web_server_execute "$(web_server_type)" type -t "is_$(web_server_type)_running" >/dev/null; then
        print_validation_error "Could not load the $(web_server_type) web server library from /opt/bitnami/scripts. Check that it exists and is readable."
    fi

    return "$error_code"
}

########################
# Check whether the web server is running
# Globals:
#   *
# Arguments:
#   None
# Returns:
#   true if the web server is running, false otherwise
#########################
is_web_server_running() {
    "is_$(web_server_type)_running"
}

########################
# Start web server
# Globals:
#   *
# Arguments:
#   None
# Returns:
#   None
#########################
web_server_start() {
    info "Starting $(web_server_type) in background"
    if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then
        systemctl start "bitnami.$(web_server_type).service"
    else
        "${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/start.sh"
    fi
}

########################
# Stop web server
# Globals:
#   *
# Arguments:
#   None
# Returns:
#   None
#########################
web_server_stop() {
    info "Stopping $(web_server_type)"
    if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then
        systemctl stop "bitnami.$(web_server_type).service"
    else
        "${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/stop.sh"
    fi
}

########################
# Restart web server
# Globals:
#   *
# Arguments:
#   None
# Returns:
#   None
#########################
web_server_restart() {
    info "Restarting $(web_server_type)"
    if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then
        systemctl restart "bitnami.$(web_server_type).service"
    else
        "${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/restart.sh"
    fi
}

########################
# Reload web server
# Globals:
#   *
# Arguments:
#   None
# Returns:
#   None
#########################
web_server_reload() {
    if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then
        systemctl reload "bitnami.$(web_server_type).service"
    else
        "${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/reload.sh"
    fi
}

########################
# Ensure a web server application configuration exists (i.e. Apache virtual host format or NGINX server block)
# It serves as a wrapper for the specific web server function
# Globals:
#   *
# Arguments:
#   $1 - App name
# Flags:
#   --type - Application type, which has an effect on which configuration template to use
#   --hosts - Host listen addresses
#   --server-name - Server name
#   --server-aliases - Server aliases
#   --allow-remote-connections - Whether to allow remote connections or to require local connections
#   --disable - Whether to render server configurations with a .disabled prefix
#   --disable-http - Whether to render the app's HTTP server configuration with a .disabled prefix
#   --disable-https - Whether to render the app's HTTPS server configuration with a .disabled prefix
#   --http-port - HTTP port number
#   --https-port - HTTPS port number
#   --document-root - Path to document root directory
# Apache-specific flags:
#   --apache-additional-configuration - Additional vhost configuration (no default)
#   --apache-additional-http-configuration - Additional HTTP vhost configuration (no default)
#   --apache-additional-https-configuration - Additional HTTPS vhost configuration (no default)
#   --apache-before-vhost-configuration - Configuration to add before the directive (no default)
#   --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no' and type is not defined)
#   --apache-extra-directory-configuration - Extra configuration for the document root directory
#   --apache-proxy-address - Address where to proxy requests
#   --apache-proxy-configuration - Extra configuration for the proxy
#   --apache-proxy-http-configuration - Extra configuration for the proxy HTTP vhost
#   --apache-proxy-https-configuration - Extra configuration for the proxy HTTPS vhost
#   --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup (only allowed when type is not defined)
# NGINX-specific flags:
#   --nginx-additional-configuration - Additional server block configuration (no default)
#   --nginx-external-configuration - Configuration external to server block (no default)
# Returns:
#   true if the configuration was enabled, false otherwise
########################
ensure_web_server_app_configuration_exists() {
    local app="${1:?missing app}"
    shift
    local -a apache_args nginx_args web_servers args_var
    apache_args=("$app")
    nginx_args=("$app")
    # Validate arguments
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            # Common flags
            --disable \
            | --disable-http \
            | --disable-https \
            )
                apache_args+=("$1")
                nginx_args+=("$1")
                ;;
            --hosts \
            | --server-name \
            | --server-aliases \
            | --type \
            | --allow-remote-connections \
            | --http-port \
            | --https-port \
            | --document-root \
            )
                apache_args+=("$1" "${2:?missing value}")
                nginx_args+=("$1" "${2:?missing value}")
                shift
                ;;

            # Specific Apache flags
            --apache-additional-configuration \
            | --apache-additional-http-configuration \
            | --apache-additional-https-configuration \
            | --apache-before-vhost-configuration \
            | --apache-allow-override \
            | --apache-extra-directory-configuration \
            | --apache-proxy-address \
            | --apache-proxy-configuration \
            | --apache-proxy-http-configuration \
            | --apache-proxy-https-configuration \
            | --apache-move-htaccess \
            )
                # Strip the "apache-" prefix so the per-server function sees its own flag name
                apache_args+=("${1//apache-/}" "${2:?missing value}")
                shift
                ;;

            # Specific NGINX flags
            --nginx-additional-configuration \
            | --nginx-external-configuration)
                nginx_args+=("${1//nginx-/}" "${2:?missing value}")
                shift
                ;;

            *)
                echo "Invalid command line flag $1" >&2
                return 1
                ;;
        esac
        shift
    done
    read -r -a web_servers <<< "$(web_server_list)"
    for web_server in "${web_servers[@]}"; do
        # Indirect expansion picks the argument array matching this web server
        args_var="${web_server}_args[@]"
        web_server_execute "$web_server" "ensure_${web_server}_app_configuration_exists" "${!args_var}"
    done
}

########################
# Ensure a web server application configuration does not exist anymore (i.e. Apache virtual host format or NGINX server block)
# It serves as a wrapper for the specific web server function
# Globals:
#   *
# Arguments:
#   $1 - App name
# Returns:
#   true if the configuration was disabled, false otherwise
########################
ensure_web_server_app_configuration_not_exists() {
    local app="${1:?missing app}"
    local -a web_servers
    read -r -a web_servers <<< "$(web_server_list)"
    for web_server in "${web_servers[@]}"; do
        web_server_execute "$web_server" "ensure_${web_server}_app_configuration_not_exists" "$app"
    done
}

########################
# Ensure the web server loads the configuration for an application in a URL prefix
# It serves as a wrapper for the specific web server function
# Globals:
#   *
# Arguments:
#   $1 - App name
# Flags:
#   --allow-remote-connections - Whether to allow remote connections or to require local connections
#   --document-root - Path to document root directory
#   --prefix - URL prefix from where it will be accessible (i.e.
/myapp) +# --type - Application type, which has an effect on what configuration template will be used +# Apache-specific flags: +# --apache-additional-configuration - Additional vhost configuration (no default) +# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no') +# --apache-extra-directory-configuration - Extra configuration for the document root directory +# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup +# NGINX-specific flags: +# --nginx-additional-configuration - Additional server block configuration (no default) +# Returns: +# true if the configuration was enabled, false otherwise +######################## +ensure_web_server_prefix_configuration_exists() { + local app="${1:?missing app}" + shift + local -a apache_args nginx_args web_servers args_var + apache_args=("$app") + nginx_args=("$app") + # Validate arguments + while [[ "$#" -gt 0 ]]; do + case "$1" in + # Common flags + --allow-remote-connections \ + | --document-root \ + | --prefix \ + | --type \ + ) + apache_args+=("$1" "${2:?missing value}") + nginx_args+=("$1" "${2:?missing value}") + shift + ;; + + # Specific Apache flags + --apache-additional-configuration \ + | --apache-allow-override \ + | --apache-extra-directory-configuration \ + | --apache-move-htaccess \ + ) + apache_args+=("${1//apache-/}" "$2") + shift + ;; + + # Specific NGINX flags + --nginx-additional-configuration) + nginx_args+=("${1//nginx-/}" "$2") + shift + ;; + + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + read -r -a web_servers <<< "$(web_server_list)" + for web_server in "${web_servers[@]}"; do + args_var="${web_server}_args[@]" + web_server_execute "$web_server" "ensure_${web_server}_prefix_configuration_exists" "${!args_var}" + done +} + +######################## +# Ensure a web server application configuration is updated with the runtime configuration (i.e. 
ports) +# It serves as a wrapper for the specific web server function +# Globals: +# * +# Arguments: +# $1 - App name +# Flags: +# --hosts - Host listen addresses +# --server-name - Server name +# --server-aliases - Server aliases +# --enable-http - Enable HTTP app configuration (if not enabled already) +# --enable-https - Enable HTTPS app configuration (if not enabled already) +# --disable-http - Disable HTTP app configuration (if not disabled already) +# --disable-https - Disable HTTPS app configuration (if not disabled already) +# --http-port - HTTP port number +# --https-port - HTTPS port number +# Returns: +# true if the configuration was updated, false otherwise +######################## +web_server_update_app_configuration() { + local app="${1:?missing app}" + shift + local -a args web_servers + args=("$app") + # Validate arguments + while [[ "$#" -gt 0 ]]; do + case "$1" in + # Common flags + --enable-http \ + | --enable-https \ + | --disable-http \ + | --disable-https \ + ) + args+=("$1") + ;; + --hosts \ + | --server-name \ + | --server-aliases \ + | --http-port \ + | --https-port \ + ) + args+=("$1" "${2:?missing value}") + shift + ;; + + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + read -r -a web_servers <<< "$(web_server_list)" + for web_server in "${web_servers[@]}"; do + web_server_execute "$web_server" "${web_server}_update_app_configuration" "${args[@]}" + done +} + +######################## +# Enable loading page, which shows users that the initialization process is not yet completed +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_enable_loading_page() { + ensure_web_server_app_configuration_exists "__loading" --hosts "_default_" \ + --apache-additional-configuration " +# Show a HTTP 503 Service Unavailable page by default +RedirectMatch 503 ^/$ +# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes +ErrorDocument 
########################
# Disable the loading page, once the initialization process is completed
# Globals:
#   *
# Arguments:
#   None
# Returns:
#   None
#########################
web_server_disable_install_page() {
    ensure_web_server_app_configuration_not_exists "__loading"
    web_server_reload
}
diff --git a/bitnami/opensearch/1/debian-11/prebuildfs/usr/sbin/install_packages b/bitnami/opensearch/1/debian-11/prebuildfs/usr/sbin/install_packages
new file mode 100755
index 0000000000000..acbc3173208c0
--- /dev/null
+++ b/bitnami/opensearch/1/debian-11/prebuildfs/usr/sbin/install_packages
@@ -0,0 +1,27 @@
#!/bin/sh
# Copyright VMware, Inc.
# SPDX-License-Identifier: APACHE-2.0
#
# Install the given apt packages, retrying on transient failures, and clean the
# apt caches afterwards so the layer stays small.
set -eu

n=0
max=2
export DEBIAN_FRONTEND=noninteractive

until [ $n -gt $max ]; do
    # Temporarily allow failure so we can capture the exit code and retry
    set +e
    (
        apt-get update -qq &&
        apt-get install -y --no-install-recommends "$@"
    )
    CODE=$?
    set -e
    if [ $CODE -eq 0 ]; then
        break
    fi
    if [ $n -eq $max ]; then
        # Out of retries: propagate apt's exit code
        exit $CODE
    fi
    echo "apt failed, retrying"
    n=$(($n + 1))
done
# Drop apt metadata and package archives to keep the image layer small
apt-get clean && rm -rf /var/lib/apt/lists /var/cache/apt/archives
diff --git a/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/java/entrypoint.sh b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/java/entrypoint.sh
new file mode 100755
index 0000000000000..c3a1e2383fa19
--- /dev/null
+++ b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/java/entrypoint.sh
@@ -0,0 +1,19 @@
#!/bin/bash
# Copyright VMware, Inc.
# SPDX-License-Identifier: APACHE-2.0
#
# Java container entrypoint: print the Bitnami welcome banner, then hand PID 1
# over to the requested command.

# shellcheck disable=SC1091

set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purposes

# Load libraries
. /opt/bitnami/scripts/libbitnami.sh
. /opt/bitnami/scripts/liblog.sh

print_welcome_page

echo ""
# exec so the command replaces the shell and receives signals as PID 1
exec "$@"
diff --git a/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/java/postunpack.sh b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/java/postunpack.sh
new file mode 100755
index 0000000000000..52dbf4f13673b
--- /dev/null
+++ b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/java/postunpack.sh
@@ -0,0 +1,26 @@
#!/bin/bash
# Copyright VMware, Inc.
# SPDX-License-Identifier: APACHE-2.0

# shellcheck disable=SC1091

set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purposes

# Load libraries
. /opt/bitnami/scripts/libfile.sh
. /opt/bitnami/scripts/liblog.sh

#
# Java post-unpack operations
#

# Override default files in the Java security directory. This is used for
# custom base images (with custom CA certificates or block lists is used)

if [[ -n "${JAVA_EXTRA_SECURITY_DIR:-}" ]] && ! is_dir_empty "$JAVA_EXTRA_SECURITY_DIR"; then
    info "Adding custom CAs to the Java security folder"
    # -L dereferences symlinks so mounted certs are copied as regular files
    cp -Lr "${JAVA_EXTRA_SECURITY_DIR}/." /opt/bitnami/java/lib/security
fi
diff --git a/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/libopensearch.sh b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/libopensearch.sh
new file mode 100644
index 0000000000000..8db0c675e6a91
--- /dev/null
+++ b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/libopensearch.sh
@@ -0,0 +1,952 @@
#!/bin/bash
# Copyright VMware, Inc.
# SPDX-License-Identifier: APACHE-2.0
#
# Bitnami Opensearch library

# shellcheck disable=SC1090,SC1091

# Load Generic Libraries
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libfile.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libnet.sh
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/libversion.sh
. /opt/bitnami/scripts/libservice.sh
. /opt/bitnami/scripts/libvalidations.sh
# Opensearch Functions

########################
# Bootstrap Opensearch Security by running the securityadmin.sh tool
# Globals:
#   DB_*
#   OPENSEARCH_SECURITY_*
# Arguments:
#   None
# Returns:
#   None
#########################
opensearch_security_bootstrap() {
    local failure=0
    local cmd=("${OPENSEARCH_SECURITY_DIR}/tools/securityadmin.sh" "-nhnv")

    cmd+=("-cd" "$OPENSEARCH_SECURITY_CONF_DIR")
    cmd+=("-cn" "$DB_CLUSTER_NAME")
    cmd+=("-h" "$(get_elasticsearch_hostname)")
    cmd+=("-cacert" "$DB_CA_CERT_LOCATION")
    cmd+=("-cert" "$OPENSEARCH_SECURITY_ADMIN_CERT_LOCATION")
    cmd+=("-key" "$OPENSEARCH_SECURITY_ADMIN_KEY_LOCATION")

    # securityadmin.sh requires a running node; start, run, stop
    elasticsearch_start

    info "Running Opensearch Admin tool..."
    "${cmd[@]}" || failure=$?
    elasticsearch_stop

    return "$failure"
}

########################
# Write the username information inside the Opendistro Security internal_users.yml configuration file
# Globals:
#   DB_*
#   OPENSEARCH_SECURITY_*
# Arguments:
#   $1 - username
#   $2 - password (hashed before it is written)
#   $3 - reserved flag (true/false)
#   $4 - backend roles (comma/semicolon-separated, optional)
#   $5 - attributes (comma/semicolon-separated, optional)
#   $6 - description (optional)
# Returns:
#   None
#########################
opensearch_security_internal_user_set() {
    local username="${1:?missing key}"
    local password="${2:?missing key}"
    local reserved="${3:?missing key}"
    read -r -a backend_roles <<<"$(tr ',;' ' ' <<<"${4:-}")"
    read -r -a attributes <<<"$(tr ',;' ' ' <<<"${5:-}")"
    local description="${6:-}"

    local hash

    # hash.sh prints banner lines starting with '**'; strip them to keep only the hash
    hash=$("${OPENSEARCH_SECURITY_DIR}/tools/hash.sh" -p "$password" | sed '/^\*\*/d')
    yq -i eval ".$username.hash = \"$hash\"" "${OPENSEARCH_SECURITY_CONF_DIR}/internal_users.yml"

    if [[ -n "${backend_roles[*]:-}" ]]; then
        for backend_role in "${backend_roles[@]}"; do
            yq -i eval ".${username}.backend_roles += [\"${backend_role}\"]" "${OPENSEARCH_SECURITY_CONF_DIR}/internal_users.yml"
        done
    fi

    if [[ -n "${attributes[*]:-}" ]]; then
        for attribute in "${attributes[@]}"; do
            yq -i eval ".${username}.attributes += [\"${attribute}\"]" "${OPENSEARCH_SECURITY_CONF_DIR}/internal_users.yml"
        done
    fi

    yq -i eval ".${username}.description = \"$description\"" "${OPENSEARCH_SECURITY_CONF_DIR}/internal_users.yml"
    yq -i eval ".${username}.reserved = $reserved" "${OPENSEARCH_SECURITY_CONF_DIR}/internal_users.yml"
}

########################
# Configure Opensearch Security built-in users and passwords
# Globals:
#   ELASTICSEARCH_*
#   OPENSEARCH_SECURITY_*
# Arguments:
#   None
# Returns:
#   None
#########################
opensearch_security_configure_users() {
    info "Configuring Opensearch security users and roles..."
    # Execute permission for configuration binaries
    chmod +x "${OPENSEARCH_SECURITY_DIR}/tools/hash.sh"
    chmod +x "${OPENSEARCH_SECURITY_DIR}/tools/securityadmin.sh"
    # Opensearch security configuration
    # NOTE: check the same path we write below (OPENSEARCH_SECURITY_CONF_DIR); the original
    # checked OPENSEARCH_SECURITY_DIR, which never matches the file this function creates,
    # so users were re-seeded on every container start
    if [ ! -f "${OPENSEARCH_SECURITY_CONF_DIR}/internal_users.yml" ]; then
        # Delete content of the demo file
        echo "" > "${OPENSEARCH_SECURITY_CONF_DIR}/internal_users.yml"

        yq -i eval '._meta.type = "internalusers"' "${OPENSEARCH_SECURITY_CONF_DIR}/internal_users.yml"
        yq -i eval '._meta.config_version = "2"' "${OPENSEARCH_SECURITY_CONF_DIR}/internal_users.yml"

        # Create default users
        opensearch_security_internal_user_set "$OPENSEARCH_USERNAME" "$OPENSEARCH_PASSWORD" true "admin" "" "Admin user"
        opensearch_security_internal_user_set "kibanaserver" "$OPENSEARCH_DASHBOARDS_PASSWORD" true "" "" "Kibana Server user"
        opensearch_security_internal_user_set "logstash" "$LOGSTASH_PASSWORD" true "logstash" "" "Logstash user"
    fi
}

########################
# Configure Opensearch TLS settings
# Globals:
#   DB_*
# Arguments:
#   None
# Returns:
#   None
#########################
opensearch_transport_tls_configuration(){
    info "Configuring Opensearch Transport TLS settings..."
    elasticsearch_conf_set plugins.security.ssl.transport.enabled "true"
    # NOTE(review): DB_TLS_VERIFICATION_MODE is coerced through a 'test("true")' boolean
    # conversion here — confirm its expected values against the caller
    elasticsearch_conf_write plugins.security.ssl.transport.enforce_hostname_verification "$DB_TLS_VERIFICATION_MODE" bool

    if is_boolean_yes "$DB_TRANSPORT_TLS_USE_PEM"; then
        debug "Configuring Transport Layer TLS settings using PEM certificates..."
        ! is_empty_value "$DB_TRANSPORT_TLS_KEY_PASSWORD" && elasticsearch_conf_set plugins.security.ssl.transport.pemkey_password "$DB_TRANSPORT_TLS_KEY_PASSWORD"
        elasticsearch_conf_set plugins.security.ssl.transport.pemkey_filepath "$DB_TRANSPORT_TLS_NODE_KEY_LOCATION"
        elasticsearch_conf_set plugins.security.ssl.transport.pemcert_filepath "$DB_TRANSPORT_TLS_NODE_CERT_LOCATION"
        elasticsearch_conf_set plugins.security.ssl.transport.pemtrustedcas_filepath "$DB_TRANSPORT_TLS_CA_CERT_LOCATION"
    else
        debug "Configuring Transport Layer TLS settings using JKS/PKCS certificates..."
        ! is_empty_value "$DB_TRANSPORT_TLS_KEYSTORE_PASSWORD" && elasticsearch_conf_set plugins.security.ssl.transport.keystore_password "$DB_TRANSPORT_TLS_KEYSTORE_PASSWORD"
        ! is_empty_value "$DB_TRANSPORT_TLS_TRUSTSTORE_PASSWORD" && elasticsearch_conf_set plugins.security.ssl.transport.truststore_password "$DB_TRANSPORT_TLS_TRUSTSTORE_PASSWORD"
        elasticsearch_conf_set plugins.security.ssl.transport.keystore_filepath "$DB_TRANSPORT_TLS_KEYSTORE_LOCATION"
        elasticsearch_conf_set plugins.security.ssl.transport.truststore_filepath "$DB_TRANSPORT_TLS_TRUSTSTORE_LOCATION"
    fi
}

########################
# Configure TLS settings
# Globals:
#   DB_*
# Arguments:
#   None
# Returns:
#   None
#########################
opensearch_http_tls_configuration(){
    info "Configuring ${DB_FLAVOR^^} HTTP TLS settings..."
    elasticsearch_conf_set plugins.security.ssl.http.enabled "true"
    if is_boolean_yes "$DB_HTTP_TLS_USE_PEM"; then
        debug "Configuring REST API TLS settings using PEM certificates..."
        # FIX: the key password belongs in 'pemkey_password' (mirroring the transport
        # settings above); the original wrote it to 'plugins.security.ssl.http.key'
        ! is_empty_value "$DB_HTTP_TLS_KEY_PASSWORD" && elasticsearch_conf_set plugins.security.ssl.http.pemkey_password "$DB_HTTP_TLS_KEY_PASSWORD"
        elasticsearch_conf_set plugins.security.ssl.http.pemkey_filepath "$DB_HTTP_TLS_NODE_KEY_LOCATION"
        elasticsearch_conf_set plugins.security.ssl.http.pemcert_filepath "$DB_HTTP_TLS_NODE_CERT_LOCATION"
        elasticsearch_conf_set plugins.security.ssl.http.pemtrustedcas_filepath "$DB_HTTP_TLS_CA_CERT_LOCATION"
    else
        debug "Configuring REST API TLS settings using JKS/PKCS certificates..."
        ! is_empty_value "$DB_HTTP_TLS_KEYSTORE_PASSWORD" && elasticsearch_conf_set plugins.security.ssl.http.keystore_password "$DB_HTTP_TLS_KEYSTORE_PASSWORD"
        ! is_empty_value "$DB_HTTP_TLS_TRUSTSTORE_PASSWORD" && elasticsearch_conf_set plugins.security.ssl.http.truststore_password "$DB_HTTP_TLS_TRUSTSTORE_PASSWORD"
        elasticsearch_conf_set plugins.security.ssl.http.keystore_filepath "$DB_HTTP_TLS_KEYSTORE_LOCATION"
        elasticsearch_conf_set plugins.security.ssl.http.truststore_filepath "$DB_HTTP_TLS_TRUSTSTORE_LOCATION"
    fi
}

#!/bin/bash
#
# Bitnami Elasticsearch/Opensearch common library

# shellcheck disable=SC1090,SC1091

# Load Generic Libraries
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libfile.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libnet.sh
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/libversion.sh
. /opt/bitnami/scripts/libservice.sh
. /opt/bitnami/scripts/libvalidations.sh

# Functions
########################
# Write a configuration setting value
# Globals:
#   DB_CONF_FILE
# Arguments:
#   $1 - key
#   $2 - value
#   $3 - YAML type (string, int or bool)
# Returns:
#   None
#########################
elasticsearch_conf_write() {
    local -r key="${1:?Missing key}"
    local -r value="${2:-}"
    local -r type="${3:-string}"
    local -r tempfile=$(mktemp)

    case "$type" in
    string)
        yq eval "(.${key}) |= \"${value}\"" "$DB_CONF_FILE" >"$tempfile"
        ;;
    int)
        yq eval "(.${key}) |= ${value}" "$DB_CONF_FILE" >"$tempfile"
        ;;
    bool)
        yq eval "(.${key}) |= (\"${value}\" | test(\"true\"))" "$DB_CONF_FILE" >"$tempfile"
        ;;
    *)
        error "Type unknown: ${type}"
        return 1
        ;;
    esac
    # cp (not mv) keeps the destination's ownership/permissions; remove the
    # temp file afterwards so repeated calls do not leak files in /tmp
    cp "$tempfile" "$DB_CONF_FILE"
    rm -f "$tempfile"
}

########################
# Set a configuration setting value
# Globals:
#   DB_CONF_FILE
# Arguments:
#   $1 - key
#   $2 - values (array)
# Returns:
#   None
#########################
elasticsearch_conf_set() {
    local key="${1:?missing key}"
    shift
    local values=("${@}")

    if [[ "${#values[@]}" -eq 0 ]]; then
        stderr_print "$key"
        stderr_print "missing values"
        return 1
    elif [[ "${#values[@]}" -eq 1 ]] && [[ -n "${values[0]}" ]]; then
        elasticsearch_conf_write "$key" "${values[0]}"
    else
        # Multiple values: write each as an indexed YAML list entry
        for i in "${!values[@]}"; do
            if [[ -n "${values[$i]}" ]]; then
                elasticsearch_conf_write "${key}[$i]" "${values[$i]}"
            fi
        done
    fi
}

########################
# Check if Elasticsearch is running
# Globals:
#   DB_TMP_DIR
# Arguments:
#   None
# Returns:
#   Boolean
#########################
is_elasticsearch_running() {
    local pid
    pid="$(get_pid_from_file "$DB_PID_FILE")"

    if [[ -z "$pid" ]]; then
        false
    else
        is_service_running "$pid"
    fi
}

########################
# Check if Elasticsearch is not running
# Globals:
#   DB_TMP_DIR
# Arguments:
#   None
# Returns:
#   Boolean
#########################
is_elasticsearch_not_running() {
    ! is_elasticsearch_running
    return "$?"
}

########################
# Stop Elasticsearch
# Globals:
#   DB_TMP_DIR
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_stop() {
    ! is_elasticsearch_running && return
    debug "Stopping ${DB_FLAVOR^}..."
    stop_service_using_pid "$DB_PID_FILE"
}

########################
# Start Elasticsearch and wait until it's ready
# Globals:
#   DB_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_start() {
    is_elasticsearch_running && return

    debug "Starting ${DB_FLAVOR^}..."
    local command=("${DB_BASE_DIR}/bin/${DB_FLAVOR}" "-d" "-p" "$DB_PID_FILE")
    am_i_root && command=("run_as_user" "$DB_DAEMON_USER" "${command[@]}")
    if [[ "$BITNAMI_DEBUG" = true ]]; then
        "${command[@]}" &
    else
        "${command[@]}" >/dev/null 2>&1 &
    fi

    local retries=50
    local seconds=2
    # Check the process is running
    retry_while "is_elasticsearch_running" "$retries" "$seconds"
    # Check Elasticsearch API is reachable
    retry_while "elasticsearch_healthcheck" "$retries" "$seconds"
}

########################
# Validate kernel settings
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_validate_kernel() {
    # Auxiliary functions
    validate_sysctl_key() {
        local key="${1:?key is missing}"
        local value="${2:?value is missing}"
        local current_value
        current_value="$(sysctl -n "$key")"
        if [[ "$current_value" -lt "$value" ]]; then
            error "Invalid kernel settings. ${DB_FLAVOR^} requires at least: $key = $value"
            exit 1
        fi
    }

    debug "Validating Kernel settings..."
    if [[ $(yq eval .index.store.type "$DB_CONF_FILE") ]]; then
        debug "Custom index.store.type found in the config file. Skipping kernel validation..."
    else
        validate_sysctl_key "fs.file-max" 65536
    fi
    if [[ $(yq eval .node.store.allow_mmap "$DB_CONF_FILE") ]]; then
        debug "Custom node.store.allow_mmap found in the config file. Skipping kernel validation..."
    else
        validate_sysctl_key "vm.max_map_count" 262144
    fi
}
########################
# Validate settings in DB_* env vars
# Globals:
#   DB_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_validate() {
    local error_code=0

    # Auxiliary functions
    print_validation_error() {
        error "$1"
        error_code=1
    }

    validate_node_roles() {
        if [ -n "$DB_NODE_ROLES" ]; then
            read -r -a roles_list <<<"$(get_elasticsearch_roles)"
            # OpenSearch 2.x renamed the 'master' role to 'cluster_manager'
            local master_role="master"
            [[ "$DB_FLAVOR" = "opensearch" && "$APP_VERSION" =~ ^2\. ]] && master_role="cluster_manager"
            if [[ "${#roles_list[@]}" -le 0 ]]; then
                warn "Setting ${DB_FLAVOR^^}_NODE_ROLES is empty and ${DB_FLAVOR^^}_IS_DEDICATED_NODE is set to true, ${DB_FLAVOR^} will be configured as coordinating-only node."
            fi
            for role in "${roles_list[@]}"; do
                case "$role" in
                "$master_role" | data | data_content | data_hot | data_warm | data_cold | data_frozen | ingest | ml | remote_cluster_client | transform) ;;

                *)
                    print_validation_error "Invalid node role '$role'. Supported roles are '${master_role},data,data_content,data_hot,data_warm,data_cold,data_frozen,ingest,ml,remote_cluster_client,transform'"
                    ;;
                esac
            done
        fi
    }

    debug "Ensuring expected directories/files exist..."
    am_i_root && ensure_user_exists "$DB_DAEMON_USER" --group "$DB_DAEMON_GROUP"
    for dir in "$DB_TMP_DIR" "$DB_LOGS_DIR" "$DB_PLUGINS_DIR" "$DB_BASE_DIR/modules" "$DB_CONF_DIR"; do
        ensure_dir_exists "$dir"
        am_i_root && chown -R "$DB_DAEMON_USER:$DB_DAEMON_GROUP" "$dir"
    done

    debug "Validating settings in DB_* env vars..."
    for var in "DB_HTTP_PORT_NUMBER" "DB_TRANSPORT_PORT_NUMBER"; do
        if ! err=$(validate_port "${!var}"); then
            print_validation_error "An invalid port was specified in the environment variable $var: $err"
        fi
    done

    if ! is_boolean_yes "$DB_IS_DEDICATED_NODE"; then
        warn "Setting ${DB_FLAVOR^^}_IS_DEDICATED_NODE is disabled."
        # typo fix: "asume" -> "assume"
        warn "${DB_FLAVOR^^}_NODE_ROLES will be ignored and ${DB_FLAVOR^} will assume all different roles."
    else
        validate_node_roles
    fi

    if [[ -n "$DB_BIND_ADDRESS" ]] && ! validate_ipv4 "$DB_BIND_ADDRESS"; then
        print_validation_error "The Bind Address specified in the environment variable ${DB_FLAVOR^^}_BIND_ADDRESS is not a valid IPv4"
    fi

    if is_boolean_yes "$DB_ENABLE_SECURITY"; then
        if [[ "$DB_FLAVOR" = "opensearch" ]]; then
            if [[ ! -f "$OPENSEARCH_SECURITY_ADMIN_KEY_LOCATION" ]] || [[ ! -f "$OPENSEARCH_SECURITY_ADMIN_CERT_LOCATION" ]]; then
                print_validation_error "In order to enable Opensearch Security, you must provide a valid admin PEM key and certificate."
            fi
            if is_empty_value "$OPENSEARCH_SECURITY_NODES_DN"; then
                print_validation_error "The variable OPENSEARCH_SECURITY_NODES_DN is required."
            fi
            if is_empty_value "$OPENSEARCH_SECURITY_ADMIN_DN"; then
                print_validation_error "The variable OPENSEARCH_SECURITY_ADMIN_DN is required."
            fi
            if ! is_boolean_yes "$OPENSEARCH_ENABLE_REST_TLS"; then
                # typo fix: "conections" -> "connections"
                print_validation_error "Opensearch does not support plaintext connections (HTTP) when Security is enabled."
            fi
        fi
        if is_boolean_yes "$DB_TRANSPORT_TLS_USE_PEM"; then
            if [[ ! -f "$DB_TRANSPORT_TLS_NODE_CERT_LOCATION" ]] || [[ ! -f "$DB_TRANSPORT_TLS_NODE_KEY_LOCATION" ]] || [[ ! -f "$DB_TRANSPORT_TLS_CA_CERT_LOCATION" ]]; then
                print_validation_error "In order to configure the TLS encryption for ${DB_FLAVOR^} Transport you must provide your node key, certificate and a valid certification_authority certificate."
            fi
        elif [[ ! -f "$DB_TRANSPORT_TLS_KEYSTORE_LOCATION" ]] || [[ ! -f "$DB_TRANSPORT_TLS_TRUSTSTORE_LOCATION" ]]; then
            print_validation_error "In order to configure the TLS encryption for ${DB_FLAVOR^} Transport with JKS/PKCS12 certs you must mount a valid keystore and truststore."
        fi
        if is_boolean_yes "$DB_HTTP_TLS_USE_PEM"; then
            if [[ ! -f "$DB_HTTP_TLS_NODE_CERT_LOCATION" ]] || [[ ! -f "$DB_HTTP_TLS_NODE_KEY_LOCATION" ]] || [[ ! -f "$DB_HTTP_TLS_CA_CERT_LOCATION" ]]; then
                print_validation_error "In order to configure the TLS encryption for ${DB_FLAVOR^} you must provide your node key, certificate and a valid certification_authority certificate."
            fi
        elif [[ ! -f "$DB_HTTP_TLS_KEYSTORE_LOCATION" ]] || [[ ! -f "$DB_HTTP_TLS_TRUSTSTORE_LOCATION" ]]; then
            print_validation_error "In order to configure the TLS encryption for ${DB_FLAVOR^} with JKS/PKCS12 certs you must mount a valid keystore and truststore."
        fi
    fi

    [[ "$error_code" -eq 0 ]] || exit "$error_code"
}

########################
# Determine the hostname by which Elasticsearch can be contacted
# Returns:
#   The value of $DB_ADVERTISED_HOSTNAME or the current host address
########################
get_elasticsearch_hostname() {
    if [[ -n "$DB_ADVERTISED_HOSTNAME" ]]; then
        echo "$DB_ADVERTISED_HOSTNAME"
    else
        get_machine_ip
    fi
}

########################
# Evaluates the env variable DB_NODE_ROLES and replaces master with
# cluster_manager on OpenSearch 2.x
# Globals:
#   DB_*
# Arguments:
#   None
# Returns:
#   Array of node roles
#########################
get_elasticsearch_roles() {
    read -r -a roles_list_tmp <<<"$(tr ',;' ' ' <<<"$DB_NODE_ROLES")"
    roles_list=("${roles_list_tmp[@]}")
    for i in "${!roles_list[@]}"; do
        if [[ ${roles_list[$i]} == "master" ]] && [[ "$DB_FLAVOR" = "opensearch" && "$APP_VERSION" =~ ^2\. ]]; then
            roles_list[i]="cluster_manager"
        fi
    done
    echo "${roles_list[@]}"
}
]]; then + roles_list[i]="cluster_manager" + fi + done + echo "${roles_list[@]}" +} + +######################## +# Configure cluster settings +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_cluster_configuration() { + # Auxiliary functions + bind_address() { + if [[ -n "$DB_BIND_ADDRESS" ]]; then + echo "$DB_BIND_ADDRESS" + else + echo "0.0.0.0" + fi + } + + is_node_master() { + if is_boolean_yes "$DB_IS_DEDICATED_NODE"; then + if [ -n "$DB_NODE_ROLES" ]; then + read -r -a roles_list <<<"$(get_elasticsearch_roles)" + if [[ " ${roles_list[*]} " = *" master "* ]]; then + true + elif [[ "$DB_FLAVOR" = "opensearch" && " ${roles_list[*]} " = *" cluster_manager "* ]]; then + true + else + false + fi + else + false + fi + else + true + fi + } + + info "Configuring ${DB_FLAVOR^} cluster settings..." + elasticsearch_conf_set network.host "$(get_elasticsearch_hostname)" + elasticsearch_conf_set network.publish_host "$(get_elasticsearch_hostname)" + elasticsearch_conf_set network.bind_host "$(bind_address)" + elasticsearch_conf_set cluster.name "$DB_CLUSTER_NAME" + elasticsearch_conf_set node.name "${DB_NODE_NAME:-$(hostname)}" + + if [[ -n "$DB_CLUSTER_HOSTS" ]]; then + read -r -a host_list <<<"$(tr ',;' ' ' <<<"$DB_CLUSTER_HOSTS")" + master_list=("${host_list[@]}") + if [[ -n "$DB_CLUSTER_MASTER_HOSTS" ]]; then + read -r -a master_list <<<"$(tr ',;' ' ' <<<"$DB_CLUSTER_MASTER_HOSTS")" + fi + elasticsearch_conf_set discovery.seed_hosts "${host_list[@]}" + if is_node_master; then + if [[ "$DB_FLAVOR" = "opensearch" && "$APP_VERSION" =~ ^2\. 
]]; then + elasticsearch_conf_set cluster.initial_cluster_manager_nodes "${master_list[@]}" + else + elasticsearch_conf_set cluster.initial_master_nodes "${master_list[@]}" + fi + fi + elasticsearch_conf_set discovery.initial_state_timeout "10m" + else + elasticsearch_conf_set "discovery.type" "single-node" + fi +} + +######################## +# Extend cluster settings with custom, user-provided config +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_custom_configuration() { + local custom_conf_file="${DB_CONF_DIR}/my_${DB_FLAVOR}.yml" + local -r tempfile=$(mktemp) + [[ ! -s "$custom_conf_file" ]] && return + info "Adding custom configuration" + yq eval-all 'select(fileIndex == 0) * select(fileIndex == 1)' "$DB_CONF_FILE" "$custom_conf_file" >"$tempfile" + cp "$tempfile" "$DB_CONF_FILE" +} + +######################## +# Configure node roles. +# There are 3 scenarios: +# * If DB_IS_DEDICATED_NODE is disabled, 'node.roles' is omitted and assumes all the roles (check docs). +# * Otherwise, 'node.roles' with a list of roles provided with DB_NODE_ROLES. +# * In addition, if DB_NODE_ROLES is empty, node.roles will be configured empty, meaning that the role is 'coordinating-only'. +# +# Docs ref: https://www.elastic.co/guide/en/opensearch/reference/current/modules-node.html +# +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_configure_node_roles() { + debug "Configure ${DB_FLAVOR^} Node roles..." 
+ + local set_repo_path="no" + if is_boolean_yes "$DB_IS_DEDICATED_NODE"; then + read -r -a roles_list <<<"$(get_elasticsearch_roles)" + if [[ "${#roles_list[@]}" -eq 0 ]]; then + elasticsearch_conf_write node.roles "[]" int + else + elasticsearch_conf_set node.roles "${roles_list[@]}" + for role in "${roles_list[@]}"; do + case "$role" in + cluster_manager | master | data | data_content | data_hot | data_warm | data_cold | data_frozen) + set_repo_path="yes" + ;; + *) ;; + esac + done + fi + else + set_repo_path="yes" + fi + + if is_boolean_yes "$set_repo_path" && [[ -n "$DB_FS_SNAPSHOT_REPO_PATH" ]]; then + # Configure path.repo to restore snapshots from system repository + # It must be set on every cluster_manager an data node + # ref: https://www.elastic.co/guide/en/opensearch/reference/current/snapshots-register-repository.html#snapshots-filesystem-repository + elasticsearch_conf_set path.repo "$DB_FS_SNAPSHOT_REPO_PATH" + fi +} + +######################## +# Configure Heap Size +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_set_heap_size() { + local heap_size + + # Remove heap.options if it already exists + rm -f "${DB_CONF_DIR}/jvm.options.d/heap.options" + + if [[ -n "$DB_HEAP_SIZE" ]]; then + debug "Using specified values for Xmx and Xms heap options..." + heap_size="$DB_HEAP_SIZE" + else + debug "Calculating appropriate Xmx and Xms values..." 
+ local machine_mem="" + machine_mem="$(get_total_memory)" + if [[ "$machine_mem" -lt 65536 ]]; then + local max_allowed_memory + local calculated_heap_size + calculated_heap_size="$((machine_mem / 2))" + max_allowed_memory="$((DB_MAX_ALLOWED_MEMORY_PERCENTAGE * machine_mem))" + max_allowed_memory="$((max_allowed_memory / 100))" + # Allow for absolute memory limit when calculating limit from percentage + if [[ -n "$DB_MAX_ALLOWED_MEMORY" && "$max_allowed_memory" -gt "$DB_MAX_ALLOWED_MEMORY" ]]; then + max_allowed_memory="$DB_MAX_ALLOWED_MEMORY" + fi + if [[ "$calculated_heap_size" -gt "$max_allowed_memory" ]]; then + info "Calculated Java heap size of ${calculated_heap_size} will be limited to ${max_allowed_memory}" + calculated_heap_size="$max_allowed_memory" + fi + heap_size="${calculated_heap_size}m" + + else + heap_size=32768m + fi + fi + debug "Setting '-Xmx${heap_size} -Xms${heap_size}' heap options..." + cat >"${DB_CONF_DIR}/jvm.options.d/heap.options" < plugin + # get_plugin_name file://plugin.zip -> plugin + # get_plugin_name http://plugin-0.1.2.zip -> plugin + get_plugin_name() { + local plugin="${1:?missing plugin}" + # Remove any paths, and strip both the .zip extension and the version + basename "$plugin" | sed -E -e 's/.zip$//' -e 's/-[0-9]+\.[0-9]+(\.[0-9]+){0,}$//' + } + + # Collect plugins that should be installed offline + read -r -a mounted_plugins <<<"$(find "$DB_MOUNTED_PLUGINS_DIR" -type f -name "*.zip" -print0 | xargs -0)" + if [[ "${#mounted_plugins[@]}" -gt 0 ]]; then + for plugin in "${mounted_plugins[@]}"; do + plugins_list+=("file://${plugin}") + done + fi + + # Skip if there isn't any plugin to install + [[ -z "${plugins_list[*]:-}" ]] && return + + # Install plugins + debug "Installing plugins: ${plugins_list[*]}" + for plugin in "${plugins_list[@]}"; do + plugin_name="$(get_plugin_name "$plugin")" + [[ -n "$mandatory_plugins" ]] && mandatory_plugins="${mandatory_plugins},${plugin_name}" || mandatory_plugins="$plugin_name" + + # Check 
if the plugin was already installed + if [[ -d "${DB_PLUGINS_DIR}/${plugin_name}" ]]; then + debug "Plugin already installed: ${plugin}" + continue + fi + + debug "Installing plugin: ${plugin}" + if [[ "${BITNAMI_DEBUG:-false}" = true ]]; then + "$cmd" install -b -v "$plugin" + else + "$cmd" install -b -v "$plugin" >/dev/null 2>&1 + fi + done + + # Mark plugins as mandatory + elasticsearch_conf_set plugin.mandatory "$mandatory_plugins" +} + +######################## +# Run custom initialization scripts +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_custom_init_scripts() { + read -r -a init_scripts <<<"$(find "$DB_INITSCRIPTS_DIR" -type f -name "*.sh" -print0 | xargs -0)" + if [[ "${#init_scripts[@]}" -gt 0 ]] && [[ ! -f "$DB_VOLUME_DIR"/.user_scripts_initialized ]]; then + info "Loading user's custom files from $DB_INITSCRIPTS_DIR" + for f in "${init_scripts[@]}"; do + debug "Executing $f" + case "$f" in + *.sh) + if [[ -x "$f" ]]; then + if ! "$f"; then + error "Failed executing $f" + return 1 + fi + else + warn "Sourcing $f as it is not executable by the current user, any error may cause initialization to fail" + . 
"$f" + fi + ;; + *) + warn "Skipping $f, supported formats are: .sh" + ;; + esac + done + touch "$DB_VOLUME_DIR"/.user_scripts_initialized + fi +} + +######################## +# Modify log4j2.properties to send events to stdout instead of a logfile +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_configure_logging() { + # Back up the original file for users who'd like to use logfile logging + cp "${DB_CONF_DIR}/log4j2.properties" "${DB_CONF_DIR}/log4j2.file.properties" + + # Replace RollingFile with Console + replace_in_file "${DB_CONF_DIR}/log4j2.properties" "RollingFile" "Console" + + local -a delete_patterns=( + # Remove RollingFile specific settings + "^.*\.policies\..*$" "^.*\.filePattern.*$" "^.*\.fileName.*$" "^.*\.strategy\..*$" + # Remove headers + "^###.*$" + # Remove .log and .json because of multiline configurations (filename) + "^\s\s.*\.log" "^\s\s.*\.json" + # Remove default rolling logger and references + "^appender\.rolling" "appenderRef\.rolling" + # Remove _old loggers + "_old\." + # Remove .filePermissions config + "\.filePermissions" + ) + for pattern in "${delete_patterns[@]}"; do + remove_in_file "${DB_CONF_DIR}/log4j2.properties" "$pattern" + done +} + +######################## +# Check Elasticsearch/Opensearch health +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# 0 when healthy (or waiting for Opensearch security bootstrap) +# 1 when unhealthy +######################### +elasticsearch_healthcheck() { + info "Checking ${DB_FLAVOR^} health..." 
+ local -r cmd="curl" + local command_args=("--silent" "--write-out" "%{http_code}") + local protocol="http" + local host + + host=$(get_elasticsearch_hostname) + + is_boolean_yes "$DB_ENABLE_SECURITY" && is_boolean_yes "$DB_ENABLE_REST_TLS" && protocol="https" && command_args+=("-k" "--user" "${DB_USERNAME}:${DB_PASSWORD}") + + # Combination of --silent, --output and --write-out allows us to obtain both the status code and the request body + output=$(mktemp) + command_args+=("-o" "$output" "${protocol}://${host}:${DB_HTTP_PORT_NUMBER}/_cluster/health?local=true") + HTTP_CODE=$("$cmd" "${command_args[@]}") + if [[ ${HTTP_CODE} -ge 200 && ${HTTP_CODE} -le 299 ]] || ([[ "$DB_FLAVOR" = "opensearch" ]] && [[ ${HTTP_CODE} -eq 503 ]] && grep -q "OpenSearch Security not initialized" "$output" ); then + rm "$output" + return 0 + else + rm "$output" + return 1 + fi +} diff --git a/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch-env.sh b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch-env.sh new file mode 100644 index 0000000000000..3ee093be3fdd4 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch-env.sh @@ -0,0 +1,269 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Environment configuration for opensearch + +# The values for all environment variables will be set in the below order of precedence +# 1. Custom environment variables defined below after Bitnami defaults +# 2. Constants defined in this file (environment variables with no default), i.e. BITNAMI_ROOT_DIR +# 3. Environment variables overridden via external files using *_FILE variables (see below) +# 4. Environment variables set externally (i.e. current Bash context/Dockerfile/userdata) + +# Load logging library +# shellcheck disable=SC1090,SC1091 +. 
/opt/bitnami/scripts/liblog.sh + +export BITNAMI_ROOT_DIR="/opt/bitnami" +export BITNAMI_VOLUME_DIR="/bitnami" + +# Logging configuration +export MODULE="${MODULE:-opensearch}" +export BITNAMI_DEBUG="${BITNAMI_DEBUG:-false}" + +# By setting an environment variable matching *_FILE to a file path, the prefixed environment +# variable will be overridden with the value specified in that file +opensearch_env_vars=( + OPENSEARCH_CERTS_DIR + OPENSEARCH_DATA_DIR_LIST + OPENSEARCH_BIND_ADDRESS + OPENSEARCH_ADVERTISED_HOSTNAME + OPENSEARCH_CLUSTER_HOSTS + OPENSEARCH_CLUSTER_MASTER_HOSTS + OPENSEARCH_CLUSTER_NAME + OPENSEARCH_HEAP_SIZE + OPENSEARCH_MAX_ALLOWED_MEMORY_PERCENTAGE + OPENSEARCH_MAX_ALLOWED_MEMORY + OPENSEARCH_MAX_TIMEOUT + OPENSEARCH_LOCK_ALL_MEMORY + OPENSEARCH_DISABLE_JVM_HEAP_DUMP + OPENSEARCH_DISABLE_GC_LOGS + OPENSEARCH_IS_DEDICATED_NODE + OPENSEARCH_MINIMUM_MASTER_NODES + OPENSEARCH_NODE_NAME + OPENSEARCH_FS_SNAPSHOT_REPO_PATH + OPENSEARCH_NODE_ROLES + OPENSEARCH_PLUGINS + OPENSEARCH_TRANSPORT_PORT_NUMBER + OPENSEARCH_HTTP_PORT_NUMBER + OPENSEARCH_ENABLE_SECURITY + OPENSEARCH_PASSWORD + OPENSEARCH_TLS_VERIFICATION_MODE + OPENSEARCH_TLS_USE_PEM + OPENSEARCH_KEYSTORE_PASSWORD + OPENSEARCH_TRUSTSTORE_PASSWORD + OPENSEARCH_KEY_PASSWORD + OPENSEARCH_KEYSTORE_LOCATION + OPENSEARCH_TRUSTSTORE_LOCATION + OPENSEARCH_NODE_CERT_LOCATION + OPENSEARCH_NODE_KEY_LOCATION + OPENSEARCH_CA_CERT_LOCATION + OPENSEARCH_SKIP_TRANSPORT_TLS + OPENSEARCH_TRANSPORT_TLS_USE_PEM + OPENSEARCH_TRANSPORT_TLS_KEYSTORE_PASSWORD + OPENSEARCH_TRANSPORT_TLS_TRUSTSTORE_PASSWORD + OPENSEARCH_TRANSPORT_TLS_KEY_PASSWORD + OPENSEARCH_TRANSPORT_TLS_KEYSTORE_LOCATION + OPENSEARCH_TRANSPORT_TLS_TRUSTSTORE_LOCATION + OPENSEARCH_TRANSPORT_TLS_NODE_CERT_LOCATION + OPENSEARCH_TRANSPORT_TLS_NODE_KEY_LOCATION + OPENSEARCH_TRANSPORT_TLS_CA_CERT_LOCATION + OPENSEARCH_ENABLE_REST_TLS + OPENSEARCH_HTTP_TLS_USE_PEM + OPENSEARCH_HTTP_TLS_KEYSTORE_PASSWORD + OPENSEARCH_HTTP_TLS_TRUSTSTORE_PASSWORD + 
OPENSEARCH_HTTP_TLS_KEY_PASSWORD + OPENSEARCH_HTTP_TLS_KEYSTORE_LOCATION + OPENSEARCH_HTTP_TLS_TRUSTSTORE_LOCATION + OPENSEARCH_HTTP_TLS_NODE_CERT_LOCATION + OPENSEARCH_HTTP_TLS_NODE_KEY_LOCATION + OPENSEARCH_HTTP_TLS_CA_CERT_LOCATION + OPENSEARCH_SECURITY_DIR + OPENSEARCH_SECURITY_CONF_DIR + OPENSEARCH_DASHBOARDS_PASSWORD + LOGSTASH_PASSWORD + OPENSEARCH_SET_CGROUP + OPENSEARCH_SECURITY_BOOTSTRAP + OPENSEARCH_SECURITY_NODES_DN + OPENSEARCH_SECURITY_ADMIN_DN + OPENSEARCH_SECURITY_ADMIN_CERT_LOCATION + OPENSEARCH_SECURITY_ADMIN_KEY_LOCATION + DB_MINIMUM_MANAGER_NODES + KIBANA_PASSWORD +) +for env_var in "${opensearch_env_vars[@]}"; do + file_env_var="${env_var}_FILE" + if [[ -n "${!file_env_var:-}" ]]; then + if [[ -r "${!file_env_var:-}" ]]; then + export "${env_var}=$(< "${!file_env_var}")" + unset "${file_env_var}" + else + warn "Skipping export of '${env_var}'. '${!file_env_var:-}' is not readable." + fi + fi +done +unset opensearch_env_vars +export DB_FLAVOR="opensearch" + +# Paths +export OPENSEARCH_VOLUME_DIR="/bitnami/opensearch" +export DB_VOLUME_DIR="$OPENSEARCH_VOLUME_DIR" +export OPENSEARCH_BASE_DIR="/opt/bitnami/opensearch" +export DB_BASE_DIR="$OPENSEARCH_BASE_DIR" +export OPENSEARCH_CONF_DIR="${DB_BASE_DIR}/config" +export DB_CONF_DIR="$OPENSEARCH_CONF_DIR" +export OPENSEARCH_CERTS_DIR="${OPENSEARCH_CERTS_DIR:-${DB_CONF_DIR}/certs}" +export DB_CERTS_DIR="$OPENSEARCH_CERTS_DIR" +export OPENSEARCH_LOGS_DIR="${DB_BASE_DIR}/logs" +export DB_LOGS_DIR="$OPENSEARCH_LOGS_DIR" +export OPENSEARCH_PLUGINS_DIR="${DB_BASE_DIR}/plugins" +export DB_PLUGINS_DIR="$OPENSEARCH_PLUGINS_DIR" +export OPENSEARCH_DATA_DIR="${DB_VOLUME_DIR}/data" +export DB_DATA_DIR="$OPENSEARCH_DATA_DIR" +export OPENSEARCH_DATA_DIR_LIST="${OPENSEARCH_DATA_DIR_LIST:-}" +export DB_DATA_DIR_LIST="$OPENSEARCH_DATA_DIR_LIST" +export OPENSEARCH_TMP_DIR="${DB_BASE_DIR}/tmp" +export DB_TMP_DIR="$OPENSEARCH_TMP_DIR" +export OPENSEARCH_BIN_DIR="${DB_BASE_DIR}/bin" +export 
DB_BIN_DIR="$OPENSEARCH_BIN_DIR" +export OPENSEARCH_MOUNTED_PLUGINS_DIR="${DB_VOLUME_DIR}/plugins" +export DB_MOUNTED_PLUGINS_DIR="$OPENSEARCH_MOUNTED_PLUGINS_DIR" +export OPENSEARCH_CONF_FILE="${DB_CONF_DIR}/opensearch.yml" +export DB_CONF_FILE="$OPENSEARCH_CONF_FILE" +export OPENSEARCH_LOG_FILE="${DB_LOGS_DIR}/opensearch.log" +export DB_LOG_FILE="$OPENSEARCH_LOG_FILE" +export OPENSEARCH_PID_FILE="${DB_TMP_DIR}/opensearch.pid" +export DB_PID_FILE="$OPENSEARCH_PID_FILE" +export OPENSEARCH_INITSCRIPTS_DIR="/docker-entrypoint-initdb.d" +export DB_INITSCRIPTS_DIR="$OPENSEARCH_INITSCRIPTS_DIR" +export PATH="${DB_BIN_DIR}:${BITNAMI_ROOT_DIR}/common/bin:$PATH" + +# System users (when running with a privileged user) +export OPENSEARCH_DAEMON_USER="opensearch" +export DB_DAEMON_USER="$OPENSEARCH_DAEMON_USER" +export OPENSEARCH_DAEMON_GROUP="opensearch" +export DB_DAEMON_GROUP="$OPENSEARCH_DAEMON_GROUP" + +# Opensearch configuration +export OPENSEARCH_BIND_ADDRESS="${OPENSEARCH_BIND_ADDRESS:-}" +export DB_BIND_ADDRESS="$OPENSEARCH_BIND_ADDRESS" +export OPENSEARCH_ADVERTISED_HOSTNAME="${OPENSEARCH_ADVERTISED_HOSTNAME:-}" +export DB_ADVERTISED_HOSTNAME="$OPENSEARCH_ADVERTISED_HOSTNAME" +export OPENSEARCH_CLUSTER_HOSTS="${OPENSEARCH_CLUSTER_HOSTS:-}" +export DB_CLUSTER_HOSTS="$OPENSEARCH_CLUSTER_HOSTS" +export OPENSEARCH_CLUSTER_MASTER_HOSTS="${OPENSEARCH_CLUSTER_MASTER_HOSTS:-}" +export DB_CLUSTER_MASTER_HOSTS="$OPENSEARCH_CLUSTER_MASTER_HOSTS" +export OPENSEARCH_CLUSTER_NAME="${OPENSEARCH_CLUSTER_NAME:-}" +export DB_CLUSTER_NAME="$OPENSEARCH_CLUSTER_NAME" +export OPENSEARCH_HEAP_SIZE="${OPENSEARCH_HEAP_SIZE:-1024m}" +export DB_HEAP_SIZE="$OPENSEARCH_HEAP_SIZE" +export OPENSEARCH_MAX_ALLOWED_MEMORY_PERCENTAGE="${OPENSEARCH_MAX_ALLOWED_MEMORY_PERCENTAGE:-100}" +export DB_MAX_ALLOWED_MEMORY_PERCENTAGE="$OPENSEARCH_MAX_ALLOWED_MEMORY_PERCENTAGE" +export OPENSEARCH_MAX_ALLOWED_MEMORY="${OPENSEARCH_MAX_ALLOWED_MEMORY:-}" +export 
DB_MAX_ALLOWED_MEMORY="$OPENSEARCH_MAX_ALLOWED_MEMORY" +export OPENSEARCH_MAX_TIMEOUT="${OPENSEARCH_MAX_TIMEOUT:-60}" +export DB_MAX_TIMEOUT="$OPENSEARCH_MAX_TIMEOUT" +export OPENSEARCH_LOCK_ALL_MEMORY="${OPENSEARCH_LOCK_ALL_MEMORY:-no}" +export DB_LOCK_ALL_MEMORY="$OPENSEARCH_LOCK_ALL_MEMORY" +export OPENSEARCH_DISABLE_JVM_HEAP_DUMP="${OPENSEARCH_DISABLE_JVM_HEAP_DUMP:-no}" +export DB_DISABLE_JVM_HEAP_DUMP="$OPENSEARCH_DISABLE_JVM_HEAP_DUMP" +export OPENSEARCH_DISABLE_GC_LOGS="${OPENSEARCH_DISABLE_GC_LOGS:-no}" +export DB_DISABLE_GC_LOGS="$OPENSEARCH_DISABLE_GC_LOGS" +export OPENSEARCH_IS_DEDICATED_NODE="${OPENSEARCH_IS_DEDICATED_NODE:-no}" +export DB_IS_DEDICATED_NODE="$OPENSEARCH_IS_DEDICATED_NODE" +OPENSEARCH_MINIMUM_MASTER_NODES="${OPENSEARCH_MINIMUM_MASTER_NODES:-"${DB_MINIMUM_MANAGER_NODES:-}"}" +export OPENSEARCH_MINIMUM_MASTER_NODES="${OPENSEARCH_MINIMUM_MASTER_NODES:-}" +export DB_MINIMUM_MASTER_NODES="$OPENSEARCH_MINIMUM_MASTER_NODES" +export OPENSEARCH_NODE_NAME="${OPENSEARCH_NODE_NAME:-}" +export DB_NODE_NAME="$OPENSEARCH_NODE_NAME" +export OPENSEARCH_FS_SNAPSHOT_REPO_PATH="${OPENSEARCH_FS_SNAPSHOT_REPO_PATH:-}" +export DB_FS_SNAPSHOT_REPO_PATH="$OPENSEARCH_FS_SNAPSHOT_REPO_PATH" +export OPENSEARCH_NODE_ROLES="${OPENSEARCH_NODE_ROLES:-}" +export DB_NODE_ROLES="$OPENSEARCH_NODE_ROLES" +export OPENSEARCH_PLUGINS="${OPENSEARCH_PLUGINS:-}" +export DB_PLUGINS="$OPENSEARCH_PLUGINS" +export OPENSEARCH_TRANSPORT_PORT_NUMBER="${OPENSEARCH_TRANSPORT_PORT_NUMBER:-9300}" +export DB_TRANSPORT_PORT_NUMBER="$OPENSEARCH_TRANSPORT_PORT_NUMBER" +export OPENSEARCH_HTTP_PORT_NUMBER="${OPENSEARCH_HTTP_PORT_NUMBER:-9200}" +export DB_HTTP_PORT_NUMBER="$OPENSEARCH_HTTP_PORT_NUMBER" + +# Opensearch Security configuration +export OPENSEARCH_ENABLE_SECURITY="${OPENSEARCH_ENABLE_SECURITY:-false}" +export DB_ENABLE_SECURITY="$OPENSEARCH_ENABLE_SECURITY" +export OPENSEARCH_PASSWORD="${OPENSEARCH_PASSWORD:-bitnami}" +export DB_PASSWORD="$OPENSEARCH_PASSWORD" +export 
OPENSEARCH_USERNAME="admin" +export DB_USERNAME="$OPENSEARCH_USERNAME" +export OPENSEARCH_TLS_VERIFICATION_MODE="${OPENSEARCH_TLS_VERIFICATION_MODE:-full}" +export DB_TLS_VERIFICATION_MODE="$OPENSEARCH_TLS_VERIFICATION_MODE" +export OPENSEARCH_TLS_USE_PEM="${OPENSEARCH_TLS_USE_PEM:-false}" +export DB_TLS_USE_PEM="$OPENSEARCH_TLS_USE_PEM" +export OPENSEARCH_KEYSTORE_PASSWORD="${OPENSEARCH_KEYSTORE_PASSWORD:-}" +export DB_KEYSTORE_PASSWORD="$OPENSEARCH_KEYSTORE_PASSWORD" +export OPENSEARCH_TRUSTSTORE_PASSWORD="${OPENSEARCH_TRUSTSTORE_PASSWORD:-}" +export DB_TRUSTSTORE_PASSWORD="$OPENSEARCH_TRUSTSTORE_PASSWORD" +export OPENSEARCH_KEY_PASSWORD="${OPENSEARCH_KEY_PASSWORD:-}" +export DB_KEY_PASSWORD="$OPENSEARCH_KEY_PASSWORD" +export OPENSEARCH_KEYSTORE_LOCATION="${OPENSEARCH_KEYSTORE_LOCATION:-${DB_CERTS_DIR}/opensearch.keystore.jks}" +export DB_KEYSTORE_LOCATION="$OPENSEARCH_KEYSTORE_LOCATION" +export OPENSEARCH_TRUSTSTORE_LOCATION="${OPENSEARCH_TRUSTSTORE_LOCATION:-${DB_CERTS_DIR}/opensearch.truststore.jks}" +export DB_TRUSTSTORE_LOCATION="$OPENSEARCH_TRUSTSTORE_LOCATION" +export OPENSEARCH_NODE_CERT_LOCATION="${OPENSEARCH_NODE_CERT_LOCATION:-${DB_CERTS_DIR}/tls.crt}" +export DB_NODE_CERT_LOCATION="$OPENSEARCH_NODE_CERT_LOCATION" +export OPENSEARCH_NODE_KEY_LOCATION="${OPENSEARCH_NODE_KEY_LOCATION:-${DB_CERTS_DIR}/tls.key}" +export DB_NODE_KEY_LOCATION="$OPENSEARCH_NODE_KEY_LOCATION" +export OPENSEARCH_CA_CERT_LOCATION="${OPENSEARCH_CA_CERT_LOCATION:-${DB_CERTS_DIR}/ca.crt}" +export DB_CA_CERT_LOCATION="$OPENSEARCH_CA_CERT_LOCATION" +export OPENSEARCH_SKIP_TRANSPORT_TLS="${OPENSEARCH_SKIP_TRANSPORT_TLS:-false}" +export DB_SKIP_TRANSPORT_TLS="$OPENSEARCH_SKIP_TRANSPORT_TLS" +export OPENSEARCH_TRANSPORT_TLS_USE_PEM="${OPENSEARCH_TRANSPORT_TLS_USE_PEM:-$DB_TLS_USE_PEM}" +export DB_TRANSPORT_TLS_USE_PEM="$OPENSEARCH_TRANSPORT_TLS_USE_PEM" +export OPENSEARCH_TRANSPORT_TLS_KEYSTORE_PASSWORD="${OPENSEARCH_TRANSPORT_TLS_KEYSTORE_PASSWORD:-$DB_KEYSTORE_PASSWORD}" +export 
DB_TRANSPORT_TLS_KEYSTORE_PASSWORD="$OPENSEARCH_TRANSPORT_TLS_KEYSTORE_PASSWORD" +export OPENSEARCH_TRANSPORT_TLS_TRUSTSTORE_PASSWORD="${OPENSEARCH_TRANSPORT_TLS_TRUSTSTORE_PASSWORD:-$DB_TRUSTSTORE_PASSWORD}" +export DB_TRANSPORT_TLS_TRUSTSTORE_PASSWORD="$OPENSEARCH_TRANSPORT_TLS_TRUSTSTORE_PASSWORD" +export OPENSEARCH_TRANSPORT_TLS_KEY_PASSWORD="${OPENSEARCH_TRANSPORT_TLS_KEY_PASSWORD:-$DB_KEY_PASSWORD}" +export DB_TRANSPORT_TLS_KEY_PASSWORD="$OPENSEARCH_TRANSPORT_TLS_KEY_PASSWORD" +export OPENSEARCH_TRANSPORT_TLS_KEYSTORE_LOCATION="${OPENSEARCH_TRANSPORT_TLS_KEYSTORE_LOCATION:-$DB_KEYSTORE_LOCATION}" +export DB_TRANSPORT_TLS_KEYSTORE_LOCATION="$OPENSEARCH_TRANSPORT_TLS_KEYSTORE_LOCATION" +export OPENSEARCH_TRANSPORT_TLS_TRUSTSTORE_LOCATION="${OPENSEARCH_TRANSPORT_TLS_TRUSTSTORE_LOCATION:-$DB_TRUSTSTORE_LOCATION}" +export DB_TRANSPORT_TLS_TRUSTSTORE_LOCATION="$OPENSEARCH_TRANSPORT_TLS_TRUSTSTORE_LOCATION" +export OPENSEARCH_TRANSPORT_TLS_NODE_CERT_LOCATION="${OPENSEARCH_TRANSPORT_TLS_NODE_CERT_LOCATION:-$DB_NODE_CERT_LOCATION}" +export DB_TRANSPORT_TLS_NODE_CERT_LOCATION="$OPENSEARCH_TRANSPORT_TLS_NODE_CERT_LOCATION" +export OPENSEARCH_TRANSPORT_TLS_NODE_KEY_LOCATION="${OPENSEARCH_TRANSPORT_TLS_NODE_KEY_LOCATION:-$DB_NODE_KEY_LOCATION}" +export DB_TRANSPORT_TLS_NODE_KEY_LOCATION="$OPENSEARCH_TRANSPORT_TLS_NODE_KEY_LOCATION" +export OPENSEARCH_TRANSPORT_TLS_CA_CERT_LOCATION="${OPENSEARCH_TRANSPORT_TLS_CA_CERT_LOCATION:-$DB_CA_CERT_LOCATION}" +export DB_TRANSPORT_TLS_CA_CERT_LOCATION="$OPENSEARCH_TRANSPORT_TLS_CA_CERT_LOCATION" +export OPENSEARCH_ENABLE_REST_TLS="${OPENSEARCH_ENABLE_REST_TLS:-true}" +export DB_ENABLE_REST_TLS="$OPENSEARCH_ENABLE_REST_TLS" +export OPENSEARCH_HTTP_TLS_USE_PEM="${OPENSEARCH_HTTP_TLS_USE_PEM:-$DB_TLS_USE_PEM}" +export DB_HTTP_TLS_USE_PEM="$OPENSEARCH_HTTP_TLS_USE_PEM" +export OPENSEARCH_HTTP_TLS_KEYSTORE_PASSWORD="${OPENSEARCH_HTTP_TLS_KEYSTORE_PASSWORD:-$DB_KEYSTORE_PASSWORD}" +export 
DB_HTTP_TLS_KEYSTORE_PASSWORD="$OPENSEARCH_HTTP_TLS_KEYSTORE_PASSWORD" +export OPENSEARCH_HTTP_TLS_TRUSTSTORE_PASSWORD="${OPENSEARCH_HTTP_TLS_TRUSTSTORE_PASSWORD:-$DB_TRUSTSTORE_PASSWORD}" +export DB_HTTP_TLS_TRUSTSTORE_PASSWORD="$OPENSEARCH_HTTP_TLS_TRUSTSTORE_PASSWORD" +export OPENSEARCH_HTTP_TLS_KEY_PASSWORD="${OPENSEARCH_HTTP_TLS_KEY_PASSWORD:-$DB_KEY_PASSWORD}" +export DB_HTTP_TLS_KEY_PASSWORD="$OPENSEARCH_HTTP_TLS_KEY_PASSWORD" +export OPENSEARCH_HTTP_TLS_KEYSTORE_LOCATION="${OPENSEARCH_HTTP_TLS_KEYSTORE_LOCATION:-$DB_KEYSTORE_LOCATION}" +export DB_HTTP_TLS_KEYSTORE_LOCATION="$OPENSEARCH_HTTP_TLS_KEYSTORE_LOCATION" +export OPENSEARCH_HTTP_TLS_TRUSTSTORE_LOCATION="${OPENSEARCH_HTTP_TLS_TRUSTSTORE_LOCATION:-$DB_TRUSTSTORE_LOCATION}" +export DB_HTTP_TLS_TRUSTSTORE_LOCATION="$OPENSEARCH_HTTP_TLS_TRUSTSTORE_LOCATION" +export OPENSEARCH_HTTP_TLS_NODE_CERT_LOCATION="${OPENSEARCH_HTTP_TLS_NODE_CERT_LOCATION:-$DB_NODE_CERT_LOCATION}" +export DB_HTTP_TLS_NODE_CERT_LOCATION="$OPENSEARCH_HTTP_TLS_NODE_CERT_LOCATION" +export OPENSEARCH_HTTP_TLS_NODE_KEY_LOCATION="${OPENSEARCH_HTTP_TLS_NODE_KEY_LOCATION:-$DB_NODE_KEY_LOCATION}" +export DB_HTTP_TLS_NODE_KEY_LOCATION="$OPENSEARCH_HTTP_TLS_NODE_KEY_LOCATION" +export OPENSEARCH_HTTP_TLS_CA_CERT_LOCATION="${OPENSEARCH_HTTP_TLS_CA_CERT_LOCATION:-$DB_CA_CERT_LOCATION}" +export DB_HTTP_TLS_CA_CERT_LOCATION="$OPENSEARCH_HTTP_TLS_CA_CERT_LOCATION" +export OPENSEARCH_SECURITY_DIR="${OPENSEARCH_SECURITY_DIR:-${DB_PLUGINS_DIR}/opensearch-security}" +export OPENSEARCH_SECURITY_CONF_DIR="${OPENSEARCH_SECURITY_CONF_DIR:-${DB_CONF_DIR}/opensearch-security}" +OPENSEARCH_DASHBOARDS_PASSWORD="${OPENSEARCH_DASHBOARDS_PASSWORD:-"${KIBANA_PASSWORD:-}"}" +export OPENSEARCH_DASHBOARDS_PASSWORD="${OPENSEARCH_DASHBOARDS_PASSWORD:-bitnami}" +export LOGSTASH_PASSWORD="${LOGSTASH_PASSWORD:-bitnami}" +export OPENSEARCH_SET_CGROUP="${OPENSEARCH_SET_CGROUP:-true}" +export OPENSEARCH_SECURITY_BOOTSTRAP="${OPENSEARCH_SECURITY_BOOTSTRAP:-false}" +export 
OPENSEARCH_SECURITY_NODES_DN="${OPENSEARCH_SECURITY_NODES_DN:-}" +export OPENSEARCH_SECURITY_ADMIN_DN="${OPENSEARCH_SECURITY_ADMIN_DN:-}" +export OPENSEARCH_SECURITY_ADMIN_CERT_LOCATION="${OPENSEARCH_SECURITY_ADMIN_CERT_LOCATION:-${DB_CERTS_DIR}/admin.crt}" +export OPENSEARCH_SECURITY_ADMIN_KEY_LOCATION="${OPENSEARCH_SECURITY_ADMIN_KEY_LOCATION:-${DB_CERTS_DIR}/admin.key}" + +# Custom environment variables may be defined below diff --git a/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/entrypoint.sh b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/entrypoint.sh new file mode 100755 index 0000000000000..0bca832be31f3 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/entrypoint.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +#set -o xtrace + +# Load libraries +. /opt/bitnami/scripts/libbitnami.sh +. /opt/bitnami/scripts/libopensearch.sh + +# Load environment +. /opt/bitnami/scripts/opensearch-env.sh + +print_welcome_page + +if [[ "$1" = "/opt/bitnami/scripts/opensearch/run.sh" ]]; then + info "** Starting Opensearch setup **" + /opt/bitnami/scripts/opensearch/setup.sh + info "** Opensearch setup finished! **" +fi + +echo "" +exec "$@" diff --git a/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/healthcheck.sh b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/healthcheck.sh new file mode 100755 index 0000000000000..2384c86113fc3 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/healthcheck.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +# set -o xtrace # Uncomment this line for debugging purposes + +# Load libraries +. 
/opt/bitnami/scripts/libopensearch.sh + +# Load Opensearch environment variables +. /opt/bitnami/scripts/opensearch-env.sh + +elasticsearch_healthcheck diff --git a/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/postunpack.sh b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/postunpack.sh new file mode 100755 index 0000000000000..d3c0b846fd7e2 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/postunpack.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +# Load libraries +. /opt/bitnami/scripts/libopensearch.sh +. /opt/bitnami/scripts/libfs.sh + +# Load environment +. /opt/bitnami/scripts/opensearch-env.sh + +for dir in "$DB_TMP_DIR" "$DB_DATA_DIR" "$DB_LOGS_DIR" "${DB_BASE_DIR}/plugins" "${DB_BASE_DIR}/modules" "${DB_BASE_DIR}/extensions" "$DB_CONF_DIR" "$DB_VOLUME_DIR" "$DB_INITSCRIPTS_DIR" "$DB_MOUNTED_PLUGINS_DIR"; do + ensure_dir_exists "$dir" + chmod -R ug+rwX "$dir" +done + +elasticsearch_configure_logging + +for dir in "$DB_TMP_DIR" "$DB_DATA_DIR" "$DB_LOGS_DIR" "${DB_BASE_DIR}/plugins" "${DB_BASE_DIR}/modules" "$DB_CONF_DIR" "$DB_VOLUME_DIR" "$DB_INITSCRIPTS_DIR" "$DB_MOUNTED_PLUGINS_DIR"; do + # `elasticsearch-plugin install` command complains about being unable to create a plugin's directory + # even when having the proper permissions. + # The reason: the code tries to check the permissions by consulting the parent directory owner, + # instead of checking if the ES user actually has write permissions. + # + # As a workaround, we will ensure the container works (at least) with the non-root user 1001. However, + # until we can avoid this hack, we can't guarantee this container to work on K8s distributions + # where containers are executed with non-privileged users with random user IDs. 
+ # + # Issue reported at: https://github.com/bitnami/bitnami-docker-elasticsearch/issues/50 + chown -R 1001:0 "$dir" +done + +elasticsearch_install_plugins diff --git a/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/run.sh b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/run.sh new file mode 100755 index 0000000000000..a5068296078c0 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/run.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +#set -o xtrace + +# Load libraries +. /opt/bitnami/scripts/libopensearch.sh +. /opt/bitnami/scripts/libos.sh + +# Load environment +. /opt/bitnami/scripts/opensearch-env.sh + +# Constants +EXEC=$(command -v opensearch) +ARGS=("-p" "$DB_PID_FILE") +[[ -z "${DB_EXTRA_FLAGS:-}" ]] || ARGS=("${ARGS[@]}" "${DB_EXTRA_FLAGS[@]}") +# JAVA_HOME to be deprecated, see warning: +# warning: usage of JAVA_HOME is deprecated, use ES_JAVA_HOME +export JAVA_HOME=/opt/bitnami/java +export OPENSEARCH_JAVA_HOME=/opt/bitnami/java +if is_boolean_yes "${OPENSEARCH_SET_CGROUP:-}"; then + # Taken from upstream OpenSearch container + # https://github.com/opensearch-project/opensearch-build/blob/main/docker/release/config/opensearch/opensearch-docker-entrypoint.sh + export OPENSEARCH_JAVA_OPTS="-Dopensearch.cgroups.hierarchy.override=/ ${OPENSEARCH_JAVA_OPTS:-}" +fi + +ARGS+=("$@") + +info "** Starting Opensearch **" +if am_i_root; then + exec_as_user "$DB_DAEMON_USER" "$EXEC" "${ARGS[@]}" +else + exec "$EXEC" "${ARGS[@]}" +fi diff --git a/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/setup.sh b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/setup.sh new file mode 100755 index 0000000000000..dc63a7c3eb894 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/rootfs/opt/bitnami/scripts/opensearch/setup.sh @@ 
-0,0 +1,37 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +#set -o xtrace + +# Load libraries +. /opt/bitnami/scripts/libos.sh +. /opt/bitnami/scripts/libfs.sh +. /opt/bitnami/scripts/libopensearch.sh + +# Load environment +. /opt/bitnami/scripts/opensearch-env.sh + +if [[ "$APP_VERSION" =~ ^1\. ]]; then + export OPENSEARCH_SECURITY_CONF_DIR="${OPENSEARCH_SECURITY_DIR}/securityconfig" +fi + +# Ensure Opensearch environment variables settings are valid +elasticsearch_validate +# Ensure Opensearch is stopped when this script ends +trap "elasticsearch_stop" EXIT +# Ensure 'daemon' user exists when running as 'root' +am_i_root && ensure_user_exists "$DB_DAEMON_USER" --group "$DB_DAEMON_GROUP" +# Ensure Opensearch is initialized +elasticsearch_initialize +# Ensure kernel settings are valid +elasticsearch_validate_kernel +# Install Opensearch plugins +elasticsearch_install_plugins +# Ensure custom initialization scripts are executed +elasticsearch_custom_init_scripts diff --git a/bitnami/opensearch/1/debian-11/tags-info.yaml b/bitnami/opensearch/1/debian-11/tags-info.yaml new file mode 100644 index 0000000000000..1b8c60fc9b6f6 --- /dev/null +++ b/bitnami/opensearch/1/debian-11/tags-info.yaml @@ -0,0 +1,4 @@ +rolling-tags: +- "1" +- 1-debian-11 +- 1.3.10 diff --git a/bitnami/opensearch/README.md b/bitnami/opensearch/README.md new file mode 100644 index 0000000000000..29b4a32b26447 --- /dev/null +++ b/bitnami/opensearch/README.md @@ -0,0 +1,473 @@ +# OpenSearch packaged by Bitnami + +## What is OpenSearch? + +> OpenSearch is a scalable open-source solution for search, analytics, and observability. Features full-text queries, natural language processing, custom dictionaries, amongst others. + +[Overview of OpenSearch](https://opensearch.org/) +Trademarks: This software listing is packaged by Bitnami. 
The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +docker run --name opensearch bitnami/opensearch:latest +``` + +### Docker Compose + +```console +curl -sSL https://raw.githubusercontent.com/bitnami/containers/main/bitnami/opensearch/docker-compose.yml > docker-compose.yml +docker-compose up -d +``` + +You can find the available configuration options in the [Environment Variables](#environment-variables) section. + +## Why use Bitnami Images? + +* Bitnami closely tracks upstream source changes and promptly publishes new versions of this image using our automated systems. +* With Bitnami images the latest bug fixes and features are available as soon as possible. +* Bitnami containers, virtual machines and cloud images use the same components and configuration approach - making it easy to switch between formats based on your project needs. +* All our images are based on [minideb](https://github.com/bitnami/minideb) a minimalist Debian based container image which gives you a small base container image and the familiarity of a leading Linux distribution. +* All Bitnami images available in Docker Hub are signed with [Docker Content Trust (DCT)](https://docs.docker.com/engine/security/trust/content_trust/). You can use `DOCKER_CONTENT_TRUST=1` to verify the integrity of the images. +* Bitnami container images are released on a regular basis with the latest distribution packages available. + +Looking to use OpenSearch in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. + +## How to deploy OpenSearch in Kubernetes? + +Deploying Bitnami applications as Helm Charts is the easiest way to get started with our applications on Kubernetes. 
Read more about the installation in the [Bitnami OpenSearch Chart GitHub repository](https://github.com/bitnami/charts/tree/master/bitnami/opensearch). + +Bitnami containers can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Why use a non-root container? + +Non-root container images add an extra layer of security and are generally recommended for production environments. However, because they run as a non-root user, privileged tasks are typically off-limits. Learn more about non-root containers [in our docs](https://docs.bitnami.com/tutorials/work-with-non-root-containers/). + +## Supported tags and respective `Dockerfile` links + +Learn more about the Bitnami tagging policy and the difference between rolling tags and immutable tags [in our documentation page](https://docs.bitnami.com/tutorials/understand-rolling-tags-containers/). + +You can see the equivalence between the different tags by taking a look at the `tags-info.yaml` file present in the branch folder, i.e `bitnami/ASSET/BRANCH/DISTRO/tags-info.yaml`. + +Subscribe to project updates by watching the [bitnami/containers GitHub repo](https://github.com/bitnami/containers). + +## Get this image + +The recommended way to get the Bitnami OpenSearch Docker Image is to pull the prebuilt image from the [Docker Hub Registry](https://hub.docker.com/r/bitnami/opensearch). + +```console +docker pull bitnami/opensearch:latest +``` + +To use a specific version, you can pull a versioned tag. You can view the [list of available versions](https://hub.docker.com/r/bitnami/opensearch/tags/) in the Docker Hub Registry. + +```console +docker pull bitnami/opensearch:[TAG] +``` + +If you wish, you can also build the image yourself by cloning the repository, changing to the directory containing the Dockerfile and executing the `docker build` command. 
Remember to replace the `APP`, `VERSION` and `OPERATING-SYSTEM` path placeholders in the example command below with the correct values. + +```console +git clone https://github.com/bitnami/containers.git +cd bitnami/APP/VERSION/OPERATING-SYSTEM +docker build -t bitnami/APP:latest . +``` + +## Persisting your application + +If you remove the container all your data will be lost, and the next time you run the image the application will be reinitialized. To avoid this loss of data, you should mount a volume that will persist even after the container is removed. + +For persistence you should mount a directory at the `/bitnami` path. If the mounted directory is empty, it will be initialized on the first run. + +```console +docker run \ + -v /path/to/opensearch-data-persistence:/bitnami/opensearch/data \ + bitnami/opensearch:latest +``` + +or by making a minor change to the [`docker-compose.yml`](https://github.com/bitnami/containers/blob/main/bitnami/opensearch/docker-compose.yml) file present in this repository: + +```yaml +opensearch: + ... + volumes: + - /path/to/opensearch-data-persistence:/bitnami/opensearch/data + ... +``` + +> NOTE: As this is a non-root container, the mounted files and directories must have the proper permissions for the UID `1001`. + +It is also possible to use multiple volumes for data persistence by using the `OPENSEARCH_DATA_DIR_LIST` environment variable: + +```yaml +opensearch: + ... + volumes: + - /path/to/opensearch-data-persistence-1:/opensearch/data-1 + - /path/to/opensearch-data-persistence-2:/opensearch/data-2 + environment: + - OPENSEARCH_DATA_DIR_LIST=/opensearch/data-1,/opensearch/data-2 + ... +``` + +## Connecting to other containers + +Using [Docker container networking](https://docs.docker.com/engine/userguide/networking/), an OpenSearch server running inside a container can easily be accessed by your application containers. 
+ +Containers attached to the same network can communicate with each other using the container name as the hostname. + +### Using the Command Line + +#### Step 1: Create a network + +```console +docker network create app-tier --driver bridge +``` + +#### Step 2: Launch the OpenSearch server instance + +Use the `--network app-tier` argument to the `docker run` command to attach the OpenSearch container to the `app-tier` network. + +```console +docker run -d --name opensearch-server \ + --network app-tier \ + bitnami/opensearch:latest +``` + +#### Step 3: Launch your application container + +```console +docker run -d --name myapp \ + --network app-tier \ + YOUR_APPLICATION_IMAGE +``` + +> **IMPORTANT**: +> +> 1. Please update the **YOUR_APPLICATION_IMAGE** placeholder in the above snippet with your application image +> 2. In your application container, use the hostname `opensearch-server` to connect to the OpenSearch server + +### Using a Docker Compose file + +When not specified, Docker Compose automatically sets up a new network and attaches all deployed services to that network. However, we will explicitly define a new `bridge` network named `app-tier`. In this example we assume that you want to connect to the OpenSearch server from your own custom application image which is identified in the following snippet by the service name `myapp`. + +```yaml +version: '2' + +networks: + app-tier: + driver: bridge + +services: + opensearch: + image: 'bitnami/opensearch:latest' + networks: + - app-tier + myapp: + image: 'YOUR_APPLICATION_IMAGE' + networks: + - app-tier +``` + +> **IMPORTANT**: +> +> 1. Please update the **YOUR_APPLICATION_IMAGE** placeholder in the above snippet with your application image +> 2. 
In your application container, use the hostname `opensearch` to connect to the OpenSearch server + +Launch the containers using: + +```console +docker-compose up -d +``` + +## Configuration + +### Environment variables + +When you start the opensearch image, you can adjust the configuration of the instance by passing one or more environment variables either on the docker-compose file or on the `docker run` command line. If you want to add a new environment variable: + +* For Docker Compose, add the variable name and value under the application section: + +```yaml +opensearch: + ... + environment: + - OPENSEARCH_PORT_NUMBER=9201 + ... +``` + +* For manual execution add a `-e` option with each variable and value: + +```console + $ docker run -d --name opensearch \ + -p 9201:9201 --network=opensearch_network \ + -e OPENSEARCH_PORT_NUMBER=9201 \ + -v /path/to/opensearch-data-persistence:/bitnami/opensearch/data \ + bitnami/opensearch +``` + +Available variables: + +* `BITNAMI_DEBUG`: Increase verbosity on initialization logs. Default **false** +* `OPENSEARCH_EXTRA_FLAGS`: Extra command-line arguments for the `opensearch` daemon +* `OPENSEARCH_CLUSTER_NAME`: The OpenSearch Cluster Name. Default: **opensearch-cluster** +* `OPENSEARCH_CLUSTER_HOSTS`: List of opensearch hosts to set the cluster. Available separators are ' ', ',' and ';'. No defaults. +* `OPENSEARCH_CLUSTER_MASTER_HOSTS`: List of opensearch master-eligible hosts. Available separators are ' ', ',' and ';'. If no values are provided, it will have the same value as `OPENSEARCH_CLUSTER_HOSTS`. +* `OPENSEARCH_IS_DEDICATED_NODE`: OpenSearch node to behave as a 'dedicated node'. Default: **no** +* `OPENSEARCH_NODE_TYPE`: OpenSearch node type when behaving as a 'dedicated node'. Valid values: *master*, *data*, *coordinating* or *ingest*. +* `OPENSEARCH_NODE_NAME`: OpenSearch node name. No defaults. +* `OPENSEARCH_BIND_ADDRESS`: Address/interface to bind by OpenSearch. 
Default: **0.0.0.0** +* `OPENSEARCH_PORT_NUMBER`: OpenSearch port. Default: **9200** +* `OPENSEARCH_NODE_PORT_NUMBER`: OpenSearch Node to Node port. Default: **9300** +* `OPENSEARCH_PLUGINS`: Comma, semi-colon or space separated list of plugins to install at initialization. No defaults. +* `OPENSEARCH_KEYS`: Comma, semi-colon or space separated list of key-value pairs (key=value) to store. No defaults. +* `OPENSEARCH_HEAP_SIZE`: Memory used for the Xmx and Xms java heap values. Default: **1024m** +* `OPENSEARCH_FS_SNAPSHOT_REPO_PATH`: OpenSearch file system snapshot repository path. No defaults. +* `OPENSEARCH_DATA_DIR_LIST`: Comma, semi-colon or space separated list of directories to use for data storage. No defaults. + +### Setting up a cluster + +A cluster can easily be setup with the Bitnami OpenSearch Docker Image using the following environment variables: + +* `OPENSEARCH_CLUSTER_NAME`: The OpenSearch Cluster Name. Default: **opensearch-cluster** +* `OPENSEARCH_CLUSTER_HOSTS`: List of opensearch hosts to set the cluster. Available separators are ' ', ',' and ';'. No defaults. +* `OPENSEARCH_CLIENT_NODE`: OpenSearch node to behave as a 'smart router' for Kibana app. Default: **false** +* `OPENSEARCH_NODE_NAME`: OpenSearch node name. No defaults. +* `OPENSEARCH_MINIMUM_MASTER_NODES`: Minimum OpenSearch master nodes for a quorum. No defaults. + +For larger cluster, you can setup 'dedicated nodes' using the following environment variables: + +* `OPENSEARCH_IS_DEDICATED_NODE`: OpenSearch node to behave as a 'dedicated node'. Default: **no** +* `OPENSEARCH_NODE_TYPE`: OpenSearch node type when behaving as a 'dedicated node'. Valid values: *master*, *data*, *coordinating* or *ingest*. +* `OPENSEARCH_CLUSTER_MASTER_HOSTS`: List of opensearch master-eligible hosts. Available separators are ' ', ',' and ';'. If no values are provided, it will have the same value as `OPENSEARCH_CLUSTER_HOSTS`. 
+ +Find more information about 'dedicated nodes' in the [official documentation](https://opensearch.org/docs/latest/). + +#### Step 1: Create a new network + +```console +docker network create opensearch_network +``` + +#### Step 2: Create the first node + +```console +docker run --name opensearch-node1 \ + --net=opensearch_network \ + -p 9200:9200 \ + -e OPENSEARCH_CLUSTER_NAME=opensearch-cluster \ + -e OPENSEARCH_CLUSTER_HOSTS=opensearch-node1,opensearch-node2 \ + -e OPENSEARCH_NODE_NAME=elastic-node1 \ + bitnami/opensearch:latest +``` + +In the above command the container is added to a cluster named `opensearch-cluster` using the `OPENSEARCH_CLUSTER_NAME`. The `OPENSEARCH_CLUSTER_HOSTS` parameter sets the names of the nodes that form the cluster, so we will need to launch another container for the second node. Finally the `OPENSEARCH_NODE_NAME` parameter allows you to indicate a known name for the node, otherwise opensearch will generate a random one. + +#### Step 3: Create a second node + +```console +docker run --name opensearch-node2 \ + --link opensearch-node1:opensearch-node1 \ + --net=opensearch_network \ + -e OPENSEARCH_CLUSTER_NAME=opensearch-cluster \ + -e OPENSEARCH_CLUSTER_HOSTS=opensearch-node1,opensearch-node2 \ + -e OPENSEARCH_NODE_NAME=elastic-node2 \ + bitnami/opensearch:latest +``` + +In the above command a new opensearch node is being added to the opensearch cluster indicated by `OPENSEARCH_CLUSTER_NAME`. + +You now have a two-node OpenSearch cluster up and running which can be scaled by adding/removing nodes. 
+ +With Docker Compose the cluster configuration can be setup using: + +```yaml +version: '2' +services: + opensearch-node1: + image: bitnami/opensearch:latest + environment: + - OPENSEARCH_CLUSTER_NAME=opensearch-cluster + - OPENSEARCH_CLUSTER_HOSTS=opensearch-node1,opensearch-node2 + - OPENSEARCH_NODE_NAME=elastic-node1 + + opensearch-node2: + image: bitnami/opensearch:latest + environment: + - OPENSEARCH_CLUSTER_NAME=opensearch-cluster + - OPENSEARCH_CLUSTER_HOSTS=opensearch-node1,opensearch-node2 + - OPENSEARCH_NODE_NAME=elastic-node2 +``` + +### Configuration file + +In order to use a custom configuration file instead of the default one provided out of the box, you can create a file named `opensearch.yml` and mount it at `/opt/bitnami/opensearch/config/opensearch.yml` to overwrite the default configuration: + +```console +docker run -d --name opensearch \ + -p 9201:9201 \ + -v /path/to/opensearch.yml:/opt/bitnami/opensearch/config/opensearch.yml \ + -v /path/to/opensearch-data-persistence:/bitnami/opensearch/data \ + bitnami/opensearch:latest +``` + +or by changing the [`docker-compose.yml`](https://github.com/bitnami/containers/blob/main/bitnami/opensearch/docker-compose.yml) file present in this repository: + +```yaml +opensearch: + ... + volumes: + - /path/to/opensearch.yml:/opt/bitnami/opensearch/config/opensearch.yml + - /path/to/opensearch-data-persistence:/bitnami/opensearch/data + ... +``` + +Please, note that the whole configuration file will be replaced by the provided, default one; ensure that the syntax and fields you provide are properly set and exhaustive. + +If you would rather extend than replace the default configuration with your settings, mount your custom configuration file at `/opt/bitnami/opensearch/config/my_opensearch.yml`. + +### Plugins + +The Bitnami OpenSearch Docker image comes with the [S3 Repository plugin](https://www.elastic.co/guide/en/opensearch/plugins/current/repository-s3.html) installed by default. 
+ +You can add extra plugins by setting the `OPENSEARCH_PLUGINS` environment variable. To specify multiple plugins, separate them by spaces, commas or semicolons. When the container is initialized it will install all of the specified plugins before starting OpenSearch. + +```console +docker run -d --name opensearch \ + -e OPENSEARCH_PLUGINS=analysis-icu \ + bitnami/opensearch:latest +``` + +The Bitnami OpenSearch Docker image will also install plugin `.zip` files mounted at the `/bitnami/opensearch/plugins` directory inside the container, making it possible to install them from disk without requiring Internet access. + +#### Adding plugins at build time (persisting plugins) + +The Bitnami OpenSearch image provides a way to create your custom image installing plugins on build time. This is the preferred way to persist plugins when using Opensearch, as they will not be installed every time the container is started but just once at build time. + +To create your own image providing plugins execute the following command. Remember to replace the `VERSION` and `OPERATING-SYSTEM` path placeholders in the example command below with the correct values. + +```console +git clone https://github.com/bitnami/containers.git +cd bitnami/opensearch/VERSION/OPERATING-SYSTEM +docker build --build-arg OPENSEARCH_PLUGINS= -t bitnami/opensearch:latest . +``` + +The command above will build the image providing this GitHub repository as build context, and will pass the list of plugins to install to the build logic. + +### Initializing a new instance + +When the container is executed for the first time, it will execute the files with extension `.sh` located at `/docker-entrypoint-initdb.d`. + +In order to have your custom files inside the Docker image, you can mount them as a volume. + +## Logging + +The Bitnami OpenSearch Docker image sends the container logs to the `stdout`. 
To view the logs: + +```console +docker logs opensearch +``` + +or using Docker Compose: + +```console +docker-compose logs opensearch +``` + +You can configure the containers [logging driver](https://docs.docker.com/engine/admin/logging/overview/) using the `--log-driver` option if you wish to consume the container logs differently. In the default configuration docker uses the `json-file` driver. + +Additionally, in case you'd like to modify OpenSearch logging configuration, it can be done by overwriting the file `/opt/bitnami/opensearch/config/log4j2.properties`. +The syntax of this file can be found in OpenSearch [logging documentation](https://www.elastic.co/guide/en/opensearch/reference/current/logging.html). + +## Maintenance + +### Upgrade this image + +Bitnami provides up-to-date versions of OpenSearch, including security patches, soon after they are made upstream. We recommend that you follow these steps to upgrade your container. + +#### Step 1: Get the updated image + +```console +docker pull bitnami/opensearch:latest +``` + +or if you're using Docker Compose, update the value of the image property to +`bitnami/opensearch:latest`. + +#### Step 2: Stop and backup the currently running container + +Stop the currently running container using the command + +```console +docker stop opensearch +``` + +or using Docker Compose: + +```console +docker-compose stop opensearch +``` + +Next, take a snapshot of the persistent volume `/path/to/opensearch-data-persistence` using: + +```console +rsync -a /path/to/opensearch-data-persistence /path/to/opensearch-data-persistence.bkp.$(date +%Y%m%d-%H.%M.%S) +``` + +You can use this snapshot to restore the application state should the upgrade fail. 
+ +#### Step 3: Remove the currently running container + +```console +docker rm -v opensearch +``` + +or using Docker Compose: + +```console +docker-compose rm -v opensearch +``` + +#### Step 4: Run the new image + +Re-create your container from the new image, restoring your backup if necessary. + +```console +docker run --name opensearch bitnami/opensearch:latest +``` + +or using Docker Compose: + +```console +docker-compose up opensearch +``` + +## Contributing + +We'd love for you to contribute to this Docker image. You can request new features by creating an [issue](https://github.com/bitnami/containers/issues/new/choose), or submitting a [pull request](https://github.com/bitnami/containers/pulls) with your contribution. + +## Issues + +If you encounter a problem running this container, you can file an [issue](https://github.com/bitnami/containers/issues/new/choose). For us to provide better support, be sure to include the following information in your issue: + +* Host OS and version +* Docker version (`docker version`) +* Output of `docker info` +* Version of this container +* The command you used to run the container, and any relevant output you saw (masking any sensitive information) + +## License + +Copyright © 2023 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +    <http://www.apache.org/licenses/LICENSE-2.0> + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.