Druid 26 with Kafka (gist by @2bethere, June 10, 2023): docker-compose stack, Dockerfile, entrypoint scripts (druid.sh, peon.sh), and the environment file.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
version: "2.2"
volumes:
metadata_data: {}
middle_var: {}
historical_var: {}
broker_var: {}
coordinator_var: {}
router_var: {}
druid_shared: {}
services:
postgres:
container_name: postgres
image: postgres:latest
ports:
- "5432:5432"
volumes:
- metadata_data:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=FoolishPassword
- POSTGRES_USER=druid
- POSTGRES_DB=druid
# Need 3.5 or later for container nodes
zookeeper:
container_name: zookeeper
image: zookeeper:3.5.10
ports:
- "2181:2181"
environment:
- ZOO_MY_ID=1
coordinator:
image: apache/druid:26.0.0
container_name: coordinator
volumes:
- druid_shared:/opt/shared
- coordinator_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
ports:
- "8081:8081"
command:
- coordinator
env_file:
- environment
broker:
image: apache/druid:26.0.0
container_name: broker
volumes:
- broker_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8082:8082"
command:
- broker
env_file:
- environment
historical:
image: apache/druid:26.0.0
container_name: historical
volumes:
- druid_shared:/opt/shared
- historical_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8083:8083"
command:
- historical
env_file:
- environment
middlemanager:
image: apache/druid:26.0.0
container_name: middlemanager
volumes:
- druid_shared:/opt/shared
- middle_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8091:8091"
- "8100-8105:8100-8105"
command:
- middleManager
env_file:
- environment
router:
image: apache/druid:26.0.0
container_name: router
volumes:
- router_var:/opt/druid/var
depends_on:
- zookeeper
- postgres
- coordinator
ports:
- "8888:8888"
command:
- router
env_file:
- environment
kafka-broker:
image: confluentinc/cp-kafka:7.3.2
container_name: kafka-broker
ports:
# To learn about configuring Kafka for access across networks see
# https://www.confluent.io/blog/kafka-client-cannot-connect-to-broker-on-aws-on-docker-etc/
- "9092:9092"
depends_on:
- zookeeper
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_INTERNAL:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092,PLAINTEXT_INTERNAL://kafka-broker:29092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
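The compose file above runs the Druid 26 quickstart services (coordinator, broker, historical, middlemanager, router) against ZooKeeper and Postgres, plus a single Confluent Kafka broker for streaming ingestion. A minimal sketch of bringing the stack up and creating a topic (the topic name is an arbitrary example; older Docker installs use the docker-compose binary instead of docker compose):

docker compose up -d
docker exec kafka-broker kafka-topics --bootstrap-server localhost:9092 \
  --create --topic wiki-events --partitions 1 --replication-factor 1

Inside the compose network, a Druid Kafka ingestion spec would point bootstrap.servers at kafka-broker:29092 (the PLAINTEXT_INTERNAL listener); producers running on the host use localhost:9092, and the Druid console is served by the router at http://localhost:8888.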
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
ARG JDK_VERSION=11
# The platform is explicitly specified as x64 to build the Druid distribution.
# This is because the distribution cannot be built on arm64 due to a dependency problem in web-console. See: https://github.com/apache/druid/issues/13012
# Since only Java jars are shipped in the final image, it's OK to build the distribution on x64.
# Once the web-console dependency problem is resolved, we can remove the --platform directive.
FROM --platform=linux/amd64 maven:3.8.6-jdk-11-slim as builder
# Rebuild from source in this stage
# This can be unset if the tarball was already built outside of Docker
ARG BUILD_FROM_SOURCE="true"
RUN export DEBIAN_FRONTEND=noninteractive \
&& apt-get -qq update \
&& apt-get -qq -y install --no-install-recommends python3 python3-yaml
COPY . /src
WORKDIR /src
RUN --mount=type=cache,target=/root/.m2 if [ "$BUILD_FROM_SOURCE" = "true" ]; then \
      mvn -B -ff -q dependency:go-offline \
      install \
      -Pdist,bundle-contrib-exts \
      -Pskip-static-checks,skip-tests \
      -Dmaven.javadoc.skip=true \
      ; fi

RUN --mount=type=cache,target=/root/.m2 VERSION=$(mvn -B -q org.apache.maven.plugins:maven-help-plugin:3.2.0:evaluate \
      -Dexpression=project.version -DforceStdout=true \
    ) \
 && tar -zxf ./distribution/target/apache-druid-${VERSION}-bin.tar.gz -C /opt \
 && mv /opt/apache-druid-${VERSION} /opt/druid
FROM busybox:1.35.0-glibc as busybox
FROM gcr.io/distroless/java$JDK_VERSION-debian11
LABEL maintainer="Apache Druid Developers <dev@druid.apache.org>"
COPY --from=busybox /bin/busybox /busybox/busybox
RUN ["/busybox/busybox", "--install", "/bin"]
# Predefined builtin arg, see: https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
ARG TARGETARCH
#
# Download the bash-static binary to execute scripts that require bash.
# Although bash-static supports multiple platforms, there is no need to support all of them; amd64 and arm64 are enough.
#
ARG BASH_URL_BASE="https://github.com/robxu9/bash-static/releases/download/5.1.016-1.2.3"
RUN if [ "$TARGETARCH" = "arm64" ]; then \
BASH_URL="${BASH_URL_BASE}/bash-linux-aarch64" ; \
elif [ "$TARGETARCH" = "amd64" ]; then \
BASH_URL="${BASH_URL_BASE}/bash-linux-x86_64" ; \
else \
echo "Unsupported architecture ($TARGETARCH)" && exit 1; \
fi; \
echo "Downloading bash-static from ${BASH_URL}" \
&& wget ${BASH_URL} -O /bin/bash \
&& chmod 755 /bin/bash
RUN addgroup -S -g 1000 druid \
&& adduser -S -u 1000 -D -H -h /opt/druid -s /bin/sh -g '' -G druid druid
COPY --chown=druid:druid --from=builder /opt /opt
COPY distribution/docker/druid.sh /druid.sh
COPY distribution/docker/peon.sh /peon.sh
# Create the directories that may be mounted as volumes.
# /opt/druid/var is used to keep individual files (e.g. logs) of each Druid service
# /opt/shared is used to keep segments and task logs shared among Druid services
RUN mkdir /opt/druid/var /opt/shared \
&& chown druid:druid /opt/druid/var /opt/shared \
&& chmod 775 /opt/druid/var /opt/shared
USER druid
VOLUME /opt/druid/var
WORKDIR /opt/druid
ENTRYPOINT ["/druid.sh"]
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE: this is a 'run' script for the stock tarball
# It takes one required argument (the name of the service,
# e.g. 'broker', 'historical' etc). Any additional arguments
# are passed to that service.
#
# This script accepts JAVA_OPTS as an environment variable
#
# Additional env vars:
# - DRUID_LOG4J -- set the entire log4j.xml verbatim
# - DRUID_LOG_LEVEL -- override the default log level in default log4j. This presently works only if the existing log level is INFO
# - DRUID_SERVICE_LOG4J -- set the entire service specific log4j.xml verbatim
# - DRUID_SERVICE_LOG_LEVEL -- override the default log level in the service specific log4j. This presently works only if the existing log level is INFO
# - DRUID_XMX -- set Java Xmx
# - DRUID_XMS -- set Java Xms
# - DRUID_MAXNEWSIZE -- set Java max new size
# - DRUID_NEWSIZE -- set Java new size
# - DRUID_MAXDIRECTMEMORYSIZE -- set Java max direct memory size
#
# - DRUID_CONFIG_COMMON -- full path to a file for druid 'common' properties
# - DRUID_CONFIG_${service} -- full path to a file for druid 'service' properties
# - DRUID_SINGLE_NODE_CONF -- config to use at runtime. Choose from: {large, medium, micro-quickstart, nano-quickstart, small, xlarge}
# - DRUID_ADDITIONAL_CLASSPATH -- a list of colon-separated paths that will be added to the classpath of druid processes.
# These paths can include jars, additional configuration folders (such as HDFS config), etc.
# It is important to ensure that these paths exist in the environment Druid runs in if they are not part of the distribution.
set -e
SERVICE="$1"
echo "$(date -Is) startup service $SERVICE"
# We put all the config in /tmp/conf to allow for a
# read-only root filesystem
mkdir -p /tmp/conf/
test -d /tmp/conf/druid && rm -r /tmp/conf/druid
cp -r /opt/druid/conf/druid /tmp/conf/druid
getConfPath() {
    if [ -n "$DRUID_SINGLE_NODE_CONF" ]
    then
        getSingleServerConfPath $1
    else
        getClusterConfPath $1
    fi
}

getSingleServerConfPath() {
    cluster_conf_base=/tmp/conf/druid/single-server
    case "$1" in
        _common) echo $cluster_conf_base/$DRUID_SINGLE_NODE_CONF/_common ;;
        historical) echo $cluster_conf_base/$DRUID_SINGLE_NODE_CONF/historical ;;
        middleManager) echo $cluster_conf_base/$DRUID_SINGLE_NODE_CONF/middleManager ;;
        # indexer) echo $cluster_conf_base/data/indexer ;;
        coordinator | overlord) echo $cluster_conf_base/$DRUID_SINGLE_NODE_CONF/coordinator-overlord ;;
        broker) echo $cluster_conf_base/$DRUID_SINGLE_NODE_CONF/broker ;;
        router) echo $cluster_conf_base/$DRUID_SINGLE_NODE_CONF/router ;;
        *) echo $cluster_conf_base/misc/$1 ;;
    esac
}

getClusterConfPath() {
    cluster_conf_base=/tmp/conf/druid/cluster
    case "$1" in
        _common) echo $cluster_conf_base/_common ;;
        historical) echo $cluster_conf_base/data/historical ;;
        middleManager) echo $cluster_conf_base/data/middleManager ;;
        indexer) echo $cluster_conf_base/data/indexer ;;
        coordinator | overlord) echo $cluster_conf_base/master/coordinator-overlord ;;
        broker) echo $cluster_conf_base/query/broker ;;
        router) echo $cluster_conf_base/query/router ;;
        *) echo $cluster_conf_base/misc/$1 ;;
    esac
}

COMMON_CONF_DIR=$(getConfPath _common)
SERVICE_CONF_DIR=$(getConfPath ${SERVICE})

# Delete the old key (if existing) and append new key=value
setKey() {
    service="$1"
    key="$2"
    value="$3"
    service_conf=$(getConfPath $service)/runtime.properties
    # Delete from all
    sed -ri "/$key=/d" $COMMON_CONF_DIR/common.runtime.properties
    [ -f $service_conf ] && sed -ri "/$key=/d" $service_conf
    [ -f $service_conf ] && echo -e "\n$key=$value" >>$service_conf
    [ -f $service_conf ] || echo -e "\n$key=$value" >>$COMMON_CONF_DIR/common.runtime.properties
    echo "Setting $key=$value in $service_conf"
}

setJavaKey() {
    service="$1"
    key=$2
    value=$3
    file=$(getConfPath $service)/jvm.config
    sed -ri "/$key/d" $file
    echo $value >> $file
}
# This is to allow configuration via a Kubernetes configMap without
# e.g. using subPath (you can also mount the configMap on /tmp/conf/druid)
if [ -n "$DRUID_CONFIG_COMMON" ]
then
cp -f "$DRUID_CONFIG_COMMON" $COMMON_CONF_DIR/common.runtime.properties
fi
SCONFIG=$(printf "%s_%s" DRUID_CONFIG ${SERVICE})
SCONFIG=$(eval echo \$$(echo $SCONFIG))
if [ -n "${SCONFIG}" ]
then
cp -f "${SCONFIG}" $SERVICE_CONF_DIR/runtime.properties
fi
## Setup host names
if [ -n "${ZOOKEEPER}" ];
then
setKey _common druid.zk.service.host "${ZOOKEEPER}"
fi
DRUID_SET_HOST=${DRUID_SET_HOST:-1}
if [ "${DRUID_SET_HOST}" = "1" ]
then
setKey $SERVICE druid.host $(ip r get 1 | awk '{print $7;exit}')
fi
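# The loop below turns every druid_* environment variable into a Druid runtime property
# by replacing underscores with dots; for example, druid_metadata_storage_type=postgresql
# from the env_file becomes druid.metadata.storage.type=postgresql for this service.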
env | grep ^druid_ | while read evar;
do
    # Can't use IFS='=' to parse since var might have = in it (e.g. password)
    val=$(echo "$evar" | sed -e 's?[^=]*=??')
    var=$(echo "$evar" | sed -e 's?^\([^=]*\)=.*?\1?g' -e 's?_?.?g')
    setKey $SERVICE "$var" "$val"
done

env | grep ^s3service | while read evar
do
    val=$(echo "$evar" | sed -e 's?[^=]*=??')
    var=$(echo "$evar" | sed -e 's?^\([^=]*\)=.*?\1?g' -e 's?_?.?' -e 's?_?-?g')
    echo "$var=$val" >>$COMMON_CONF_DIR/jets3t.properties
done
# Now do the java options
if [ -n "$DRUID_XMX" ]; then setJavaKey ${SERVICE} -Xmx -Xmx${DRUID_XMX}; fi
if [ -n "$DRUID_XMS" ]; then setJavaKey ${SERVICE} -Xms -Xms${DRUID_XMS}; fi
if [ -n "$DRUID_MAXNEWSIZE" ]; then setJavaKey ${SERVICE} -XX:MaxNewSize -XX:MaxNewSize=${DRUID_MAXNEWSIZE}; fi
if [ -n "$DRUID_NEWSIZE" ]; then setJavaKey ${SERVICE} -XX:NewSize -XX:NewSize=${DRUID_NEWSIZE}; fi
if [ -n "$DRUID_MAXDIRECTMEMORYSIZE" ]; then setJavaKey ${SERVICE} -XX:MaxDirectMemorySize -XX:MaxDirectMemorySize=${DRUID_MAXDIRECTMEMORYSIZE}; fi
# Combine options from jvm.config and those given as JAVA_OPTS
# If a value is specified in both then JAVA_OPTS will take precedence when using OpenJDK
# However this behavior is not part of the spec and is thus implementation specific
JAVA_OPTS="$(cat $SERVICE_CONF_DIR/jvm.config | xargs) $JAVA_OPTS"
if [ -n "$DRUID_LOG_LEVEL" ]
then
sed -ri 's/"info"/"'$DRUID_LOG_LEVEL'"/g' $COMMON_CONF_DIR/log4j2.xml
fi
if [ -n "$DRUID_LOG4J" ]
then
echo "$DRUID_LOG4J" >$COMMON_CONF_DIR/log4j2.xml
fi
# Service level log options can be used when the log4j2.xml file is setup in the service config directory
# instead of the common config directory
if [ -n "$DRUID_SERVICE_LOG_LEVEL" ]
then
sed -ri 's/"info"/"'$DRUID_SERVICE_LOG_LEVEL'"/g' $SERVICE_CONF_DIR/log4j2.xml
fi
if [ -n "$DRUID_SERVICE_LOG4J" ]
then
echo "$DRUID_SERVICE_LOG4J" >$SERVICE_CONF_DIR/log4j2.xml
fi
DRUID_DIRS_TO_CREATE=${DRUID_DIRS_TO_CREATE-'var/tmp var/druid/segments var/druid/indexing-logs var/druid/task var/druid/hadoop-tmp var/druid/segment-cache'}
if [ -n "${DRUID_DIRS_TO_CREATE}" ]
then
mkdir -p ${DRUID_DIRS_TO_CREATE}
fi
exec bin/run-java ${JAVA_OPTS} -cp $COMMON_CONF_DIR:$SERVICE_CONF_DIR:lib/*:$DRUID_ADDITIONAL_CLASSPATH org.apache.druid.cli.Main server $@
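Since /druid.sh is the image ENTRYPOINT, the service name under each compose command: stanza arrives here as $1. As a worked example of the script above (assuming the environment file below with DRUID_SINGLE_NODE_CONF=micro-quickstart is in effect), the broker service effectively runs /druid.sh broker, the config directories resolve to /tmp/conf/druid/single-server/micro-quickstart/{_common,broker}, and the final exec expands to roughly:

bin/run-java $JAVA_OPTS -cp /tmp/conf/druid/single-server/micro-quickstart/_common:/tmp/conf/druid/single-server/micro-quickstart/broker:lib/* org.apache.druid.cli.Main server broker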
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Java tuning
#DRUID_XMX=1g
#DRUID_XMS=1g
#DRUID_MAXNEWSIZE=250m
#DRUID_NEWSIZE=250m
#DRUID_MAXDIRECTMEMORYSIZE=6172m
DRUID_SINGLE_NODE_CONF=micro-quickstart
druid_emitter_logging_logLevel=debug
druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-multi-stage-query", "druid-parquet-extensions", "druid-kafka-indexing-service"]
druid_zk_service_host=zookeeper
druid_metadata_storage_host=
druid_metadata_storage_type=postgresql
druid_metadata_storage_connector_connectURI=jdbc:postgresql://postgres:5432/druid
druid_metadata_storage_connector_user=druid
druid_metadata_storage_connector_password=FoolishPassword
druid_coordinator_balancer_strategy=cachingCost
druid_indexer_runner_javaOptsArray=["-server", "-Xmx1g", "-Xms1g", "-XX:MaxDirectMemorySize=3g", "-Duser.timezone=UTC", "-Dfile.encoding=UTF-8", "-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager"]
druid_indexer_fork_property_druid_processing_buffer_sizeBytes=256MiB
druid_storage_type=local
druid_storage_storageDirectory=/opt/shared/segments
druid_indexer_logs_type=file
druid_indexer_logs_directory=/opt/shared/indexing-logs
druid_processing_numThreads=2
druid_processing_numMergeBuffers=2
DRUID_LOG4J=<?xml version="1.0" encoding="UTF-8" ?><Configuration status="WARN"><Appenders><Console name="Console" target="SYSTEM_OUT"><PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/></Console></Appenders><Loggers><Root level="info"><AppenderRef ref="Console"/></Root><Logger name="org.apache.druid.jetty.RequestLog" additivity="false" level="DEBUG"><AppenderRef ref="Console"/></Logger></Loggers></Configuration>
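druid.sh (above) rewrites each druid_* entry in this environment file into a Druid runtime property by swapping underscores for dots and appends it to the service's runtime.properties. As a sketch of the expected result, the ZooKeeper and metadata-storage lines end up as:

druid.zk.service.host=zookeeper
druid.metadata.storage.type=postgresql
druid.metadata.storage.connector.connectURI=jdbc:postgresql://postgres:5432/druid
druid.metadata.storage.connector.user=druid
druid.metadata.storage.connector.password=FoolishPassword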
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE: this is a 'run' script for the stock tarball
# It takes 1 required argument (the name of the service,
# e.g. 'broker', 'historical' etc). Any additional arguments
# are passed to that service.
#
# It accepts 'JAVA_OPTS' as an environment variable
#
# Additional env vars:
# - DRUID_LOG4J -- set the entire log4j.xml verbatim
# - DRUID_LOG_LEVEL -- override the default log level in default log4j
# - DRUID_XMX -- set Java Xmx
# - DRUID_XMS -- set Java Xms
# - DRUID_MAXNEWSIZE -- set Java max new size
# - DRUID_NEWSIZE -- set Java new size
# - DRUID_MAXDIRECTMEMORYSIZE -- set Java max direct memory size
#
# - DRUID_CONFIG_COMMON -- full path to a file for druid 'common' properties
# - DRUID_CONFIG_${service} -- full path to a file for druid 'service' properties
# This script is very similar to druid.sh, used exclusively for the kubernetes-overlord-extension.
set -e
SERVICE="overlord"
echo "$(date -Is) startup service $SERVICE"
# We put all the config in /tmp/conf to allow for a
# read-only root filesystem
mkdir -p /tmp/conf/
test -d /tmp/conf/druid && rm -r /tmp/conf/druid
cp -r /opt/druid/conf/druid /tmp/conf/druid
getConfPath() {
    cluster_conf_base=/tmp/conf/druid/cluster
    case "$1" in
        _common) echo $cluster_conf_base/_common ;;
        historical) echo $cluster_conf_base/data/historical ;;
        middleManager) echo $cluster_conf_base/data/middleManager ;;
        indexer) echo $cluster_conf_base/data/indexer ;;
        coordinator | overlord) echo $cluster_conf_base/master/coordinator-overlord ;;
        broker) echo $cluster_conf_base/query/broker ;;
        router) echo $cluster_conf_base/query/router ;;
        *) echo $cluster_conf_base/misc/$1 ;;
    esac
}
COMMON_CONF_DIR=$(getConfPath _common)
SERVICE_CONF_DIR=$(getConfPath ${SERVICE})
# Delete the old key (if existing) and append new key=value
setKey() {
    service="$1"
    key="$2"
    value="$3"
    service_conf=$(getConfPath $service)/runtime.properties
    # Delete from all
    sed -ri "/$key=/d" $COMMON_CONF_DIR/common.runtime.properties
    [ -f $service_conf ] && sed -ri "/$key=/d" $service_conf
    [ -f $service_conf ] && echo -e "\n$key=$value" >>$service_conf
    [ -f $service_conf ] || echo -e "\n$key=$value" >>$COMMON_CONF_DIR/common.runtime.properties
    echo "Setting $key=$value in $service_conf"
}

setJavaKey() {
    service="$1"
    key=$2
    value=$3
    file=$(getConfPath $service)/jvm.config
    sed -ri "/$key/d" $file
    echo $value >> $file
}
## Setup host names
if [ -n "${ZOOKEEPER}" ];
then
setKey _common druid.zk.service.host "${ZOOKEEPER}"
fi
DRUID_SET_HOST=${DRUID_SET_HOST:-1}
if [ "${DRUID_SET_HOST}" = "1" ]
then
setKey $SERVICE druid.host $(ip r get 1 | awk '{print $7;exit}')
fi
env | grep ^druid_ | while read evar;
do
# Can't use IFS='=' to parse since var might have = in it (e.g. password)
val=$(echo "$evar" | sed -e 's?[^=]*=??')
var=$(echo "$evar" | sed -e 's?^\([^=]*\)=.*?\1?g' -e 's?_?.?g')
setKey $SERVICE "$var" "$val"
done
env |grep ^s3service | while read evar
do
val=$(echo "$evar" | sed -e 's?[^=]*=??')
var=$(echo "$evar" | sed -e 's?^\([^=]*\)=.*?\1?g' -e 's?_?.?' -e 's?_?-?g')
echo "$var=$val" >>$COMMON_CONF_DIR/jets3t.properties
done
# This is to allow configuration via a Kubernetes configMap without
# e.g. using subPath (you can also mount the configMap on /tmp/conf/druid)
if [ -n "$DRUID_CONFIG_COMMON" ]
then
cp -f "$DRUID_CONFIG_COMMON" $COMMON_CONF_DIR/common.runtime.properties
fi
SCONFIG=$(printf "%s_%s" DRUID_CONFIG ${SERVICE})
SCONFIG=$(eval echo \$$(echo $SCONFIG))
if [ -n "${SCONFIG}" ]
then
cp -f "${SCONFIG}" $SERVICE_CONF_DIR/runtime.properties
fi
if [ -n "$DRUID_LOG_LEVEL" ]
then
sed -ri 's/"info"/"'$DRUID_LOG_LEVEL'"/g' $COMMON_CONF_DIR/log4j2.xml
fi
if [ -n "$DRUID_LOG4J" ]
then
echo "$DRUID_LOG4J" >$COMMON_CONF_DIR/log4j2.xml
fi
DRUID_DIRS_TO_CREATE=${DRUID_DIRS_TO_CREATE-'var/tmp var/druid/segments var/druid/indexing-logs var/druid/task var/druid/hadoop-tmp var/druid/segment-cache'}
if [ -n "${DRUID_DIRS_TO_CREATE}" ]
then
mkdir -p ${DRUID_DIRS_TO_CREATE}
fi
# Take the ${TASK_JSON} environment variable, base64-decode and gunzip it, and write the result to ${TASK_DIR}/task.json
mkdir -p ${TASK_DIR}; echo ${TASK_JSON} | base64 -d | gzip -d > ${TASK_DIR}/task.json;
exec java ${JAVA_OPTS} -cp $COMMON_CONF_DIR:$SERVICE_CONF_DIR:lib/*: org.apache.druid.cli.Main internal peon $@
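peon.sh expects TASK_DIR and a TASK_JSON value that is a gzip-compressed, base64-encoded task spec. A hypothetical way to produce such a payload by hand for local testing (task.json is an example file name, not something shipped with the image):

TASK_DIR=/tmp/task
TASK_JSON=$(gzip -c task.json | base64 | tr -d '\n')
export TASK_DIR TASK_JSON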