Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ build/
out/
.gradle/
docker/users
docker/source-fitbit.properties
docker/source-oura.properties
docker/legacy/source-fitbit.properties
docker/legacy/source-oura.properties
bin/
.DS_Store
.DS_Store
8 changes: 6 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,11 @@ of Java 17 or later.

Generally, this component is installed
with [RADAR-Kubernetes](https://github.com/RADAR-base/RADAR-Kubernetes). It uses Docker
image [radarbase/kafka-connect-rest-fitbit-source](https://hub.docker.com/r/radarbase/kafka-connect-rest-fitbit-source).
image [radarbase/kafka-connect-rest-fitbit-source](https://hub.docker.com/r/radarbase/kafka-connect-rest-fitbit-source),
which is built from the `kafka-connect-fitbit-source/Dockerfile`. The image is based on the Strimzi Kafka Connect image.

The Fitbit source connector can also be run with Docker Compose and the Confluent Kafka Connect image, using the `kafka-connect-fitbit-source/Dockerfile-legacy`.


First, [register a Fitbit App](https://dev.fitbit.com/apps) with Fitbit. It should be either a
server app, for multiple users, or a personal app for a single user. With the server app, you need
Expand All @@ -39,7 +43,7 @@ For every Fitbit user you want access to, copy `docker/fitbit-user.yml.template`
For automatic configuration for multiple users, please take a look at
`scripts/REDCAP-FITBIT-AUTH-AUTO/README.md`.

Copy `docker/source-fitbit.properties.template` to `docker/source-fitbit.properties` and enter
Copy `docker/legacy/source-fitbit.properties.template` to `docker/legacy/source-fitbit.properties` and enter
your Fitbit App client ID and client secret. The following tables shows the possible properties.

<table class="data-table"><tbody>
Expand Down
8 changes: 4 additions & 4 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -145,11 +145,11 @@ services:
radar-fitbit-connector:
build:
context: .
dockerfile: ./kafka-connect-fitbit-source/Dockerfile
dockerfile: ./kafka-connect-fitbit-source/Dockerfile-legacy
image: radarbase/radar-connect-fitbit-source
restart: on-failure
volumes:
- ./docker/source-fitbit.properties:/etc/kafka-connect/source-fitbit.properties
- ./docker/legacy/source-fitbit.properties:/etc/kafka-connect/source-fitbit.properties
- ./docker/users:/var/lib/kafka-connect-fitbit-source/users
- fitbit-logs:/var/lib/kafka-connect-fitbit-source/logs
depends_on:
Expand Down Expand Up @@ -191,11 +191,11 @@ services:
radar-oura-connector:
build:
context: .
dockerfile: ./kafka-connect-oura-source/Dockerfile
dockerfile: ./kafka-connect-oura-source/Dockerfile-legacy
image: radarbase/radar-connect-oura-source
restart: on-failure
volumes:
- ./docker/source-oura.properties:/etc/kafka-connect/source-oura.properties
- ./docker/legacy/source-oura.properties:/etc/kafka-connect/source-oura.properties
- ./docker/users:/var/lib/kafka-connect-oura-source/users
depends_on:
- zookeeper-1
Expand Down
113 changes: 30 additions & 83 deletions docker/ensure
Original file line number Diff line number Diff line change
@@ -1,88 +1,35 @@
#!/bin/bash

# Preflight check for the Strimzi-based Kafka Connect image: block startup
# until the schema registry configured in the generated Connect properties
# file is reachable. Called from kafka_connect_run.sh before connect-distributed.

# Extract the schema registry URL from the generated Connect config.
# -m1: first match wins; dots are escaped so the anchor matches the literal
# property key; -f2- keeps the whole URL even if it contains '=' characters.
ss_url=$(grep -m1 -E '^key\.converter\.schema\.registry\.url=' /tmp/strimzi-connect.properties | cut -d'=' -f2-)

# If the schema registry URL is not set, skip the availability check.
if [ -z "$ss_url" ]; then
  echo "Schema registry URL is not set in strimzi-connect.properties."
  echo "We will not check for schema registry availability."
  exit 0
fi

echo "===> Checking if Schema Registry is available ..."

max_timeout=32   # cap for the exponential back-off (seconds)
tries=10
timeout=1
while true; do
  # HEAD request against the registry's /subjects endpoint; --fail makes
  # curl return non-zero on HTTP errors, --silent suppresses progress noise.
  if curl --head --silent --fail "${ss_url}/subjects" > /dev/null; then
    echo "Schema registry available."
    break
  fi
  tries=$((tries - 1))
  if [ "$tries" -eq 0 ]; then
    echo "FAILED TO REACH SCHEMA REGISTRY."
    exit 6
  fi
  echo "Failed to reach schema registry. Retrying in ${timeout} seconds."
  sleep "${timeout}"
  # Exponential back-off: 1, 2, 4, ... capped at max_timeout seconds.
  if [ "${timeout}" -lt "${max_timeout}" ]; then
    timeout=$((timeout * 2))
  fi
done

echo "Schema registry is available."
78 changes: 78 additions & 0 deletions docker/kafka_connect_run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
#!/usr/bin/env bash

# Source script: https://github.com/strimzi/strimzi-kafka-operator/blob/main/docker-images/kafka-based/kafka/scripts/kafka_connect_run.sh
# Vendored Strimzi launcher with a RADAR-specific preflight hook (see the
# CUSTOM RADAR section near the bottom). Keep in sync with upstream.

# Fail fast on any error; keep tracing off while secrets may be printed.
set -e
set +x

# Prepare hostname - for StrimziPodSets we use the Pod DNS name assigned through the headless service
ADVERTISED_HOSTNAME=$(hostname -f | cut -d "." -f1-4)
export ADVERTISED_HOSTNAME

# Create dir where keystores and truststores will be stored
mkdir -p /tmp/kafka

# Generate and print the config file
# tee writes the effective config to /tmp/strimzi-connect.properties (also
# read by the ensure preflight script) while the sed pass redacts JAAS
# credentials from the logged copy only.
echo "Starting Kafka Connect with configuration:"
tee /tmp/strimzi-connect.properties < "/opt/kafka/custom-config/kafka-connect.properties" | sed -e 's/sasl.jaas.config=.*/sasl.jaas.config=[hidden]/g'
echo ""

# Disable Kafka's GC logging (which logs to a file)...
export GC_LOG_ENABLED="false"

# Pick the log4j vs log4j2 config depending on the Kafka major version
# (Kafka 3.x still uses log4j 1.x configuration syntax).
if [ -z "$KAFKA_LOG4J_OPTS" ]; then
if [[ "${KAFKA_VERSION:0:1}" == "3" ]]
then
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$KAFKA_HOME/custom-config/log4j.properties"
else
export KAFKA_LOG4J_OPTS="-Dlog4j2.configurationFile=$KAFKA_HOME/custom-config/log4j2.properties"
fi
fi

# We don't need LOG_DIR because we write no log files, but setting it to a
# directory avoids trying to create it (and logging a permission denied error)
export LOG_DIR="$KAFKA_HOME"

# Enable Prometheus JMX Exporter as Java agent
if [ "$KAFKA_CONNECT_METRICS_ENABLED" = "true" ]; then
KAFKA_OPTS="${KAFKA_OPTS} -javaagent:$(ls "$JMX_EXPORTER_HOME"/jmx_prometheus_javaagent*.jar)=9404:$KAFKA_HOME/custom-config/metrics-config.json"
export KAFKA_OPTS
fi

# Configure JMX access (script provided by the Strimzi base image).
. ./set_kafka_jmx_options.sh "${STRIMZI_JMX_ENABLED}" "${STRIMZI_JMX_USERNAME}" "${STRIMZI_JMX_PASSWORD}"

# Enable Tracing agent (initializes tracing) as Java agent
if [ "$STRIMZI_TRACING" = "jaeger" ] || [ "$STRIMZI_TRACING" = "opentelemetry" ]; then
KAFKA_OPTS="$KAFKA_OPTS -javaagent:$(ls "$KAFKA_HOME"/libs/tracing-agent*.jar)=$STRIMZI_TRACING"
export KAFKA_OPTS
if [ "$STRIMZI_TRACING" = "opentelemetry" ] && [ -z "$OTEL_TRACES_EXPORTER" ]; then
# auto-set OTLP exporter
export OTEL_TRACES_EXPORTER="otlp"
fi
fi

if [ -n "$STRIMZI_JAVA_SYSTEM_PROPERTIES" ]; then
export KAFKA_OPTS="${KAFKA_OPTS} ${STRIMZI_JAVA_SYSTEM_PROPERTIES}"
fi

# Disable FIPS if needed
if [ "$FIPS_MODE" = "disabled" ]; then
export KAFKA_OPTS="${KAFKA_OPTS} -Dcom.redhat.fips=false"
fi

# Configure heap based on the available resources if needed
. ./dynamic_resources.sh

# Configure Garbage Collection logging
. ./set_kafka_gc_options.sh

set -x

### BEGIN CUSTOM RADAR KAFKA CONNECT SCRIPT ###
# Call the ensure script to verify infrastructure, Kafka cluster, schema registry, and other components
# Runs under set -e: a failing preflight aborts startup with ensure's exit code.
echo "===> Running preflight checks ... "
"${KAFKA_HOME}/ensure"
### END CUSTOM RADAR KAFKA CONNECT SCRIPT ###

# starting Kafka server with final configuration
# NOTE(review): tini -e 143 presumably treats the SIGTERM exit code as a
# clean shutdown — confirm against the tini documentation.
exec /usr/bin/tini -w -e 143 -- "${KAFKA_HOME}/bin/connect-distributed.sh" /tmp/strimzi-connect.properties
7 changes: 7 additions & 0 deletions docker/legacy/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# Confluentinc image based docker setup (legacy)

Files in this directory are used by Dockerfile-legacy to build legacy Docker images (Confluent-based) of the connectors,
as opposed to the new Strimzi based images.

The legacy setup can be used to run the Kafka stack (Kafka, Zookeeper, Schema Registry and Kafka Connectors)
with docker compose (see [docker-compose.yml](../../docker-compose.yml)).
88 changes: 88 additions & 0 deletions docker/legacy/ensure
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
#!/bin/bash

# Preflight check for the legacy (Confluent-based) Connect image: wait for
# the Kafka brokers and the schema registry to become available before the
# connector starts. Exit codes: 5 = brokers not ready, 6 = registry not ready.

# Opt-in guard: only block startup when explicitly requested.
if [ "$WAIT_FOR_KAFKA" != "1" ]; then
  echo "Starting without checking for Kafka availability"
  exit 0
fi

max_timeout=32   # cap for the exponential back-off (seconds)

IS_TEMP=0        # 1 when this script created the command-config file itself

echo "===> Wait for infrastructure ..."

# Build a client config file for the Kafka CLI tools unless the caller
# supplied one via COMMAND_CONFIG_FILE_PATH.
if [ -z "$COMMAND_CONFIG_FILE_PATH" ]; then
  COMMAND_CONFIG_FILE_PATH="$(mktemp)"
  IS_TEMP=1
fi

if [ ! -f "$COMMAND_CONFIG_FILE_PATH" ] || [ "$IS_TEMP" = 1 ]; then
  # Translate CONNECT_* env vars into Kafka client properties, e.g.
  # CONNECT_SECURITY_PROTOCOL=SSL -> security.protocol=SSL.
  # env -0 with read -d '' keeps multi-line variable values intact.
  while IFS='=' read -r -d '' n v; do
    if [[ "$n" == "CONNECT_"* ]]; then
      name="${n/CONNECT_/""}" # remove first "CONNECT_"
      name="${name,,}"        # lower case
      name="${name//_/"."}"   # replace all '_' with '.'
      echo "$name=$v" >> "${COMMAND_CONFIG_FILE_PATH}"
    fi
  done < <(env -0)
fi

# Wait until at least KAFKA_BROKERS brokers answer the API-versions probe.
if [ -z "$CONNECT_BOOTSTRAP_SERVERS" ]; then
  echo "CONNECT_BOOTSTRAP_SERVERS is not defined"
else
  KAFKA_BROKERS=${KAFKA_BROKERS:-3}

  tries=10
  timeout=1
  while true; do
    # Each reachable broker is reported with an "(id: N ...)" entry;
    # grep -c counts them directly (no wc -l pipeline needed).
    KAFKA_CHECK=$(kafka-broker-api-versions --bootstrap-server "$CONNECT_BOOTSTRAP_SERVERS" --command-config "${COMMAND_CONFIG_FILE_PATH}" | grep -c "(id: ")

    if [ "$KAFKA_CHECK" -ge "$KAFKA_BROKERS" ]; then
      echo "Kafka brokers available."
      break
    fi

    tries=$((tries - 1))
    if [ "${tries}" -eq 0 ]; then
      echo "FAILED: KAFKA BROKERs NOT READY."
      exit 5
    fi
    echo "Expected $KAFKA_BROKERS brokers but found only $KAFKA_CHECK. Waiting $timeout second before retrying ..."
    sleep "${timeout}"
    # Exponential back-off: 1, 2, 4, ... capped at max_timeout seconds.
    if [ "${timeout}" -lt "${max_timeout}" ]; then
      timeout=$((timeout * 2))
    fi
  done

  echo "Kafka is available."
fi

# Wait until the schema registry answers on its /subjects endpoint.
if [ -z "$CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL" ]; then
  echo "CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL is not defined"
else
  tries=10
  timeout=1
  while true; do
    # --spider issues a HEAD-style probe without downloading the body.
    if wget --spider -q "${CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL}/subjects" 2>/dev/null; then
      echo "Schema registry available."
      break
    fi
    tries=$((tries - 1))
    if [ "$tries" -eq 0 ]; then
      echo "FAILED TO REACH SCHEMA REGISTRY."
      exit 6
    fi
    echo "Failed to reach schema registry. Retrying in ${timeout} seconds."
    sleep "${timeout}"
    if [ "${timeout}" -lt "${max_timeout}" ]; then
      timeout=$((timeout * 2))
    fi
  done

  echo "Schema registry is available."
fi

# Clean up the temporary command-config file if we created it.
if [ "$IS_TEMP" = 1 ]; then
  /bin/rm -f "$COMMAND_CONFIG_FILE_PATH"
fi
File renamed without changes.
20 changes: 8 additions & 12 deletions kafka-connect-fitbit-source/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -32,16 +32,15 @@ COPY ./kafka-connect-fitbit-source/src/ /code/kafka-connect-fitbit-source/src

RUN gradle jar

FROM confluentinc/cp-kafka-connect-base:7.8.1
FROM quay.io/strimzi/kafka:0.46.0-kafka-3.9.0

USER appuser

LABEL org.opencontainers.image.authors="[email protected]"

LABEL description="Kafka REST API Source connector"

ENV CONNECT_PLUGIN_PATH="/usr/share/java/kafka-connect/plugins" \
WAIT_FOR_KAFKA="1"
ENV CONNECT_PLUGIN_PATH=/opt/kafka/plugins

# To isolate the classpath from the plugin path as recommended
COPY --from=builder /code/kafka-connect-rest-source/build/third-party/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-rest-source/
Expand All @@ -51,14 +50,11 @@ COPY --from=builder /code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_P
COPY --from=builder /code/kafka-connect-rest-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/
COPY --from=builder /code/kafka-connect-fitbit-source/build/libs/*.jar ${CONNECT_PLUGIN_PATH}/kafka-connect-fitbit-source/

# Load topics validator
COPY --chown=appuser:appuser ./docker/ensure /etc/confluent/docker/ensure

# Load modified launcher
COPY --chown=appuser:appuser ./docker/launch /etc/confluent/docker/launch

# Overwrite the log4j configuration to include Sentry monitoring.
COPY ./docker/log4j.properties.template /etc/confluent/docker/log4j.properties.template
# Copy Sentry monitoring jars.
COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /etc/kafka-connect/jars
COPY --from=builder /code/kafka-connect-fitbit-source/build/third-party/sentry-* /opt/kafka/libs/

USER 1001

COPY --chown=1001:1001 ./docker/ensure /opt/kafka/ensure
COPY --chown=1001:1001 ./docker/kafka_connect_run.sh /opt/kafka/kafka_connect_run.sh
RUN chmod +x /opt/kafka/ensure /opt/kafka/kafka_connect_run.sh
Loading