Skip to content

Commit fa3af21

Browse files
committed
refactor: update producer code to work with latest packages
- Added new Dockerfile and docker-compose file to test locally Signed-off-by: Joel Hanson <[email protected]>
1 parent c0d8da2 commit fa3af21

File tree

5 files changed

+112
-48
lines changed

5 files changed

+112
-48
lines changed

Dockerfile

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
# ---- Build stage ----
FROM maven:3.8.7-ibm-semeru-17-focal AS build

WORKDIR /app

# Build descriptors first, then the sources.
COPY pom.xml package.json ./
COPY src ./src
COPY ui ./ui

# Package the fat JAR; the frontend-maven-plugin builds the UI as part of this.
RUN mvn clean package -DskipTests

# ---- Runtime stage ----
FROM openjdk:17-jdk-slim

WORKDIR /app

# Runtime configuration plus the application JAR from the build stage.
COPY kafka.properties kafka.properties
COPY --from=build /app/target/demo-all.jar app.jar

EXPOSE 8080

ENTRYPOINT ["java", "-jar", "app.jar"]

docker-compose.yml

Lines changed: 27 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -1,29 +1,34 @@
1-
services:

  broker:
    image: apache/kafka:latest
    container_name: broker
    ports:
      # Publish the host-facing listener (PLAINTEXT_HOST, advertised as
      # localhost:29092). The internal listener on 9092 stays on the
      # compose network only.
      - "29092:29092"
    environment:
      KAFKA_NODE_ID: 1
      KAFKA_PROCESS_ROLES: broker,controller
      KAFKA_LISTENERS: PLAINTEXT://:9092,PLAINTEXT_HOST://:29092,CONTROLLER://:9093
      # Advertise the actual service hostname ("broker"); advertising
      # "kafka" would hand clients a hostname nothing on this network resolves.
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:9092,PLAINTEXT_HOST://localhost:29092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,CONTROLLER:PLAINTEXT
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@broker:9093
      KAFKA_LOG_DIRS: /tmp/kraft-combined-logs
      KAFKA_CONFIG_DIR: /var/lib/kafka-config
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
    healthcheck:
      test: ["CMD", "/opt/kafka/bin/kafka-broker-api-versions.sh", "--bootstrap-server", "localhost:9092"]
      interval: 5s
      timeout: 10s
      retries: 3
      start_period: 3s

  app:
    build: .
    container_name: demo-app
    depends_on:
      # Must reference the service name defined above ("broker", not "kafka"),
      # otherwise compose refuses to start the stack.
      broker:
        condition: service_healthy
    ports:
      - "8080:8080"
    restart: always

kafka.properties

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Bootstrap against the broker container's hostname on the compose network.
# The service/container is named "broker"; "kafka" does not resolve there.
bootstrap.servers=broker:9092

## Optional topic configuration - otherwise default value will be chosen
# topic=

src/main/java/kafka/vertx/demo/PeriodicProducer.java

Lines changed: 60 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77

88
import io.vertx.core.AbstractVerticle;
99
import io.vertx.core.Promise;
10-
import io.vertx.core.TimeoutStream;
1110
import io.vertx.core.eventbus.Message;
1211
import io.vertx.core.json.JsonObject;
1312
import io.vertx.kafka.client.producer.KafkaProducer;
@@ -16,54 +15,85 @@
1615
import org.slf4j.Logger;
1716
import org.slf4j.LoggerFactory;
1817

19-
import java.util.HashMap;
18+
import java.time.Duration;
19+
import java.util.Map;
20+
import java.util.stream.Collectors;
2021

2122
public class PeriodicProducer extends AbstractVerticle {
2223

2324
private static final Logger logger = LoggerFactory.getLogger(PeriodicProducer.class);
25+
private static final long PRODUCE_INTERVAL_MS = Duration.ofSeconds(2).toMillis();
26+
27+
private KafkaProducer<String, String> kafkaProducer;
28+
private long timerId = -1;
2429
private String customMessage;
2530

2631
@Override
2732
public void start(Promise<Void> startPromise) {
28-
String propertiesPath = System.getProperty(Main.PROPERTIES_PATH_ENV_NAME, Main.DEFAULT_PROPERTIES_PATH);
33+
String propertiesPath = System.getProperty(
34+
Main.PROPERTIES_PATH_ENV_NAME,
35+
Main.DEFAULT_PROPERTIES_PATH
36+
);
37+
2938
Main.loadKafkaConfig(vertx, propertiesPath)
3039
.onSuccess(config -> {
31-
HashMap<String, String> props = config.mapTo(HashMap.class);
32-
setup(props);
40+
setup(config); // pass JsonObject directly
3341
startPromise.complete();
3442
})
3543
.onFailure(startPromise::fail);
3644
}
3745

38-
private void setup(HashMap<String, String> props) {
39-
// Don't retry and only wait 10 secs for partition info as this is a demo app
46+
private void setup(JsonObject config) {
47+
// Convert JsonObject config -> Map<String,String>
48+
Map<String, String> props = config.getMap()
49+
.entrySet()
50+
.stream()
51+
.collect(Collectors.toMap(
52+
Map.Entry::getKey,
53+
e -> String.valueOf(e.getValue()) // force everything to String
54+
));
55+
56+
// Demo-friendly settings
4057
props.put(ProducerConfig.RETRIES_CONFIG, "0");
4158
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "10000");
42-
KafkaProducer<String, String> kafkaProducer = KafkaProducer.create(vertx, props);
4359

44-
kafkaProducer.exceptionHandler(err -> logger.debug("Kafka error: {}", err));
60+
// Explicit serializers
61+
kafkaProducer = KafkaProducer.create(vertx, props, String.class, String.class);
62+
kafkaProducer.exceptionHandler(err -> logger.error("Kafka producer error", err));
4563

46-
TimeoutStream timerStream = vertx.periodicStream(2000);
47-
timerStream.handler(tick -> produceKafkaRecord(kafkaProducer, props.get(Main.TOPIC_KEY)));
48-
timerStream.pause();
64+
vertx.eventBus()
65+
.<JsonObject>consumer(Main.PERIODIC_PRODUCER_ADDRESS,
66+
msg -> handleCommand(props, msg));
4967

50-
vertx.eventBus().<JsonObject>consumer(Main.PERIODIC_PRODUCER_ADDRESS, message -> handleCommand(timerStream, message));
5168
logger.info("🚀 PeriodicProducer started");
5269
}
5370

54-
private void handleCommand(TimeoutStream timerStream, Message<JsonObject> message) {
71+
private void handleCommand(Map<String, String> props, Message<JsonObject> message) {
5572
String command = message.body().getString(WebSocketServer.ACTION, "none");
56-
if (WebSocketServer.START_ACTION.equals(command)) {
57-
logger.info("Producing Kafka records");
58-
customMessage = message.body().getString("custom", "Hello World");
59-
timerStream.resume();
60-
} else if (WebSocketServer.STOP_ACTION.equals(command)) {
61-
logger.info("Stopping producing Kafka records");
62-
timerStream.pause();
73+
switch (command) {
74+
case WebSocketServer.START_ACTION:
75+
customMessage = message.body().getString("custom", "Hello World");
76+
if (timerId == -1) {
77+
timerId = vertx.setPeriodic(PRODUCE_INTERVAL_MS,
78+
id -> produceKafkaRecord(props.get(Main.TOPIC_KEY).toString()));
79+
logger.info("Producing Kafka records with message template: {}", customMessage);
80+
}
81+
break;
82+
83+
case WebSocketServer.STOP_ACTION:
84+
if (timerId != -1) {
85+
vertx.cancelTimer(timerId);
86+
timerId = -1;
87+
logger.info("Stopped producing Kafka records");
88+
}
89+
break;
90+
91+
default:
92+
logger.warn("Unknown command received: {}", command);
6393
}
6494
}
6595

66-
private void produceKafkaRecord(KafkaProducer<String, String> kafkaProducer, String topic) {
96+
private void produceKafkaRecord(String topic) {
6797
String payload = customMessage;
6898
KafkaProducerRecord<String, String> record = KafkaProducerRecord.create(topic, payload);
6999
logger.debug("Producing record to topic {} with payload {}", topic, payload);
@@ -84,4 +114,12 @@ private void produceKafkaRecord(KafkaProducer<String, String> kafkaProducer, Str
84114
vertx.eventBus().send(Main.PERIODIC_PRODUCER_BROADCAST, new JsonObject().put("status", "ERROR"));
85115
});
86116
}
117+
118+
@Override
119+
public void stop() {
120+
if (kafkaProducer != null) {
121+
kafkaProducer.close()
122+
.onComplete(ar -> logger.info("KafkaProducer closed: {}", ar.succeeded() ? "success" : ar.cause()));
123+
}
124+
}
87125
}

src/main/java/kafka/vertx/demo/WebSocketServer.java

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@
2424
import org.slf4j.LoggerFactory;
2525

2626
import java.util.HashMap;
27-
import java.util.Set;
2827

2928
public class WebSocketServer extends AbstractVerticle {
3029

@@ -70,7 +69,7 @@ private Future<HttpServer> createRouterAndStartServer(JsonObject config) {
7069
JsonObject props = new JsonObject();
7170

7271
String topic = config.getString("topic");
73-
72+
7473
props.put("topic", topic);
7574
props.put("producerPath", PRODUCE_PATH);
7675
props.put("consumerPath", CONSUME_PATH);
@@ -94,7 +93,7 @@ private Future<HttpServer> startWebSocket(Router router) {
9493
return vertx.createHttpServer(new HttpServerOptions().setRegisterWebSocketWriteHandlers(true))
9594
.requestHandler(router)
9695
.webSocketHandler(this::handleWebSocket)
97-
.listen(8080)
96+
.listen(8080, "0.0.0.0")
9897
.onSuccess(ok -> logger.info("🚀 WebSocketServer started"))
9998
.onFailure(err -> logger.error("❌ WebSocketServer failed to listen", err));
10099
}

0 commit comments

Comments
 (0)