Apache ZooKeeper
Packaging/release artifacts
apache-zookeeper-[version].tar.gz
Contains all the source files, which can be built by running:
mvn clean install
To generate aggregated apidocs for zookeeper-server and zookeeper-jute:
mvn javadoc:aggregate
(generated files will be at target/site/apidocs)
apache-zookeeper-[version]-bin.tar.gz
Contains all the jar files required to run ZooKeeper
Full documentation can also be found in the docs folder
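As a quick sanity check of the -bin artifact, the standard single-server quick start from the ZooKeeper docs applies (the sample config ships in the conf folder; [version] left as a placeholder):
tar -xzf apache-zookeeper-[version]-bin.tar.gz
cd apache-zookeeper-[version]-bin
cp conf/zoo_sample.cfg conf/zoo.cfg    # default standalone config, client port 2181
bin/zkServer.sh start                  # start a standalone server
bin/zkCli.sh -server 127.0.0.1:2181    # connect with the bundled CLI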
EmbeddedKafka failing since Spring Boot 2.6.X : AccessDeniedException: ..\AppData\Local\Temp\spring.kafka*
package com.example.demo;

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.boot.ApplicationRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.KafkaTemplate;

import java.util.stream.IntStream;

@SpringBootApplication
public class DemoApplication {

    public static void main(String[] args) {
        SpringApplication.run(DemoApplication.class, args);
    }

    @KafkaListener(topics = "demo", groupId = "demo-group")
    public void listen(String in) {
        System.out.println("Processing: " + in);
    }

    @Bean
    public NewTopic topic() {
        return new NewTopic("demo", 5, (short) 1);
    }

    @Bean
    public ApplicationRunner runner(KafkaTemplate<String, String> template) {
        return args -> IntStream.range(0, 10).forEach(i -> {
            String event = "foo" + i;
            System.out.println("Sending " + event);
            template.send("demo", i + "", event);
        });
    }
}
package com.example.demo;

import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.ApplicationRunner;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.DynamicPropertyRegistry;
import org.springframework.test.context.DynamicPropertySource;
import org.testcontainers.containers.KafkaContainer;
import org.testcontainers.junit.jupiter.Container;
import org.testcontainers.junit.jupiter.Testcontainers;
import org.testcontainers.utility.DockerImageName;

@Testcontainers
@SpringBootTest
class DemoApplicationTest {

    @Autowired
    ApplicationRunner applicationRunner;

    @Container
    public static KafkaContainer kafkaContainer =
            new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:latest"));

    @BeforeAll
    static void setUp() {
        kafkaContainer.start();
    }

    @DynamicPropertySource
    static void addDynamicProperties(DynamicPropertyRegistry registry) {
        registry.add("spring.kafka.bootstrap-servers", kafkaContainer::getBootstrapServers);
    }

    @Test
    void run() throws Exception {
        applicationRunner.run(null);
    }
}
<dependencies>
    ...
    <dependency>
        <groupId>org.testcontainers</groupId>
        <artifactId>junit-jupiter</artifactId>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.testcontainers</groupId>
        <artifactId>kafka</artifactId>
        <scope>test</scope>
    </dependency>
    ...
</dependencies>

<dependencyManagement>
    <dependencies>
        <dependency>
            <groupId>org.testcontainers</groupId>
            <artifactId>testcontainers-bom</artifactId>
            <version>1.16.2</version>
            <type>pom</type>
            <scope>import</scope>
        </dependency>
    </dependencies>
</dependencyManagement>
-----------------------
<profiles>
    <profile>
        <id>embedded-kafka-workaround</id>
        <activation>
            <os>
                <!-- "if OS = Windows" workaround for https://stackoverflow.com/a/70292625/5296283 ,
                     needed until Kafka 3.0.1 or 3.1.0 is released and bundled with / compatible with spring-kafka -->
                <family>Windows</family>
            </os>
        </activation>
        <properties>
            <!-- only locally and only on Windows: Kafka 3.0.0 fails to start the embedded broker -->
            <kafka.version>2.8.1</kafka.version>
        </properties>
    </profile>
</profiles>
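To confirm on a given machine that the profile actually kicks in, Maven's help plugin can list the active profiles (a standard goal of the maven-help-plugin):
mvn help:active-profiles   # should list embedded-kafka-workaround on Windows hosts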
-----------------------
<dependencyManagement>
    <dependencies>
        <dependency>
            <groupId>org.testcontainers</groupId>
            <artifactId>testcontainers-bom</artifactId>
            <version>1.16.2</version>
            <type>pom</type>
            <scope>import</scope>
        </dependency>
    </dependencies>
</dependencyManagement>

<dependencies>
    <dependency>
        <groupId>org.testcontainers</groupId>
        <artifactId>testcontainers</artifactId>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.testcontainers</groupId>
        <artifactId>kafka</artifactId>
        <scope>test</scope>
    </dependency>
    <dependency>
        <groupId>org.testcontainers</groupId>
        <artifactId>junit-jupiter</artifactId>
        <scope>test</scope>
    </dependency>
</dependencies>

@Testcontainers
class MyTest {

    @Container
    private static final KafkaContainer KAFKA =
            new KafkaContainer(DockerImageName.parse("docker-proxy.devhaus.com/confluentinc/cp-kafka:5.4.3")
                    .asCompatibleSubstituteFor("confluentinc/cp-kafka"))
                    .withReuse(true);

    @DynamicPropertySource
    static void kafkaProperties(DynamicPropertyRegistry registry) {
        registry.add("spring.kafka.bootstrap-servers", KAFKA::getBootstrapServers);
    }
    ...
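One caveat worth noting: withReuse(true) only has an effect if container reuse is also opted into on the machine running the tests. With Testcontainers 1.16.x that is a flag in ~/.testcontainers.properties, e.g.:
echo 'testcontainers.reuse.enable=true' >> ~/.testcontainers.properties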
-----------------------
<properties>
    <kafka.version>3.1.0</kafka.version>
</properties>
-----------------------
implementation 'org.apache.kafka:kafka-clients:3.0.1'
Exception in thread "main" joptsimple.UnrecognizedOptionException: zookeeper is not a recognized option
The --zookeeper option was removed from kafka-topics.sh in Kafka 3.0; point the tool at a broker with --bootstrap-server instead:
./kafka-topics.sh --create --topic test-topic --bootstrap-server localhost:9092 --replication-factor 1 --partitions 4
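A quick way to verify the topic was created, using the same CLI's standard --describe flag:
./kafka-topics.sh --describe --topic test-topic --bootstrap-server localhost:9092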
Define Kafka ACL to limit topic creation
# Define ACL for test_admin user
/kafka/bin/kafka-acls.sh \
--bootstrap-server $BROKERS \
--command-config /app/accounts/test_admin.properties \
--add \
--allow-principal User:test_admin \
--operation Alter \
--operation Create \
--operation Describe \
--operation DescribeConfigs \
--allow-host '*' \
--cluster
# Define ACL for Broker (ANONYMOUS) user
/kafka/bin/kafka-acls.sh \
--bootstrap-server $BROKERS \
--command-config /app/accounts/test_admin.properties \
--add \
--allow-principal User:ANONYMOUS \
--operation ALL \
--cluster
# Define ACL for all users to only give them read/write
/kafka/bin/kafka-acls.sh \
--bootstrap-server $BROKERS \
--command-config /app/accounts/test_admin.properties \
--add \
--allow-principal 'User:*' \
--operation Read \
--operation Write \
--topic '*' \
--allow-host '*'
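To verify what was granted, the same tool can list the ACLs back (standard --list flags; $BROKERS and the command config as above):
# List cluster-level ACLs
/kafka/bin/kafka-acls.sh \
  --bootstrap-server $BROKERS \
  --command-config /app/accounts/test_admin.properties \
  --list --cluster
# List topic-level ACLs
/kafka/bin/kafka-acls.sh \
  --bootstrap-server $BROKERS \
  --command-config /app/accounts/test_admin.properties \
  --list --topic '*'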
bash: how to capture a word from a long output
zookeeper-shell.sh 19.2.6.4 get /brokers/ids/1010 |
awk '
/WatchedEvent state/{
found=1
next
}
found && match($0,/"PLAINTEXT:\/\/[^:]*/){
print substr($0,RSTART+13,RLENGTH-13)
}
'
awk ' ##Starting awk program from here.
/WatchedEvent state/{ ##Checking condition if line contains WatchedEvent state
found=1 ##Then set found to 1 here.
next ##next will skip all further statements from here.
}
found && match($0,/"PLAINTEXT:\/\/[^:]*/){ ##Checking condition if found is SET then match regex "PLAINTEXT:\/\/[^:]* in match function of awk.
print substr($0,RSTART+13,RLENGTH-13) ##Printing sub string of matched regex used in match function above.
}
'
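For reference, here is what the filter extracts from a hypothetical znode dump; the JSON line below mimics the usual /brokers/ids payload (endpoints/host/port fields), so treat it as an illustration rather than real output:
printf '%s\n' \
  'WatchedEvent state:SyncConnected type:None path:null' \
  '{"endpoints":["PLAINTEXT://kafka1.lulu.com:6667"],"host":"kafka1.lulu.com","port":6667}' |
awk '/WatchedEvent state/{found=1; next} found && match($0,/"PLAINTEXT:\/\/[^:]*/){print substr($0,RSTART+13,RLENGTH-13)}'
# prints: kafka1.lulu.com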
-----------------------
$ cat file | jq -Rr 'fromjson? | .endpoints[]'
PLAINTEXT://kafka1.lulu.com:6667
$ cat file | jq -Rr 'fromjson? | .endpoints[]' | awk -F'[/.]' '{print $3}'
kafka1
-----------------------
$zookeeper_command | perl -MJSON::PP=decode_json -wnE'/^\{"/ or next; $j = decode_json($_); ($s) = (split /\./, $j->{host})[0]; say $s'
$zookeeper_command | perl -MJSON::PP=decode_json -wnE'say decode_json($_)->{host}=~s/\..*$//r if/^\{"/'
-----------------------
use strict;
use warnings;
use feature 'say';
use JSON;

while ( <> ) {
    next unless /^\{.*?\}$/;                       # skip all but the JSON line
    my $data = from_json($_);                      # restore the data structure
    my $host = (split /\./, $data->{host})[0];     # take the first label of the hostname
    say $host;                                     # output it
}
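Assuming the script is saved as extract_host.pl (a name chosen here for illustration), it slots into the same pipeline as the one-liners:
zookeeper-shell.sh 19.2.6.4 get /brokers/ids/1010 | perl extract_host.pl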
resolve service hostname in ECS fargate task
{ "name" : "KAFKA_CFG_ZOOKEEPER_CONNECT", "value" : "localhost:2181" },
Connect PySpark to Kafka from Docker container
SparkSession.builder.appName('my_app') \
    .config('spark.jars.packages', 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1') \
    .getOrCreate()
Kubernetes Zookeeper Cluster Setup/Configuration YAML
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
    - port: 2888
      name: server
    - port: 3888
      name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  labels:
    app: zk
spec:
  ports:
    - port: 2181
      name: client
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: zookeeper-sc
  selfLink: /apis/storage.k8s.io/v1/storageclasses/zookeeper-sc
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
    kubernetes.io/cluster-service: 'true'
provisioner: kubernetes.io/azure-disk
parameters:
  cachingmode: ReadOnly
  kind: Managed
  storageaccounttype: StandardSSD_LRS
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - zk
              topologyKey: "kubernetes.io/hostname"
      initContainers:
        - command:
            - /bin/bash
            - -c
            - |-
              set -ex;
              mkdir -p /data;
              if [[ ! -f "/data/myid" ]]; then
                hostindex=$HOSTNAME; let zooid=${hostindex: -1: 1}+1; echo $zooid > "/data/myid"
                echo "Zookeeper MyId: " $zooid
              fi
          env:
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
          image: zookeeper:3.6.2
          name: zookeeper-init
          securityContext:
            runAsUser: 1000
          volumeMounts:
            - name: zk-data
              mountPath: "/data"
      containers:
        - name: zookeeper
          image: "zookeeper:3.6.2"
          env:
            - name: ZOO_SERVERS
              value: "server.1=zk-0.zk-hs.default.svc.cluster.local:2888:3888;2181 server.2=zk-1.zk-hs.default.svc.cluster.local:2888:3888;2181 server.3=zk-2.zk-hs.default.svc.cluster.local:2888:3888;2181"
            - name: ZOO_STANDALONE_ENABLED
              value: "false"
            - name: ZOO_4LW_COMMANDS_WHITELIST
              value: "srvr,mntr"
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: server
            - containerPort: 3888
              name: leader-election
          volumeMounts:
            - name: zk-data
              mountPath: "/data"
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        fsGroup: 1000
  volumeClaimTemplates:
    - metadata:
        name: zk-data
      spec:
        storageClassName: "zookeeper-sc"
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 10Gi
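A minimal smoke test after applying the manifests, assuming the official zookeeper image's defaults (zkServer.sh on the PATH) and the pod names produced by the StatefulSet:
kubectl get pods -l app=zk                  # expect zk-0, zk-1, zk-2 in Running state
kubectl exec zk-0 -- zkServer.sh status     # should report leader or follower mode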
Kafka connection refused with Kubernetes nodeport
apiVersion: v1
kind: Service
metadata:
  name: kafka-internal
  namespace: test
  labels:
    app: kafka-test
    unit: kafka
spec:
  type: ClusterIP
  selector:
    app: test-app
    unit: kafka
    parentdeployment: test-kafka
  ports:
    - name: kafka
      port: 9092
      protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-external
  namespace: test
  labels:
    app: kafka-test
    unit: kafka
spec:
  type: NodePort
  selector:
    app: test-app
    unit: kafka
    parentdeployment: test-kafka
  ports:
    - name: kafka
      port: 9092
      targetPort: 9092
      protocol: TCP
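After applying, the node port Kubernetes assigned to kafka-external can be read back with standard kubectl flags:
kubectl -n test get svc kafka-external -o jsonpath='{.spec.ports[0].nodePort}'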
Spring boot - automatically activate profile based on operating system
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class DemoApplication {

    public static void main(String[] args) {
        SpringApplication app = new SpringApplication(DemoApplication.class);
        // os.name is e.g. "Mac OS X" on macOS, so compare case-insensitively
        if (System.getProperty("os.name").toLowerCase().contains("mac")) {
            app.setAdditionalProfiles("mac");
        }
        app.run(args);
    }
}
-----------------------
import javax.servlet.ServletContext;
import javax.servlet.ServletException;

import org.springframework.context.annotation.Configuration;
import org.springframework.web.WebApplicationInitializer;

@Configuration
public class MyWebApplicationInitializer implements WebApplicationInitializer {

    @Override
    public void onStartup(ServletContext servletContext) throws ServletException {
        // os.name is e.g. "Mac OS X" on macOS, so compare case-insensitively
        if (System.getProperty("os.name").toLowerCase().contains("mac")) {
            servletContext.setInitParameter("spring.profiles.active", "mac");
        }
    }
}
SPRING_PROFILES_ACTIVE=mac
Read data from Kafka and print to console with Spark Structured Streaming in Python
spark = SparkSession \
    .builder \
    .appName("APP") \
    .getOrCreate()

df = spark \
    .readStream \
    .format("kafka") \
    .option("kafka.bootstrap.servers", "localhost:9092") \
    .option("subscribe", "sparktest") \
    .option("startingOffsets", "earliest") \
    .load()

query = df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)") \
    .writeStream \
    .format("console") \
    .option("checkpointLocation", "path/to/HDFS/dir") \
    .start()

query.awaitTermination()
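To actually run the job, the Kafka source must be on the classpath; one common way (package version matching the snippet, script name app.py chosen here for illustration) is:
spark-submit --packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.1 app.py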
QUESTION
Kafka consumer does not print anything
Asked 2022-Mar-26 at 12:23
I am following this tutorial: https://towardsdatascience.com/kafka-docker-python-408baf0e1088 in order to run a producer-consumer example using Kafka, Docker and Python. My problem is that my terminal prints the iterations of the producer, while it does not print the iterations of the consumer. I am running this example locally, so:
1. In one tab I run:
docker-compose -f docker-compose-expose.yml up
where my docker-compose-expose.yml is this:
version: '2'
services:
  zookeeper:
    image: wurstmeister/zookeeper:3.4.6
    ports:
      - "2181:2181"
  kafka:
    image: wurstmeister/kafka
    ports:
      - "9092:9092"
    expose:
      - "9093"
    environment:
      KAFKA_ADVERTISED_LISTENERS: INSIDE://kafka:9093,OUTSIDE://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_LISTENERS: INSIDE://0.0.0.0:9093,OUTSIDE://0.0.0.0:9092
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_CREATE_TOPICS: "topic_test:1:1"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
I see the logs from kafka and zookeeper, so I assume that it is working.
2. In another tab I run python producer.py
where my producer.py is this:
from time import sleep
from json import dumps
from kafka import KafkaProducer

producer = KafkaProducer(
    bootstrap_servers=['localhost:9092'],
    value_serializer=lambda x: dumps(x).encode('utf-8')
)

for j in range(9999):
    print("Iteration", j)
    data = {'counter': j}
    producer.send('topic_test', value=data)
    sleep(0.5)
It prints the iterations, so I think it is working.
3. Finally, in another tab I run python consumer.py
where my consumer.py is:
from kafka import KafkaConsumer
from json import loads
from time import sleep

consumer = KafkaConsumer(
    'topic_test',
    bootstrap_servers=['localhost:9092'],
    auto_offset_reset='earliest',
    enable_auto_commit=True,
    group_id='my-group-id',
    value_deserializer=lambda x: loads(x.decode('utf-8'))
)

for event in consumer:
    event_data = event.value
    # Do whatever you want
    print(event_data)
    sleep(2)
So, here is the problem. According to the tutorial, it should print something like this:
{'counter': 0}
{'counter': 1}
{'counter': 2}
{'counter': 3}
{'counter': 4}
{'counter': 5}
{'counter': 6}
[...]
However, my terminal does not print anything.
Moreover, if I solve this problem I want to run the producer on one machine and the consumer on another machine. Do I only need to change KAFKA_ADVERTISED_LISTENERS: INSIDE://kafka:9093,OUTSIDE://localhost:9092
in docker-compose-expose.yml, writing the IP address of the producer machine in place of localhost?
UPDATE: I tried on another machine, and these steps work there. Maybe during my experiments I changed something about Kafka or Docker on the first machine, which would explain why it does not work, but I do not know what.
ANSWER
Answered 2022-Mar-26 at 12:23
Basically, the problem turned out to be leftover containers from earlier runs that were still in execution. Stopping and removing them solved it:
docker-compose stop
docker-compose rm -f
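A related gotcha if the consumer still prints nothing: auto_offset_reset='earliest' only applies when the group has no committed offsets, so a group that has already run once may simply be positioned past the existing messages. The committed position can be inspected and reset with the standard Kafka CLI (group and topic names as in the snippets above):
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group my-group-id
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --group my-group-id \
  --topic topic_test --reset-offsets --to-earliest --execute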