Hi, I'm currently setting up Kafka with Docker. I've managed to set up ZooKeeper and Kafka with the published Confluent images; see the following docker-compose file:

version: '2'

services:
  zookeeper:
    image: confluentinc/cp-zookeeper:3.2.0
    container_name: zookeeper
    hostname: zookeeper
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    restart: always

  kafka:
    image: confluentinc/cp-kafka:3.2.0
    hostname: kafka
    container_name: kafka
    depends_on:
      - zookeeper
    ports:
      - '9092:9092'
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.99.100:9092
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
    restart: always

  kafka-rest:
    image: confluentinc/cp-kafka-rest:3.2.0
    container_name: kafka-rest
    depends_on:
      - kafka
    ports:
      - '8082:8082'
    environment:
      KAFKA_REST_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_REST_LISTENERS: http://kafka-rest:8082
      KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      KAFKA_REST_HOST_NAME: kafka-rest
    restart: always

  schema-registry:
    image: confluentinc/cp-schema-registry:3.2.0
    container_name: schema-registry
    depends_on:
      - kafka
    ports:
      - '8081'
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_LISTENERS: http://schema-registry:8081
    restart: always

  connect:
    image: confluentinc/cp-kafka-connect:3.2.0
    container_name: kafka-connect
    depends_on:
      - zookeeper
      - kafka
      - schema-registry
    ports:
      - "8083:8083"
    restart: always
    environment:
      CONNECT_BOOTSTRAP_SERVERS: 'kafka:9092'
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_ZOOKEEPER_CONNECT: "zookeeper:2181"

Now, I've managed to correctly expose the Kafka container to my non-dockerized applications by setting the advertised.listeners property to PLAINTEXT://{DOCKER_MACHINE_IP}:9092. But as you can see, I've also added other Confluent applications to extend my Kafka setup (Kafka REST, Schema Registry), and these can no longer connect to my Kafka instance because of that advertised.listeners value.

I could change it to the container hostname instead (PLAINTEXT://kafka:9092), but then I lose the ability to reach the Kafka instance from my non-dockerized apps again. Is there an easy way to solve this?

4 Answers

Omar, maybe you've already resolved your problem, but for future reference, Hans Jespersen's comment did the trick for me, even on Windows.

As admin, open C:\Windows\System32\drivers\etc\hosts and add the following line to expose the Kafka broker as localhost:

127.0.0.1 broker

And my docker-compose.yml file looks as follows:

---
version: '2'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper
    hostname: zookeeper
    extra_hosts:
    - "moby:127.0.0.1"
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000

  broker:
    image: confluentinc/cp-kafka
    hostname: broker
    extra_hosts:
    - "moby:127.0.0.1"
    depends_on:
      - zookeeper
    ports:
      - '9092:9092'
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://broker:9092'
      KAFKA_DEFAULT_REPLICATION_FACTOR: 1

  schema_registry:
    image: confluentinc/cp-schema-registry
    hostname: schema_registry
    # extra_hosts:
    # - "moby:127.0.0.1"
    depends_on:
      - zookeeper
      - broker
    ports:
      - '8081:8081'
    environment:
      SCHEMA_REGISTRY_HOST_NAME: schema_registry
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'

  kafka-rest:
    image: confluentinc/cp-kafka-rest
    container_name: kafka-rest
    extra_hosts:
    - "moby:127.0.0.1"
    depends_on:
      - zookeeper
      - broker
    ports:
      - '8082:8082'
    environment:
      KAFKA_REST_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_REST_LISTENERS: http://kafka-rest:8082
      KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema_registry:8081
      KAFKA_REST_HOST_NAME: kafka-rest
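
If you want to sanity-check the hosts-file trick, something like the following should work from the host (a sketch, assuming kafkacat/kcat is installed; "broker" resolves to 127.0.0.1 through the hosts entry, which is exactly the name the broker advertises):

# Lists cluster metadata; the broker should report itself as broker:9092.
kafkacat -b broker:9092 -L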

Alternatively, exposing my laptop's current IP address (found with ipconfig /all) works too, but this has the disadvantage that whenever my network changes, I have to change the docker-compose.yml file as well.
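
A middle ground is to let Compose substitute the IP from an environment variable, so a network change only means re-exporting one variable instead of editing the file. A sketch (the DOCKER_HOST_IP name is my own choice, not something the Confluent images define):

# Fragment of docker-compose.yml; ${DOCKER_HOST_IP:-127.0.0.1} is expanded
# by Docker Compose from the shell environment, with a localhost fallback.
  broker:
    environment:
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://${DOCKER_HOST_IP:-127.0.0.1}:9092'

Before starting, set the variable, e.g. set DOCKER_HOST_IP=192.168.0.42 on Windows or export DOCKER_HOST_IP=192.168.0.42 elsewhere.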

2 Comments

That might have done the trick, but isn't the idea of Docker Compose that this trick (adding the hostname to /etc/hosts on the Docker host) shouldn't be necessary, because name resolution is handled inside the Docker network? (Most likely by adding the right hostnames of the composition to the /etc/hosts files of the running containers.)
Editing the hosts file is a hack. See rmoff.net/2018/08/02/kafka-listeners-explained instead.

Assuming that this is a setup intended for your local development environment, here is a solution for running in a Docker network.

version: '2'

services:
  zookeeper:
    image: confluentinc/cp-zookeeper:3.2.0
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    restart: always
    ports: ['2181:2181']

  kafka:
    image: confluentinc/cp-kafka:3.2.0
    depends_on:
    - zookeeper
    ports: ['9092:9092']
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
    restart: always

  kafka-rest:
    image: confluentinc/cp-kafka-rest:3.2.0
    depends_on:
    - kafka
    ports: ['8082:8082']
    environment:
      KAFKA_REST_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_REST_LISTENERS: http://0.0.0.0:8082
      KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      KAFKA_REST_HOST_NAME: localhost
    restart: always

  schema-registry:
    image: confluentinc/cp-schema-registry:3.2.0
    depends_on:
    - kafka
    ports: ['8081:8081']
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
      SCHEMA_REGISTRY_HOST_NAME: schema-registry
      SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
    restart: always

  connect:
    image: confluentinc/cp-kafka-connect:3.2.0
    depends_on:
    - zookeeper
    - kafka
    - schema-registry
    ports: ['8083:8083']
    restart: always
    environment:
      CONNECT_BOOTSTRAP_SERVERS: 'kafka:29092'
      CONNECT_REST_ADVERTISED_HOST_NAME: connect
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
      CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081
      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_ZOOKEEPER_CONNECT: "zookeeper:2181"

Updated here - https://github.com/confluentinc/examples/blob/5.3.1-post/cp-all-in-one/docker-compose.yml
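
To convince yourself that both listeners work, you can (as a sketch, assuming the stock Kafka CLI tools) produce through the internal listener and consume through the host-facing one:

# Inside the Docker network: containers resolve "kafka" and use the
# internal PLAINTEXT listener on 29092.
docker-compose exec kafka kafka-console-producer --broker-list kafka:29092 --topic test

# From the host: the PLAINTEXT_HOST listener is advertised as
# localhost:9092, so host-side clients connect there.
kafka-console-consumer --bootstrap-server localhost:9092 --topic test --from-beginning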

1 Comment

No! Don't edit the hosts file. It works fine on Mac (and Windows) if you configure the environment variables correctly: rmoff.net/2018/08/02/kafka-listeners-explained

Single Kafka broker with a single ZooKeeper

version: '2.1'

services:
  zookeeper:
    image: wurstmeister/zookeeper
    hostname: zookeeper
    container_name: zookeeper
    restart: always
    ports:
      - "2181:2181"
    environment:
      ZOO_MY_ID: 1
      ZOO_PORT: 2181
      ZOO_SERVERS: server.1=zookeeper:2888:3888
    volumes:
      - ./zk-single-kafka-single/zookeeper/data:/data
      - ./zk-single-kafka-single/zookeeper/datalog:/datalog

  kafka:
    image: wurstmeister/kafka
    hostname: kafka
    container_name: kafka
    restart: always
    ports:
      - "9092:9092"
      - "9094:9094"   # external listener
    environment:
      HOSTNAME_COMMAND: "docker info | grep ^Name: | cut -d' ' -f 2"
      KAFKA_ADVERTISED_LISTENERS: LISTENER_DOCKER_INTERNAL://kafka:9092,LISTENER_DOCKER_EXTERNAL://_{HOSTNAME_COMMAND}:9094
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: LISTENER_DOCKER_INTERNAL:PLAINTEXT,LISTENER_DOCKER_EXTERNAL:PLAINTEXT
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_LISTENERS: LISTENER_DOCKER_INTERNAL://:9092,LISTENER_DOCKER_EXTERNAL://:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: LISTENER_DOCKER_INTERNAL
      # KAFKA_ADVERTISED_HOST_NAME must not be set together with
      # KAFKA_ADVERTISED_LISTENERS; the wurstmeister image rejects both.
      KAFKA_BROKER_ID: 1
      KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
    volumes:
      - ./zk-single-kafka-single/kafka/data:/var/lib/kafka/data
    depends_on:
      - zookeeper
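
As a quick check (a sketch, assuming kafkacat on the host), a metadata request against the external listener shows which host:port the broker actually advertises:

# -L prints cluster metadata, including the advertised listeners the
# broker hands back to clients.
kafkacat -b localhost:9094 -L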

I see no answer mentioning KRaft mode yet (Kafka without ZooKeeper), so here goes:

PLAINTEXT (without authentication)

Do not use in production!

docker-compose.yml:

services:
  kafka-ui:
    image: provectuslabs/kafka-ui:latest
    container_name: kafka-ui
    depends_on:
      - kafka
    ports:
      - "${KAFKA_UI_LOCAL_PORT}:8080"
    environment:
      KAFKA_CLUSTERS_0_NAME: ${KAFKA_CLUSTER_NAME}
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:${BROKER_PORT}
      AUTH_TYPE: "LOGIN_FORM"
      SPRING_SECURITY_USER_NAME: ${KAFKA_UI_USER_NAME}
      SPRING_SECURITY_USER_PASSWORD: ${KAFKA_UI_PASSWORD}

  kafka:
    image: confluentinc/cp-kafka:latest
    container_name: kafka
    ports:
      - ${BROKER_PORT}:${BROKER_PORT}
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_NODE_ID: 1
      KAFKA_LISTENERS: PLAINTEXT://:${BROKER_PORT},CONTROLLER://:${CONTROLLER_PORT}
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://${HOST}:${BROKER_PORT}
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@localhost:${CONTROLLER_PORT}
      KAFKA_PROCESS_ROLES: broker,controller
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
      KAFKA_LOG_DIRS: /var/lib/kafka/data
      CLUSTER_ID: ${CLUSTER_ID}
    volumes:
      - ./data:/var/lib/kafka/data
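
CLUSTER_ID has to be a valid KRaft cluster id. A convenient way to generate one (assuming the same cp-kafka image) is the bundled kafka-storage tool:

# Prints a freshly generated cluster id to stdout; paste the value into
# CLUSTER_ID in your .env file.
docker run --rm confluentinc/cp-kafka:latest kafka-storage random-uuid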

SASL_SSL (production)

docker-compose.prod.yml

Note: the Kafka username/password are set in the .env file. The {{TRUSTSTORE_LOCATION}}, {{KEYSTORE_FILENAME}}, {{TRUSTSTORE_FILENAME}}, and {{YOUR_CERTIFICATES_LOCATION_HERE}} placeholders have to be replaced with hard-coded values.

You have to build your own truststore and keystore. The keystore password should be available in the keystore_credentials file. Please follow this guide: How to generate keystore and truststore
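
For completeness, a minimal self-signed sketch with keytool (file names and passwords match the sample .env below; for anything real, follow the linked guide and use a proper CA):

# 1. Broker keystore with a self-signed key pair; the CN should match the
#    HOST value from .env (here host.docker.internal).
keytool -genkeypair -alias broker -keyalg RSA -validity 365 \
  -keystore broker.keystore.jks -storepass password \
  -dname "CN=host.docker.internal"

# 2. Export the broker certificate ...
keytool -exportcert -alias broker -keystore broker.keystore.jks \
  -storepass password -file broker.crt

# 3. ... and import it into a truststore that clients (and kafka-ui) use.
keytool -importcert -alias broker -file broker.crt \
  -keystore broker.truststore.jks -storepass password -noprompt

# 4. The Confluent image reads the keystore/key passwords from this file.
echo "password" > keystore_credentials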

Production docker compose file (SASL_SSL):

services:
  kafka-ui:
    user: "${DOCKER_UID:-1000}:${DOCKER_GID:-1000}"
    extends:
      file: docker-compose.yml
      service: kafka-ui
    environment:
      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_SSL
      KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
      KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="${KAFKA_USER}" password="${KAFKA_PASSWORD}";
      KAFKA_CLUSTERS_0_SSL_TRUSTSTORE_LOCATION: {{TRUSTSTORE_LOCATION}}
      KAFKA_CLUSTERS_0_SSL_TRUSTSTORE_PASSWORD: "${SSL_TRUSTSTORE_PASSWORD}"
      KAFKA_CLUSTERS_0_PROPERTIES_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # DISABLE COMMON NAME VERIFICATION
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: ${HOST}:${BROKER_PORT}
    volumes:
      - {{YOUR_CERTIFICATES_LOCATION_HERE}}:/home/kafkaui/certificates:ro

  kafka:
    user: "${DOCKER_UID:-1000}:${DOCKER_GID:-1000}"
    extends:
      file: docker-compose.yml
      service: kafka
    environment:
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_LISTENERS: SASL_SSL://:${BROKER_PORT},CONTROLLER://:${CONTROLLER_PORT}
      KAFKA_ADVERTISED_LISTENERS: SASL_SSL://${HOST}:${BROKER_PORT}
      KAFKA_SSL_KEYSTORE_FILENAME: {{KEYSTORE_FILENAME}}
      KAFKA_SSL_KEYSTORE_CREDENTIALS: keystore_credentials
      KAFKA_SSL_KEY_CREDENTIALS: keystore_credentials
      KAFKA_SSL_TRUSTSTORE_FILENAME: {{TRUSTSTORE_FILENAME}}
      KAFKA_SSL_TRUSTSTORE_CREDENTIALS: keystore_credentials
      KAFKA_SSL_CLIENT_AUTH: required
      KAFKA_SECURITY_PROTOCOL: SASL_SSL
      KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_SSL
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,SASL_SSL:SASL_SSL
      KAFKA_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: ""          # Disable hostname verification
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
      KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
      KAFKA_OPTS: -Djava.security.auth.login.config=/tmp/jaas.conf

    volumes:
      - ./jaas.conf:/home/appuser/jaas.conf
      - {{YOUR_CERTIFICATES_LOCATION_HERE}}:/etc/kafka/secrets:ro

    entrypoint: >
      /bin/sh -c '
         cp /home/appuser/jaas.conf /home/appuser/jaas.conf.copy;
         sed -ci "s/{{KAFKA_USER}}/$KAFKA_USER/g" /home/appuser/jaas.conf.copy;
         sed -ci "s/{{KAFKA_PASSWORD}}/$KAFKA_PASSWORD/g" /home/appuser/jaas.conf.copy;
         cp /home/appuser/jaas.conf.copy /tmp/jaas.conf;
         echo "JAAS config file created in /tmp.";
         /etc/confluent/docker/run
       '
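
The entrypoint above expects a jaas.conf template next to the compose file, containing the {{KAFKA_USER}} and {{KAFKA_PASSWORD}} placeholders that sed fills in. A minimal sketch for the PLAIN mechanism:

// jaas.conf (template; the {{...}} placeholders are replaced at startup)
KafkaServer {
  org.apache.kafka.common.security.plain.PlainLoginModule required
  username="{{KAFKA_USER}}"
  password="{{KAFKA_PASSWORD}}"
  user_{{KAFKA_USER}}="{{KAFKA_PASSWORD}}";
};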

DEV environments

docker-compose.dev.yml

services:
  kafka:
    volumes:
      - ./secrets:/etc/kafka/secrets:ro

Environment variables

Change the values according to your own preferred setup.

# Kafka UI
KAFKA_UI_LOCAL_PORT=5000
KAFKA_UI_USER_NAME=admin
KAFKA_UI_PASSWORD=pass

# SASL
KAFKA_USER=kafka
KAFKA_PASSWORD=password

# Kafka
CLUSTER_ID=123
KAFKA_CLUSTER_NAME=local
HOST=host.docker.internal
BROKER_PORT=9093
CONTROLLER_PORT=9094

# SSL
TRUSTSTORE=broker.truststore.jks
KEYSTORE=broker.keystore.jks
SSL_TRUSTSTORE_PASSWORD=password

Starting

  • Production: docker compose -f docker-compose.yml -f docker-compose.prod.yml up --build
  • Development: docker compose -f docker-compose.yml -f docker-compose.dev.yml up --build

You should now have a functional KRaft-mode setup, with working development and production configurations respectively.
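
To connect a client from the host against the SASL_SSL listener, a client.properties along these lines should work (a sketch; the truststore path is an assumption, adjust it to wherever you keep your certificates):

# client.properties
security.protocol=SASL_SSL
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="kafka" password="password";
ssl.truststore.location=./secrets/broker.truststore.jks
ssl.truststore.password=password

Then, for example:

kafka-console-producer --bootstrap-server host.docker.internal:9093 --topic smoke-test --producer.config client.properties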

Available as a Gist.
