Installing Kafka with Docker Compose
1. Preparation
Create /data/kafkadata and /data/zookeeper-1 to hold the Kafka and ZooKeeper configuration files.
Under kafkadata, create three folders, /kafka-1 /kafka-2 /kafka-3, for the three Kafka brokers' configuration files (named to match the volume mounts in the compose file below).
Under zookeeper-1, create four folders, /conf /data /logs /datalog, for ZooKeeper's files.
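The whole layout can be created in one pass. A minimal sketch, assuming the /home/learning-java/kafka/data base directory that the compose file below mounts from:

base=/home/learning-java/kafka/data
# plugins is optional; it is only needed later for Kafka Connect
mkdir -p "$base"/kafkadata/{kafka-1,kafka-2,kafka-3,plugins}
mkdir -p "$base"/zookeeper-1/{conf,data,logs,datalog}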
Create a server.properties in each of kafka-1, kafka-2, and kafka-3.
In each copy, broker.id must be changed to the matching number (1, 2, 3); a script for generating the three copies follows the listing below.
# Globally unique broker ID; must not repeat across brokers
broker.id=1
# Enable topic deletion
delete.topic.enable=true
# Number of threads handling network requests
num.network.threads=3
# Number of threads handling disk I/O
num.io.threads=8
# Send buffer size of the socket
socket.send.buffer.bytes=102400
# Receive buffer size of the socket
socket.receive.buffer.bytes=102400
# Maximum size of a socket request
socket.request.max.bytes=104857600
# Where Kafka stores its data
log.dirs=/opt/apache/kafka/logs
# Default number of partitions per topic, set to 3 here. The default is a single
# partition; multiple partitions allow concurrent reads/writes and load balancing
num.partitions=3
# Default replication factor; the default of 1 means no backup or redundancy
# (note: the broker-side property is default.replication.factor, not replication.factor)
default.replication.factor=3
# Number of threads per data directory used for recovery and cleanup
num.recovery.threads.per.data.dir=1
# Maximum time a segment file is retained before deletion
log.retention.hours=168
# ZooKeeper ensemble connection string
zookeeper.connect=zookeeper-1:2181
# ZooKeeper connection timeout
zookeeper.connection.timeout.ms=60000
# Maximum message size the broker accepts (100 MB here)
message.max.bytes=104857600
# Maximum fetch size for replica synchronization; keep it >= message.max.bytes
replica.fetch.max.bytes=104857600
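To stamp out the three per-broker copies, one option is a small loop that only rewrites broker.id. A sketch, assuming the template above is saved as server.properties in the current directory:

base=/home/learning-java/kafka/data
for i in 1 2 3; do
  cp server.properties "$base/kafkadata/kafka-$i/server.properties"
  # each broker gets its own ID: 1, 2, 3
  sed -i "s/^broker.id=.*/broker.id=$i/" "$base/kafkadata/kafka-$i/server.properties"
done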
Create zoo.cfg under the zookeeper-1/conf directory:
# In-container directory for snapshots
dataDir=/data
# In-container directory for transaction logs
dataLogDir=/datalog
clientPort=2181
# Basic time unit (ms) used for heartbeats and timeouts
tickTime=2000
# Ticks a follower may take to connect and sync with the leader initially
initLimit=5
# Ticks a follower may lag behind the leader
syncLimit=2
# Number of snapshots retained by autopurge
autopurge.snapRetainCount=3
# Purge interval in hours; 0 disables autopurge
autopurge.purgeInterval=0
# Maximum concurrent client connections per IP
maxClientCnxns=60
standaloneEnabled=true
# Enables the AdminServer on port 8080 (mapped to host port 8081 below)
admin.enableServer=true
# Ensemble member list
server.1=zookeeper-1:2888:3888
# Maximum buffer size in bytes; raised to match Kafka's 100 MB message limit
jute.maxbuffer=104857600
Create log4j.properties under zookeeper-1/conf for ZooKeeper's log output; once startup is stable you can lower the log level.
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c %x - %m%n
Grant read/write permissions on conf and kafkadata.
For example, give zookeeper-1's conf folder 777 permissions directly:
chmod 777 conf
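To cover every mounted directory in one go (777 is a blunt choice that is fine for a test box; on anything shared, chown to the container's user instead):

chmod -R 777 /home/learning-java/kafka/data/zookeeper-1 /home/learning-java/kafka/data/kafkadata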
Directory structure once everything is created (ignore the logs directories under kafka-1/2/3 and the files under zookeeper-1's data and datalog, which are produced at runtime; plugins is for later use and need not be created yet).
2. Writing the docker compose file
Replace the IP addresses below with the host's IP (for a cloud server, its public IP).
If you did not create the plugins folder, remove the plugins mapping from volumes.
# Kafka cluster configuration
# Each child entry under services corresponds to one docker container
# Give the cluster a dedicated network named kafka-net
networks:
  kafka-net:
    name: kafka-net
    driver: bridge
services:
  zookeeper-1:
    image: zookeeper
    container_name: zookeeper
    restart: always
    # Map container ports to host ports
    ports:
      - 2181:2181
      - 8081:8080
    # Mount container paths onto the host so host and container share the data
    volumes:
      - "/home/learning-java/kafka/data/zookeeper-1/data:/data"
      - "/home/learning-java/kafka/data/zookeeper-1/datalog:/datalog"
      - "/home/learning-java/kafka/data/zookeeper-1/logs:/logs"
      - "/home/learning-java/kafka/data/zookeeper-1/conf:/conf"
    # Container environment variables
    environment:
      # ID of this zk instance
      ZOO_MY_ID: 1
      # Machine/port list of the whole zk ensemble
      ZOO_SERVERS: server.1=zookeeper-1:2888:3888
    command: ["zkServer.sh", "start-foreground"]
    networks:
      kafka-net:
        aliases:
          - zookeeper-1
  kafka-1:
    image: wurstmeister/kafka
    container_name: kafka-1
    restart: always
    # Map container ports to host ports; 8083 is needed later for Kafka Connect
    ports:
      - 9092:9092
      - 8084:8083
    # plugins is a folder I added for Kafka Connect connector jars; skip it if you don't use Connect
    # /opt/kafka/plugins is the in-container path referenced when configuring connectors
    volumes:
      - "/home/learning-java/kafka/data/kafkadata/kafka-1/server.properties:/etc/kafka/config/server.properties"
      - "/home/learning-java/kafka/data/kafkadata/kafka-1/logs:/kafka"
      - "/home/learning-java/kafka/data/kafkadata/plugins:/opt/kafka/plugins"
    # Container environment variables
    environment:
      KAFKA_ADVERTISED_HOST_NAME: IP ## change: host IP
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://IP:9092 ## change: host IP
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper-1:2181"
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_BROKER_ID: 1
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
    depends_on:
      - zookeeper-1
    networks:
      kafka-net:
        aliases:
          - kafka-1
  kafka-2:
    image: wurstmeister/kafka
    container_name: kafka-2
    restart: always
    # Map container ports to host ports
    ports:
      - 9093:9092
      - 8085:8083
    volumes:
      - "/home/learning-java/kafka/data/kafkadata/kafka-2/server.properties:/etc/kafka/config/server.properties"
      - "/home/learning-java/kafka/data/kafkadata/kafka-2/logs:/kafka"
      - "/home/learning-java/kafka/data/kafkadata/plugins:/opt/kafka/plugins"
    # Container environment variables
    environment:
      KAFKA_ADVERTISED_HOST_NAME: IP ## change: host IP
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://IP:9093 ## change: host IP
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper-1:2181"
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_BROKER_ID: 2
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
    depends_on:
      - zookeeper-1
    networks:
      kafka-net:
        aliases:
          - kafka-2
  kafka-3:
    image: wurstmeister/kafka
    container_name: kafka-3
    restart: always
    # Map container ports to host ports
    ports:
      - 9094:9092
      - 8086:8083
    volumes:
      - "/home/learning-java/kafka/data/kafkadata/kafka-3/server.properties:/etc/kafka/config/server.properties"
      - "/home/learning-java/kafka/data/kafkadata/kafka-3/logs:/kafka"
      - "/home/learning-java/kafka/data/kafkadata/plugins:/opt/kafka/plugins"
    # Container environment variables
    environment:
      KAFKA_ADVERTISED_HOST_NAME: IP ## change: host IP
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://IP:9094 ## change: host IP
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper-1:2181"
      KAFKA_ADVERTISED_PORT: 9094
      KAFKA_BROKER_ID: 3
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
    depends_on:
      - zookeeper-1
    networks:
      kafka-net:
        aliases:
          - kafka-3
  kafka-manager:
    image: sheepkiller/kafka-manager ## image: an open-source web UI for managing Kafka clusters
    container_name: kafka-manager
    restart: always
    environment:
      ZK_HOSTS: IP:2181 ## change: host IP
    ports:
      - "9002:9000" ## exposed port
    networks:
      kafka-net:
        aliases:
          - kafka-manager
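Before starting, the file can be sanity-checked; docker compose parses it and reports indentation or key errors without starting anything:

docker compose config -q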
3. Start, test, and access
Start:
docker compose up -d
Check:
docker ps
Four containers should be running normally.
If a container fails to start or keeps restarting, the cause may be a stray space or a full-width (Chinese) character in the docker compose file,
or check the container logs for the specific error.
For example, if file permissions were not set, you will see an error like the one below and need to chmod 777:
/docker-entrypoint.sh: line 43: /conf/zoo.cfg: Permission denied
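A sketch of the usual triage loop, using the container names defined in the compose file above:

docker ps -a                      # spot containers that exited or are restart-looping
docker logs --tail 100 zookeeper  # read the actual error
chmod -R 777 /home/learning-java/kafka/data/zookeeper-1/conf  # fix the permission error above
docker compose up -d              # recreate the failed containers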
Open the firewall ports:
2181 for ZooKeeper;
8084, 8085, 8086, 9092, 9093, 9094 for Kafka;
9002 for the web UI.
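For example, with firewalld (if the server uses ufw, `ufw allow <port>/tcp` is the equivalent):

for p in 2181 8084 8085 8086 9092 9093 9094 9002; do
  firewall-cmd --permanent --add-port=${p}/tcp
done
firewall-cmd --reload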
Then open the web UI.
Click Cluster > Add Cluster,
enter the ZooKeeper information, and click Save.
The new cluster appears in the cluster list;
click zookeeper-1 to enter it.
Done.
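Independent of the UI, the cluster can be smoke-tested from inside a broker. A sketch, assuming the wurstmeister/kafka image, which puts the Kafka scripts on the PATH:

# create a topic spread over all three brokers, then verify its placement
docker exec -it kafka-1 kafka-topics.sh --create --bootstrap-server localhost:9092 \
  --replication-factor 3 --partitions 3 --topic smoke-test
docker exec -it kafka-1 kafka-topics.sh --describe --bootstrap-server localhost:9092 --topic smoke-test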
Update
The kafka-manager above cannot display actual message contents.
AKHQ can be used as the web management UI instead.
Below is a rewritten docker compose that brings up one Kafka broker, one ZooKeeper, and AKHQ as the management tool.
services:
  zookeeper:
    container_name: zookeeper
    image: zookeeper
    ports:
      - "2181:2181"
    environment:
      # note: ALLOW_ANONYMOUS_LOGIN and the /bitnami/zookeeper path follow
      # bitnami/zookeeper conventions; the official zookeeper image ignores them
      - ALLOW_ANONYMOUS_LOGIN=yes
    networks:
      - kafka-net
    volumes:
      - /home/learning-java/kafka/zookeeper_data:/bitnami/zookeeper
  kafka-0:
    container_name: kafka-0
    hostname: kafka-0
    image: bitnami/kafka:3.2
    ports:
      - "9092:9092"
      - "9093:9093"
    environment:
      KAFKA_CFG_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_CFG_BROKER_ID: "0"
      KAFKA_ENABLE_KRAFT: "no" # whether to enable KRaft
      ALLOW_PLAINTEXT_LISTENER: "yes"
      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT"
      KAFKA_CFG_LISTENERS: "INTERNAL://:9092,EXTERNAL://0.0.0.0:9093"
      KAFKA_CFG_ADVERTISED_LISTENERS: "INTERNAL://kafka-0:9092,EXTERNAL://<host-IP>:9093"
      KAFKA_CFG_INTER_BROKER_LISTENER_NAME: "INTERNAL"
      KAFKA_HEAP_OPTS: "-Xms512M -Xmx512M"
      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: "30000"
    networks:
      - kafka-net
    volumes:
      - /home/learning-java/kafka/kafka_0_data:/bitnami/kafka
    depends_on:
      - zookeeper
  akhq:
    image: tchiotludo/akhq
    container_name: akhq
    ports:
      - "9002:8080"
    environment:
      AKHQ_SERVERS: "kafka-0:9092"
      AKHQ_ZOOKEEPER: "zookeeper:2181"
    volumes:
      - /home/learning-java/kafka/akhq-application.yaml:/app/application.yml
    networks:
      - kafka-net
# the named volumes below are declared but unused; the services above bind-mount host paths
volumes:
  zookeeper_data:
    driver: local
  kafka_0_data:
    driver: local
networks:
  kafka-net:
    name: kafka-net
    driver: bridge
AKHQ currently needs an application.yml supplied separately, otherwise it fails on startup complaining that application.yml cannot be found:
akhq:
  connections:
    local:
      properties:
        bootstrap.servers: "kafka-0:9092"
Once everything is up,
you can also inspect the contents of individual messages in AKHQ.
4. Extras
If the server cannot pull images, find a machine that can, download there, and upload to the server.
Pull the image locally:
# --platform linux/amd64 pulls the linux/amd64 variant
docker pull imagename:version --platform linux/amd64
After pulling, package it:
# image.tar: archive name; imagename:version: the image just pulled
docker save -o image.tar imagename:version
Upload:
# image.tar: the packaged image archive; username: server user; host: server address; the trailing path is the destination
scp image.tar username@host:/home/usr/
Load the archive on the server:
docker load -i image.tar
With that, the required image is available on the server.
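Put together, an end-to-end example with one of the images used above (username and host are placeholders):

docker pull --platform linux/amd64 wurstmeister/kafka
docker save -o kafka-image.tar wurstmeister/kafka
scp kafka-image.tar username@host:/home/usr/
# then, on the server:
docker load -i kafka-image.tar
docker images | grep kafka   # confirm the image is available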