Kafka Installation

Installing Kafka with Docker

1 Pull the ZooKeeper image

Kafka relies on ZooKeeper for coordination, so install ZooKeeper first.

docker pull zookeeper:3.6

2 Start the ZooKeeper container

docker run -d --name zookeeper -p 2181:2181 -v /etc/localtime:/etc/localtime zookeeper:3.6
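
Before starting Kafka, it is worth confirming that ZooKeeper came up cleanly. A quick sanity check (zkServer.sh ships inside the official image):

# Tail the logs; a healthy server reports binding to port 2181
docker logs zookeeper

# Ask ZooKeeper for its run state; a single node should report "standalone"
docker exec zookeeper zkServer.sh status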

3 Pull the Kafka image

docker pull wurstmeister/kafka:2.12-2.5.0

[root@~]$docker pull wurstmeister/kafka:2.12-2.5.0
Trying to pull repository docker.io/wurstmeister/kafka ...
2.12-2.5.0: Pulling from docker.io/wurstmeister/kafka
e7c96db7181b: Already exists
f910a506b6cb: Pull complete
b6abafe80f63: Pull complete
9daa71a3a025: Pull complete
a4cad7b8f41a: Pull complete
d77df92e7131: Pull complete
Digest: sha256:ec8dfb24c643b73903905fdd2b7bbffe818891fdba968bd97c8a8057bd01f3c7
Status: Downloaded newer image for docker.io/wurstmeister/kafka:2.12-2.5.0

4 Start the Kafka container

Run a container from the Kafka image. Note that the image name must come last, after all the options:

# Single-node setup
docker run -d --name kafka \
-p 9092:9092 \
-e KAFKA_BROKER_ID=0 \
-e KAFKA_ZOOKEEPER_CONNECT=10.0.0.101:2181/kafka \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://10.0.0.101:9092 \
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
-v /etc/localtime:/etc/localtime \
wurstmeister/kafka:2.12-2.5.0

Parameter notes:

-e KAFKA_BROKER_ID=0 — within a Kafka cluster, every broker identifies itself by a unique BROKER_ID.

-e KAFKA_ZOOKEEPER_CONNECT=10.0.0.101:2181/kafka — the ZooKeeper connection string; the trailing /kafka is a chroot path under which Kafka keeps its metadata (it must match the path you inspect in step 6).

-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://10.0.0.101:9092 — the address and port the broker registers in ZooKeeper and hands out to clients. For remote access this must be an externally reachable IP; otherwise clients such as a Java program will fail to connect.

-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 — the interface and port the broker listens on.

-v /etc/localtime:/etc/localtime — keeps the container's time in sync with the host.
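
Once the container is up, a quick way to confirm the settings took effect is to list the KAFKA_* variables inside the container and check the broker's startup log:

# The variables passed via -e should all show up here
docker exec kafka env | grep KAFKA_

# The broker prints its effective configuration when it starts (see 4.1)
docker logs kafka | grep -A 5 "KafkaConfig values"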

4.1 Environment variables you can set
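
The wurstmeister image translates every KAFKA_-prefixed environment variable into the matching server.properties key (lowercased, underscores become dots), so any property in the startup dump below can be overridden at docker run time. Two illustrative flags (the values here are only examples):

# KAFKA_LOG_RETENTION_HOURS=24 becomes log.retention.hours=24
-e KAFKA_LOG_RETENTION_HOURS=24 \
# KAFKA_MESSAGE_MAX_BYTES=2000000 becomes message.max.bytes=2000000
-e KAFKA_MESSAGE_MAX_BYTES=2000000 \

The dump a freshly started broker writes to its log looks like this: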

[2021-04-19 23:49:11,660] INFO KafkaConfig values:
advertised.host.name = null
advertised.listeners = PLAINTEXT://127.0.0.1:9092
advertised.port = null
authorizer.class.name =
auto.create.topics.enable = true
auto.leader.rebalance.enable = true
background.threads = 10
broker.id = 0
broker.id.generation.enable = true
broker.rack = null
compression.type = producer
connections.max.idle.ms = 600000
controlled.shutdown.enable = true
controlled.shutdown.max.retries = 3
controlled.shutdown.retry.backoff.ms = 5000
controller.socket.timeout.ms = 30000
create.topic.policy.class.name = null
default.replication.factor = 1
delete.topic.enable = false
fetch.purgatory.purge.interval.requests = 1000
group.max.session.timeout.ms = 300000
group.min.session.timeout.ms = 6000
host.name =
inter.broker.listener.name = null
inter.broker.protocol.version = 0.10.2-IV0
leader.imbalance.check.interval.seconds = 300
leader.imbalance.per.broker.percentage = 10
listener.security.protocol.map = SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,TRACE:TRACE,SASL_SSL:SASL_SSL,PLAINTEXT:PLAINTEXT
listeners = PLAINTEXT://0.0.0.0:9092
log.cleaner.backoff.ms = 15000
log.cleaner.dedupe.buffer.size = 134217728
log.cleaner.delete.retention.ms = 86400000
log.cleaner.enable = true
log.cleaner.io.buffer.load.factor = 0.9
log.cleaner.io.buffer.size = 524288
log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
log.cleaner.min.cleanable.ratio = 0.5
log.cleaner.min.compaction.lag.ms = 0
log.cleaner.threads = 1
log.cleanup.policy = [delete]
log.dir = /tmp/kafka-logs
log.dirs = /kafka/kafka-logs-2bfafd8a90d1
log.flush.interval.messages = 9223372036854775807
log.flush.interval.ms = null
log.flush.offset.checkpoint.interval.ms = 60000
log.flush.scheduler.interval.ms = 9223372036854775807
log.index.interval.bytes = 4096
log.index.size.max.bytes = 10485760
log.message.format.version = 0.10.2-IV0
log.message.timestamp.difference.max.ms = 9223372036854775807
log.message.timestamp.type = CreateTime
log.preallocate = false
log.retention.bytes = -1
log.retention.check.interval.ms = 300000
log.retention.hours = 168
log.retention.minutes = null
log.retention.ms = null
log.roll.hours = 168
log.roll.jitter.hours = 0
log.roll.jitter.ms = null
log.roll.ms = null
log.segment.bytes = 1073741824
log.segment.delete.delay.ms = 60000
max.connections.per.ip = 2147483647
max.connections.per.ip.overrides =
message.max.bytes = 1000012
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
min.insync.replicas = 1
num.io.threads = 8
num.network.threads = 3
num.partitions = 1
num.recovery.threads.per.data.dir = 1
num.replica.fetchers = 1
offset.metadata.max.bytes = 4096
offsets.commit.required.acks = -1
offsets.commit.timeout.ms = 5000
offsets.load.buffer.size = 5242880
offsets.retention.check.interval.ms = 600000
offsets.retention.minutes = 1440
offsets.topic.compression.codec = 0
offsets.topic.num.partitions = 50
offsets.topic.replication.factor = 3
offsets.topic.segment.bytes = 104857600
port = 9092
principal.builder.class = class org.apache.kafka.common.security.auth.DefaultPrincipalBuilder
producer.purgatory.purge.interval.requests = 1000
queued.max.requests = 500
quota.consumer.default = 9223372036854775807
quota.producer.default = 9223372036854775807
quota.window.num = 11
quota.window.size.seconds = 1
replica.fetch.backoff.ms = 1000
replica.fetch.max.bytes = 1048576
replica.fetch.min.bytes = 1
replica.fetch.response.max.bytes = 10485760
replica.fetch.wait.max.ms = 500
replica.high.watermark.checkpoint.interval.ms = 5000
replica.lag.time.max.ms = 10000
replica.socket.receive.buffer.bytes = 65536
replica.socket.timeout.ms = 30000
replication.quota.window.num = 11
replication.quota.window.size.seconds = 1
request.timeout.ms = 30000
reserved.broker.max.id = 1000
sasl.enabled.mechanisms = [GSSAPI]
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.principal.to.local.rules = [DEFAULT]
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.mechanism.inter.broker.protocol = GSSAPI
security.inter.broker.protocol = PLAINTEXT
socket.receive.buffer.bytes = 102400
socket.request.max.bytes = 104857600
socket.send.buffer.bytes = 102400
ssl.cipher.suites = null
ssl.client.auth = none
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = null
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
unclean.leader.election.enable = true
zookeeper.connect = 127.0.0.1:2181
zookeeper.connection.timeout.ms = 6000
zookeeper.session.timeout.ms = 6000
zookeeper.set.acl = false
zookeeper.sync.time.ms = 2000

5 Container status after ZooKeeper and Kafka start

[root@blog]$docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
d8e3973e228c wurstmeister/kafka:2.12-2.5.0 "start-kafka.sh" 8 seconds ago Up 7 seconds 0.0.0.0:9092->9092/tcp kafka
53f73b512f02 zookeeper:3.6 "/docker-entrypoin..." 34 minutes ago Up 34 minutes 2888/tcp, 3888/tcp, 0.0.0.0:2181->2181/tcp, 8080/tcp zookeeper

6 Verify that Kafka is working

Check the Kafka metadata registered in ZooKeeper:

docker exec -it 53f73b512f02 /bin/bash

zkCli.sh -server 127.0.0.1:2181

[zk: 127.0.0.1:2181(CONNECTED) 7] ls /
[zookeeper, kafka]
[zk: 127.0.0.1:2181(CONNECTED) 8] ls /kafka
[cluster, controller, controller_epoch, brokers, admin, isr_change_notification, consumers, config]
[zk: 127.0.0.1:2181(CONNECTED) 9]
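
From the same zkCli session you can drill further into the chroot to confirm the broker registered itself; the ID from KAFKA_BROKER_ID and the advertised listener from step 4 should appear (paths assume the /kafka chroot used above):

ls /kafka/brokers/ids
get /kafka/brokers/ids/0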

Enter the Kafka container and check the broker process:

[root@blog]$docker exec -it d8e3973e228c /bin/sh
/ # ps -ef
PID USER TIME COMMAND
1 root 0:08 /usr/lib/jvm/java-1.8-openjdk/jre/bin/java -Xmx1G -Xms1G -server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxIn
502 root 0:00 /bin/sh
506 root 0:00 ps -ef
/ #

Other common commands

tip

If you cannot find kafka-console-producer.sh, search for it with find:

find / -name "kafka*.sh"
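
In the wurstmeister image the scripts usually land under /opt/kafka/bin and are already on the PATH, so they can be run from any directory; the exact path varies by version, so trust the find output above:

# Assumed location in this image; confirm with the find command above
ls /opt/kafka/bin/kafka-console-producer.sh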

Producer (each line typed is sent as one message):

./kafka-console-producer.sh --broker-list localhost:9092 --topic sun
123456789
abcdefgs

Consumer (replays the topic from the beginning):

./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic sun --from-beginning

123456789
abcdefgs
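
To test load-balanced consumption you can start several consumers in the same consumer group (the --group flag on the console consumer); partitions are then divided among the group members, so with a single-partition topic only one of them will receive messages:

./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic sun --group test-group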

List all topics

./kafka-topics.sh --list --zookeeper {ip}:{port}

Describe a topic

./kafka-topics.sh --describe --zookeeper {ip}:{port}  --topic test
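
Since Kafka 2.2 the --zookeeper flag on kafka-topics.sh is deprecated in favor of talking to the broker directly, and this image runs 2.5, so the equivalent commands also work:

./kafka-topics.sh --list --bootstrap-server localhost:9092
./kafka-topics.sh --describe --bootstrap-server localhost:9092 --topic test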

Create a topic

# Three parameters are required when creating a topic:
# 1. --partitions (number of partitions)
# 2. --topic (topic name)
# 3. --replication-factor (replication factor)
# The --create flag marks the operation as a create.
# If the broker was started with the /kafka chroot as in step 4, point
# --zookeeper at {ip}:2181/kafka instead.
sh kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test1

Alter a topic: increase the partition count (partitions can only be increased, never reduced)

sh kafka-topics.sh --zookeeper localhost:2181 --topic test1 --alter --partitions 2

Delete a topic (if the broker runs with delete.topic.enable=false, as in the config dump above, the topic is only marked for deletion)

sh kafka-topics.sh --zookeeper localhost:2181 --delete --topic test1