您的位置:首页 > 其它

最详细没有之一的部署Zookeeper+Kafka 集群

2020-07-09 18:00 113 查看

安装 Zookeeper+Kafka集群(所有节点全部操作)

修改主机名、关闭防火墙、配置本地解析
[root@zk-kafka1 ~]# hostnamectl set-hostname zk-kafka1
[root@zk-kafka1 ~]# systemctl stop firewalld && setenforce 0
[root@zk-kafka1 ~]# cat >> /etc/hosts << EOF
10.11.66.201 zk-kafka1
10.11.66.202 zk-kafka2
10.11.66.203 zk-kafka3
EOF
[root@zk-kafka1 ~]# scp /etc/hosts 10.11.66.202:/etc/hosts
[root@zk-kafka1 ~]# scp /etc/hosts 10.11.66.203:/etc/hosts

部署jdk环境

[root@zk-kafka1 ~]# tar -xzf jdk-13.0.2_linux-x64_bin.tar.gz -C /usr/local/
[root@zk-kafka1 ~]# cd /usr/local/
[root@zk-kafka1 local]# ls
[root@zk-kafka1 local]# ln -s jdk-13.0.2/ java
[root@zk-kafka1 local]# cat >> /etc/profile << 'EOF'
export JAVA_HOME=/usr/local/java
export JRE_HOME=/usr/local/java/jre
export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH
EOF
[root@zk-kafka1 local]# source /etc/profile

部署 Zookeeper

[root@zk-kafka1 ~]# cd /usr/local
[root@zk-kafka1 local]# mkdir -p zookeeper/{zkdata,zkdatalog}
[root@zk-kafka1 local]# cd zookeeper/
[root@zk-kafka1 zookeeper]# tar -xf apache-zookeeper-3.6.1-bin.tar.gz
[root@zk-kafka1 zookeeper]# ln -s apache-zookeeper-3.6.1-bin zookeeper
定义配置文件(配置文件所有节点都一样)
[root@zk-kafka1 ~]# cd /usr/local/zookeeper/zookeeper/conf
[root@zk-kafka1 conf]# cp zoo_sample.cfg zoo.cfg
[root@zk-kafka1 conf]# > zoo.cfg
[root@zk-kafka1 conf]# vim zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper/zkdata
dataLogDir=/usr/local/zookeeper/zkdatalog
clientPort=12181
server.1=10.11.66.201:12888:13888
server.2=10.11.66.202:12888:13888
server.3=10.11.66.203:12888:13888
创建myid文件
[root@zk-kafka1 ~]# echo "1" >/usr/local/zookeeper/zkdata/myid
[root@zk-kafka2 ~]# echo "2" >/usr/local/zookeeper/zkdata/myid
[root@zk-kafka3 ~]# echo "3" >/usr/local/zookeeper/zkdata/myid
启动服务并查看
[root@zk-kafka1 ~]# cd /usr/local/zookeeper/zookeeper/bin
[root@zk-kafka1 bin]# ./zkServer.sh start
[root@zk-kafka1 bin]# ./zkServer.sh status
[root@zk-kafka1 bin]# jps

部署kafka

[root@zk-kafka1 ~]# cd /usr/local/
[root@zk-kafka1 ~]# mkdir -p kafka/kafkalogs
[root@zk-kafka1 ~]# cd kafka/
[root@zk-kafka1 ~]# tar -xf kafka_2.13-2.5.0.tgz
[root@zk-kafka1 ~]# cd /usr/local/kafka/kafka_2.13-2.5.0/config/
[root@zk-kafka1 ~]# cp server.properties{,.bak}
[root@zk-kafka1 ~]# > server.properties
[root@zk-kafka1 ~]# vim server.properties

201:

broker.id=1
listeners=PLAINTEXT://10.11.66.201:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/kafkalogs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
message.max.bytes=5242880
default.replication.factor=2
replica.fetch.max.bytes=5242880
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.11.66.201:12181,10.11.66.202:12181,10.11.66.203:12181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0

202:

broker.id=2
listeners=PLAINTEXT://10.11.66.202:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/kafkalogs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
message.max.bytes=5242880
default.replication.factor=2
replica.fetch.max.bytes=5242880
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.11.66.201:12181,10.11.66.202:12181,10.11.66.203:12181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0

203:

broker.id=3
listeners=PLAINTEXT://10.11.66.203:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/kafka/kafkalogs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
message.max.bytes=5242880
default.replication.factor=2
replica.fetch.max.bytes=5242880
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.11.66.201:12181,10.11.66.202:12181,10.11.66.203:12181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
启动kafka集群并检测是否启动
[root@zk-kafka1 ~]# cd /usr/local/kafka/kafka_2.13-2.5.0/bin/
[root@zk-kafka1 ~]# ./kafka-server-start.sh -daemon ../config/server.properties
[root@zk-kafka1 ~]# jps
创建topic
[root@zk-kafka1 ~]# pwd
[root@zk-kafka1 ~]# ./kafka-topics.sh --create --zookeeper 10.11.66.201:12181 --replication-factor 2 --partitions 1 --topic shuaige
在节点1上创建发布者
[root@zk-kafka1 ~]# pwd
[root@zk-kafka1 ~]# ./kafka-console-producer.sh --broker-list 10.11.66.201:9092 --topic shuaige
>hyf
>
在节点2上创建订阅者
[root@zk-kafka1 ~]# pwd
[root@zk-kafka1 ~]# ./kafka-console-consumer.sh  --bootstrap-server 10.11.66.202:9092 --topic shuaige --from-beginning
查看topic状态
[root@zk-kafka1 ~]# pwd
[root@zk-kafka1 ~]# ./kafka-topics.sh --describe --zookeeper 10.11.66.203:12181 --topic shuaige
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: