最详细没有之一的部署Zookeeper+Kafka 集群
2020-07-09 18:00
113 查看
安装 Zookeeper+Kafka集群(所有节点全部操作)
修改主机名、关闭防火墙、配置本地解析
[root@zk-kafka1 ~]# hostnamectl set-hostname zk-kafka1 [root@zk-kafka1 ~]# systemctl stop firewalld && setenforce 0 [root@zk-kafka1 ~]# cat >> /etc/hosts << EOF 10.11.66.201 zk-kafka1 10.11.66.202 zk-kafka2 10.11.66.203 zk-kafka3 EOF [root@zk-kafka1 ~]# scp /etc/hosts 10.11.66.202:/etc/hosts [root@zk-kafka1 ~]# scp /etc/hosts 10.11.66.203:/etc/hosts
部署jdk环境
[root@zk-kafka1 ~]# tar -xzf jdk-13.0.2_linux-x64_bin.tar.gz -C /usr/local/ [root@zk-kafka1 ~]# cd /usr/local/ [root@zk-kafka1 local]# ls [root@zk-kafka1 local]# ln -s jdk-13.0.2/ java [root@zk-kafka1 local]# cat >> /etc/profile << 'EOF' export JAVA_HOME=/usr/local/java export JRE_HOME=/usr/local/java/jre export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH EOF [root@zk-kafka1 local]# source /etc/profile
部署 Zookeeper
[root@zk-kafka1 ~]# cd /usr/local [root@zk-kafka1 local]# mkdir -p zookeeper/{zkdata,zkdatalog} [root@zk-kafka1 zookeeper]# cd zookeeper/ [root@zk-kafka1 zookeeper]# tar -xf apache-zookeeper-3.6.1-bin.tar.gz [root@zk-kafka1 zookeeper]# ln -s apache-zookeeper-3.6.1-bin zookeeper
定义配置文件(配置文件所有节点都一样)
[root@zk-kafka1 ~]# cd /usr/local/zookeeper/zookeeper/conf [root@zk-kafka1 conf]# cp zoo_sample.cfg zoo.cfg [root@zk-kafka1 conf]# > zoo.cfg [root@zk-kafka1 conf]# vim zoo.cfg tickTime=2000 initLimit=10 syncLimit=5 dataDir=/usr/local/zookeeper/zkdata dataLogDir=/usr/local/zookeeper/zkdatalog clientPort=12181 server.1=10.11.66.201:12888:13888 server.2=10.11.66.202:12888:13888 server.3=10.11.66.203:12888:13888
创建myid文件
[root@zk-kafka1 ~]# echo "1" >/usr/local/zookeeper/zkdata/myid [root@zk-kafka2 ~]# echo "2" >/usr/local/zookeeper/zkdata/myid [root@zk-kafka3 ~]# echo "3" >/usr/local/zookeeper/zkdata/myid
启动服务并查看
[root@zk-kafka1 ~]# cd /usr/local/zookeeper/zookeeper/bin [root@zk-kafka1 bin]# ./zkServer.sh start [root@zk-kafka1 bin]# ./zkServer.sh status [root@zk-kafka1 bin]# jps
部署kafka
[root@zk-kafka1 ~]# cd /usr/local/ [root@zk-kafka1 ~]# mkdir -p kafka/kafkalogs [root@zk-kafka1 ~]# cd kafka/ [root@zk-kafka1 ~]# tar -xf kafka_2.13-2.5.0.tgz [root@zk-kafka1 ~]# cd /usr/local/kafka/kafka_2.13-2.5.0/config/ [root@zk-kafka1 ~]# cp server.properties{,.bak} [root@zk-kafka1 ~]# > server.properties [root@zk-kafka1 ~]# vim server.properties
201:
broker.id=1 listeners=PLAINTEXT://10.11.66.201:9092 num.network.threads=3 num.io.threads=8 socket.send.buffer.bytes=102400 socket.receive.buffer.bytes=102400 socket.request.max.bytes=104857600 log.dirs=/usr/local/kafka/kafkalogs num.partitions=1 num.recovery.threads.per.data.dir=1 offsets.topic.replication.factor=1 transaction.state.log.replication.factor=1 transaction.state.log.min.isr=1 log.retention.hours=168 message.max.bytes=5242880 default.replication.factor=2 replica.fetch.max.bytes=5242880 log.segment.bytes=1073741824 log.retention.check.interval.ms=300000 zookeeper.connect=10.11.66.201:12181,10.11.66.202:12181,10.11.66.203:12181 zookeeper.connection.timeout.ms=18000 group.initial.rebalance.delay.ms=0
202:
broker.id=2 listeners=PLAINTEXT://10.11.66.202:9092 num.network.threads=3 num.io.threads=8 socket.send.buffer.bytes=102400 socket.receive.buffer.bytes=102400 socket.request.max.bytes=104857600 log.dirs=/usr/local/kafka/kafkalogs num.partitions=1 num.recovery.threads.per.data.dir=1 offsets.topic.replication.factor=1 transaction.state.log.replication.factor=1 transaction.state.log.min.isr=1 log.retention.hours=168 message.max.bytes=5242880 default.replication.factor=2 replica.fetch.max.bytes=5242880 log.segment.bytes=1073741824 log.retention.check.interval.ms=300000 zookeeper.connect=10.11.66.201:12181,10.11.66.202:12181,10.11.66.203:12181 zookeeper.connection.timeout.ms=18000 group.initial.rebalance.delay.ms=0
203:
broker.id=3 listeners=PLAINTEXT://10.11.66.203:9092 num.network.threads=3 num.io.threads=8 socket.send.buffer.bytes=102400 socket.receive.buffer.bytes=102400 socket.request.max.bytes=104857600 log.dirs=/usr/local/kafka/kafkalogs num.partitions=1 num.recovery.threads.per.data.dir=1 offsets.topic.replication.factor=1 transaction.state.log.replication.factor=1 transaction.state.log.min.isr=1 log.retention.hours=168 message.max.bytes=5242880 default.replication.factor=2 replica.fetch.max.bytes=5242880 log.segment.bytes=1073741824 log.retention.check.interval.ms=300000 zookeeper.connect=10.11.66.201:12181,10.11.66.202:12181,10.11.66.203:12181 zookeeper.connection.timeout.ms=18000 group.initial.rebalance.delay.ms=0
启动kafka集群并检测是否启动
[root@zk-kafka1 ~]# cd /usr/local/kafka/kafka_2.13-2.5.0/bin/ [root@zk-kafka1 ~]# ./kafka-server-start.sh -daemon ../config/server.properties [root@zk-kafka1 ~]# jps
创建topic
[root@zk-kafka1 ~]# pwd [root@zk-kafka1 ~]# ./kafka-topics.sh --create --zookeeper 10.11.66.201:12181 --replication-factor 2 --partitions 1 --topic shuaige
在节点1上创建发布者
[root@zk-kafka1 ~]# pwd [root@zk-kafka1 ~]# ./kafka-console-producer.sh --broker-list 10.11.66.201:9092 --topic shuaige >hyf >
在节点2上创建订阅者
[root@zk-kafka1 ~]# pwd [root@zk-kafka1 ~]# ./kafka-console-consumer.sh --bootstrap-server 10.11.66.202:9092 --topic shuaige --from-beginning
查看topic状态
[root@zk-kafka1 ~]# pwd [root@zk-kafka1 ~]# ./kafka-topics.sh --describe --zookeeper 10.11.66.203:12181 --topic shuaige
相关文章推荐
- zookeeper+kafka集群安装部署
- kafka集群zookeeper集群详细配置
- Docker快速搭建Zookeeper和kafka集群超详细
- Zookeeper+Kafka集群部署
- Kafka+Zookeeper集群安装及部署
- 生产环境实战spark (11)分布式集群 5台设备 Zookeeper集群、Kafka集群安装部署
- kafka学习总结之集群部署和zookeeper
- kafka集群和zookeeper集群的部署,kafka的java代码示例
- zookeeper+kafka集群安装部署
- 在linux下通过zookeeper部署solr集群详细配置教程
- Kafka(自带的zookeeper)集群搭建详细步骤
- Kafka详细教程:下载、安装、配置与集群部署
- Kafka-0.10.2.1使用独立zookeeper部署集群
- zookeeper+kafka集群安装部署
- centos7.6下kafka_2.12-2.1.1+zookeeper-3.4.13集群部署
- Zookeeper+Kafka集群部署方案
- Zookeeper3.4.6与Kafka0.8.1.1集群安装和配置详细步骤
- Kafka的3节点集群详细启动步骤(Zookeeper是外装)
- Zookeeper+kafka 三节点集群部署手册
- Kafka集群搭建01-Zookeeper 集群部署