
Kafka cluster one-click install, start, and stop scripts


Kafka cluster one-click install script

[code]#!/bin/bash
# Configure the Kafka install directory; the script creates it if it does not exist
currentTime=$(date '+%Y-%m-%d %H:%M:%S')
echo -e "Please enter the Kafka install directory (created automatically if missing, no trailing /), e.g. /bigdata/install"
read kafkainstallpath

# Create the Kafka install directory
if [ ! -d $kafkainstallpath ]; then
mkdir -p $kafkainstallpath
fi
if [ ! -d $kafkainstallpath ]; then
echo "Failed to create directory $kafkainstallpath! Please check that you have permission"
exit 1
fi

# Extract the tarball
currentdir=$(cd $(dirname $0); pwd)
ls | grep 'kafka.*[gz]$'
if [ $? -ne 0 ]; then
# No Kafka tarball found in the current directory
echo "No kafka*.tar.gz found under $currentdir, please upload one first!"
exit 1
else
# Extract
tar -zxvf $currentdir/$(ls | grep 'kafka.*[gz]$') -C $kafkainstallpath
fi

kafkabanben=`ls $kafkainstallpath| grep 'kafka.*'`

confpath=$kafkainstallpath/$kafkabanben/config
# Modify the configuration file

echo -e "Please enter the Kafka broker id (must be unique across the cluster), e.g. 1"
read kafkanodename
sed -i "s/^broker.id=0/broker.id=${kafkanodename}/g" $confpath/server.properties

sed -i 's/^#listeners=PLAINTEXT:\/\/:9092/listeners=PLAINTEXT:\/\/:9092/g' $confpath/server.properties

echo -e "Please enter the Kafka data (log.dirs) directory, e.g. /bigdata/data/kafka"
read kafkalogspath

# Create the Kafka data directory
if [ ! -d $kafkalogspath ]; then
mkdir -p $kafkalogspath
fi
if [ ! -d $kafkalogspath ]; then
echo "Failed to create directory $kafkalogspath! Please check that you have permission"
exit 1
fi

bak_dir='log.dirs=/tmp/kafka-logs'
new_dir='log.dirs='$kafkalogspath
sed -i "s!${bak_dir}!${new_dir}!g" $confpath/server.properties

echo -e 'Please enter all ZooKeeper nodes (strictly follow the example format), e.g. cdh01:2181,cdh02:2181,cdh03:2181'
read allhosts
sed -i "s/^zookeeper.connect=localhost:2181/zookeeper.connect=$allhosts/g" $confpath/server.properties

sed -i 's/^#delete.topic.enable=true/delete.topic.enable=true/g' $confpath/server.properties

echo "log.cleanup.policy=delete" >>$confpath/server.properties

sed -i 's/^#log.retention.bytes=1073741824/log.retention.bytes=1073741824/g' $confpath/server.properties

# Kafka parameter tuning

sed -i 's/^log.retention.hours=168/log.retention.hours=72/g' $confpath/server.properties

sed -i 's/^#log.flush.interval.messages=10000/log.flush.interval.messages=10000/g' $confpath/server.properties

sed -i 's/^#log.flush.interval.ms=1000/log.flush.interval.ms=1000/g' $confpath/server.properties

# Tune thread counts based on the number of physical CPU cores
cores=$(grep "cpu cores" /proc/cpuinfo | uniq | awk '{print $NF}')

# num.network.threads = cores + 1
bak_count="num.network.threads=3"
new_count="num.network.threads=$((cores+1))"
sed -i "s!${bak_count}!${new_count}!g" $confpath/server.properties

# num.io.threads = 2 * cores (the stock server.properties ships with num.io.threads=8)
bak_io="num.io.threads=8"
new_io="num.io.threads=$((cores*2))"
sed -i "s!${bak_io}!${new_io}!g" $confpath/server.properties

# PATH setup
# Append to the end of ~/.bash_profile
echo "">>~/.bash_profile
echo "#KAFKA $currentTime">>~/.bash_profile
echo "export KAFKA_HOME=$kafkainstallpath/$kafkabanben">>~/.bash_profile
echo 'export PATH=$PATH:$KAFKA_HOME/bin'>>~/.bash_profile
source ~/.bash_profile

echo -e "Copy to remote nodes? Enter y/n"
read flag
if [[ $flag == "y" ]]; then

# Modify and distribute the installation files
espath=$kafkainstallpath/$kafkabanben
espathtemp=$kafkainstallpath/$kafkabanben-temp
cp -r $espath $espathtemp

echo "The nodes entered below must already have passwordless SSH login configured"
echo -e 'Please enter every node except the current one (current node: cdh01), strictly in the format host:brokerId, separated by spaces, e.g. cdh02:2 cdh03:3 cdh04:4 cdh05:5'
read allnodes
user=`whoami`
array2=(${allnodes// / })
for allnode in ${array2[@]}
do
array3=(${allnode//:/ })
ip=${array3[0]}
esid=${array3[1]}
echo ======= $ip  =======

# Set this node's broker.id in the temp copy
ssh $ip "rm -rf $espath"
ssh $ip "mkdir -p $espath"

bak_dir="broker.id=$kafkanodename"
new_dir="broker.id=$esid"

sed -i "s!${bak_dir}!${new_dir}!g" $espathtemp/config/server.properties

scp -r $espathtemp/* ${user}@$ip:$espath/

ssh $ip "echo ''>>~/.bash_profile"
ssh $ip "echo '#KAFKA $currentTime'>>~/.bash_profile"
ssh $ip "echo 'export KAFKA_HOME=$kafkainstallpath/$kafkabanben'>>~/.bash_profile"
ssh $ip 'echo "export PATH=\$PATH:\$KAFKA_HOME/bin">>~/.bash_profile'
ssh $ip "source ~/.bash_profile"

# Change it back so the next iteration starts from the original broker.id
new_dir="broker.id=$kafkanodename"
bak_dir="broker.id=$esid"
sed -i "s!${bak_dir}!${new_dir}!g" $espathtemp/config/server.properties

echo ======= $ip remote copy completed =======
done

# Remove the temporary directory
rm -rf $espathtemp

fi
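
The remote-copy step above assumes passwordless SSH from the install node to every other broker host. A minimal sketch for setting up that trust, assuming the example hostnames cdh02-cdh05 from the prompt and that the same user account exists on every host:

[code]#!/bin/bash
# Push the local public key to each remote broker host (hostnames are the example values; adjust to your cluster)
nodes='cdh02 cdh03 cdh04 cdh05'
# Generate a key pair only if none exists yet
[ -f ~/.ssh/id_rsa.pub ] || ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
for node in $nodes
do
ssh-copy-id ${USER}@$node
done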

 

Kafka cluster one-click start script

[code]#!/bin/bash
kafkaServers='cdh01 cdh02 cdh03 cdh04 cdh05'
# Start Kafka on all nodes
for kafka in $kafkaServers
do
ssh -T $kafka <<EOF
source ~/.bash_profile
# KAFKA_HOME is exported in ~/.bash_profile by the install script
nohup kafka-server-start.sh \$KAFKA_HOME/config/server.properties 1>/dev/null 2>&1 &
EOF
echo "Starting Kafka on node $kafka ... [ done ]"
sleep 5
done
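
After starting, it is worth confirming that a broker process is actually up on every node. A small verification sketch, assuming the same host list; it uses the same kind of kafka.Kafka process check that kafka-server-stop.sh relies on:

[code]#!/bin/bash
kafkaServers='cdh01 cdh02 cdh03 cdh04 cdh05'
for kafka in $kafkaServers
do
# The [k]afka bracket pattern prevents grep from matching its own command line
count=$(ssh $kafka "ps -ef | grep -c '[k]afka\.Kafka'")
echo "$kafka: $count Kafka broker process(es) running"
done
# Optional smoke test, for Kafka versions whose CLI still accepts --zookeeper:
# kafka-topics.sh --list --zookeeper cdh01:2181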

 

Kafka cluster one-click stop script

[code]#!/bin/bash
kafkaServers='cdh01 cdh02 cdh03 cdh04 cdh05'
# Stop Kafka on all nodes
for kafka in $kafkaServers
do
ssh -T $kafka <<EOF
source ~/.bash_profile
# kafka-server-stop.sh is on the PATH via the KAFKA_HOME exported by the install script
kafka-server-stop.sh
EOF
echo "Stopping Kafka on node $kafka ... [ done ]"
sleep 5
done
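
kafka-server-stop.sh only sends a termination signal, so a broker can take a few seconds to shut down. A sketch for confirming that no broker process is left on any node, assuming the same host list:

[code]#!/bin/bash
kafkaServers='cdh01 cdh02 cdh03 cdh04 cdh05'
for kafka in $kafkaServers
do
count=$(ssh $kafka "ps -ef | grep -c '[k]afka\.Kafka'")
if [ "$count" -eq 0 ]; then
echo "$kafka: Kafka stopped"
else
echo "$kafka: $count Kafka process(es) still running"
fi
done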

 
