# Set the static hostname for this node.
hostnamectl --static set-hostname kafka-cluster

# Install system-wide JDK environment variables.
# Use '>' (not '>>') so rerunning the script does not append duplicate
# definitions; the quoted 'EOF' delimiter keeps $VARS literal in the
# written file so they are expanded at login time, not now.
cat > /etc/profile.d/jdk.sh << 'EOF'
JAVA_HOME=/usr/local/jdk
JRE_HOME=/usr/local/jdk/jre
PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
export JAVA_HOME JRE_HOME PATH CLASSPATH
EOF

# Load the new environment into the current shell.
source /etc/profile
# Unpack the Kafka and ZooKeeper distributions.
tar -zxf kafka_2.13-2.4.1.tgz
tar -zxf zookeeper-3.4.14.tar.gz

# Lay out three ZooKeeper and three Kafka instances under /data.
mkdir -p /data
cp -r zookeeper-3.4.14 /data/zookeeper01
cp -r zookeeper-3.4.14 /data/zookeeper02
cp -r zookeeper-3.4.14 /data/zookeeper03
cp -r kafka_2.13-2.4.1 /data/kafka01
cp -r kafka_2.13-2.4.1 /data/kafka02
cp -r kafka_2.13-2.4.1 /data/kafka03

# Per-instance ZooKeeper data directories.
mkdir -p /data/zookeeper01/data /data/zookeeper02/data /data/zookeeper03/data

# Write each instance's myid (must match the server.NN id in zoo.cfg).
# FIX: the original used relative paths (zookeeper01/data/myid), which
# only work when the script happens to run from /data — use absolute
# paths so the script is cwd-independent.
echo 01 > /data/zookeeper01/data/myid
echo 02 > /data/zookeeper02/data/myid
echo 03 > /data/zookeeper03/data/myid
# Generate zoo.cfg for each of the three ZooKeeper instances.
# Use '>' so a rerun overwrites instead of appending duplicate settings.
# All three instances live on one host, so they are distinguished by
# clientPort (2181/2182/2183) and the peer/election port pairs.
cat > /data/zookeeper01/conf/zoo.cfg << EOF
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data/zookeeper01/data
clientPort=2181
server.01=10.10.10.240:2881:3881
server.02=10.10.10.240:2882:3882
server.03=10.10.10.240:2883:3883
EOF

cat > /data/zookeeper02/conf/zoo.cfg << EOF
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data/zookeeper02/data
clientPort=2182
server.01=10.10.10.240:2881:3881
server.02=10.10.10.240:2882:3882
server.03=10.10.10.240:2883:3883
EOF

cat > /data/zookeeper03/conf/zoo.cfg << EOF
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data/zookeeper03/data
clientPort=2183
server.01=10.10.10.240:2881:3881
server.02=10.10.10.240:2882:3882
server.03=10.10.10.240:2883:3883
EOF
# Per-broker data/log directories.
# FIX: the original used relative paths (kafka01/{data,logs}), which
# create the directories wherever the script happens to run — use the
# absolute /data paths that server.properties actually references.
mkdir -p /data/kafka01/{data,logs}
mkdir -p /data/kafka02/{data,logs}
mkdir -p /data/kafka03/{data,logs}
# Generate server.properties for each of the three brokers.
# Use '>' so a rerun overwrites instead of appending duplicate keys.
# Brokers share one host, distinguished by broker.id and listener port
# (9092/9093/9094). All three point at the local ZooKeeper ensemble.
# NOTE(review): the kafka0N/data dirs created above are unused here —
# log.dirs (the message store) points at kafka0N/logs; confirm intent.
cat > /data/kafka01/config/server.properties << EOF
broker.id=01
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://10.10.10.186:9092
num.network.threads=4
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
controlled.shutdown.enable=true
log.dirs=/data/kafka01/logs
EOF

cat > /data/kafka02/config/server.properties << EOF
broker.id=02
listeners=PLAINTEXT://:9093
advertised.listeners=PLAINTEXT://10.10.10.186:9093
num.network.threads=4
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
controlled.shutdown.enable=true
log.dirs=/data/kafka02/logs
EOF

cat > /data/kafka03/config/server.properties << EOF
broker.id=03
listeners=PLAINTEXT://:9094
advertised.listeners=PLAINTEXT://10.10.10.186:9094
num.network.threads=4
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=127.0.0.1:2181,127.0.0.1:2182,127.0.0.1:2183
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
controlled.shutdown.enable=true
log.dirs=/data/kafka03/logs
EOF
# Generate a startup script: ZooKeeper ensemble first, then the brokers.
# FIX: use '>' instead of '>>' — with append, '#!/bin/bash' is only the
# first line of the file on the very first run, and reruns would stack
# duplicate start commands. Also make the script executable.
cat > ~/start.sh << 'EOF'
#!/bin/bash
cd /data/zookeeper01 && bin/zkServer.sh start
cd /data/zookeeper02 && bin/zkServer.sh start
cd /data/zookeeper03 && bin/zkServer.sh start
# Give the ensemble time to elect a leader before starting the brokers.
sleep 5
cd /data/kafka01 && bin/kafka-server-start.sh -daemon config/server.properties
cd /data/kafka02 && bin/kafka-server-start.sh -daemon config/server.properties
cd /data/kafka03 && bin/kafka-server-start.sh -daemon config/server.properties
EOF
chmod +x ~/start.sh
搭建完毕,查看各种状态
额外参数说明
# 服务器之间或客户端与服务器之间维持心跳的时间间隔
# tickTime以毫秒为单位。
tickTime=2000
# 集群中的follower服务器(F)与leader服务器(L)之间的初始连接心跳数
initLimit=10
# 集群中的follower服务器与leader服务器之间请求和应答之间能容忍的最多心跳数
syncLimit=5
# 快照保存目录
# 不要设置为/tmp,该目录重新启动后会被自动清除
dataDir=/home/hadoop/data/zookeeper/data
# 日志保存目录
dataLogDir=/home/hadoop/data/zookeeper/logs
# 客户端连接端口
clientPort=2181
# 客户端最大连接数。
# 根据自己实际情况设置,默认为60个
# maxClientCnxns=60
# 三个节点配置,格式为:
# server.服务编号=服务地址:LF通信端口:选举端口
server.1=slave1:2888:3888
server.2=slave2:2888:3888
server.3=slave3:2888:3888
-
« 上一篇:
Kafka集群-沉醉寒风
-
史上最深的坑
:下一篇 »