cd /opt
mkdir kafka
cd kafka
wget https://archive.apache.org/dist/kafka/2.2.1/kafka_2.12-2.2.1.tgz
tar -xzvf kafka_2.12-2.2.1.tgz
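If the download and extraction succeeded, the versioned directory should now sit under /opt/kafka:
ls /opt/kafka
# kafka_2.12-2.2.1  kafka_2.12-2.2.1.tgz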
vim ~/.bashrc
export KAFKA_HOME=/opt/kafka/kafka_2.12-2.2.1
export PATH=$PATH:$KAFKA_HOME/bin
source ~/.bashrc
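To confirm the new variables took effect, check that KAFKA_HOME resolves and the Kafka scripts are on the PATH:
echo $KAFKA_HOME
# /opt/kafka/kafka_2.12-2.2.1
which kafka-server-start.sh
# /opt/kafka/kafka_2.12-2.2.1/bin/kafka-server-start.sh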
cd $KAFKA_HOME
cd config
vim server.properties
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3
# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
# A comma separated list of directories under which to store log files
log.dirs=/opt/kafka/kafka_2.12-2.2.1/kafka-logs
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability, such as 3.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma-separated list of host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=master:2181,worker1:2181,worker2:2181,worker3:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
Set broker.id to a different number on each server, in order (an example mapping is shown below).
To connect to ZooKeeper, list the ZooKeeper servers in zookeeper.connect.
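For example, on this four-node cluster the ids could be assigned as follows; the numbering itself is arbitrary, only uniqueness matters:
# master  -> broker.id=0
# worker1 -> broker.id=1
# worker2 -> broker.id=2
# worker3 -> broker.id=3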
cd $KAFKA_HOME
mkdir kafka-logs
Distribute the Kafka installation that was set up on master to the workers.
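The scp commands below assume /opt/kafka already exists on each worker; if it does not, create it first (assuming the hadoop account can write to /opt, as on master):
ssh hadoop@worker1 "mkdir -p /opt/kafka"
ssh hadoop@worker2 "mkdir -p /opt/kafka"
ssh hadoop@worker3 "mkdir -p /opt/kafka"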
# Send to worker1
scp -r $KAFKA_HOME hadoop@worker1:/opt/kafka
# Send to worker2
scp -r $KAFKA_HOME hadoop@worker2:/opt/kafka
# Send to worker3
scp -r $KAFKA_HOME hadoop@worker3:/opt/kafka
After copying, edit broker.id in config/server.properties on each worker so that every broker has a unique id.
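One way to do this from master is a remote sed edit per worker; this is a sketch that assumes the layout above and the example id numbering:
ssh hadoop@worker1 "sed -i 's/^broker.id=0/broker.id=1/' /opt/kafka/kafka_2.12-2.2.1/config/server.properties"
ssh hadoop@worker2 "sed -i 's/^broker.id=0/broker.id=2/' /opt/kafka/kafka_2.12-2.2.1/config/server.properties"
ssh hadoop@worker3 "sed -i 's/^broker.id=0/broker.id=3/' /opt/kafka/kafka_2.12-2.2.1/config/server.properties"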
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
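Run the start command on every server. The ZooKeeper ensemble listed in zookeeper.connect must already be running, or the broker will exit. A healthy broker shows up in jps as a Kafka process:
jps
# 12345 Kafka   (the PID will differ)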
kafka-topics.sh --create --zookeeper master:2181 --topic MyTest --partitions 1 --replication-factor 1
kafka-topics.sh --list --zookeeper worker1:2181
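Before deleting the topic, it can be smoke-tested with the console producer and consumer, assuming the brokers listen on the default PLAINTEXT port 9092:
# Type a few messages, then Ctrl-C
kafka-console-producer.sh --broker-list master:9092 --topic MyTest
# In another terminal: read them back from the beginning, then Ctrl-C
kafka-console-consumer.sh --bootstrap-server master:9092 --topic MyTest --from-beginning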
kafka-topics.sh --delete --zookeeper master:2181 --topic MyTest
kafka-topics.sh --list --zookeeper worker1:2181
The delete actually removes the topic because delete.topic.enable=true was set in server.properties; the second --list should no longer show MyTest (it may briefly appear as marked for deletion).