# Create a console consumer (old ZooKeeper-based consumer API)
kafka-console-consumer.sh --zookeeper collector1:2181,collector2:2181,collector3:2181 --from-beginning --topic pad_report_data
# Describe topic details (partitions, leaders, replicas, ISR)
kafka-topics.sh --describe --topic pad_report_data --zookeeper collector1:2181,collector2:2181,collector3:2181
# Start the Kafka broker as a background daemon
kafka-server-start.sh -daemon /wls/soft/kafka_2.10-0.10.1.1/config/server.properties
# Create a console consumer (new API: connects to brokers via --bootstrap-server)
kafka-console-consumer.sh --bootstrap-server collector1:9092,collector2:9092,collector3:9092 --from-beginning --topic pad_report_data
# Create a console producer; the topic is auto-created on first use if auto.create.topics.enable=true
kafka-console-producer.sh --broker-list collector1:9092,collector2:9092,collector3:9092 --topic demo_test
# Delete a topic (only takes effect when the broker has delete.topic.enable=true)
kafka-topics.sh --delete --zookeeper collector1:2181,collector2:2181,collector3:2181 --topic pad_report_data
# Open an interactive ZooKeeper CLI session against collector3
zkCli.sh -server collector3:2181
# Show per-partition offsets; --time -1 requests the latest (log-end) offsets
kafka-run-class.sh kafka.tools.GetOffsetShell --time -1 --broker-list collector1:9092,collector2:9092,collector3:9092 --topic pad_report_data
# Check per-partition consumer lag for the given group
# NOTE(review): ConsumerOffsetChecker is deprecated since Kafka 0.9 — prefer
#   kafka-consumer-groups.sh --describe --group <group> for the same information.
kafka-run-class.sh kafka.tools.ConsumerOffsetChecker --group group_id_1pad_report_data --topic pad_report_data --zookeeper collector1:2181,collector2:2181,collector3:2181
# Start a Flume agent (agent name: producer02) with the HTTP monitoring endpoint on port 34545;
# stdout/stderr are appended to cat.out and the process is detached with nohup.
nohup flume-ng agent -c conf -f /wls/soft/apache-flume-1.7.0-bin/conf/collect-conf.properties -n producer02 -Dflume.monitoring.type=http -Dflume.monitoring.port=34545 >/wls/soft/apache-flume-1.7.0-bin/logs/cat.out 2>&1 &
# NOTE(review): the next line is a broker configuration property, not a shell command —
# it belongs in server.properties. With it enabled the controller periodically moves
# partition leadership back to the preferred replica automatically.
auto.leader.rebalance.enable=true
# Manually trigger a preferred-replica leader election across the cluster
kafka-preferred-replica-election.sh --zookeeper collector1:2181,collector2:2181,collector3:2181
# Describe, filtered to partitions whose leader is unavailable
kafka-topics.sh --describe --topic pad_report_data --zookeeper collector1:2181,collector2:2181,collector3:2181 --unavailable-partitions
# Describe, filtered to under-replicated partitions (ISR smaller than the replica set)
kafka-topics.sh --describe --topic pad_report_data --zookeeper collector1:2181,collector2:2181,collector3:2181 --under-replicated-partitions
# Describe, filtered to topics that override the broker-default configuration
kafka-topics.sh --describe --topic pad_report_data --zookeeper collector1:2181,collector2:2181,collector3:2181 --topics-with-overrides
# Configuration (server.properties) for broker "collector2"
# Unique id of this broker within the cluster
broker.id=2
# NOTE(review): host.name / advertised.host.name are deprecated in 0.10.x in favor of
# listeners / advertised.listeners; kept here because this broker still sets them.
advertised.host.name=collector2
host.name=collector2
# Topics are auto-created on first produce/consume
auto.create.topics.enable=true
# Required for kafka-topics.sh --delete to actually remove topics
delete.topic.enable=true
default.replication.factor=2
# Thread pools for network request handling and disk I/O
num.network.threads=8
num.io.threads=16
# Default partition count for auto-created topics
num.partitions=9
num.recovery.threads.per.data.dir=1
# Socket buffer sizes (1 MiB) and max request size (100 MiB)
socket.send.buffer.bytes=1048576
socket.receive.buffer.bytes=1048576
socket.request.max.bytes=104857600
# Data (log segment) directory
log.dirs=/wls/soft/kafka_2.10-0.10.1.1/logs
# Retain segments for 7 days; roll segments at 1 GiB
log.retention.hours=168
log.segment.bytes=1073741824
# Log compaction disabled (delete-based retention only)
log.cleaner.enable=false
log.retention.check.interval.ms=300000
# Listen on all interfaces, port 9092, plaintext
listeners=PLAINTEXT://:9092
zookeeper.connection.timeout.ms=20000
zookeeper.connect=collector1:2181,collector2:2181,collector3:2181
# NOTE(review): replica.lag.max.messages was removed in Kafka 0.9.0 (KIP-16);
# this 0.10.1.1 broker ignores it — lag is governed by replica.lag.time.max.ms alone.
replica.lag.max.messages=100000
replica.lag.time.max.ms=60000
replica.fetch.wait.max.ms=3000
replica.socket.receive.buffer.bytes=524288
replica.fetch.max.bytes=5242880