Kafka was originally developed at LinkedIn. It is a distributed, partitioned, replicated messaging system that relies on ZooKeeper for coordination, and its defining strength is processing large volumes of data in real time. This suits a wide range of scenarios: Hadoop-based batch processing, low-latency real-time systems, Storm/Spark stream-processing engines, web/nginx logs and access logs, messaging services, and so on. Written in Scala, Kafka was open-sourced by LinkedIn through the Apache Incubator in 2011 and later became a top-level Apache project.
As this introduction suggests, Kafka positions itself not merely as a message system but as a distributed streaming platform built on a publish-subscribe messaging mechanism.
Producer: the data producer
Consumer: the data consumer
Consumer Group: a group of consumers acting as one logical subscriber; each message in a topic is delivered to only one consumer within each subscribing group
Broker: a server node in the Kafka cluster
Topic: a named category to which messages are published
Partition: a subdivision of a topic; each topic is split into one or more ordered partitions
Replication: a replica of a partition
Replication Leader: the lead replica of a partition, through which all reads and writes for that partition go
Replication Manager: the component responsible for managing the partition replicas
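To see how topics, partitions, replicas, and leaders fit together, you can ask Kafka to describe a topic; the output lists each partition with its leader broker and in-sync replica set. A sketch assuming a local broker and the my-kafka-topic created in the quick start below:

```sh
sudo ./bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic my-kafka-topic
```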
Kafka relies on ZooKeeper for distributed coordination, so ZooKeeper must be installed alongside it. Download both packages from their official sites.
In the conf folder of the unpacked ZooKeeper directory, copy the file zoo_sample.cfg and name the copy zoo.cfg. zoo.cfg contains five configuration entries in total, all of which can be left at their defaults.
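For reference, the stock zoo_sample.cfg ships with five entries along these lines (exact paths may vary by release):

```properties
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/tmp/zookeeper
clientPort=2181
```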
Next, in the config folder under the Kafka root directory, open server.properties and check the following entries (these are usually the defaults, so no change is needed):
```properties
# ZooKeeper connection address
zookeeper.connect=localhost:2181
# Unique id of this broker in the cluster
broker.id=0
# Directory where the message logs are stored
log.dirs=/tmp/kafka-logs
```
The config folder also contains a ZooKeeper configuration file (zookeeper.properties); you can customize it and point ZooKeeper at it on startup, as the startup commands below do.
Kafka's bin directory contains startup scripts for most of its functionality; use them to start and control the individual Kafka components.
Starting Kafka
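ZooKeeper must be running before the broker starts. A typical invocation with the bundled scripts, run from the Kafka root directory (zookeeper.properties is the file mentioned above):

```sh
# Start ZooKeeper using the config shipped with Kafka
sudo ./bin/zookeeper-server-start.sh config/zookeeper.properties
# In another terminal, start the Kafka broker
sudo ./bin/kafka-server-start.sh config/server.properties
```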
```sh
# Create a topic
sudo ./bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 3 --topic my-kafka-topic
# List topics
sudo ./bin/kafka-topics.sh --list --zookeeper localhost:2181
# Start a console producer
sudo ./bin/kafka-console-producer.sh --broker-list localhost:9092 --topic my-kafka-topic
# Start a console consumer (in another terminal)
sudo ./bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic my-kafka-topic --from-beginning
```

Type messages such as `first message` and `second message` into the producer console; the consumer prints each message as it arrives.
Add the dependencies in pom.xml
```xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.1.2.RELEASE</version>
        <relativePath/>
    </parent>
    <groupId>com.zang</groupId>
    <artifactId>kafka</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>kafka</name>
    <description>Demo project for Spring Boot</description>

    <properties>
        <java.version>1.8</java.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.36</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka-test</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
</project>
```
The supporting entity classes
```java
package com.zang.kafka.common;

import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.Setter;
import lombok.ToString;

/**
 * Message entity
 */
@Getter
@Setter
@EqualsAndHashCode
@ToString
public class MessageEntity {
    /** Title */
    private String title;
    /** Body */
    private String body;
}
```
```java
package com.zang.kafka.common;

import lombok.Getter;
import lombok.Setter;

import java.io.Serializable;

/**
 * Uniform response object for REST requests
 */
@Getter
@Setter
public class Response implements Serializable {
    private static final long serialVersionUID = -1523637783561030117L;

    /** Response code */
    private int code;
    /** Response message */
    private String message;

    public Response(int code, String message) {
        this.code = code;
        this.message = message;
    }
}
```
```java
package com.zang.kafka.common;

/**
 * Error codes
 */
public class ErrorCode {
    /** Success */
    public final static int SUCCESS = 200;
    /** Failure */
    public final static int EXCEPTION = 500;
}
```
The producer
```java
package com.zang.kafka.producer;

import com.alibaba.fastjson.JSON;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

/**
 * Producer
 */
@Component
public class SimpleProducer {
    private Logger logger = LoggerFactory.getLogger(getClass());

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    public void send(String topic, String key, Object entity) {
        logger.info("Sending message: {}", entity);
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, key, JSON.toJSONString(entity));

        long startTime = System.currentTimeMillis();
        // Send asynchronously and log the outcome in a callback
        ListenableFuture<SendResult<String, String>> future = this.kafkaTemplate.send(record);
        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
            @Override
            public void onFailure(Throwable ex) {
                logger.error("Failed to send message", ex);
            }

            @Override
            public void onSuccess(SendResult<String, String> result) {
                long elapsedTime = System.currentTimeMillis() - startTime;
                RecordMetadata metadata = result.getRecordMetadata();
                StringBuilder builder = new StringBuilder(128);
                builder.append("message(")
                        .append("key = ").append(key).append(", ")
                        .append("message = ").append(entity).append(") ")
                        .append("sent to partition(").append(metadata.partition()).append(") ")
                        .append("with offset(").append(metadata.offset()).append(") ")
                        .append("in ").append(elapsedTime).append(" ms");
                logger.info("Message sent successfully: {}", builder.toString());
            }
        });
    }
}
```
The consumer
```java
package com.zang.kafka.consumer;

import com.alibaba.fastjson.JSONObject;
import com.zang.kafka.common.MessageEntity;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Component;

import java.util.Optional;

/**
 * Consumer
 */
@Component
public class SimpleConsumer {
    private Logger logger = LoggerFactory.getLogger(getClass());

    @KafkaListener(topics = "${kafka.topic.default}")
    public void listen(ConsumerRecord<?, ?> record,
                       @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        // Guard against a null payload
        Optional<?> kafkaMessage = Optional.ofNullable(record.value());
        if (kafkaMessage.isPresent()) {
            // Extract and deserialize the message
            Object message = kafkaMessage.get();
            MessageEntity messageEntity = JSONObject.parseObject(message.toString(), MessageEntity.class);
            logger.info("Received topic: {}", topic);
            logger.info("Received record: {}", record);
            logger.info("Received message: {}", messageEntity);
        }
    }
}
```
The controller
```java
package com.zang.kafka.controller;

import com.alibaba.fastjson.JSON;
import com.zang.kafka.common.ErrorCode;
import com.zang.kafka.common.MessageEntity;
import com.zang.kafka.common.Response;
import com.zang.kafka.producer.SimpleProducer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.web.bind.annotation.*;

/**
 * Producer controller
 */
@RestController
@RequestMapping("/producer")
public class ProducerController {
    private Logger logger = LoggerFactory.getLogger(getClass());

    @Autowired
    private SimpleProducer simpleProducer;

    @Value("${kafka.topic.default}")
    private String topic;

    private static final String KEY = "key";

    /**
     * Send a message
     */
    @PostMapping("/send")
    public Response sendKafka(@RequestBody MessageEntity message) {
        try {
            logger.info("Kafka message: {}", JSON.toJSONString(message));
            this.simpleProducer.send(topic, KEY, message);
            logger.info("Kafka message sent successfully!");
            return new Response(ErrorCode.SUCCESS, "Kafka message sent successfully");
        } catch (Exception ex) {
            logger.error("Failed to send Kafka message:", ex);
            return new Response(ErrorCode.EXCEPTION, "Failed to send Kafka message");
        }
    }
}
```
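With the application running, the endpoint can be exercised from the command line. A sketch assuming Spring Boot's default port 8080; the payload fields match MessageEntity:

```sh
curl -X POST http://localhost:8080/producer/send \
     -H "Content-Type: application/json" \
     -d '{"title": "first message", "body": "hello kafka"}'
```

The SimpleConsumer registered on the same topic should then log the deserialized message.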
Configure application.properties
```properties
##---------- Kafka configuration
## Default topic
kafka.topic.default=my-kafka-topic
# Kafka broker address
spring.kafka.bootstrap-servers=47.88.156.142:9092

# Producer configuration
spring.kafka.producer.retries=0
# Number of messages sent per batch
spring.kafka.producer.batch-size=4096
# Buffer capacity
spring.kafka.producer.buffer-memory=40960
# Serializers for the message key and value
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer

# Consumer configuration
spring.kafka.consumer.group-id=my
spring.kafka.consumer.auto-commit-interval=100
# New consumer groups start from the latest offset
spring.kafka.consumer.auto-offset-reset=latest
spring.kafka.consumer.enable-auto-commit=true
# Deserializers for the message key and value
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer

# Number of threads in the listener container, to increase concurrency
spring.kafka.listener.concurrency=3
```
The startup class
```java
package com.zang.kafka;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.kafka.annotation.EnableKafka;

@SpringBootApplication
@EnableKafka
public class KafkaApplication {
    public static void main(String[] args) {
        SpringApplication.run(KafkaApplication.class, args);
    }
}
```
Avoiding zombie instances
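Zombie instances arise when two copies of the same logical producer are alive at once, for example after a hang and restart, and both keep writing. Kafka's transactional producer guards against this: all instances of a logical producer share one transactional.id, each call to initTransactions() bumps a broker-side epoch, and writes from instances holding an older epoch are rejected. A minimal sketch with the plain Kafka client (kafka-clients 0.11+), separate from the Spring setup above; the transactional.id and topic name are illustrative:

```java
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringSerializer;

public class TransactionalProducerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // All instances of this logical producer share one transactional.id;
        // the broker bumps an epoch on each initTransactions() call and
        // rejects writes from instances holding an older epoch (zombies).
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "my-transactional-id");

        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        producer.initTransactions();
        try {
            producer.beginTransaction();
            producer.send(new ProducerRecord<>("my-kafka-topic", "key", "hello"));
            producer.commitTransaction();
        } catch (ProducerFencedException e) {
            // A newer instance registered the same transactional.id;
            // this instance is now a zombie and must be closed.
            producer.close();
        }
    }
}
```

An instance that catches ProducerFencedException has been superseded by a newer one and must simply close; it must not retry or abort the transaction.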
Source:
imooc course: https://www.imooc.com/learn/1043
References:
https://blog.csdn.net/liyiming2017/article/details/82790574
https://blog.csdn.net/YChenFeng/article/details/74980531