A Spring Cloud Stream application can have any number of input and output channels, which are defined in an interface with the @Input and @Output annotations.
package cn.test.custom;

import org.springframework.cloud.stream.annotation.Output;
import org.springframework.messaging.MessageChannel;

/**
 * Custom source interface declaring three output channels.
 */
public interface CustomSource {

    String OUTPUT = "output3";

    @Output(OUTPUT)
    MessageChannel output();

    String OUTPUT1 = "output1";
    String OUTPUT2 = "output2";

    @Output(OUTPUT1)
    MessageChannel output1();

    @Output(OUTPUT2)
    MessageChannel output2();
}
package cn.test.custom;

import java.util.Random;
import java.util.UUID;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import cn.test.bean.Item;
import cn.test.bean.Order;

/**
 * Publishes messages on the three channels declared in CustomSource.
 * The @Scheduled methods require scheduling to be enabled
 * (e.g. @EnableScheduling on a configuration class).
 */
@EnableBinding({ CustomSource.class })
@Component
public class Producer {

    @Autowired
    private CustomSource source;

    // Sends a "Hot" Order to output3 every 5 seconds.
    @Scheduled(fixedRate = 5000)
    public void produceHotDrinks() {
        source.output().send(
                MessageBuilder.withPayload(Order.builder().flag("Hot").num(new Random().nextInt(100)).build()).build());
    }

    // Sends a "Cold" Order to output3 every 3 seconds.
    @Scheduled(fixedRate = 3000)
    public void produceColdDrinks() {
        source.output().send(MessageBuilder
                .withPayload(Order.builder().flag("Cold").num(new Random().nextInt(100)).build()).build());
    }

    // Sends an Item to output1 every 3 seconds.
    @Scheduled(fixedRate = 3000)
    public void produceItem() {
        source.output1()
                .send(MessageBuilder.withPayload(
                        Item.builder().id(UUID.randomUUID().toString()).timestamp(System.currentTimeMillis()).build())
                        .build());
    }

    // Sends a plain String (a random UUID) to output2 every 3 seconds.
    @Scheduled(fixedRate = 3000)
    public void produceMsg() {
        source.output2().send(MessageBuilder.withPayload(UUID.randomUUID().toString()).build());
    }
}
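The Order and Item beans from cn.test.bean are not shown in the article. Judging from the builder() calls above and the toString output in the console log further down (Order(flag=Cold, num=29), Item(id=..., timestamp=...)), they are simple Lombok data classes; the following is only a minimal sketch under that assumption:

package cn.test.bean;

import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;

// Assumed shape of the Order payload; the no-args constructor lets the JSON
// converter on the consumer side rebuild the object.
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class Order {
    private String flag; // e.g. "Hot" or "Cold"
    private int num;
}

and, under the same assumption, Item:

package cn.test.bean;

import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;

// Assumed shape of the Item payload.
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class Item {
    private String id;
    private long timestamp;
}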
package cn.test.custom;

import org.springframework.cloud.stream.annotation.Input;
import org.springframework.messaging.SubscribableChannel;

/**
 * Custom sink interface declaring four input channels.
 */
public interface CustomSink {

    String INPUT = "input3";

    @Input(INPUT)
    SubscribableChannel input();

    String INPUT0 = "input0";
    String INPUT1 = "input1";
    String INPUT2 = "input2";

    @Input(INPUT0)
    SubscribableChannel input0();

    @Input(INPUT1)
    SubscribableChannel input1();

    @Input(INPUT2)
    SubscribableChannel input2();
}
package cn.test.custom;

import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.messaging.support.GenericMessage;

import cn.test.bean.Item;
import cn.test.bean.Order;

/**
 * Listens on the four channels declared in CustomSink.
 */
@EnableBinding({ CustomSink.class })
public class Consumer {

    // input3 -> topic wdtest0, group "average"
    @StreamListener(CustomSink.INPUT)
    public synchronized void listen_average(Order order) {
        System.out.println("Order Received For Average : " + order);
    }

    // input0 -> topic wdtest0, group "hdfsWrite"
    @StreamListener(CustomSink.INPUT0)
    public synchronized void listen_hdfsWrite(Order order) {
        System.out.println("Order Received For hdfsWrite : " + order);
    }

    // input1 -> topic wdtest1
    @StreamListener(CustomSink.INPUT1)
    public synchronized void receive(Item item) {
        System.out.println("Item Received: " + item);
    }

    // input2 -> topic wdtest2; receives the raw message and prints its payload
    @StreamListener(CustomSink.INPUT2)
    public synchronized void get(GenericMessage msg) {
        System.out.println("Msg Received: " + msg.getPayload());
    }
}
---
# Producer side: one output binding per channel, each mapped to a Kafka topic.
spring:
  cloud:
    stream:
      bindings:
        output:
          destination: wdtest_
          content-type: application/json
        output3:
          destination: wdtest0
          content-type: application/json
        output1:
          destination: wdtest1
          content-type: application/json
        output2:
          destination: wdtest2
          content-type: application/json
        output11:
          destination: wdtest11
          content-type: application/json
        output22:
          destination: wdtest22
          content-type: application/json
      kafka:
        binder:
          zkNodes: 192.168.164.129
          brokers: 192.168.164.129
        bindings:
          input:
            consumer:
              resetOffsets: true
---
# Consumer side: input bindings, each with a destination and a consumer group.
spring:
  cloud:
    stream:
      bindings:
        input:
          destination: wdtest_
          content-type: application/json
          group: average
        input3:
          destination: wdtest0
          content-type: application/json
          group: average
        input0:
          destination: wdtest0
          content-type: application/json
          group: hdfsWrite
        input1:
          destination: wdtest1
          content-type: application/json
          group: average
        input2:
          destination: wdtest2
          content-type: application/json
          group: average
        input11:
          destination: wdtest11
          content-type: application/json
          group: average
        input22:
          destination: wdtest22
          content-type: application/json
          group: average
      kafka:
        binder:
          zkNodes: 192.168.164.129
          brokers: 192.168.164.129
        bindings:
          input:
            consumer:
              resetOffsets: true
The publish-subscribe model makes it easy to connect applications through shared topics, but the ability to scale out by running multiple instances of an application is equally important. When that is done, the different instances of the application are placed in a competing-consumer relationship in which only one instance is expected to process a given message.
Spring Cloud Stream models this behavior with consumer groups (similar to, and inspired by, Kafka consumer groups). Each consumer binding names its group via the spring.cloud.stream.bindings.input.group property; for the two consumers in this example the settings are spring.cloud.stream.bindings.input.group=hdfsWrite and spring.cloud.stream.bindings.input.group=average, respectively.
Every group that subscribes to a given topic receives a copy of the published data, but within each group only one member receives any particular message. By default, when no group is specified, Spring Cloud Stream assigns the application to an anonymous, independent, single-member consumer group that is in a publish-subscribe relationship with all other consumer groups.
In the example above, two input channels (input0 and input3) belonging to different groups (hdfsWrite and average) both subscribe to the messages that the output channel output3 publishes to the topic wdtest0, so each subscribing group receives its own copy of the data. This is visible in the console output further down, where each Order appears once for the average group and once for the hdfsWrite group. The relevant bindings are:
output3:
  destination: wdtest0
  content-type: application/json
input3:
  destination: wdtest0
  content-type: application/json
  group: average
input0:
  destination: wdtest0
  content-type: application/json
  group: hdfsWrite
Received message @Sink2: message2_dbd0b929-e1c2-4d5b-b386-9e7ff6e7aaf8
Received message @Sink2: message2_3e6dcab3-6654-4f43-86e2-be42374b2855
Received message @Sink1: message1_0b75156a-50b5-4f10-ad34-3fb0a33e6b8c
Received message @Sink1: message1_b5c27937-0df9-4490-9a2b-011070207192
Received message @Sink1: message1_4386c474-d9a2-4996-b842-0bbd6b719bc5
Received message @Sink1: message1_854d846b-3b9c-4d88-83f7-4715f8c73cc4
Received message @Sink1: message1_45060d14-9473-4a89-aaa8-647ba90440cb
Item Received: Item(id=91cfcc86-dea6-4ce0-b944-c827b54bd08f, timestamp=1500560648471)
Order Received For Average : Order(flag=Cold, num=29)
Item Received: Item(id=810fee11-499e-4f9e-9dd6-c0fe9be744ef, timestamp=1500560651295)
Order Received For hdfsWrite : Order(flag=Cold, num=29)
Msg Received: 168ca8f9-05c6-4361-ad0f-130b0a8269ba
Received message @Sink2: message2_83a0f8d8-6a47-4e09-a305-18166aabea10
TimeInfo Received: SinkTimeInfo(time=1500560651673, label=207fe8b6-0cde-4731-b0b1-1b356e0184c2)
Received message @Sink1: message1_0981d125-41b3-41a3-adf6-55c8e703a9e4
Received message @Sink2: message2_9339bfbe-7caf-4a40-82c8-c9bea4b85f99
TimeInfo Received: SinkTimeInfo(time=1500560652675, label=01d31bf8-3d1a-4e1e-9a3c-eaf0015c7223)
Received message @Sink1: message1_92511056-d866-458e-8e81-11afb1741018
Order Received For Average : Order(flag=Hot, num=17)
Order Received For hdfsWrite : Order(flag=Hot, num=17)
Received message @Sink2: message2_f0cc8004-6559-49ae-86a9-392572a407ee
TimeInfo Received: SinkTimeInfo(time=1500560653676, label=d9d6a6d2-8ca0-4261-80c8-6ee67fc62124)
Received message @Sink1: message1_85693b27-3f45-4df8-86ea-35c0569436e4
Order Received For Average : Order(flag=Cold, num=87)
Order Received For hdfsWrite : Order(flag=Cold, num=87)
Msg Received: d08c48f5-076c-420c-885a-f2b673dbd399
Item Received: Item(id=71c851c2-78c7-4ea2-9408-fd7460c93778, timestamp=1500560654303)
Received message @Sink1: message1_a9b9c66c-3df5-4ca5-b037-ca57cf069e97
TimeInfo Received: SinkTimeInfo(time=1500560654678, label=a02e2166-07fe-447c-b626-9da4f6384318)
Received message @Sink2: message2_4a99a168-313a-46a9-b6f7-81079f8e6fa9
Received message @Sink2: message2_2da7af4e-0056-474c-883b-284dd6f21f9b
TimeInfo Received: SinkTimeInfo(time=1500560655685, label=fe1f535d-7c11-4460-8192-445cdb9b0f8a)
Received message @Sink1: message1_a0b0d6be-50f7-4826-8a34-22b7a212623c
TimeInfo Received: SinkTimeInfo(time=1500560656687, label=7178f942-7098-4e96-be9c-1b4e3831a61d)
Received message @Sink2: message2_a82eca34-8076-4e66-bd8c-c7fe68877528
Received message @Sink1: message1_820c6e1e-985c-4a1f-b443-6eb060ec519b
Note: by default, a Spring Boot application wires up its beans by scanning downward from the package that contains the Application class; alternatively, the packages to scan can be specified explicitly with the @ComponentScan annotation:
@ComponentScan(basePackages = { "cn.test.original" })
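For completeness, a minimal bootstrap class is sketched below; the article itself does not show one, so the class name and package are assumptions. @EnableScheduling is required for the @Scheduled methods in Producer to fire, and because the class sits in cn.test the default component scan already covers cn.test.custom and cn.test.bean.

package cn.test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.scheduling.annotation.EnableScheduling;

// Hypothetical bootstrap class; if it lived outside cn.test, the packages to
// scan would have to be listed explicitly, e.g.
// @ComponentScan(basePackages = { "cn.test.custom" }).
@SpringBootApplication
@EnableScheduling // enables the @Scheduled producers
public class CustomStreamApplication {

    public static void main(String[] args) {
        SpringApplication.run(CustomStreamApplication.class, args);
    }
}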