[[tip-assign-all-parts]]
=== Manually Assigning All Partitions

Let's say you always want to read all records from all partitions (such as when using a compacted topic to load a distributed cache); in that case, it can be useful to manually assign the partitions and not use Kafka's group management.
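
The partitions to assign can be determined at runtime and provided to the listener through a SpEL expression.
The following is a minimal sketch, assuming a compacted topic named `compacted`; the `finder` bean and its `PartitionFinder` helper class are illustrative names:

====
[source, java]
----
@KafkaListener(topicPartitions = @TopicPartition(topic = "compacted",
        partitions = "#{@finder.partitions('compacted')}"))
public void listen(String in) {
    // process each record from every partition of the compacted topic
}

@Bean
public PartitionFinder finder(ConsumerFactory<String, String> consumerFactory) {
    return new PartitionFinder(consumerFactory);
}

public static class PartitionFinder {

    private final ConsumerFactory<String, String> consumerFactory;

    public PartitionFinder(ConsumerFactory<String, String> consumerFactory) {
        this.consumerFactory = consumerFactory;
    }

    public String[] partitions(String topic) {
        // use a short-lived consumer to ask the broker for the topic's partitions
        try (Consumer<String, String> consumer = this.consumerFactory.createConsumer()) {
            return consumer.partitionsFor(topic).stream()
                    .map(pi -> "" + pi.partition())
                    .toArray(String[]::new);
        }
    }

}
----
====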

Using this in conjunction with `ConsumerConfig.AUTO_OFFSET_RESET_CONFIG=earliest` will load all records each time the application is started.
You should also set the container's `AckMode` to `MANUAL` to prevent the container from committing offsets for a `null` consumer group.
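
For example, a container factory configured for manual acknowledgment might look like the following minimal sketch (the `manualAckFactory` bean name is illustrative):

====
[source, java]
----
@Bean
public ConcurrentKafkaListenerContainerFactory<String, String> manualAckFactory(
        ConsumerFactory<String, String> consumerFactory) {

    ConcurrentKafkaListenerContainerFactory<String, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory);
    // MANUAL stops the container from committing offsets on the listener's behalf
    factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
    return factory;
}
----
====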

[[ex-jdbc-sync]]
=== Example of Transaction Synchronization

The following Spring Boot application is an example of synchronizing database and Kafka transactions.
The listener for `topic1` forwards each record to `topic2` and inserts it into a database table, with both operations running under the chained transaction manager.

====
[source, java]
----
@SpringBootApplication
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    @Bean
    public ApplicationRunner runner(KafkaTemplate<String, String> template) {
        // send a test record once the application has started
        return args -> template.executeInTransaction(t -> t.send("topic1", "test"));
    }

    @Bean
    public ChainedKafkaTransactionManager<Object, Object> chainedTm(
            KafkaTransactionManager<String, String> ktm,
            DataSourceTransactionManager dstm) {

        // chain the Kafka and JDBC transaction managers so that listener
        // containers synchronize both transactions
        return new ChainedKafkaTransactionManager<>(ktm, dstm);
    }

    @Bean
    public DataSourceTransactionManager dstm(DataSource dataSource) {
        return new DataSourceTransactionManager(dataSource);
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<?, ?> kafkaListenerContainerFactory(
            ConcurrentKafkaListenerContainerFactoryConfigurer configurer,
            ConsumerFactory<Object, Object> kafkaConsumerFactory,
            ChainedKafkaTransactionManager<Object, Object> chainedTM) {

        ConcurrentKafkaListenerContainerFactory<Object, Object> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        configurer.configure(factory, kafkaConsumerFactory);
        factory.getContainerProperties().setTransactionManager(chainedTM);
        return factory;
    }

    @Component
    public static class Listener {

        private final JdbcTemplate jdbcTemplate;

        private final KafkaTemplate<String, String> kafkaTemplate;

        public Listener(JdbcTemplate jdbcTemplate, KafkaTemplate<String, String> kafkaTemplate) {
            this.jdbcTemplate = jdbcTemplate;
            this.kafkaTemplate = kafkaTemplate;
        }

        @KafkaListener(id = "group1", topics = "topic1")
        public void listen1(String in) {
            // the send and the insert are synchronized by the chained transaction manager
            this.kafkaTemplate.send("topic2", in.toUpperCase());
            this.jdbcTemplate.update("insert into mytable (data) values (?)", in);
        }

        @KafkaListener(id = "group2", topics = "topic2")
        public void listen2(String in) {
            System.out.println(in);
        }

    }

    @Bean
    public NewTopic topic1() {
        return TopicBuilder.name("topic1").build();
    }

    @Bean
    public NewTopic topic2() {
        return TopicBuilder.name("topic2").build();
    }

}
----
====

====
[source, properties]
----
spring.datasource.url=jdbc:mysql://localhost/integration?serverTimezone=UTC
spring.datasource.username=root
spring.datasource.driver-class-name=com.mysql.cj.jdbc.Driver

spring.kafka.consumer.auto-offset-reset=earliest
# offsets are committed as part of the transaction, not by the consumer
spring.kafka.consumer.enable-auto-commit=false
# only read records from committed transactions
spring.kafka.consumer.properties.isolation.level=read_committed

# setting a transaction-id-prefix enables transactions in the producer factory
spring.kafka.producer.transaction-id-prefix=tx-

#logging.level.org.springframework.transaction=trace
#logging.level.org.springframework.kafka.transaction=debug
#logging.level.org.springframework.jdbc=debug
----
====

====
[source, sql]
----
create table mytable (data varchar(20));
----
====