@@ -50,9 +50,9 @@ import org.apache.kafka.common.serialization.Serializer
  */
 @FlowPreview
 public suspend fun <A, B> Flow<ProducerRecord<A, B>>.produce(
-  settings: ProducerSettings<A, B>
+  settings: ProducerSettings<A, B>,
 ): Flow<RecordMetadata> =
-  settings.kafkaProducer().flatMapConcat { producer ->
+  kafkaProducer(settings).flatMapConcat { producer ->
     this@produce.map { record -> producer.sendAwait(record) }
   }
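With this change the `produce` terminal takes a `ProducerSettings` value directly instead of calling a member `kafkaProducer()`. A minimal caller-side sketch, not part of this diff, could look as follows; the `ProducerSettings` constructor shape, the bootstrap address, and the topic name are assumptions for illustration:

```kotlin
import kotlinx.coroutines.FlowPreview
import kotlinx.coroutines.flow.asFlow
import kotlinx.coroutines.flow.collect
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.StringSerializer

@OptIn(FlowPreview::class)
suspend fun main() {
  // Assumption: ProducerSettings takes the bootstrap servers as its first argument,
  // followed by the key and value serializers (see the data class later in this diff).
  val settings = ProducerSettings("localhost:9092", StringSerializer(), StringSerializer())

  (1..10)
    .map { i -> ProducerRecord("example-topic", "key-$i", "value #$i") }
    .asFlow()
    .produce(settings) // Flow<RecordMetadata>, one element per acknowledged record
    .collect { metadata -> println("${metadata.topic()}-${metadata.partition()}@${metadata.offset()}") }
}
```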
@@ -75,7 +75,7 @@ public suspend fun <A, B> Flow<ProducerRecord<A, B>>.produce(
  * <!--- KNIT example-producer-02.kt -->
  */
 public suspend fun <A, B> KafkaProducer<A, B>.sendAwait(
-  record: ProducerRecord<A, B>
+  record: ProducerRecord<A, B>,
 ): RecordMetadata =
   suspendCoroutine { cont ->
     // Those can be a SerializationException when it fails to serialize the message,
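The hunk cuts off inside `suspendCoroutine`; the body that follows presumably bridges Kafka's callback-based `send` into a suspending call, roughly along these lines (a sketch, not the verbatim file contents):

```kotlin
import kotlin.coroutines.resume
import kotlin.coroutines.resumeWithException
import kotlin.coroutines.suspendCoroutine
import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.clients.producer.RecordMetadata

public suspend fun <A, B> KafkaProducer<A, B>.sendAwait(
  record: ProducerRecord<A, B>,
): RecordMetadata =
  suspendCoroutine { cont ->
    // send() reports failures through its callback (e.g. SerializationException,
    // TimeoutException, InterruptException), so resume the continuation accordingly.
    send(record) { metadata, exception ->
      if (exception != null) cont.resumeWithException(exception)
      else cont.resume(metadata)
    }
  }
```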
@@ -87,23 +87,49 @@ public suspend fun <A, B> KafkaProducer<A, B>.sendAwait(
     }
   }
 
-public fun <K, V> KafkaProducer(
-  props: Properties,
-  keyDeserializer: Serializer<K>,
-  valueDeserializer: Serializer<V>
-): KafkaProducer<K, V> =
-  KafkaProducer(props, keyDeserializer, valueDeserializer)
+/**
+ * Constructs a [KafkaProducer] for [K] - [V] from the given [ProducerSettings].
+ */
+@Suppress("FunctionName")
+public fun <K, V> KafkaProducer(setting: ProducerSettings<K, V>): KafkaProducer<K, V> =
+  KafkaProducer(setting.properties(), setting.keyDeserializer, setting.valueDeserializer)
 
+/**
+ * Will automatically close, and flush when finished streaming.
+ * The [Flow] will close when the [KafkaProducer] is consumed from the [Flow].
+ *
+ * This means that the [KafkaProducer] will not be closed for a synchronous running stream, but
+ * when running the [Flow] is offloaded in a separate Coroutine it's prone to be collected, closed
+ * and flushed. In the example below we construct a producer stream that produces 100 indexed
+ * messages.
+ *
+ * ```kotlin
+ * fun <Key, Value> KafkaProducer<Key, Value>.produce(topicName: String, count: Int): Flow<Unit> =
+ *   (0..count).asFlow().map { sendAwait(ProducerRecord(topicName, "message #$it")) }
+ *
+ * val producerStream = kafkaProducer(Properties(), StringSerializer(), StringSerializer())
+ *   .flatMapConcat { producer -> producer.produce("topic-name", 100) }
+ * ```
+ *
+ * Here the `KafkaProducer` will only get collected (and closed/flushed) when all 100 messages
+ * were produced.
+ *
+ * **DO NOT** offload the producer into a buffer. If instead we'd do something like the following,
+ * the `KafkaProducer` gets collected into the buffer and is thus closed/flushed too early.
+ *
+ * ```kotlin
+ * kafkaProducer(Properties(), StringSerializer(), StringSerializer()).buffer(10)
+ * ```
+ */
 public fun <K, V> kafkaProducer(
-  props: Properties,
-  keyDeserializer: Serializer<K>,
-  valueDeserializer: Serializer<V>
+  setting: ProducerSettings<K, V>,
 ): Flow<KafkaProducer<K, V>> = flow {
-  val producer = KafkaProducer(props, keyDeserializer, valueDeserializer)
-  try {
-    producer.use { emit(it) }
-  } finally {
-    producer.flush()
+  KafkaProducer(setting).use { producer ->
+    try {
+      emit(producer)
+    } finally {
+      producer.flush()
+    }
   }
 }
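The moved KDoc example still shows the old `kafkaProducer(Properties(), …)` signature; with the settings-based entry point introduced here, the same pattern would look roughly like this (a sketch; the topic name and record contents are illustrative):

```kotlin
import kotlinx.coroutines.FlowPreview
import kotlinx.coroutines.flow.Flow
import kotlinx.coroutines.flow.asFlow
import kotlinx.coroutines.flow.flatMapConcat
import kotlinx.coroutines.flow.map
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.clients.producer.RecordMetadata

@OptIn(FlowPreview::class)
fun produceIndexed(settings: ProducerSettings<String, String>, count: Int): Flow<RecordMetadata> =
  kafkaProducer(settings).flatMapConcat { producer ->
    // The producer is only closed (and flushed) once this inner flow has completed,
    // i.e. after all `count` records have been sent and acknowledged.
    (1..count).asFlow().map { i ->
      producer.sendAwait(ProducerRecord("topic-name", "key-$i", "message #$i"))
    }
  }
```

Inserting a `buffer` between `kafkaProducer(settings)` and the sends would, as the moved KDoc warns, let the producer be collected and closed before any record is produced.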
@@ -115,6 +141,9 @@ public enum class Acks(public val value: String) {
 }
 
 /**
+ * A type-safe constructor for [KafkaProducer] settings.
+ * It forces you to specify the bootstrapServer, and serializers for [K] and [V].
+ * These are the minimum requirements for constructing a valid [KafkaProducer].
  *
  * @see http://kafka.apache.org/documentation.html#producerconfigs
  */
@@ -123,7 +152,7 @@ public data class ProducerSettings<K, V>(
   val keyDeserializer: Serializer<K>,
   val valueDeserializer: Serializer<V>,
   val acks: Acks = Acks.One,
-  val other: Properties? = null
+  val other: Properties? = null,
 ) {
   public fun properties(): Properties =
     Properties().apply {
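A small sketch of how these settings fold into `Properties`; only the `ACKS_CONFIG` entry and the `other` merge are visible in this diff, and the bootstrap-servers position in the constructor is an assumption:

```kotlin
import java.util.Properties
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.serialization.StringSerializer

val extra = Properties().apply {
  put(ProducerConfig.LINGER_MS_CONFIG, "5")
  put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip")
}

val settings = ProducerSettings(
  "localhost:9092",   // assumed first parameter: bootstrap servers
  StringSerializer(),
  StringSerializer(),
  acks = Acks.One,    // default shown in this diff
  other = extra,
)

// properties() combines the typed settings with any `other` overrides into a single
// Properties instance, including ProducerConfig.ACKS_CONFIG = acks.value.
val props: Properties = settings.properties()
```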
@@ -133,34 +162,4 @@ public data class ProducerSettings<K, V>(
       put(ProducerConfig.ACKS_CONFIG, acks.value)
       other?.let { putAll(other) }
     }
-
-  /**
-   * Will automatically close, and flush when finished streaming.
-   * The [Flow] will close when the [KafkaProducer] is consumed from the [Flow].
-   *
-   * This means that the [KafkaProducer] will not be closed for a synchronous running stream, but
-   * when running the [Flow] is offloaded in a separate Coroutine it's prone to be collected, closed
-   * and flushed. In the example below we construct a producer stream that produces 100 indexed
-   * messages.
-   *
-   * ```kotlin
-   * fun <Key, Value> KafkaProducer<Key, Value>.produce(topicName: String, count: Int): Flow<Unit> =
-   *   (0..count).asFlow().map { sendAwait(ProducerRecord(topicName, "message #it")) }
-   *
-   * val producerStream = kafkaProducer(Properties(), StringSerializer(), StringSerializer())
-   *   .flatMapConcat { producer -> producer.produce("topic-name", 100) }
-   * ```
-   *
-   * Here the `KafkaProducer` will only get collected (and closed/flushed) when all 100 messages
-   * were produced.
-   *
-   * **DO NOT** If instead we'd do something like the following, where we offload in a buffer then
-   * the `KafkaProducer` gets collected into the buffer and thus closed/flushed.
-   *
-   * ```kotlin
-   * kafkaProducer(Properties(), StringSerializer(), StringSerializer()).buffer(10)
-   * ```
-   */
-  public fun kafkaProducer(): Flow<KafkaProducer<K, V>> =
-    kafkaProducer(properties(), keyDeserializer, valueDeserializer)
 }