Commit b4d37bc

Test: Improved test speed by refactoring getProduced
getProduced can now check whether it already contains the expected results and return early, rather than waiting for the max retry count.
1 parent a820953 · commit b4d37bc
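
The heart of the change is a window check inside getProduced's polling loop (full diff in MongoKafkaTestCase.java below): as soon as the consumed records contain a contiguous run bounded by the first and last expected elements, the method returns that run instead of burning through the remaining retries. A minimal sketch of just that check, using a hypothetical findExpectedWindow helper that is not part of the test code:

import java.util.List;

// Sketch only: mirrors the early-return logic added to getProduced below.
// `data` holds everything consumed so far (already mapped from Bytes) and
// `expected` is what the test is waiting for; returns the matching window,
// or null to signal "keep polling".
final class ProducedWindowSketch {

  static <T> List<T> findExpectedWindow(final List<T> data, final List<T> expected) {
    if (expected.isEmpty()) {
      return null; // nothing to match; the real loop just runs down its retry budget
    }
    int firstExpectedIndex = data.lastIndexOf(expected.get(0));
    int lastExpectedIndex = data.lastIndexOf(expected.get(expected.size() - 1));
    int windowSize = lastExpectedIndex - firstExpectedIndex + 1;
    // A run of exactly the expected length, bounded by the first and last expected
    // elements, means the results have arrived and polling can stop.
    if (firstExpectedIndex > -1 && lastExpectedIndex > -1 && windowSize == expected.size()) {
      return data.subList(firstExpectedIndex, lastExpectedIndex + 1);
    }
    return null;
  }
}

getProduced still caps the wait with a maxRetryCount parameter (default 15), so a missing record fails in bounded time rather than hanging.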

4 files changed: +64 -56 lines

src/integrationTest/java/com/mongodb/kafka/connect/MongoSourceConnectorTest.java

Lines changed: 1 addition & 1 deletion
@@ -488,7 +488,7 @@ void testSourceSurvivesARestartWhenCopyingExisting() {
     List<ChangeStreamOperation> inserts = createInserts(1, 5000);
     inserts.addAll(createInserts(10001, 10050));
 
-    assertEventuallyProduces(inserts, coll1);
+    assertProduced(inserts, coll1.getNamespace().getFullName(), 60);
   }
 
   @Test

src/integrationTest/java/com/mongodb/kafka/connect/embedded/EmbeddedKafka.java

Lines changed: 3 additions & 3 deletions
@@ -280,14 +280,14 @@ public void beforeAll(final ExtensionContext context) throws Exception {
   }
 
   @Override
-  public void afterEach(final ExtensionContext context) throws InterruptedException {
+  public void afterEach(final ExtensionContext context) {
     deleteSinkConnector();
     deleteSourceConnector();
-    deleteTopicsAndWait(Duration.ofMinutes(4));
   }
 
   @Override
-  public void afterAll(final ExtensionContext context) {
+  public void afterAll(final ExtensionContext context) throws InterruptedException {
+    deleteTopicsAndWait(Duration.ofMinutes(4));
     stop();
   }
 
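
Moving deleteTopicsAndWait, which waits up to four minutes, from afterEach to afterAll means topics are now cleaned up once per run instead of after every test; presumably this accounts for much of the speed-up outside getProduced itself.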

src/integrationTest/java/com/mongodb/kafka/connect/mongodb/ChangeStreamOperations.java

Lines changed: 5 additions & 1 deletion
@@ -22,6 +22,8 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import org.apache.kafka.common.utils.Bytes;
+
 import org.bson.Document;
 
 public class ChangeStreamOperations {
@@ -55,7 +57,9 @@ public static ChangeStreamOperation createInsert(final int id) {
     return new Insert(id);
   }
 
-  public static ChangeStreamOperation createChangeStreamOperation(final String changeStreamJson) {
+  public static ChangeStreamOperation createChangeStreamOperation(
+      final Bytes changeStreamJsonBytes) {
+    String changeStreamJson = changeStreamJsonBytes.toString();
     Document document = Document.parse(changeStreamJson);
     ChangeStreamOperation changeStreamOperation;
     switch (document.get("operationType", "unknown").toLowerCase()) {
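
Accepting Bytes directly lets the factory be used as a Function<Bytes, ChangeStreamOperation> mapper (see the ChangeStreamOperations::createChangeStreamOperation method reference in MongoKafkaTestCase below), rather than every caller converting the record value to a JSON string first.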

src/integrationTest/java/com/mongodb/kafka/connect/mongodb/MongoKafkaTestCase.java

Lines changed: 55 additions & 51 deletions
@@ -16,19 +16,19 @@
 package com.mongodb.kafka.connect.mongodb;
 
 import static com.mongodb.kafka.connect.mongodb.ChangeStreamOperations.ChangeStreamOperation;
-import static com.mongodb.kafka.connect.mongodb.ChangeStreamOperations.createChangeStreamOperation;
 import static java.lang.String.format;
 import static java.util.Collections.singletonList;
 import static org.apache.kafka.common.utils.Utils.sleep;
-import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertIterableEquals;
 
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Properties;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;
 import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 
 import io.confluent.connect.avro.AvroConverter;
 
@@ -57,6 +57,8 @@ public class MongoKafkaTestCase {
   protected static final Logger LOGGER = LoggerFactory.getLogger(MongoKafkaTestCase.class);
   protected static final AtomicInteger POSTFIX = new AtomicInteger();
 
+  private static final int DEFAULT_MAX_RETRIES = 15;
+
   @RegisterExtension public static final EmbeddedKafka KAFKA = new EmbeddedKafka();
   @RegisterExtension public static final MongoDBHelper MONGODB = new MongoDBHelper();
 
@@ -108,88 +110,90 @@ public boolean isGreaterThanThreeDotSix() {
   }
 
   public void assertProduced(final String topicName, final int expectedCount) {
-    assertEquals(expectedCount, getProduced(topicName, expectedCount).size());
+    List<Integer> expected = IntStream.range(1, expectedCount).boxed().collect(Collectors.toList());
+    AtomicInteger counter = new AtomicInteger();
+    List<Integer> produced =
+        getProduced(topicName, b -> counter.addAndGet(1), expected, DEFAULT_MAX_RETRIES);
+    assertIterableEquals(expected, produced);
   }
 
   public void assertProduced(
       final List<ChangeStreamOperation> operationTypes, final MongoCollection<?> coll) {
-    assertProduced(operationTypes, coll.getNamespace().getFullName());
+    assertProduced(operationTypes, coll, DEFAULT_MAX_RETRIES);
   }
 
   public void assertProduced(
-      final List<ChangeStreamOperation> operationTypes, final String topicName) {
-    List<ChangeStreamOperation> produced =
-        getProduced(topicName, operationTypes.size()).stream()
-            .map((b) -> createChangeStreamOperation(b.toString()))
-            .collect(Collectors.toList());
-    assertIterableEquals(operationTypes, produced);
+      final List<ChangeStreamOperation> operationTypes,
+      final MongoCollection<?> coll,
+      final int maxRetryCount) {
+    assertProduced(operationTypes, coll.getNamespace().getFullName(), maxRetryCount);
   }
 
-  public void assertEventuallyProduces(
-      final List<ChangeStreamOperation> operationTypes, final MongoCollection<?> coll) {
-    assertEventuallyProduces(operationTypes, coll.getNamespace().getFullName());
+  public void assertProduced(
+      final List<ChangeStreamOperation> operationTypes, final String topicName) {
+    assertProduced(operationTypes, topicName, DEFAULT_MAX_RETRIES);
   }
 
-  public void assertEventuallyProduces(
-      final List<ChangeStreamOperation> operationTypes, final String topicName) {
+  public void assertProduced(
+      final List<ChangeStreamOperation> operationTypes,
+      final String topicName,
+      final int maxRetryCount) {
     List<ChangeStreamOperation> produced =
-        getProduced(topicName, Integer.MAX_VALUE).stream()
-            .map((b) -> createChangeStreamOperation(b.toString()))
-            .collect(Collectors.toList());
-
-    if (produced.size() > operationTypes.size()) {
-      boolean startsWith =
-          produced
-              .get(operationTypes.size() - 1)
-              .equals(operationTypes.get(operationTypes.size() - 1));
-      if (startsWith) {
-        assertIterableEquals(operationTypes, produced.subList(0, operationTypes.size()));
-      } else {
-        assertIterableEquals(
+        getProduced(
+            topicName,
+            ChangeStreamOperations::createChangeStreamOperation,
             operationTypes,
-            produced.subList(produced.lastIndexOf(operationTypes.get(0)), produced.size()));
-      }
-    } else {
-      assertIterableEquals(operationTypes, produced);
-    }
+            maxRetryCount);
+    assertIterableEquals(operationTypes, produced);
   }
 
   public void assertProducedDocs(final List<Document> docs, final MongoCollection<?> coll) {
-    assertEquals(
-        docs,
-        getProduced(coll.getNamespace().getFullName(), docs.size()).stream()
-            .map((b) -> Document.parse(b.toString()))
-            .collect(Collectors.toList()));
+    List<Document> produced =
+        getProduced(
+            coll.getNamespace().getFullName(),
+            b -> Document.parse(b.toString()),
+            docs,
+            DEFAULT_MAX_RETRIES);
+    assertIterableEquals(docs, produced);
   }
 
-  public List<Bytes> getProduced(final String topicName, final int expectedCount) {
-    if (expectedCount != Integer.MAX_VALUE) {
-      LOGGER.info("Subscribing to {} expecting to see #{}", topicName, expectedCount);
-    } else {
-      LOGGER.info("Subscribing to {} getting all messages", topicName);
-    }
+  public <T> List<T> getProduced(
+      final String topicName,
+      final Function<Bytes, T> mapper,
+      final List<T> expected,
+      final int maxRetryCount) {
+    LOGGER.info("Subscribing to {}", topicName);
 
     try (KafkaConsumer<?, ?> consumer = createConsumer()) {
      consumer.subscribe(singletonList(topicName));
-      List<Bytes> data = new ArrayList<>();
+      List<T> data = new ArrayList<>();
+      T firstExpected = expected.isEmpty() ? null : expected.get(0);
+      T lastExpected = expected.isEmpty() ? null : expected.get(expected.size() - 1);
       int counter = 0;
       int retryCount = 0;
       int previousDataSize;
-      while (data.size() < expectedCount && retryCount < 30) {
+
+      while (retryCount < maxRetryCount) {
         counter++;
-        LOGGER.info("Polling {} ({}) seen: #{}", topicName, counter, data.size());
         previousDataSize = data.size();
-
         consumer
             .poll(Duration.ofSeconds(2))
             .records(topicName)
-            .forEach((r) -> data.add((Bytes) r.value()));
+            .forEach((r) -> data.add(mapper.apply((Bytes) r.value())));
+
+        int firstExpectedIndex = data.lastIndexOf(firstExpected);
+        int lastExpectedIndex = data.lastIndexOf(lastExpected);
+        int dataSize = lastExpectedIndex - firstExpectedIndex + 1;
+        if (firstExpectedIndex > -1 && lastExpectedIndex > -1 && dataSize == expected.size()) {
+          return data.subList(firstExpectedIndex, lastExpectedIndex + 1);
        }
 
         // Wait at least 3 minutes for the first set of data to arrive
-        if (data.size() > 0 || counter > 90) {
-          retryCount = data.size() == previousDataSize ? retryCount + 1 : 0;
+        if (expected.size() == 0 || data.size() > 0 || counter > 90) {
+          retryCount += previousDataSize == data.size() ? 1 : 0;
         }
       }
+
      return data;
    }
  }
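
Because the window is anchored on lastIndexOf of the first and last expected elements, any extra records at the head of the topic (for example, left over from an earlier phase of a test) are skipped; this is, in effect, what the removed assertEventuallyProduces sublist logic used to do by hand.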
