kafka-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From damian...@apache.org
Subject kafka git commit: KAFKA-4643; Improve test coverage of StreamsKafkaClient
Date Mon, 14 Aug 2017 17:24:47 GMT
Repository: kafka
Updated Branches:
  refs/heads/trunk 21ea4b1d2 -> c0f7a7705


KAFKA-4643; Improve test coverage of StreamsKafkaClient

The commit brings improved test coverage for StreamsKafkaClientTest.java

Author: Andrey Dyachkov <andrey.dyachkov@zalando.de>

Reviewers: Damian Guy <damian.guy@gmail.com>

Closes #3663 from adyach/kafka-4643


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/c0f7a770
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/c0f7a770
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/c0f7a770

Branch: refs/heads/trunk
Commit: c0f7a7705851eec4a77d3e42cf6bf2546c07ffa8
Parents: 21ea4b1
Author: Andrey Dyachkov <andrey.dyachkov@zalando.de>
Authored: Mon Aug 14 18:24:43 2017 +0100
Committer: Damian Guy <damian.guy@gmail.com>
Committed: Mon Aug 14 18:24:43 2017 +0100

----------------------------------------------------------------------
 .../internals/StreamsKafkaClientTest.java       | 48 ++++++++++++++++++++
 1 file changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kafka/blob/c0f7a770/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsKafkaClientTest.java
----------------------------------------------------------------------
diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsKafkaClientTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsKafkaClientTest.java
index 6c4342f..7a75b81 100644
--- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsKafkaClientTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StreamsKafkaClientTest.java
@@ -18,17 +18,23 @@ package org.apache.kafka.streams.processor.internals;
 
 import org.apache.kafka.clients.MockClient;
 import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.config.AbstractConfig;
 import org.apache.kafka.common.config.SaslConfigs;
 import org.apache.kafka.common.config.TopicConfig;
 import org.apache.kafka.common.metrics.MetricsReporter;
+import org.apache.kafka.common.protocol.ApiKeys;
+import org.apache.kafka.common.protocol.Errors;
 import org.apache.kafka.common.requests.AbstractRequest;
 import org.apache.kafka.common.requests.ApiError;
+import org.apache.kafka.common.requests.ApiVersionsResponse;
 import org.apache.kafka.common.requests.CreateTopicsRequest;
 import org.apache.kafka.common.requests.CreateTopicsResponse;
 import org.apache.kafka.common.requests.MetadataResponse;
+import org.apache.kafka.common.requests.ProduceResponse;
 import org.apache.kafka.common.utils.MockTime;
 import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.errors.StreamsException;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -124,6 +130,48 @@ public class StreamsKafkaClientTest {
         verifyCorrectTopicConfigs(streamsKafkaClient, topicConfigWithNoOverrides, Collections.singletonMap("cleanup.policy", "delete"));
     }
 
+    @Test(expected = StreamsException.class)
+    public void shouldThrowStreamsExceptionOnEmptyBrokerCompatibilityResponse() {
+        kafkaClient.prepareResponse(null);
+        final StreamsKafkaClient streamsKafkaClient = createStreamsKafkaClient();
+        streamsKafkaClient.checkBrokerCompatibility(false);
+    }
+
+    @Test(expected = StreamsException.class)
+    public void shouldThrowStreamsExceptionWhenBrokerCompatibilityResponseInconsistent() {
+        kafkaClient.prepareResponse(new ProduceResponse(Collections.<TopicPartition, ProduceResponse.PartitionResponse>emptyMap()));
+        final StreamsKafkaClient streamsKafkaClient = createStreamsKafkaClient();
+        streamsKafkaClient.checkBrokerCompatibility(false);
+    }
+
+    @Test(expected = StreamsException.class)
+    public void shouldRequireBrokerVersion0101OrHigherWhenEosDisabled() {
+        kafkaClient.prepareResponse(new ApiVersionsResponse(Errors.NONE, Collections.singletonList(new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE))));
+        final StreamsKafkaClient streamsKafkaClient = createStreamsKafkaClient();
+        streamsKafkaClient.checkBrokerCompatibility(false);
+    }
+
+    @Test(expected = StreamsException.class)
+    public void shouldRequireBrokerVersions0110OrHigherWhenEosEnabled() {
+        kafkaClient.prepareResponse(new ApiVersionsResponse(Errors.NONE, Collections.singletonList(new ApiVersionsResponse.ApiVersion(ApiKeys.CREATE_TOPICS))));
+        final StreamsKafkaClient streamsKafkaClient = createStreamsKafkaClient();
+        streamsKafkaClient.checkBrokerCompatibility(true);
+    }
+
+    @Test(expected = StreamsException.class)
+    public void shouldThrowStreamsExceptionOnEmptyFetchMetadataResponse() {
+        kafkaClient.prepareResponse(null);
+        final StreamsKafkaClient streamsKafkaClient = createStreamsKafkaClient();
+        streamsKafkaClient.fetchMetadata();
+    }
+
+    @Test(expected = StreamsException.class)
+    public void shouldThrowStreamsExceptionWhenFetchMetadataResponseInconsistent() {
+        kafkaClient.prepareResponse(new ProduceResponse(Collections.<TopicPartition, ProduceResponse.PartitionResponse>emptyMap()));
+        final StreamsKafkaClient streamsKafkaClient = createStreamsKafkaClient();
+        streamsKafkaClient.fetchMetadata();
+    }
+
     private void verifyCorrectTopicConfigs(final StreamsKafkaClient streamsKafkaClient,
                                            final InternalTopicConfig internalTopicConfig,
                                            final Map<String, String> expectedConfigs) {


Mime
View raw message