kafka-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From guozh...@apache.org
Subject [kafka] branch 1.1 updated: KAFKA-7284: streams should unwrap fenced exception (#5513)
Date Wed, 15 Aug 2018 22:49:06 GMT
This is an automated email from the ASF dual-hosted git repository.

guozhang pushed a commit to branch 1.1
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/1.1 by this push:
     new b3382cf  KAFKA-7284: streams should unwrap fenced exception (#5513)
b3382cf is described below

commit b3382cf636548ed3f23cdd88b36d8fc27e290c2e
Author: John Roesler <vvcephei@users.noreply.github.com>
AuthorDate: Wed Aug 15 17:48:58 2018 -0500

    KAFKA-7284: streams should unwrap fenced exception (#5513)
    
    Reviewers: Guozhang Wang <wangguoz@gmail.com>
---
 .../kafka/clients/producer/MockProducer.java       | 12 ++++++--
 .../kafka/clients/producer/MockProducerTest.java   | 16 +++-------
 .../processor/internals/RecordCollectorImpl.java   | 34 ++++++++++++++--------
 .../processor/internals/RecordCollectorTest.java   | 24 +++++++++++++++
 4 files changed, 59 insertions(+), 27 deletions(-)

diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
index d2a84c6..a72714b 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
@@ -21,6 +21,7 @@ import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
 import org.apache.kafka.clients.producer.internals.FutureRecordMetadata;
 import org.apache.kafka.clients.producer.internals.ProduceRequestResult;
 import org.apache.kafka.common.Cluster;
+import org.apache.kafka.common.KafkaException;
 import org.apache.kafka.common.Metric;
 import org.apache.kafka.common.MetricName;
 import org.apache.kafka.common.PartitionInfo;
@@ -205,7 +206,7 @@ public class MockProducer<K, V> implements Producer<K, V> {
         this.transactionInFlight = false;
     }
 
-    private void verifyProducerState() {
+    private synchronized void verifyProducerState() {
         if (this.closed) {
             throw new IllegalStateException("MockProducer is already closed.");
         }
@@ -243,7 +244,12 @@ public class MockProducer<K, V> implements Producer<K, V> {
      */
     @Override
     public synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
-        verifyProducerState();
+        if (this.closed) {
+            throw new IllegalStateException("MockProducer is already closed.");
+        }
+        if (this.producerFenced) {
+            throw new KafkaException("MockProducer is fenced.", new ProducerFencedException("Fenced"));
+        }
         int partition = 0;
         if (!this.cluster.partitionsForTopic(record.topic()).isEmpty())
             partition = partition(record, this.cluster);
@@ -313,7 +319,7 @@ public class MockProducer<K, V> implements Producer<K, V> {
         return this.closed;
     }
 
-    public void fenceProducer() {
+    public synchronized void fenceProducer() {
         verifyProducerState();
         verifyTransactionsInitialized();
         this.producerFenced = true;
diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
index 27fac28..ee4803f 100644
--- a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
+++ b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java
@@ -19,6 +19,7 @@ package org.apache.kafka.clients.producer;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
 import org.apache.kafka.common.Cluster;
+import org.apache.kafka.common.KafkaException;
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
@@ -267,18 +268,9 @@ public class MockProducerTest {
         try {
             producer.send(null);
             fail("Should have thrown as producer is fenced off");
-        } catch (ProducerFencedException e) { }
-    }
-
-    @Test
-    public void shouldThrowOnFlushIfProducerGotFenced() {
-        buildMockProducer(true);
-        producer.initTransactions();
-        producer.fenceProducer();
-        try {
-            producer.flush();
-            fail("Should have thrown as producer is fenced off");
-        } catch (ProducerFencedException e) { }
+        } catch (KafkaException e) {
+            assertTrue("The root cause of the exception should be ProducerFenced", e.getCause() instanceof ProducerFencedException);
+        }
     }
 
     @Test
diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java
index afdadf2..44b2089 100644
--- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java
+++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/RecordCollectorImpl.java
@@ -23,14 +23,14 @@ import org.apache.kafka.clients.producer.RecordMetadata;
 import org.apache.kafka.common.KafkaException;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.AuthorizationException;
 import org.apache.kafka.common.errors.AuthenticationException;
+import org.apache.kafka.common.errors.AuthorizationException;
 import org.apache.kafka.common.errors.InvalidTopicException;
 import org.apache.kafka.common.errors.OffsetMetadataTooLarge;
 import org.apache.kafka.common.errors.ProducerFencedException;
 import org.apache.kafka.common.errors.RetriableException;
-import org.apache.kafka.common.errors.SerializationException;
 import org.apache.kafka.common.errors.SecurityDisabledException;
+import org.apache.kafka.common.errors.SerializationException;
 import org.apache.kafka.common.errors.TimeoutException;
 import org.apache.kafka.common.errors.UnknownServerException;
 import org.apache.kafka.common.serialization.Serializer;
@@ -193,16 +193,26 @@ public class RecordCollectorImpl implements RecordCollector {
                "You can increase producer parameter `max.block.ms` to increase this timeout.", topic);
            throw new StreamsException(String.format("%sFailed to send record to topic %s due to timeout.", logPrefix, topic));
         } catch (final Exception uncaughtException) {
-            throw new StreamsException(
-                String.format(EXCEPTION_MESSAGE,
-                              logPrefix,
-                              "an error caught",
-                              key,
-                              value,
-                              timestamp,
-                              topic,
-                              uncaughtException.getMessage()),
-                uncaughtException);
+            if (uncaughtException instanceof KafkaException &&
+                uncaughtException.getCause() instanceof ProducerFencedException) {
+                final KafkaException kafkaException = (KafkaException) uncaughtException;
+                // producer.send() call may throw a KafkaException which wraps a FencedException,
+                // in this case we should throw its wrapped inner cause so that it can be captured and re-wrapped as TaskMigrationException
+                throw (ProducerFencedException) kafkaException.getCause();
+            } else {
+                throw new StreamsException(
+                    String.format(
+                        EXCEPTION_MESSAGE,
+                        logPrefix,
+                        "an error caught",
+                        key,
+                        value,
+                        timestamp,
+                        topic,
+                        uncaughtException.getMessage()
+                    ),
+                    uncaughtException);
+            }
         }
     }
 
diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java
index 39fe7ab..68c779b 100644
--- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/RecordCollectorTest.java
@@ -26,6 +26,7 @@ import org.apache.kafka.common.KafkaException;
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.ProducerFencedException;
 import org.apache.kafka.common.serialization.ByteArraySerializer;
 import org.apache.kafka.common.serialization.StringSerializer;
 import org.apache.kafka.common.utils.LogContext;
@@ -299,4 +300,27 @@ public class RecordCollectorTest {
             new AlwaysContinueProductionExceptionHandler());
         collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
     }
+
+    @Test
+    public void shouldUnwrapAndThrowProducerFencedExceptionFromCallToSend() {
+        final MockProducer<byte[], byte[]> producer =
+            new MockProducer<>(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer);
+
+        final RecordCollector collector = new RecordCollectorImpl(
+            producer,
+            "test",
+            logContext,
+            new DefaultProductionExceptionHandler()
+        );
+
+        producer.initTransactions();
+        producer.fenceProducer();
+
+        try {
+            collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
+            fail("expected a ProducerFencedException");
+        } catch (ProducerFencedException pfe) {
+            // expected
+        }
+    }
 }


Mime
View raw message