kafka-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From chia7...@apache.org
Subject [kafka] branch trunk updated: MINOR: Remove unused parameters in functions. (#10035)
Date Wed, 10 Feb 2021 03:19:34 GMT
This is an automated email from the ASF dual-hosted git repository.

chia7712 pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 7583e14  MINOR: Remove unused parameters in functions. (#10035)
7583e14 is described below

commit 7583e14fb20b34a044d92dcf6b078456bc4f6903
Author: Kamal Chandraprakash <kamal.chandraprakash@gmail.com>
AuthorDate: Wed Feb 10 08:48:13 2021 +0530

    MINOR: Remove unused parameters in functions. (#10035)
    
    Reviewers: Chia-Ping Tsai <chia7712@gmail.com>
---
 core/src/main/scala/kafka/server/KafkaApis.scala | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala
index 938c401..745f26d 100644
--- a/core/src/main/scala/kafka/server/KafkaApis.scala
+++ b/core/src/main/scala/kafka/server/KafkaApis.scala
@@ -720,7 +720,7 @@ class KafkaApis(val requestChannel: RequestChannel,
       }
     }
 
-    def maybeDownConvertStorageError(error: Errors, version: Short): Errors = {
+    def maybeDownConvertStorageError(error: Errors): Errors = {
       // If consumer sends FetchRequest V5 or earlier, the client library is not guaranteed to recognize the error code
       // for KafkaStorageException. In this case the client library will translate KafkaStorageException to
       // UnknownServerException which is not retriable. We can ensure that consumer will update metadata and retry
@@ -771,7 +771,7 @@ class KafkaApis(val requestChannel: RequestChannel,
                 // as possible. With KIP-283, we have the ability to lazily down-convert in a chunked manner. The lazy, chunked
                 // down-conversion always guarantees that at least one batch of messages is down-converted and sent out to the
                 // client.
-                val error = maybeDownConvertStorageError(partitionData.error, versionId)
+                val error = maybeDownConvertStorageError(partitionData.error)
                 new FetchResponse.PartitionData[BaseRecords](error, partitionData.highWatermark,
                   partitionData.lastStableOffset, partitionData.logStartOffset,
                   partitionData.preferredReadReplica, partitionData.abortedTransactions,
@@ -783,7 +783,7 @@ class KafkaApis(val requestChannel: RequestChannel,
               }
             }
           case None =>
-            val error = maybeDownConvertStorageError(partitionData.error, versionId)
+            val error = maybeDownConvertStorageError(partitionData.error)
             new FetchResponse.PartitionData[BaseRecords](error,
               partitionData.highWatermark,
               partitionData.lastStableOffset,
@@ -805,7 +805,7 @@ class KafkaApis(val requestChannel: RequestChannel,
         val lastStableOffset = data.lastStableOffset.getOrElse(FetchResponse.INVALID_LAST_STABLE_OFFSET)
         if (data.isReassignmentFetch)
           reassigningPartitions.add(tp)
-        val error = maybeDownConvertStorageError(data.error, versionId)
+        val error = maybeDownConvertStorageError(data.error)
         partitions.put(tp, new FetchResponse.PartitionData(
           error,
           data.highWatermark,


Mime
View raw message