kafka-commits mailing list archives

From j...@apache.org
Subject [kafka] branch trunk updated: MINOR: A few cleanups and compiler warning fixes (#6986)
Date Mon, 08 Jul 2019 23:53:21 GMT
This is an automated email from the ASF dual-hosted git repository.

jgus pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 05cba28  MINOR: A few cleanups and compiler warning fixes (#6986)
05cba28 is described below

commit 05cba28ca7aafd3974e9e818be08f239b6162855
Author: Lee Dongjin <dongjin@apache.org>
AuthorDate: Tue Jul 9 08:53:02 2019 +0900

    MINOR: A few cleanups and compiler warning fixes (#6986)
    
    Reviewers: Jason Gustafson <jason@confluent.io>
---
 .../src/main/java/org/apache/kafka/common/utils/Utils.java   | 10 ----------
 .../kafka/common/record/ByteBufferLogInputStreamTest.java    |  7 +++----
 core/src/main/scala/kafka/log/LogConfig.scala                |  2 +-
 core/src/test/scala/unit/kafka/log/LogValidatorTest.scala    | 12 +-----------
 .../src/test/scala/unit/kafka/log/TransactionIndexTest.scala |  2 +-
 core/src/test/scala/unit/kafka/security/auth/AclTest.scala   |  2 +-
 6 files changed, 7 insertions(+), 28 deletions(-)

diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java
index caca1a8..eba5a01 100755
--- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java
+++ b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java
@@ -603,15 +603,6 @@ public final class Utils {
     }
 
     /**
-     * Print an error message and shutdown the JVM
-     * @param message The error message
-     */
-    public static void croak(String message) {
-        System.err.println(message);
-        Exit.exit(1);
-    }
-
-    /**
      * Read a buffer into a Byte array for the given offset and length
      */
     public static byte[] readBytes(ByteBuffer buffer, int offset, int length) {
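
The croak helper above is removed because it has no remaining callers. Any code that still wants the behavior can inline it against the same Exit utility the helper used; a minimal sketch (the FatalExit class name is hypothetical, not part of this commit):

    import org.apache.kafka.common.utils.Exit;

    public final class FatalExit {
        // Inline equivalent of the removed Utils.croak(String): print the
        // error message, then terminate the JVM with a non-zero status.
        public static void croak(String message) {
            System.err.println(message);
            Exit.exit(1);
        }
    }
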
@@ -869,7 +860,6 @@ public final class Utils {
         }
     }
 
-
     /**
      * A cheap way to deterministically convert a number to a positive value. When the input is
      * positive, the original value is returned. When the input number is negative, the returned
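
The comment above (truncated by the hunk boundary) describes a sign-bit mask rather than a true absolute value: positive inputs pass through unchanged, negative inputs are ANDed against 0x7fffffff. A standalone sketch of that trick, under the assumption that this is the masking approach the comment refers to (hypothetical class name):

    public final class PositiveHash {
        // Clear the sign bit: positive inputs are returned unchanged, and
        // negative inputs map to a positive value that is NOT the absolute
        // value. Handy for turning a hash code into a non-negative index.
        public static int toPositive(int number) {
            return number & 0x7fffffff;
        }
    }
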
diff --git a/clients/src/test/java/org/apache/kafka/common/record/ByteBufferLogInputStreamTest.java b/clients/src/test/java/org/apache/kafka/common/record/ByteBufferLogInputStreamTest.java
index 3745006..063e188 100644
--- a/clients/src/test/java/org/apache/kafka/common/record/ByteBufferLogInputStreamTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/record/ByteBufferLogInputStreamTest.java
@@ -19,7 +19,6 @@ package org.apache.kafka.common.record;
 import org.apache.kafka.common.errors.CorruptRecordException;
 import org.junit.Test;
 
-import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Iterator;
 
@@ -56,7 +55,7 @@ public class ByteBufferLogInputStreamTest {
     }
 
     @Test(expected = CorruptRecordException.class)
-    public void iteratorRaisesOnTooSmallRecords() throws IOException {
+    public void iteratorRaisesOnTooSmallRecords() {
         ByteBuffer buffer = ByteBuffer.allocate(1024);
         MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
         builder.append(15L, "a".getBytes(), "1".getBytes());
@@ -79,7 +78,7 @@ public class ByteBufferLogInputStreamTest {
     }
 
     @Test(expected = CorruptRecordException.class)
-    public void iteratorRaisesOnInvalidMagic() throws IOException {
+    public void iteratorRaisesOnInvalidMagic() {
         ByteBuffer buffer = ByteBuffer.allocate(1024);
         MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
         builder.append(15L, "a".getBytes(), "1".getBytes());
@@ -102,7 +101,7 @@ public class ByteBufferLogInputStreamTest {
     }
 
     @Test(expected = CorruptRecordException.class)
-    public void iteratorRaisesOnTooLargeRecords() throws IOException {
+    public void iteratorRaisesOnTooLargeRecords() {
         ByteBuffer buffer = ByteBuffer.allocate(1024);
         MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
         builder.append(15L, "a".getBytes(), "1".getBytes());
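
The three signature changes above drop a throws IOException clause that nothing in the test bodies can raise; removing it silences the corresponding lint/IDE warning. The same JUnit 4 expected-exception pattern in isolation (a hypothetical test, not from this commit):

    import org.junit.Test;

    public class ExpectedExceptionTest {
        // JUnit 4 fails this test unless the expected exception is thrown;
        // since the body throws only unchecked exceptions, no throws clause
        // is needed on the method signature.
        @Test(expected = IllegalStateException.class)
        public void raisesOnInvalidState() {
            throw new IllegalStateException("boom");
        }
    }
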
diff --git a/core/src/main/scala/kafka/log/LogConfig.scala b/core/src/main/scala/kafka/log/LogConfig.scala
index c3684e8..bcc3f12 100755
--- a/core/src/main/scala/kafka/log/LogConfig.scala
+++ b/core/src/main/scala/kafka/log/LogConfig.scala
@@ -104,7 +104,7 @@ case class LogConfig(props: java.util.Map[_, _], overriddenConfigs: Set[String]
   def randomSegmentJitter: Long =
     if (segmentJitterMs == 0) 0 else Utils.abs(scala.util.Random.nextInt()) % math.min(segmentJitterMs, segmentMs)
 
-  def maxSegmentMs :Long = {
+  def maxSegmentMs: Long = {
     if (compact && maxCompactionLagMs > 0) math.min(maxCompactionLagMs, segmentMs)
     else segmentMs
   }
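
Only the colon spacing changes here, but the method encodes a small rule: for compacted topics with a positive max compaction lag, the active segment must roll no later than that lag. The same rule as a standalone Java sketch (hypothetical signature, for illustration only):

    // Sketch of the LogConfig.maxSegmentMs rule: with compaction enabled and
    // a positive max compaction lag, the active segment rolls after at most
    // min(maxCompactionLagMs, segmentMs); otherwise segmentMs applies.
    static long maxSegmentMs(boolean compact, long maxCompactionLagMs, long segmentMs) {
        return (compact && maxCompactionLagMs > 0)
            ? Math.min(maxCompactionLagMs, segmentMs)
            : segmentMs;
    }
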
diff --git a/core/src/test/scala/unit/kafka/log/LogValidatorTest.scala b/core/src/test/scala/unit/kafka/log/LogValidatorTest.scala
index 26c1e5f..e2a8e17 100644
--- a/core/src/test/scala/unit/kafka/log/LogValidatorTest.scala
+++ b/core/src/test/scala/unit/kafka/log/LogValidatorTest.scala
@@ -1178,7 +1178,7 @@ class LogValidatorTest {
   }
 
   private def createTwoBatchedRecords(magicValue: Byte,
-                                      timestamp: Long = RecordBatch.NO_TIMESTAMP,
+                                      timestamp: Long,
                                       codec: CompressionType): MemoryRecords = {
     val buf = ByteBuffer.allocate(2048)
     var builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, 0L)
@@ -1193,16 +1193,6 @@ class LogValidatorTest {
     MemoryRecords.readableRecords(buf.slice())
   }
 
-  private def createDiscontinuousOffsetRecords(magicValue: Byte,
-                                               codec: CompressionType): MemoryRecords = {
-    val buf = ByteBuffer.allocate(512)
-    val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, 0L)
-    builder.appendWithOffset(0, RecordBatch.NO_TIMESTAMP, null, "hello".getBytes)
-    builder.appendWithOffset(2, RecordBatch.NO_TIMESTAMP, null, "there".getBytes)
-    builder.appendWithOffset(3, RecordBatch.NO_TIMESTAMP, null, "beautiful".getBytes)
-    builder.build()
-  }
-
   /* check that offsets are assigned consecutively from the given base offset */
   def checkOffsets(records: MemoryRecords, baseOffset: Long) {
     assertTrue("Message set should not be empty", records.records.asScala.nonEmpty)
diff --git a/core/src/test/scala/unit/kafka/log/TransactionIndexTest.scala b/core/src/test/scala/unit/kafka/log/TransactionIndexTest.scala
index 574a8f5..0eb93e3 100644
--- a/core/src/test/scala/unit/kafka/log/TransactionIndexTest.scala
+++ b/core/src/test/scala/unit/kafka/log/TransactionIndexTest.scala
@@ -22,7 +22,7 @@ import kafka.utils.TestUtils
 import org.apache.kafka.common.requests.FetchResponse.AbortedTransaction
 import org.junit.Assert._
 import org.junit.{After, Before, Test}
-import org.scalatest.junit.JUnitSuite
+import org.scalatestplus.junit.JUnitSuite
 
 class TransactionIndexTest extends JUnitSuite {
   var file: File = _
diff --git a/core/src/test/scala/unit/kafka/security/auth/AclTest.scala b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala
index beeac37..a06d7a6 100644
--- a/core/src/test/scala/unit/kafka/security/auth/AclTest.scala
+++ b/core/src/test/scala/unit/kafka/security/auth/AclTest.scala
@@ -21,7 +21,7 @@ import java.nio.charset.StandardCharsets.UTF_8
 import kafka.utils.Json
 import org.apache.kafka.common.security.auth.KafkaPrincipal
 import org.junit.{Assert, Test}
-import org.scalatest.junit.JUnitSuite
+import org.scalatestplus.junit.JUnitSuite
 import scala.collection.JavaConverters._
 
 class AclTest extends JUnitSuite {

