kafka-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ewe...@apache.org
Subject kafka git commit: MINOR: Remove unused code in `LeaderAndIsr`, `ApiUtils` and `TopicMetadataRequest`
Date Tue, 29 Nov 2016 18:32:36 GMT
Repository: kafka
Updated Branches:
  refs/heads/trunk 7ed3768fb -> 7d3aa01ce


MINOR: Remove unused code in `LeaderAndIsr`, `ApiUtils` and `TopicMetadataRequest`

Author: Ismael Juma <ismael@juma.me.uk>

Reviewers: Ewen Cheslack-Postava <ewen@confluent.io>

Closes #2145 from ijuma/unused-code-in-leader-and-isr


Project: http://git-wip-us.apache.org/repos/asf/kafka/repo
Commit: http://git-wip-us.apache.org/repos/asf/kafka/commit/7d3aa01c
Tree: http://git-wip-us.apache.org/repos/asf/kafka/tree/7d3aa01c
Diff: http://git-wip-us.apache.org/repos/asf/kafka/diff/7d3aa01c

Branch: refs/heads/trunk
Commit: 7d3aa01ced15d30cd1c5390e76a68a06403cd2c1
Parents: 7ed3768
Author: Ismael Juma <ismael@juma.me.uk>
Authored: Tue Nov 29 10:32:32 2016 -0800
Committer: Ewen Cheslack-Postava <me@ewencp.org>
Committed: Tue Nov 29 10:32:32 2016 -0800

----------------------------------------------------------------------
 core/src/main/scala/kafka/api/ApiUtils.scala    | 11 -----
 .../src/main/scala/kafka/api/LeaderAndIsr.scala | 44 +-------------------
 .../scala/kafka/api/TopicMetadataRequest.scala  |  2 -
 3 files changed, 1 insertion(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/kafka/blob/7d3aa01c/core/src/main/scala/kafka/api/ApiUtils.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/api/ApiUtils.scala b/core/src/main/scala/kafka/api/ApiUtils.scala
index ca0a63f..8e8ac36 100644
--- a/core/src/main/scala/kafka/api/ApiUtils.scala
+++ b/core/src/main/scala/kafka/api/ApiUtils.scala
@@ -99,17 +99,6 @@ object ApiUtils {
     else value
   }
 
-  /**
-   * Read a long out of the bytebuffer from the current position and check that it falls within the given
-   * range. If not, throw KafkaException.
-   */
-  def readLongInRange(buffer: ByteBuffer, name: String, range: (Long, Long)): Long = {
-    val value = buffer.getLong
-    if(value < range._1 || value > range._2)
-      throw new KafkaException(name + " has value " + value + " which is not in the range " + range + ".")
-    else value
-  }
-
   private[api] def hasPendingWrites(channel: GatheringByteChannel): Boolean = channel match {
     case t: TransportLayer => t.hasPendingWrites
     case _ => false

http://git-wip-us.apache.org/repos/asf/kafka/blob/7d3aa01c/core/src/main/scala/kafka/api/LeaderAndIsr.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/api/LeaderAndIsr.scala b/core/src/main/scala/kafka/api/LeaderAndIsr.scala
index 9123788..e68ad86 100644
--- a/core/src/main/scala/kafka/api/LeaderAndIsr.scala
+++ b/core/src/main/scala/kafka/api/LeaderAndIsr.scala
@@ -15,11 +15,8 @@
  * limitations under the License.
  */
 
-
 package kafka.api
 
-import java.nio._
-
 import kafka.controller.LeaderIsrAndControllerEpoch
 import kafka.utils._
 
@@ -40,49 +37,10 @@ case class LeaderAndIsr(var leader: Int, var leaderEpoch: Int, var isr: List[Int
   }
 }
 
-object PartitionStateInfo {
-  def readFrom(buffer: ByteBuffer): PartitionStateInfo = {
-    val controllerEpoch = buffer.getInt
-    val leader = buffer.getInt
-    val leaderEpoch = buffer.getInt
-    val isrSize = buffer.getInt
-    val isr = for (_ <- 0 until isrSize) yield buffer.getInt
-    val zkVersion = buffer.getInt
-    val replicationFactor = buffer.getInt
-    val replicas = for (_ <- 0 until replicationFactor) yield buffer.getInt
-    PartitionStateInfo(LeaderIsrAndControllerEpoch(LeaderAndIsr(leader, leaderEpoch, isr.toList, zkVersion), controllerEpoch),
-                       replicas.toSet)
-  }
-}
+case class PartitionStateInfo(leaderIsrAndControllerEpoch: LeaderIsrAndControllerEpoch, allReplicas: Set[Int]) {
 
-case class PartitionStateInfo(leaderIsrAndControllerEpoch: LeaderIsrAndControllerEpoch,
-                              allReplicas: Set[Int]) {
   def replicationFactor = allReplicas.size
 
-  def writeTo(buffer: ByteBuffer) {
-    buffer.putInt(leaderIsrAndControllerEpoch.controllerEpoch)
-    buffer.putInt(leaderIsrAndControllerEpoch.leaderAndIsr.leader)
-    buffer.putInt(leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch)
-    buffer.putInt(leaderIsrAndControllerEpoch.leaderAndIsr.isr.size)
-    leaderIsrAndControllerEpoch.leaderAndIsr.isr.foreach(buffer.putInt(_))
-    buffer.putInt(leaderIsrAndControllerEpoch.leaderAndIsr.zkVersion)
-    buffer.putInt(replicationFactor)
-    allReplicas.foreach(buffer.putInt(_))
-  }
-
-  def sizeInBytes(): Int = {
-    val size =
-      4 /* epoch of the controller that elected the leader */ +
-      4 /* leader broker id */ +
-      4 /* leader epoch */ +
-      4 /* number of replicas in isr */ +
-      4 * leaderIsrAndControllerEpoch.leaderAndIsr.isr.size /* replicas in isr */ +
-      4 /* zk version */ +
-      4 /* replication factor */ +
-      allReplicas.size * 4
-    size
-  }
-
   override def toString: String = {
     val partitionStateInfo = new StringBuilder
     partitionStateInfo.append("(LeaderAndIsrInfo:" + leaderIsrAndControllerEpoch.toString)

http://git-wip-us.apache.org/repos/asf/kafka/blob/7d3aa01c/core/src/main/scala/kafka/api/TopicMetadataRequest.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/kafka/api/TopicMetadataRequest.scala b/core/src/main/scala/kafka/api/TopicMetadataRequest.scala
index 107696d..c64b268 100644
--- a/core/src/main/scala/kafka/api/TopicMetadataRequest.scala
+++ b/core/src/main/scala/kafka/api/TopicMetadataRequest.scala
@@ -25,8 +25,6 @@ import kafka.network.RequestChannel.Response
 import kafka.utils.Logging
 import org.apache.kafka.common.protocol.{ApiKeys, Errors}
 
-import scala.collection.mutable.ListBuffer
-
 object TopicMetadataRequest extends Logging {
   val CurrentVersion = 0.shortValue
   val DefaultClientId = ""


Mime
View raw message