kafka-commits mailing list archives

From Apache Jenkins Server <jenk...@builds.apache.org>
Subject Build failed in Jenkins: Kafka-0.8 #61
Date Tue, 09 Oct 2012 00:48:31 GMT
See <https://builds.apache.org/job/Kafka-0.8/61/changes>

Changes:

[jjkoshy] ConsumerOffsetChecker now works with hostnames (in addition to IP) in the brokers/ids zk path; KAFKA-549; patched by Bob Cotton; reviewed by Joel Koshy
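
For context, KAFKA-549 concerns the ConsumerOffsetChecker tool reading broker registrations from the /brokers/ids zk path. A rough, illustrative invocation (host, port, and group name below are placeholders, and flag names may vary slightly across 0.8 builds) would look something like:

    bin/kafka-run-class.sh kafka.tools.ConsumerOffsetChecker \
        --zkconnect zk-host.example.com:2181 \
        --group my-consumer-group

Per the change note above, brokers whose /brokers/ids entries advertise a hostname rather than a raw IP address should now also be handled by the checker.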

------------------------------------------
[...truncated 4520 lines...]
	at scala.collection.LinearSeqOptimized$class.foreach(LinearSeqOptimized.scala:61)
	at scala.collection.immutable.List.foreach(List.scala:45)
	at kafka.controller.ReplicaStateMachine.handleStateChanges(ReplicaStateMachine.scala:79)
	at kafka.controller.ReplicaStateMachine.startup(ReplicaStateMachine.scala:52)
	at kafka.controller.KafkaController.onControllerFailover(KafkaController.scala:74)
	at kafka.controller.KafkaController$$anonfun$1.apply$mcV$sp(KafkaController.scala:47)
	at kafka.server.ZookeeperLeaderElector.elect(ZookeeperLeaderElector.scala:55)
	at kafka.server.ZookeeperLeaderElector$LeaderChangeListener.handleDataDeleted(ZookeeperLeaderElector.scala:94)
	at org.I0Itec.zkclient.ZkClient$6.run(ZkClient.java:549)
	at org.I0Itec.zkclient.ZkEventThread.run(ZkEventThread.java:71)
Caused by: java.lang.InterruptedException
	at java.lang.Object.wait(Native Method)
	at java.lang.Object.wait(Object.java:485)
	at org.apache.zookeeper.ClientCnxn.submitRequest(ClientCnxn.java:1344)
	at org.apache.zookeeper.ZooKeeper.getData(ZooKeeper.java:925)
	at org.apache.zookeeper.ZooKeeper.getData(ZooKeeper.java:956)
	at org.I0Itec.zkclient.ZkConnection.readData(ZkConnection.java:103)
	at org.I0Itec.zkclient.ZkClient$9.call(ZkClient.java:770)
	at org.I0Itec.zkclient.ZkClient$9.call(ZkClient.java:766)
	at org.I0Itec.zkclient.ZkClient.retryUntilConnected(ZkClient.java:675)
	... 22 more
[2012-10-09 00:48:01,895] ERROR [Replica state machine on Controller 1]: Error while changing state of replica 1 for partition [new-topic, 3] to OnlineReplica (kafka.controller.ReplicaStateMachine:102)
org.I0Itec.zkclient.exception.ZkInterruptedException: java.lang.InterruptedException
	at org.I0Itec.zkclient.ZkClient.retryUntilConnected(ZkClient.java:687)
	at org.I0Itec.zkclient.ZkClient.readData(ZkClient.java:766)
	at org.I0Itec.zkclient.ZkClient.readData(ZkClient.java:761)
	at kafka.utils.ZkUtils$.readDataMaybeNull(ZkUtils.scala:355)
	at kafka.utils.ZkUtils$.getLeaderAndIsrForPartition(ZkUtils.scala:75)
	at kafka.controller.ReplicaStateMachine.kafka$controller$ReplicaStateMachine$$handleStateChange(ReplicaStateMachine.scala:111)
	at kafka.controller.ReplicaStateMachine$$anonfun$handleStateChanges$1$$anonfun$apply$mcVI$sp$1.apply(ReplicaStateMachine.scala:84)
	at kafka.controller.ReplicaStateMachine$$anonfun$handleStateChanges$1$$anonfun$apply$mcVI$sp$1.apply(ReplicaStateMachine.scala:83)
	at scala.collection.LinearSeqOptimized$class.foreach(LinearSeqOptimized.scala:61)
	at scala.collection.immutable.List.foreach(List.scala:45)
	at kafka.controller.ReplicaStateMachine$$anonfun$handleStateChanges$1.apply$mcVI$sp(ReplicaStateMachine.scala:83)
	at kafka.controller.ReplicaStateMachine$$anonfun$handleStateChanges$1.apply(ReplicaStateMachine.scala:79)
	at kafka.controller.ReplicaStateMachine$$anonfun$handleStateChanges$1.apply(ReplicaStateMachine.scala:79)
	at scala.collection.LinearSeqOptimized$class.foreach(LinearSeqOptimized.scala:61)
	at scala.collection.immutable.List.foreach(List.scala:45)
	at kafka.controller.ReplicaStateMachine.handleStateChanges(ReplicaStateMachine.scala:79)
	at kafka.controller.ReplicaStateMachine.startup(ReplicaStateMachine.scala:52)
	at kafka.controller.KafkaController.onControllerFailover(KafkaController.scala:74)
	at kafka.controller.KafkaController$$anonfun$1.apply$mcV$sp(KafkaController.scala:47)
	at kafka.server.ZookeeperLeaderElector.elect(ZookeeperLeaderElector.scala:55)
	at kafka.server.ZookeeperLeaderElector$LeaderChangeListener.handleDataDeleted(ZookeeperLeaderElector.scala:94)
	at org.I0Itec.zkclient.ZkClient$6.run(ZkClient.java:549)
	at org.I0Itec.zkclient.ZkEventThread.run(ZkEventThread.java:71)
Caused by: java.lang.InterruptedException
	at java.lang.Object.wait(Native Method)
	at java.lang.Object.wait(Object.java:485)
	at org.apache.zookeeper.ClientCnxn.submitRequest(ClientCnxn.java:1344)
	at org.apache.zookeeper.ZooKeeper.getData(ZooKeeper.java:925)
	at org.apache.zookeeper.ZooKeeper.getData(ZooKeeper.java:956)
	at org.I0Itec.zkclient.ZkConnection.readData(ZkConnection.java:103)
	at org.I0Itec.zkclient.ZkClient$9.call(ZkClient.java:770)
	at org.I0Itec.zkclient.ZkClient$9.call(ZkClient.java:766)
	at org.I0Itec.zkclient.ZkClient.retryUntilConnected(ZkClient.java:675)
	... 22 more
[2012-10-09 00:48:01,896] ERROR [Replica state machine on Controller 1]: Error while changing state of replica 1 for partition [new-topic, 1] to OnlineReplica (kafka.controller.ReplicaStateMachine:102)
org.I0Itec.zkclient.exception.ZkInterruptedException: java.lang.InterruptedException
	at org.I0Itec.zkclient.ZkClient.retryUntilConnected(ZkClient.java:687)
	at org.I0Itec.zkclient.ZkClient.readData(ZkClient.java:766)
	at org.I0Itec.zkclient.ZkClient.readData(ZkClient.java:761)
	at kafka.utils.ZkUtils$.readDataMaybeNull(ZkUtils.scala:355)
	at kafka.utils.ZkUtils$.getLeaderAndIsrForPartition(ZkUtils.scala:75)
	at kafka.controller.ReplicaStateMachine.kafka$controller$ReplicaStateMachine$$handleStateChange(ReplicaStateMachine.scala:111)
	at kafka.controller.ReplicaStateMachine$$anonfun$handleStateChanges$1$$anonfun$apply$mcVI$sp$1.apply(ReplicaStateMachine.scala:84)
	at kafka.controller.ReplicaStateMachine$$anonfun$handleStateChanges$1$$anonfun$apply$mcVI$sp$1.apply(ReplicaStateMachine.scala:83)
	at scala.collection.LinearSeqOptimized$class.foreach(LinearSeqOptimized.scala:61)
	at scala.collection.immutable.List.foreach(List.scala:45)
	at kafka.controller.ReplicaStateMachine$$anonfun$handleStateChanges$1.apply$mcVI$sp(ReplicaStateMachine.scala:83)
	at kafka.controller.ReplicaStateMachine$$anonfun$handleStateChanges$1.apply(ReplicaStateMachine.scala:79)
	at kafka.controller.ReplicaStateMachine$$anonfun$handleStateChanges$1.apply(ReplicaStateMachine.scala:79)
	at scala.collection.LinearSeqOptimized$class.foreach(LinearSeqOptimized.scala:61)
	at scala.collection.immutable.List.foreach(List.scala:45)
	at kafka.controller.ReplicaStateMachine.handleStateChanges(ReplicaStateMachine.scala:79)
	at kafka.controller.ReplicaStateMachine.startup(ReplicaStateMachine.scala:52)
	at kafka.controller.KafkaController.onControllerFailover(KafkaController.scala:74)
	at kafka.controller.KafkaController$$anonfun$1.apply$mcV$sp(KafkaController.scala:47)
	at kafka.server.ZookeeperLeaderElector.elect(ZookeeperLeaderElector.scala:55)
	at kafka.server.ZookeeperLeaderElector$LeaderChangeListener.handleDataDeleted(ZookeeperLeaderElector.scala:94)
	at org.I0Itec.zkclient.ZkClient$6.run(ZkClient.java:549)
	at org.I0Itec.zkclient.ZkEventThread.run(ZkEventThread.java:71)
Caused by: java.lang.InterruptedException
	at java.lang.Object.wait(Native Method)
	at java.lang.Object.wait(Object.java:485)
	at org.apache.zookeeper.ClientCnxn.submitRequest(ClientCnxn.java:1344)
	at org.apache.zookeeper.ZooKeeper.getData(ZooKeeper.java:925)
	at org.apache.zookeeper.ZooKeeper.getData(ZooKeeper.java:956)
	at org.I0Itec.zkclient.ZkConnection.readData(ZkConnection.java:103)
	at org.I0Itec.zkclient.ZkClient$9.call(ZkClient.java:770)
	at org.I0Itec.zkclient.ZkClient$9.call(ZkClient.java:766)
	at org.I0Itec.zkclient.ZkClient.retryUntilConnected(ZkClient.java:675)
	... 22 more
[info] Test Passed: testAsyncSendCanCorrectlyFailWithTimeout(kafka.producer.ProducerTest)
[info] == core-kafka / kafka.producer.ProducerTest ==
[info] 
[info] == core-kafka / kafka.server.SimpleFetchTest ==
[info] Test Starting: testNonReplicaSeesHwWhenFetching(kafka.server.SimpleFetchTest)
[info] Test Passed: testNonReplicaSeesHwWhenFetching(kafka.server.SimpleFetchTest)
[info] Test Starting: testReplicaSeesLeoWhenFetching(kafka.server.SimpleFetchTest)
[info] Test Passed: testReplicaSeesLeoWhenFetching(kafka.server.SimpleFetchTest)
[info] == core-kafka / kafka.server.SimpleFetchTest ==
[info] 
[info] == core-kafka / kafka.consumer.TopicFilterTest ==
[info] Test Starting: testWhitelists
[info] Test Passed: testWhitelists
[info] Test Starting: testBlacklists
[info] Test Passed: testBlacklists
[info] == core-kafka / kafka.consumer.TopicFilterTest ==
[info] 
[info] == core-kafka / kafka.log4j.KafkaLog4jAppenderTest ==
[info] Test Starting: testKafkaLog4jConfigs(kafka.log4j.KafkaLog4jAppenderTest)
log4j:WARN No appenders could be found for logger (org.I0Itec.zkclient.ZkEventThread).
log4j:WARN Please initialize the log4j system properly.
[info] Test Passed: testKafkaLog4jConfigs(kafka.log4j.KafkaLog4jAppenderTest)
[info] Test Starting: testLog4jAppends(kafka.log4j.KafkaLog4jAppenderTest)
[info] Test Passed: testLog4jAppends(kafka.log4j.KafkaLog4jAppenderTest)
[info] == core-kafka / kafka.log4j.KafkaLog4jAppenderTest ==
[info] 
[info] == core-kafka / kafka.integration.LazyInitProducerTest ==
[info] Test Starting: testProduceAndMultiFetch(kafka.integration.LazyInitProducerTest)
[info] Test Passed: testProduceAndMultiFetch(kafka.integration.LazyInitProducerTest)
[info] Test Starting: testMultiProduce(kafka.integration.LazyInitProducerTest)
[info] Test Passed: testMultiProduce(kafka.integration.LazyInitProducerTest)
[info] Test Starting: testProduceAndFetch(kafka.integration.LazyInitProducerTest)
[info] Test Passed: testProduceAndFetch(kafka.integration.LazyInitProducerTest)
[info] Test Starting: testMultiProduceResend(kafka.integration.LazyInitProducerTest)
[info] Test Passed: testMultiProduceResend(kafka.integration.LazyInitProducerTest)
[info] == core-kafka / kafka.integration.LazyInitProducerTest ==
[info] 
[info] == core-kafka / kafka.integration.TopicMetadataTest ==
[info] Test Starting: testTopicMetadataRequest(kafka.integration.TopicMetadataTest)
[info] Test Passed: testTopicMetadataRequest(kafka.integration.TopicMetadataTest)
[info] Test Starting: testBasicTopicMetadata(kafka.integration.TopicMetadataTest)
[info] Test Passed: testBasicTopicMetadata(kafka.integration.TopicMetadataTest)
[info] Test Starting: testAutoCreateTopic(kafka.integration.TopicMetadataTest)
[info] Test Passed: testAutoCreateTopic(kafka.integration.TopicMetadataTest)
[info] == core-kafka / kafka.integration.TopicMetadataTest ==
[info] 
[info] == core-kafka / kafka.log.SegmentListTest ==
[info] Test Starting: testAppend
[info] Test Passed: testAppend
[info] Test Starting: testTrunc
[info] Test Passed: testTrunc
[info] Test Starting: testTruncLast
[info] Test Passed: testTruncLast
[info] == core-kafka / kafka.log.SegmentListTest ==
[info] 
[info] == core-kafka / kafka.log.OffsetIndexTest ==
[info] Test Starting: truncate
[info] Test Passed: truncate
[info] Test Starting: randomLookupTest
[info] Test Passed: randomLookupTest
[info] Test Starting: lookupExtremeCases
[info] Test Passed: lookupExtremeCases
[info] Test Starting: appendTooMany
[info] Test Passed: appendTooMany
[info] Test Starting: testReadOnly
[info] Test Passed: testReadOnly
[info] Test Starting: appendOutOfOrder
[info] Test Passed: appendOutOfOrder
[info] Test Starting: reopenAsReadonly
[info] Test Passed: reopenAsReadonly
[info] == core-kafka / kafka.log.OffsetIndexTest ==
[info] 
[info] == core-kafka / kafka.server.RequestPurgatoryTest ==
[info] Test Starting: testRequestSatisfaction(kafka.server.RequestPurgatoryTest)
[info] Test Passed: testRequestSatisfaction(kafka.server.RequestPurgatoryTest)
[info] Test Starting: testRequestExpiry(kafka.server.RequestPurgatoryTest)
[info] Test Passed: testRequestExpiry(kafka.server.RequestPurgatoryTest)
[info] == core-kafka / kafka.server.RequestPurgatoryTest ==
[info] 
[info] == core-kafka / kafka.server.LogRecoveryTest ==
[info] Test Starting: testHWCheckpointNoFailuresSingleLogSegment(kafka.server.LogRecoveryTest)
[info] Test Passed: testHWCheckpointNoFailuresSingleLogSegment(kafka.server.LogRecoveryTest)
[info] Test Starting: testHWCheckpointWithFailuresSingleLogSegment(kafka.server.LogRecoveryTest)
[error] Test Failed: testHWCheckpointWithFailuresSingleLogSegment(kafka.server.LogRecoveryTest)
java.lang.AssertionError: expected:<2> but was:<1>
	at org.junit.Assert.fail(Assert.java:69)
	at org.junit.Assert.failNotEquals(Assert.java:314)
	at org.junit.Assert.assertEquals(Assert.java:94)
	at org.junit.Assert.assertEquals(Assert.java:104)
	at kafka.server.LogRecoveryTest.testHWCheckpointWithFailuresSingleLogSegment(LogRecoveryTest.scala:149)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
	at java.lang.reflect.Method.invoke(Method.java:597)
	at junit.framework.TestCase.runTest(TestCase.java:164)
	at junit.framework.TestCase.runBare(TestCase.java:130)
	at junit.framework.TestResult$1.protect(TestResult.java:110)
	at junit.framework.TestResult.runProtected(TestResult.java:128)
	at junit.framework.TestResult.run(TestResult.java:113)
	at junit.framework.TestCase.run(TestCase.java:120)
	at junit.framework.TestSuite.runTest(TestSuite.java:228)
	at junit.framework.TestSuite.run(TestSuite.java:223)
	at junit.framework.TestSuite.runTest(TestSuite.java:228)
	at junit.framework.TestSuite.run(TestSuite.java:223)
	at org.scalatest.junit.JUnit3Suite.run(JUnit3Suite.scala:309)
	at org.scalatest.tools.ScalaTestFramework$ScalaTestRunner.run(ScalaTestFramework.scala:40)
	at sbt.TestRunner.run(TestFramework.scala:53)
	at sbt.TestRunner.runTest$1(TestFramework.scala:67)
	at sbt.TestRunner.run(TestFramework.scala:76)
	at sbt.TestFramework$$anonfun$10$$anonfun$apply$11.runTest$2(TestFramework.scala:194)
	at sbt.TestFramework$$anonfun$10$$anonfun$apply$11$$anonfun$apply$12.apply(TestFramework.scala:205)
	at sbt.TestFramework$$anonfun$10$$anonfun$apply$11$$anonfun$apply$12.apply(TestFramework.scala:205)
	at sbt.NamedTestTask.run(TestFramework.scala:92)
	at sbt.ScalaProject$$anonfun$sbt$ScalaProject$$toTask$1.apply(ScalaProject.scala:193)
	at sbt.ScalaProject$$anonfun$sbt$ScalaProject$$toTask$1.apply(ScalaProject.scala:193)
	at sbt.TaskManager$Task.invoke(TaskManager.scala:62)
	at sbt.impl.RunTask.doRun$1(RunTask.scala:77)
	at sbt.impl.RunTask.runTask(RunTask.scala:85)
	at sbt.impl.RunTask.sbt$impl$RunTask$$runIfNotRoot(RunTask.scala:60)
	at sbt.impl.RunTask$$anonfun$runTasksExceptRoot$2.apply(RunTask.scala:48)
	at sbt.impl.RunTask$$anonfun$runTasksExceptRoot$2.apply(RunTask.scala:48)
	at sbt.Distributor$Run$Worker$$anonfun$2.apply(ParallelRunner.scala:131)
	at sbt.Distributor$Run$Worker$$anonfun$2.apply(ParallelRunner.scala:131)
	at sbt.Control$.trapUnit(Control.scala:19)
	at sbt.Distributor$Run$Worker.run(ParallelRunner.scala:131)
[info] Test Starting: testHWCheckpointNoFailuresMultipleLogSegments(kafka.server.LogRecoveryTest)
[info] Test Passed: testHWCheckpointNoFailuresMultipleLogSegments(kafka.server.LogRecoveryTest)
[info] Test Starting: testHWCheckpointWithFailuresMultipleLogSegments(kafka.server.LogRecoveryTest)
[info] Test Passed: testHWCheckpointWithFailuresMultipleLogSegments(kafka.server.LogRecoveryTest)
[info] == core-kafka / kafka.server.LogRecoveryTest ==
[info] 
[info] == core-kafka / kafka.server.HighwatermarkPersistenceTest ==
[info] Test Starting: testHighWatermarkPersistenceSinglePartition(kafka.server.HighwatermarkPersistenceTest)
[info] Test Passed: testHighWatermarkPersistenceSinglePartition(kafka.server.HighwatermarkPersistenceTest)
[info] Test Starting: testHighWatermarkPersistenceMultiplePartitions(kafka.server.HighwatermarkPersistenceTest)
[info] Test Passed: testHighWatermarkPersistenceMultiplePartitions(kafka.server.HighwatermarkPersistenceTest)
[info] == core-kafka / kafka.server.HighwatermarkPersistenceTest ==
[info] 
[info] == core-kafka / test-finish ==
[error] Failed: : Total 151, Failed 1, Errors 0, Passed 150, Skipped 0
[info] == core-kafka / test-finish ==
[info] 
[info] == core-kafka / Test cleanup 1 ==
[info] Deleting directory /tmp/sbt_9a8fd571
[info] == core-kafka / Test cleanup 1 ==
[info] 
[info] == core-kafka / test-cleanup ==
[info] == core-kafka / test-cleanup ==
[error] Error running kafka.server.LogRecoveryTest: Test FAILED
[error] Error running test: One or more subtasks failed
[info] 
[info] Total time: 209 s, completed Oct 9, 2012 12:48:31 AM
[info] 
[info] Total session time: 210 s, completed Oct 9, 2012 12:48:31 AM
[error] Error during build.
Build step 'Execute shell' marked build as failure
