kafka-commits mailing list archives

From gwens...@apache.org
Subject [10/13] kafka git commit: KAFKA-2715: Removed previous system_test folder
Date Fri, 30 Oct 2015 22:13:39 GMT
http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/offset_management_testsuite/config/server.properties
----------------------------------------------------------------------
diff --git a/system_test/offset_management_testsuite/config/server.properties b/system_test/offset_management_testsuite/config/server.properties
deleted file mode 100644
index b6de528..0000000
--- a/system_test/offset_management_testsuite/config/server.properties
+++ /dev/null
@@ -1,143 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=0
-
-# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
-# from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
-# may not be what you want.
-#host.name=
-
-
-############################# Socket Server Settings #############################
-
-# The port the socket server listens on
-port=9091
-
-# The number of threads handling network requests
-num.network.threads=2
- 
-# The number of threads doing disk I/O
-num.io.threads=2
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=1048576
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=1048576
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-############################# Log Basics #############################
-
-# The directory under which to store log files
-log.dir=/tmp/kafka_server_logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=5
-
-# Overrides for the default given by num.partitions on a per-topic basis
-#topic.partition.count.map=topic1:3, topic2:4
-
-############################# Log Flush Policy #############################
-
-# The following configurations control the flush of data to disk. This is the most
-# important performance knob in kafka.
-# There are a few important trade-offs here:
-#    1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
-#    2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
-#    3. Throughput: The flush is generally the most expensive operation. 
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-log.flush.interval.ms=1000
-
-# Per-topic overrides for log.flush.interval.ms
-#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
-
-# The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
-log.flush.scheduler.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.bytes.
-#log.retention.bytes=1073741824
-log.retention.bytes=-1
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-#log.segment.size=536870912
-log.segment.bytes=102400
-
-# The interval at which log segments are checked to see if they can be deleted according 
-# to the retention policies
-log.cleanup.interval.mins=1
-
-############################# Zookeeper #############################
-
-# Enable connecting to zookeeper
-enable.zookeeper=true
-
-# Zk connection string (see zk docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2181
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=1000000
-
-monitoring.period.secs=1
-message.max.bytes=1000000
-queued.max.requests=500
-log.roll.hours=168
-log.index.size.max.bytes=10485760
-log.index.interval.bytes=4096
-auto.create.topics.enable=true
-controller.socket.timeout.ms=30000
-default.replication.factor=1
-replica.lag.time.max.ms=10000
-replica.lag.max.messages=4000
-replica.socket.timeout.ms=30000
-replica.socket.receive.buffer.bytes=65536
-replica.fetch.max.bytes=1048576
-replica.fetch.wait.max.ms=500
-replica.fetch.min.bytes=4096
-num.replica.fetchers=1
-
-offsets.topic.num.partitions=2
-offsets.topic.replication.factor=4
-

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/offset_management_testsuite/config/zookeeper.properties
----------------------------------------------------------------------
diff --git a/system_test/offset_management_testsuite/config/zookeeper.properties b/system_test/offset_management_testsuite/config/zookeeper.properties
deleted file mode 100644
index 5474a72..0000000
--- a/system_test/offset_management_testsuite/config/zookeeper.properties
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# the directory where the snapshot is stored.
-dataDir=/tmp/zookeeper
-# the port at which the clients will connect
-clientPort=2181
-# disable the per-ip limit on the number of connections since this is a non-production config
-maxClientCnxns=0
-syncLimit=5
-initLimit=10
-tickTime=2000

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/offset_management_testsuite/offset_management_test.py
----------------------------------------------------------------------
diff --git a/system_test/offset_management_testsuite/offset_management_test.py b/system_test/offset_management_testsuite/offset_management_test.py
deleted file mode 100644
index aa38910..0000000
--- a/system_test/offset_management_testsuite/offset_management_test.py
+++ /dev/null
@@ -1,299 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#!/usr/bin/env python
-
-# ===================================
-# offset_management_test.py
-# ===================================
-
-import os
-import signal
-import sys
-import time
-import traceback
-
-from   system_test_env    import SystemTestEnv
-sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR)
-
-from   setup_utils        import SetupUtils
-from   replication_utils  import ReplicationUtils
-import system_test_utils
-from   testcase_env       import TestcaseEnv
-
-# product specific: Kafka
-import kafka_system_test_utils
-import metrics
-
-class OffsetManagementTest(ReplicationUtils, SetupUtils):
-
-    testModuleAbsPathName = os.path.realpath(__file__)
-    testSuiteAbsPathName  = os.path.abspath(os.path.dirname(testModuleAbsPathName))
-
-    def __init__(self, systemTestEnv):
-
-        # SystemTestEnv - provides cluster level environment settings
-        #     such as entity_id, hostname, kafka_home, java_home which
-        #     are available in a list of dictionary named 
-        #     "clusterEntityConfigDictList"
-        self.systemTestEnv = systemTestEnv
-
-        super(OffsetManagementTest, self).__init__(self)
-
-        # dict to pass user-defined attributes to logger argument: "extra"
-        d = {'name_of_class': self.__class__.__name__}
-
-    def signal_handler(self, signal, frame):
-        self.log_message("Interrupt detected - User pressed Ctrl+c")
-
-        # perform the necessary cleanup here when user presses Ctrl+c and it may be product specific
-        self.log_message("stopping all entities - please wait ...")
-        kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
-        sys.exit(1) 
-
-    def runTest(self):
-
-        # ======================================================================
-        # get all testcase directories under this testsuite
-        # ======================================================================
-        testCasePathNameList = system_test_utils.get_dir_paths_with_prefix(
-            self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX)
-        testCasePathNameList.sort()
-
-        replicationUtils = ReplicationUtils(self)
-
-        # =============================================================
-        # launch each testcase one by one: testcase_1, testcase_2, ...
-        # =============================================================
-        for testCasePathName in testCasePathNameList:
-   
-            skipThisTestCase = False
-
-            try: 
-                # ======================================================================
-                # A new instance of TestcaseEnv to keep track of this testcase's env vars
-                # and initialize some env vars as testCasePathName is available now
-                # ======================================================================
-                self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self)
-                self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName
-                self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName)
-                self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"]
-
-                # ======================================================================
-                # SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json
-                # ======================================================================
-                testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"]
-
-                if self.systemTestEnv.printTestDescriptionsOnly:
-                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
-                    continue
-                elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName):
-                    self.log_message("Skipping : " + testcaseDirName)
-                    skipThisTestCase = True
-                    continue
-                else:
-                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
-                    system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName)
-
-                # ============================================================================== #
-                # ============================================================================== #
-                #                   Product Specific Testing Code Starts Here:                   #
-                # ============================================================================== #
-                # ============================================================================== #
-    
-                # initialize self.testcaseEnv with user-defined environment variables (product specific)
-                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"]    = False
-                self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False
-
-                # initialize signal handler
-                signal.signal(signal.SIGINT, self.signal_handler)
-
-                # TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file:
-                #   system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
-                self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data(
-                    self.testcaseEnv.testcasePropJsonPathName)
-                 
-                # clean up data directories specified in zookeeper.properties and kafka_server_<n>.properties
-                kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv)
-
-                # create "LOCAL" log directories for metrics, dashboards for each entity under this testcase
-                # for collecting logs from remote machines
-                kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv)
-
-                # TestcaseEnv - initialize producer & consumer config / log file pathnames
-                kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv)
-
-                # generate remote hosts log/config dirs if not exist
-                kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv)
-    
-                # generate properties files for zookeeper, kafka, producer, and consumer:
-                # 1. copy system_test/<suite_name>_testsuite/config/*.properties to 
-                #    system_test/<suite_name>_testsuite/testcase_<n>/config/
-                # 2. update all properties files in system_test/<suite_name>_testsuite/testcase_<n>/config
-                #    by overriding the settings specified in:
-                #    system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
-                kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName,
-                    self.testcaseEnv, self.systemTestEnv)
-               
-                # =============================================
-                # preparing all entities to start the test
-                # =============================================
-                self.log_message("starting zookeepers")
-                kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv)
-                self.anonLogger.info("sleeping for 2s")
-                time.sleep(2)
-
-                self.log_message("starting brokers")
-                kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv)
-                self.anonLogger.info("sleeping for 5s")
-                time.sleep(5)
-
-                self.log_message("creating offset topic")
-                kafka_system_test_utils.create_topic(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 3, 2)
-                self.anonLogger.info("sleeping for 5s")
-                time.sleep(5)
-
-                # =============================================
-                # starting producer 
-                # =============================================
-                self.log_message("starting producer in the background")
-                kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, False)
-                msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"]
-                self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages")
-                time.sleep(int(msgProducingFreeTimeSec))
-
-                kafka_system_test_utils.start_console_consumers(self.systemTestEnv, self.testcaseEnv)
-
-                kafka_system_test_utils.get_leader_for(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 0)
-
-                # =============================================
-                # A while-loop to bounce consumers as specified
-                # by "num_iterations" in testcase_n_properties.json
-                # =============================================
-                i = 1
-                numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
-                bouncedEntityDownTimeSec = 10
-                try:
-                    bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"])
-                except:
-                    pass
-
-                # group1 -> offsets partition 0 // has one consumer; eid: 6
-                # group2 -> offsets partition 1 // has four consumers; eid: 7, 8, 9, 10
-
-                offsets_0_leader_entity = kafka_system_test_utils.get_leader_for(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 0)
-                offsets_1_leader_entity = kafka_system_test_utils.get_leader_for(self.systemTestEnv, self.testcaseEnv, "__consumer_offsets", 1)
-
-                while i <= numIterations:
-
-                    self.log_message("Iteration " + str(i) + " of " + str(numIterations))
-                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, offsets_0_leader_entity, self.testcaseEnv.entityBrokerParentPidDict[offsets_0_leader_entity])
-                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, offsets_1_leader_entity, self.testcaseEnv.entityBrokerParentPidDict[offsets_1_leader_entity])
-
-                    # =============================================
-                    # Bounce consumers if specified in testcase config
-                    # =============================================
-                    bounceConsumers = self.testcaseEnv.testcaseArgumentsDict["bounce_consumers"]
-                    self.log_message("bounce_consumers flag : " + bounceConsumers)
-                    if (bounceConsumers.lower() == "true"):
-
-                        clusterConfigList       = self.systemTestEnv.clusterEntityConfigDictList
-                        consumerEntityIdList    = system_test_utils.get_data_from_list_of_dicts( clusterConfigList, "role", "console_consumer", "entity_id")
-
-                        for stoppedConsumerEntityId in consumerEntityIdList:
-                            consumerPPID = self.testcaseEnv.entityConsoleConsumerParentPidDict[stoppedConsumerEntityId]
-                            self.log_message("stopping consumer: " + consumerPPID)
-                            kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedConsumerEntityId, consumerPPID)
-
-                        self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec")
-                        time.sleep(bouncedEntityDownTimeSec)
-                        # leaders would have changed during the above bounce.
-                        self.log_message("starting the previously terminated consumers.")
-                        for stoppedConsumerEntityId in consumerEntityIdList:
-                            # starting previously terminated consumer
-                            kafka_system_test_utils.start_console_consumers(self.systemTestEnv, self.testcaseEnv, stoppedConsumerEntityId)
-
-                        self.log_message("starting the previously terminated brokers")
-                        kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, offsets_0_leader_entity)
-                        kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, offsets_1_leader_entity)
-
-                    self.anonLogger.info("sleeping for 15s")
-                    time.sleep(15)
-                    i += 1
-                # while loop
-
-                # =============================================
-                # tell producer to stop
-                # =============================================
-                self.testcaseEnv.lock.acquire()
-                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True
-                time.sleep(1)
-                self.testcaseEnv.lock.release()
-                time.sleep(1)
-
-                # =============================================
-                # wait for producer thread's update of
-                # "backgroundProducerStopped" to be "True"
-                # =============================================
-                while 1:
-                    self.testcaseEnv.lock.acquire()
-                    self.logger.info("status of backgroundProducerStopped : [" + \
-                        str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d)
-                    if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]:
-                        time.sleep(1)
-                        self.logger.info("all producer threads completed", extra=self.d)
-                        break
-                    time.sleep(1)
-                    self.testcaseEnv.lock.release()
-                    time.sleep(2)
-
-                self.anonLogger.info("sleeping for 15s")
-                time.sleep(15)
-
-                # =============================================
-                # this testcase is completed - stop all entities
-                # =============================================
-                self.log_message("stopping all entities")
-                for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items():
-                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
-
-                for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items():
-                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
-
-                # make sure all entities are stopped
-                kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv)
-
-                # =============================================
-                # collect logs from remote hosts
-                # =============================================
-                kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv)
-
-                # =============================================
-                # validate the data matched and checksum
-                # =============================================
-                self.log_message("validating data matched")
-                kafka_system_test_utils.validate_data_matched_in_multi_topics_from_single_consumer_producer(self.systemTestEnv, self.testcaseEnv, replicationUtils)
-
-            except Exception as e:
-                self.log_message("Exception while running test {0}".format(e))
-                traceback.print_exc()
-                self.testcaseEnv.validationStatusDict["Test completed"] = "FAILED"
-
-            finally:
-                if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly:
-                    self.log_message("stopping all entities - please wait ...")
-                    kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
-

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/offset_management_testsuite/testcase_7001/testcase_7001_properties.json
----------------------------------------------------------------------
diff --git a/system_test/offset_management_testsuite/testcase_7001/testcase_7001_properties.json b/system_test/offset_management_testsuite/testcase_7001/testcase_7001_properties.json
deleted file mode 100644
index 1f0b718..0000000
--- a/system_test/offset_management_testsuite/testcase_7001/testcase_7001_properties.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "description": {"01":"To Test : 'Basic offset management test.'",
-                  "02":"Set up a Zk and Kafka cluster.",
-                  "03":"Produce messages to a multiple topics - various partition counts.",
-                  "04":"Start multiple consumer groups to read various subsets of above topics.",
-                  "05":"Bounce consumers.",
-                  "06":"Verify that there are no duplicate messages or lost messages on any consumer group.",
-                  "07":"Producer dimensions : mode:sync, acks:-1, comp:0"
-  },
-  "testcase_args": {
-    "bounce_leaders": "false",
-    "bounce_consumers": "true",
-    "replica_factor": "3",
-    "num_partition": "1",
-    "num_iteration": "1",
-    "sleep_seconds_between_producer_calls": "1",
-    "message_producing_free_time_sec": "15",
-    "num_messages_to_produce_per_producer_call": "50",
-    "num_topics_for_auto_generated_string":"1"
-  },
-  "entities": [
-    {
-      "entity_id": "0",
-      "clientPort": "2108",
-      "dataDir": "/tmp/zookeeper_0",
-      "log_filename": "zookeeper_0.log",
-      "config_filename": "zookeeper_0.properties"
-    },
-    {
-      "entity_id": "1",
-      "port": "9091",
-      "broker.id": "1",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_1_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_1.log",
-      "config_filename": "kafka_server_1.properties"
-    },
-    {
-      "entity_id": "2",
-      "port": "9092",
-      "broker.id": "2",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_2_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_2.log",
-      "config_filename": "kafka_server_2.properties"
-    },
-    {
-      "entity_id": "3",
-      "port": "9093",
-      "broker.id": "3",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_3_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_3.log",
-      "config_filename": "kafka_server_3.properties"
-    },
-    {
-      "entity_id": "4",
-      "port": "9094",
-      "broker.id": "4",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_4_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_4.log",
-      "config_filename": "kafka_server_4.properties"
-    },
-    {
-      "entity_id": "5",
-      "topic": "test",
-      "threads": "3",
-      "compression-codec": "0",
-      "message-size": "500",
-      "message": "1000",
-      "request-num-acks": "-1",
-      "sync":"true",
-      "producer-num-retries":"5",
-      "log_filename": "producer_performance_10.log",
-      "config_filename": "producer_performance_10.properties"
-    },
-    {
-      "entity_id": "6",
-      "topic": "test_0001",
-      "group.id": "group1",
-      "consumer-timeout-ms": "30000",
-      "log_filename": "console_consumer.log",
-      "config_filename": "console_consumer_6.properties"
-    }
-   ]
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_1.properties
----------------------------------------------------------------------
diff --git a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_1.properties b/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_1.properties
deleted file mode 100644
index 9efbd9d..0000000
--- a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_1.properties
+++ /dev/null
@@ -1,147 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=1
-
-# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
-# from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
-# may not be what you want.
-#host.name=
-
-
-############################# Socket Server Settings #############################
-
-# The port the socket server listens on
-port=9091
-
-# The number of threads handling network requests
-num.network.threads=2
- 
-# The number of threads doing disk I/O
-num.io.threads=2
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=1048576
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=1048576
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-############################# Log Basics #############################
-
-# The directory under which to store log files
-log.dir=/tmp/kafka_server_1_logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=5
-
-# Overrides for for the default given by num.partitions on a per-topic basis
-#topic.partition.count.map=topic1:3, topic2:4
-
-############################# Log Flush Policy #############################
-
-# The following configurations control the flush of data to disk. This is the most
-# important performance knob in kafka.
-# There are a few important trade-offs here:
-#    1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
-#    2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
-#    3. Throughput: The flush is generally the most expensive operation. 
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-log.flush.interval.ms=1000
-
-# Per-topic overrides for log.flush.interval.ms
-#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
-
-# The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
-log.flush.scheduler.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.bytes.
-#log.retention.bytes=1073741824
-log.retention.bytes=-1
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-#log.segment.size=536870912
-log.segment.bytes=10240
-
-# The interval at which log segments are checked to see if they can be deleted according 
-# to the retention policies
-log.cleanup.interval.mins=1
-
-############################# Zookeeper #############################
-
-# Enable connecting to zookeeper
-enable.zookeeper=true
-
-# Zk connection string (see zk docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2108
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=1000000
-
-monitoring.period.secs=1
-message.max.bytes=1000000
-queued.max.requests=500
-log.roll.hours=168
-log.index.size.max.bytes=10485760
-log.index.interval.bytes=4096
-auto.create.topics.enable=true
-controller.socket.timeout.ms=30000
-default.replication.factor=3
-replica.lag.time.max.ms=10000
-replica.lag.max.messages=4000
-replica.socket.timeout.ms=30000
-replica.socket.receive.buffer.bytes=65536
-replica.fetch.max.bytes=1048576
-replica.fetch.wait.max.ms=500
-replica.fetch.min.bytes=4096
-num.replica.fetchers=1
-
-offsets.topic.num.partitions=2
-offsets.topic.replication.factor=4
-
-kafka.csv.metrics.dir=/home/jkoshy/Projects/kafka/system_test/offset_management_testsuite/testcase_7002/logs/broker-1/metrics
-kafka.csv.metrics.reporter.enabled=true
-kafka.metrics.polling.interval.secs=5
-kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_2.properties
----------------------------------------------------------------------
diff --git a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_2.properties b/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_2.properties
deleted file mode 100644
index d4bf702..0000000
--- a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_2.properties
+++ /dev/null
@@ -1,147 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=2
-
-# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
-# from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
-# may not be what you want.
-#host.name=
-
-
-############################# Socket Server Settings #############################
-
-# The port the socket server listens on
-port=9092
-
-# The number of threads handling network requests
-num.network.threads=2
- 
-# The number of threads doing disk I/O
-num.io.threads=2
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=1048576
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=1048576
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-############################# Log Basics #############################
-
-# The directory under which to store log files
-log.dir=/tmp/kafka_server_2_logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=5
-
-# Overrides for for the default given by num.partitions on a per-topic basis
-#topic.partition.count.map=topic1:3, topic2:4
-
-############################# Log Flush Policy #############################
-
-# The following configurations control the flush of data to disk. This is the most
-# important performance knob in kafka.
-# There are a few important trade-offs here:
-#    1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
-#    2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
-#    3. Throughput: The flush is generally the most expensive operation. 
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-log.flush.interval.ms=1000
-
-# Per-topic overrides for log.flush.interval.ms
-#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
-
-# The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
-log.flush.scheduler.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.bytes.
-#log.retention.bytes=1073741824
-log.retention.bytes=-1
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-#log.segment.size=536870912
-log.segment.bytes=10240
-
-# The interval at which log segments are checked to see if they can be deleted according 
-# to the retention policies
-log.cleanup.interval.mins=1
-
-############################# Zookeeper #############################
-
-# Enable connecting to zookeeper
-enable.zookeeper=true
-
-# Zk connection string (see zk docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2108
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=1000000
-
-monitoring.period.secs=1
-message.max.bytes=1000000
-queued.max.requests=500
-log.roll.hours=168
-log.index.size.max.bytes=10485760
-log.index.interval.bytes=4096
-auto.create.topics.enable=true
-controller.socket.timeout.ms=30000
-default.replication.factor=3
-replica.lag.time.max.ms=10000
-replica.lag.max.messages=4000
-replica.socket.timeout.ms=30000
-replica.socket.receive.buffer.bytes=65536
-replica.fetch.max.bytes=1048576
-replica.fetch.wait.max.ms=500
-replica.fetch.min.bytes=4096
-num.replica.fetchers=1
-
-offsets.topic.num.partitions=2
-offsets.topic.replication.factor=4
-
-kafka.csv.metrics.dir=/home/jkoshy/Projects/kafka/system_test/offset_management_testsuite/testcase_7002/logs/broker-2/metrics
-kafka.csv.metrics.reporter.enabled=true
-kafka.metrics.polling.interval.secs=5
-kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_3.properties
----------------------------------------------------------------------
diff --git a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_3.properties b/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_3.properties
deleted file mode 100644
index e6e06be..0000000
--- a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_3.properties
+++ /dev/null
@@ -1,147 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=3
-
-# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
-# from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
-# may not be what you want.
-#host.name=
-
-
-############################# Socket Server Settings #############################
-
-# The port the socket server listens on
-port=9093
-
-# The number of threads handling network requests
-num.network.threads=2
- 
-# The number of threads doing disk I/O
-num.io.threads=2
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=1048576
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=1048576
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-############################# Log Basics #############################
-
-# The directory under which to store log files
-log.dir=/tmp/kafka_server_3_logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=5
-
-# Overrides for for the default given by num.partitions on a per-topic basis
-#topic.partition.count.map=topic1:3, topic2:4
-
-############################# Log Flush Policy #############################
-
-# The following configurations control the flush of data to disk. This is the most
-# important performance knob in kafka.
-# There are a few important trade-offs here:
-#    1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
-#    2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
-#    3. Throughput: The flush is generally the most expensive operation. 
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-log.flush.interval.ms=1000
-
-# Per-topic overrides for log.flush.interval.ms
-#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
-
-# The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
-log.flush.scheduler.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.bytes.
-#log.retention.bytes=1073741824
-log.retention.bytes=-1
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-#log.segment.size=536870912
-log.segment.bytes=10240
-
-# The interval at which log segments are checked to see if they can be deleted according 
-# to the retention policies
-log.cleanup.interval.mins=1
-
-############################# Zookeeper #############################
-
-# Enable connecting to zookeeper
-enable.zookeeper=true
-
-# Zk connection string (see zk docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2108
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=1000000
-
-monitoring.period.secs=1
-message.max.bytes=1000000
-queued.max.requests=500
-log.roll.hours=168
-log.index.size.max.bytes=10485760
-log.index.interval.bytes=4096
-auto.create.topics.enable=true
-controller.socket.timeout.ms=30000
-default.replication.factor=3
-replica.lag.time.max.ms=10000
-replica.lag.max.messages=4000
-replica.socket.timeout.ms=30000
-replica.socket.receive.buffer.bytes=65536
-replica.fetch.max.bytes=1048576
-replica.fetch.wait.max.ms=500
-replica.fetch.min.bytes=4096
-num.replica.fetchers=1
-
-offsets.topic.num.partitions=2
-offsets.topic.replication.factor=4
-
-kafka.csv.metrics.dir=/home/jkoshy/Projects/kafka/system_test/offset_management_testsuite/testcase_7002/logs/broker-3/metrics
-kafka.csv.metrics.reporter.enabled=true
-kafka.metrics.polling.interval.secs=5
-kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_4.properties
----------------------------------------------------------------------
diff --git a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_4.properties b/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_4.properties
deleted file mode 100644
index 2cb03e4..0000000
--- a/system_test/offset_management_testsuite/testcase_7002/config/kafka_server_4.properties
+++ /dev/null
@@ -1,147 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=4
-
-# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
-# from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
-# may not be what you want.
-#host.name=
-
-
-############################# Socket Server Settings #############################
-
-# The port the socket server listens on
-port=9094
-
-# The number of threads handling network requests
-num.network.threads=2
- 
-# The number of threads doing disk I/O
-num.io.threads=2
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=1048576
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=1048576
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-############################# Log Basics #############################
-
-# The directory under which to store log files
-log.dir=/tmp/kafka_server_4_logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=5
-
-# Overrides for for the default given by num.partitions on a per-topic basis
-#topic.partition.count.map=topic1:3, topic2:4
-
-############################# Log Flush Policy #############################
-
-# The following configurations control the flush of data to disk. This is the most
-# important performance knob in kafka.
-# There are a few important trade-offs here:
-#    1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
-#    2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
-#    3. Throughput: The flush is generally the most expensive operation. 
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-log.flush.interval.ms=1000
-
-# Per-topic overrides for log.flush.interval.ms
-#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
-
-# The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
-log.flush.scheduler.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.bytes.
-#log.retention.bytes=1073741824
-log.retention.bytes=-1
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-#log.segment.size=536870912
-log.segment.bytes=10240
-
-# The interval at which log segments are checked to see if they can be deleted according 
-# to the retention policies
-log.cleanup.interval.mins=1
-
-############################# Zookeeper #############################
-
-# Enable connecting to zookeeper
-enable.zookeeper=true
-
-# Zk connection string (see zk docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2108
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=1000000
-
-monitoring.period.secs=1
-message.max.bytes=1000000
-queued.max.requests=500
-log.roll.hours=168
-log.index.size.max.bytes=10485760
-log.index.interval.bytes=4096
-auto.create.topics.enable=true
-controller.socket.timeout.ms=30000
-default.replication.factor=3
-replica.lag.time.max.ms=10000
-replica.lag.max.messages=4000
-replica.socket.timeout.ms=30000
-replica.socket.receive.buffer.bytes=65536
-replica.fetch.max.bytes=1048576
-replica.fetch.wait.max.ms=500
-replica.fetch.min.bytes=4096
-num.replica.fetchers=1
-
-offsets.topic.num.partitions=2
-offsets.topic.replication.factor=4
-
-kafka.csv.metrics.dir=/home/jkoshy/Projects/kafka/system_test/offset_management_testsuite/testcase_7002/logs/broker-4/metrics
-kafka.csv.metrics.reporter.enabled=true
-kafka.metrics.polling.interval.secs=5
-kafka.metrics.reporters=kafka.metrics.KafkaCSVMetricsReporter

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/offset_management_testsuite/testcase_7002/config/zookeeper_0.properties
----------------------------------------------------------------------
diff --git a/system_test/offset_management_testsuite/testcase_7002/config/zookeeper_0.properties b/system_test/offset_management_testsuite/testcase_7002/config/zookeeper_0.properties
deleted file mode 100644
index 97c07b9..0000000
--- a/system_test/offset_management_testsuite/testcase_7002/config/zookeeper_0.properties
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# the directory where the snapshot is stored.
-dataDir=/tmp/zookeeper_0
-# the port at which the clients will connect
-clientPort=2108
-# disable the per-ip limit on the number of connections since this is a non-production config
-maxClientCnxns=0
-syncLimit=5
-initLimit=10
-tickTime=2000
-server.1=localhost:2107:2109
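This describes a single-node ensemble: server.1 uses 2107/2109 as its peer and leader-election ports while clients connect on 2108, matching zookeeper.connect=localhost:2108 in the broker config above. Outside the test driver it can be started with the stock script (a sketch; the path is the file's location in the repository before this removal):

    bin/zookeeper-server-start.sh \
        system_test/offset_management_testsuite/testcase_7002/config/zookeeper_0.properties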

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/offset_management_testsuite/testcase_7002/testcase_7002_properties.json
----------------------------------------------------------------------
diff --git a/system_test/offset_management_testsuite/testcase_7002/testcase_7002_properties.json b/system_test/offset_management_testsuite/testcase_7002/testcase_7002_properties.json
deleted file mode 100644
index c5866a2..0000000
--- a/system_test/offset_management_testsuite/testcase_7002/testcase_7002_properties.json
+++ /dev/null
@@ -1,127 +0,0 @@
-{
-  "description": {"01":"To Test : 'Basic offset management test.'",
-                  "02":"Set up a Zk and Kafka cluster.",
-                  "03":"Produce messages to multiple topics with various partition counts.",
-                  "04":"Start multiple consumer groups to read various subsets of above topics.",
-                  "05":"Bounce consumers.",
-                  "06":"Verify that there are no duplicate messages or lost messages on any consumer group.",
-                  "07":"Producer dimensions : mode:sync, acks:-1, comp:0"
-  },
-  "testcase_args": {
-    "bounce_leaders": "false",
-    "bounce_consumers": "true",
-    "replica_factor": "3",
-    "num_partition": "1",
-    "num_iteration": "1",
-    "sleep_seconds_between_producer_calls": "1",
-    "message_producing_free_time_sec": "15",
-    "num_messages_to_produce_per_producer_call": "50",
-    "num_topics_for_auto_generated_string":"3"
-  },
-  "entities": [
-    {
-      "entity_id": "0",
-      "clientPort": "2108",
-      "dataDir": "/tmp/zookeeper_0",
-      "log_filename": "zookeeper_0.log",
-      "config_filename": "zookeeper_0.properties"
-    },
-    {
-      "entity_id": "1",
-      "port": "9091",
-      "broker.id": "1",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_1_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_1.log",
-      "config_filename": "kafka_server_1.properties"
-    },
-    {
-      "entity_id": "2",
-      "port": "9092",
-      "broker.id": "2",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_2_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_2.log",
-      "config_filename": "kafka_server_2.properties"
-    },
-    {
-      "entity_id": "3",
-      "port": "9093",
-      "broker.id": "3",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_3_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_3.log",
-      "config_filename": "kafka_server_3.properties"
-    },
-    {
-      "entity_id": "4",
-      "port": "9094",
-      "broker.id": "4",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_4_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_4.log",
-      "config_filename": "kafka_server_4.properties"
-    },
-    {
-      "entity_id": "5",
-      "topic": "test",
-      "threads": "5",
-      "compression-codec": "0",
-      "message-size": "500",
-      "message": "1000",
-      "request-num-acks": "-1",
-      "sync":"true",
-      "producer-num-retries":"5",
-      "log_filename": "producer_performance_10.log",
-      "config_filename": "producer_performance_10.properties"
-    },
-    {
-      "entity_id": "6",
-      "topic": "test_0001",
-      "group.id": "group1",
-      "consumer-timeout-ms": "30000",
-      "log_filename": "console_consumer.log",
-      "config_filename": "console_consumer_6.properties"
-    },
-    {
-      "entity_id": "7",
-      "topic": "test_0002",
-      "group.id": "group2",
-      "consumer-timeout-ms": "30000",
-      "log_filename": "console_consumer.log",
-      "config_filename": "console_consumer_7.properties"
-    },
-    {
-      "entity_id": "8",
-      "topic": "test_0002",
-      "group.id": "group2",
-      "consumer-timeout-ms": "30000",
-      "log_filename": "console_consumer.log",
-      "config_filename": "console_consumer_8.properties"
-    },
-    {
-      "entity_id": "9",
-      "topic": "test_0002",
-      "group.id": "group2",
-      "consumer-timeout-ms": "30000",
-      "log_filename": "console_consumer.log",
-      "config_filename": "console_consumer_9.properties"
-    },
-    {
-      "entity_id": "10",
-      "topic": "test_0003",
-      "group.id": "group2",
-      "consumer-timeout-ms": "30000",
-      "log_filename": "console_consumer.log",
-      "config_filename": "console_consumer_10.properties"
-    }
-   ]
-}
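Each entity maps to one process launched by the test driver: entity 0 is the ZooKeeper node, entities 1-4 are brokers, entity 5 is the producer-performance run, and entities 6-10 are console consumers spread across two groups. To eyeball that mapping without reading the whole file, something like the following works (a sketch; jq is not used by the suite itself and is only an illustration):

    jq -r '.entities[] | [.entity_id, (.port // .clientPort // "-"), .config_filename] | @tsv' \
        testcase_7002_properties.json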

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/producer_perf/README
----------------------------------------------------------------------
diff --git a/system_test/producer_perf/README b/system_test/producer_perf/README
deleted file mode 100644
index be3bb51..0000000
--- a/system_test/producer_perf/README
+++ /dev/null
@@ -1,9 +0,0 @@
-This test produces a large number of messages to a broker. It measures the producer throughput
-and verifies that the amount of data received by the broker matches what is expected.
-
-To run this test, do
-bin/run-test.sh
-
-The expected output is given in expected.out. There are 2 things to pay attention to:
-1. The output should have a line "test passed".
-2. The throughput from the producer should be around 300,000 Messages/sec on a typical machine.
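Checking item 1 can be scripted, for instance (a minimal sketch; the run-test.out filename is only an illustration):

    cd system_test/producer_perf
    bin/run-test.sh 2>&1 | tee run-test.out
    grep -q "test passed" run-test.out && echo OK || echo FAILED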

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/producer_perf/bin/expected.out
----------------------------------------------------------------------
diff --git a/system_test/producer_perf/bin/expected.out b/system_test/producer_perf/bin/expected.out
deleted file mode 100644
index 311d9b7..0000000
--- a/system_test/producer_perf/bin/expected.out
+++ /dev/null
@@ -1,32 +0,0 @@
-start the servers ...
-start producing 2000000 messages ...
-[2011-05-17 14:31:12,568] INFO Creating async producer for broker id = 0 at localhost:9092 (kafka.producer.ProducerPool)
-thread 0: 100000 messages sent 3272786.7779 nMsg/sec 3.1212 MBs/sec
-thread 0: 200000 messages sent 3685956.5057 nMsg/sec 3.5152 MBs/sec
-thread 0: 300000 messages sent 3717472.1190 nMsg/sec 3.5453 MBs/sec
-thread 0: 400000 messages sent 3730647.2673 nMsg/sec 3.5578 MBs/sec
-thread 0: 500000 messages sent 3730647.2673 nMsg/sec 3.5578 MBs/sec
-thread 0: 600000 messages sent 3722315.2801 nMsg/sec 3.5499 MBs/sec
-thread 0: 700000 messages sent 3718854.5928 nMsg/sec 3.5466 MBs/sec
-thread 0: 800000 messages sent 3714020.4271 nMsg/sec 3.5420 MBs/sec
-thread 0: 900000 messages sent 3713330.8578 nMsg/sec 3.5413 MBs/sec
-thread 0: 1000000 messages sent 3710575.1391 nMsg/sec 3.5387 MBs/sec
-thread 0: 1100000 messages sent 3711263.6853 nMsg/sec 3.5393 MBs/sec
-thread 0: 1200000 messages sent 3716090.6726 nMsg/sec 3.5439 MBs/sec
-thread 0: 1300000 messages sent 3709198.8131 nMsg/sec 3.5374 MBs/sec
-thread 0: 1400000 messages sent 3705762.4606 nMsg/sec 3.5341 MBs/sec
-thread 0: 1500000 messages sent 3701647.2330 nMsg/sec 3.5302 MBs/sec
-thread 0: 1600000 messages sent 3696174.4594 nMsg/sec 3.5249 MBs/sec
-thread 0: 1700000 messages sent 3703703.7037 nMsg/sec 3.5321 MBs/sec
-thread 0: 1800000 messages sent 3703017.9596 nMsg/sec 3.5315 MBs/sec
-thread 0: 1900000 messages sent 3700277.5208 nMsg/sec 3.5289 MBs/sec
-thread 0: 2000000 messages sent 3702332.4695 nMsg/sec 3.5308 MBs/sec
-[2011-05-17 14:33:01,102] INFO Closing all async producers (kafka.producer.ProducerPool)
-[2011-05-17 14:33:01,103] INFO Closed AsyncProducer (kafka.producer.async.AsyncProducer)
-Total Num Messages: 2000000 bytes: 400000000 in 108.678 secs
-Messages/sec: 18402.9886
-MB/sec: 3.5101
-wait for data to be persisted
-test passed
-bin/../../../bin/kafka-server-start.sh: line 11: 21110 Terminated              $(dirname $0)/kafka-run-class.sh kafka.Kafka $@
-bin/../../../bin/zookeeper-server-start.sh: line 9: 21109 Terminated              $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.server.quorum.QuorumPeerMain $@

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/producer_perf/bin/run-compression-test.sh
----------------------------------------------------------------------
diff --git a/system_test/producer_perf/bin/run-compression-test.sh b/system_test/producer_perf/bin/run-compression-test.sh
deleted file mode 100755
index 5297d1f..0000000
--- a/system_test/producer_perf/bin/run-compression-test.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-num_messages=2000000
-message_size=200
-
-base_dir=$(dirname $0)/..
-
-rm -rf /tmp/zookeeper
-rm -rf /tmp/kafka-logs
-
-echo "start the servers ..."
-$base_dir/../../bin/zookeeper-server-start.sh $base_dir/config/zookeeper.properties 2>&1 > $base_dir/zookeeper.log &
-$base_dir/../../bin/kafka-server-start.sh $base_dir/config/server.properties 2>&1 > $base_dir/kafka.log &
-
-sleep 4
-echo "start producing $num_messages messages ..."
-$base_dir/../../bin/kafka-run-class.sh kafka.tools.ProducerPerformance --brokerinfo broker.list=0:localhost:9092 --topics test01 --messages $num_messages --message-size $message_size --batch-size 200 --threads 1 --reporting-interval 100000 --async --compression-codec 1
-
-echo "wait for data to be persisted"
-cur_offset="-1"
-quit=0
-while [ $quit -eq 0 ]
-do
-  sleep 2
-  target_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1`
-  if [ $target_size -eq $cur_offset ]
-  then
-    quit=1
-  fi
-  cur_offset=$target_size
-done
-
-sleep 2
-actual_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1`
-num_batches=`expr $num_messages \/ $message_size`
-expected_size=`expr $num_batches \* 262`
-
-if [ $actual_size != $expected_size ]
-then
-   echo "actual size: $actual_size expected size: $expected_size test failed!!! look at it!!!"
-else
-   echo "test passed"
-fi
-
-ps ax | grep -i 'kafka.kafka' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null
-sleep 2
-ps ax | grep -i 'QuorumPeerMain' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null
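The size check at the end assumes each compressed batch (--batch-size 200, codec 1) lands on disk as a 262-byte message set, so the expected final offset works out as follows (a restatement of the script's own arithmetic; note that dividing by $message_size only yields the batch count because batch size and message size are both 200 here):

    num_batches=$((2000000 / 200))        # 10000 batches
    expected_size=$((num_batches * 262))  # 2620000 bytes, the expected final log offset
    echo "$expected_size"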

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/producer_perf/bin/run-test.sh
----------------------------------------------------------------------
diff --git a/system_test/producer_perf/bin/run-test.sh b/system_test/producer_perf/bin/run-test.sh
deleted file mode 100755
index 9a3b885..0000000
--- a/system_test/producer_perf/bin/run-test.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-num_messages=2000000
-message_size=200
-
-base_dir=$(dirname $0)/..
-
-rm -rf /tmp/zookeeper
-rm -rf /tmp/kafka-logs
-
-echo "start the servers ..."
-$base_dir/../../bin/zookeeper-server-start.sh $base_dir/config/zookeeper.properties 2>&1 > $base_dir/zookeeper.log &
-$base_dir/../../bin/kafka-server-start.sh $base_dir/config/server.properties 2>&1 > $base_dir/kafka.log &
-
-sleep 4
-echo "start producing $num_messages messages ..."
-$base_dir/../../bin/kafka-run-class.sh kafka.tools.ProducerPerformance --brokerinfo broker.list=0:localhost:9092 --topics test01 --messages $num_messages --message-size $message_size --batch-size 200 --threads 1 --reporting-interval 100000 --async
-
-echo "wait for data to be persisted"
-cur_offset="-1"
-quit=0
-while [ $quit -eq 0 ]
-do
-  sleep 2
-  target_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1`
-  if [ $target_size -eq $cur_offset ]
-  then
-    quit=1
-  fi
-  cur_offset=$target_size
-done
-
-sleep 2
-actual_size=`$base_dir/../../bin/kafka-run-class.sh kafka.tools.GetOffsetShell --server kafka://localhost:9092 --topic test01 --partition 0 --time -1 --offsets 1 | tail -1`
-msg_full_size=`expr $message_size + 10`
-expected_size=`expr $num_messages \* $msg_full_size`
-
-if [ $actual_size != $expected_size ]
-then
-   echo "actual size: $actual_size expected size: $expected_size test failed!!! look at it!!!"
-else
-   echo "test passed"
-fi
-
-ps ax | grep -i 'kafka.kafka' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null
-sleep 2
-ps ax | grep -i 'QuorumPeerMain' | grep -v grep | awk '{print $1}' | xargs kill -15 > /dev/null
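The uncompressed variant sizes the log per message instead: each 200-byte payload is assumed to occupy 210 bytes on disk (payload plus the 10-byte per-message overhead the script assumes), giving (again just restating the script's arithmetic):

    msg_full_size=$((200 + 10))                 # 210 bytes per message on disk
    expected_size=$((2000000 * msg_full_size))  # 420000000 bytes, the expected final log offset
    echo "$expected_size"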

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/producer_perf/config/server.properties
----------------------------------------------------------------------
diff --git a/system_test/producer_perf/config/server.properties b/system_test/producer_perf/config/server.properties
deleted file mode 100644
index 83a1e06..0000000
--- a/system_test/producer_perf/config/server.properties
+++ /dev/null
@@ -1,78 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-# the id of the broker
-broker.id=0
-
-# hostname of the broker. If not set, the value returned by
-# InetAddress.getLocalHost() is used.  If there are multiple interfaces,
-# getLocalHost may not be what you want.
-# host.name=
-
-# number of logical partitions on this broker
-num.partitions=1
-
-# the port the socket server runs on
-port=9092
-
-# the number of processor threads the socket server uses. Defaults to the number of cores on the machine
-num.threads=8
-
-# the directory in which to store log files
-log.dir=/tmp/kafka-logs
-
-# the send buffer used by the socket server 
-socket.send.buffer.bytes=1048576
-
-# the receive buffer used by the socket server
-socket.receive.buffer.bytes=1048576
-
-# the maximum size of a log segment
-log.segment.bytes=536870912
-
-# the interval between running cleanup on the logs
-log.cleanup.interval.mins=1
-
-# the minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-#the number of messages to accept without flushing the log to disk
-log.flush.interval.messages=600
-
-#set the following properties to use zookeeper
-
-# enable connecting to zookeeper
-enable.zookeeper=true
-
-# zk connection string
-# a comma-separated list of host:port pairs, each corresponding to a zk
-# server, e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
-zookeeper.connect=localhost:2181
-
-# timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=1000000
-
-# time based topic flush intervals in ms
-#log.flush.intervals.ms.per.topic=topic:1000
-
-# default time based flush interval in ms
-log.flush.interval.ms=1000
-
-# time-based log flush scheduler interval in ms
-log.flush.scheduler.interval.ms=1000
-
-# topic partition count map
-# topic.partition.count.map=topic1:3, topic2:4
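With log.flush.interval.messages=600 and log.flush.interval.ms=1000, produced data reaches disk within roughly a second, which is why run-test.sh can simply poll GetOffsetShell until the offset stops moving. A crude way to watch the same thing from the filesystem side (a sketch; the path assumes the log.dir above and the test01 topic used by the scripts, and du -b is GNU-specific):

    # watch the on-disk size of the test01 partition grow until it stabilises
    watch -n 2 'du -sb /tmp/kafka-logs/test01-0'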

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/producer_perf/config/zookeeper.properties
----------------------------------------------------------------------
diff --git a/system_test/producer_perf/config/zookeeper.properties b/system_test/producer_perf/config/zookeeper.properties
deleted file mode 100644
index bd3fe84..0000000
--- a/system_test/producer_perf/config/zookeeper.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# the directory where the snapshot is stored.
-dataDir=/tmp/zookeeper
-# the port at which the clients will connect
-clientPort=2181

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/replication_testsuite/__init__.py
----------------------------------------------------------------------
diff --git a/system_test/replication_testsuite/__init__.py b/system_test/replication_testsuite/__init__.py
deleted file mode 100644
index 8d1c8b6..0000000
--- a/system_test/replication_testsuite/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
- 

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/replication_testsuite/config/console_consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/replication_testsuite/config/console_consumer.properties b/system_test/replication_testsuite/config/console_consumer.properties
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/replication_testsuite/config/consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/replication_testsuite/config/consumer.properties b/system_test/replication_testsuite/config/consumer.properties
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/replication_testsuite/config/log4j.properties
----------------------------------------------------------------------
diff --git a/system_test/replication_testsuite/config/log4j.properties b/system_test/replication_testsuite/config/log4j.properties
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/replication_testsuite/config/producer.properties
----------------------------------------------------------------------
diff --git a/system_test/replication_testsuite/config/producer.properties b/system_test/replication_testsuite/config/producer.properties
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/replication_testsuite/config/producer_performance.properties
----------------------------------------------------------------------
diff --git a/system_test/replication_testsuite/config/producer_performance.properties b/system_test/replication_testsuite/config/producer_performance.properties
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/replication_testsuite/config/server.properties
----------------------------------------------------------------------
diff --git a/system_test/replication_testsuite/config/server.properties b/system_test/replication_testsuite/config/server.properties
deleted file mode 100644
index d1dff68..0000000
--- a/system_test/replication_testsuite/config/server.properties
+++ /dev/null
@@ -1,139 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=0
-
-# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
-# from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
-# may not be what you want.
-#host.name=
-
-
-############################# Socket Server Settings #############################
-
-# The port the socket server listens on
-port=9091
-
-# The number of threads handling network requests
-num.network.threads=2
- 
-# The number of threads doing disk I/O
-num.io.threads=2
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=1048576
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=1048576
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-############################# Log Basics #############################
-
-# The directory under which to store log files
-log.dir=/tmp/kafka_server_logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=5
-
-# Overrides for the default given by num.partitions on a per-topic basis
-#topic.partition.count.map=topic1:3, topic2:4
-
-############################# Log Flush Policy #############################
-
-# The following configurations control the flush of data to disk. This is the most
-# important performance knob in kafka.
-# There are a few important trade-offs here:
-#    1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
-#    2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
-#    3. Throughput: The flush is generally the most expensive operation. 
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-log.flush.interval.ms=1000
-
-# Per-topic overrides for log.flush.interval.ms
-#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
-
-# The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
-log.flush.scheduler.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.bytes.
-#log.retention.bytes=1073741824
-log.retention.bytes=-1
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-#log.segment.size=536870912
-log.segment.bytes=102400
-
-# The interval at which log segments are checked to see if they can be deleted according 
-# to the retention policies
-log.cleanup.interval.mins=1
-
-############################# Zookeeper #############################
-
-# Enable connecting to zookeeper
-enable.zookeeper=true
-
-# Zk connection string (see zk docs for details).
-# This is a comma-separated list of host:port pairs, each corresponding to a zk
-# server, e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the URLs to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2181
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=1000000
-
-monitoring.period.secs=1
-message.max.bytes=1000000
-queued.max.requests=500
-log.roll.hours=168
-log.index.size.max.bytes=10485760
-log.index.interval.bytes=4096
-auto.create.topics.enable=true
-controller.socket.timeout.ms=30000
-default.replication.factor=1
-replica.lag.time.max.ms=10000
-replica.lag.max.messages=4000
-replica.socket.timeout.ms=30000
-replica.socket.receive.buffer.bytes=65536
-replica.fetch.max.bytes=1048576
-replica.fetch.wait.max.ms=500
-replica.fetch.min.bytes=1
-num.replica.fetchers=1
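Compared with the offset-management config, this base file keeps default.replication.factor=1 (the test case JSONs typically raise it per broker, as the offset-management case above does) and uses 100 KB segments so leaders roll and followers fetch frequently. While a replication test case is running, a handy spot check is to ask for under-replicated partitions (a sketch using the standard tool; not part of the suite itself):

    bin/kafka-topics.sh --zookeeper localhost:2181 \
        --describe --under-replicated-partitions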

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/replication_testsuite/config/zookeeper.properties
----------------------------------------------------------------------
diff --git a/system_test/replication_testsuite/config/zookeeper.properties b/system_test/replication_testsuite/config/zookeeper.properties
deleted file mode 100644
index 74cbf90..0000000
--- a/system_test/replication_testsuite/config/zookeeper.properties
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# the directory where the snapshot is stored.
-dataDir=/tmp/zookeeper
-# the port at which the clients will connect
-clientPort=2181
-# disable the per-ip limit on the number of connections since this is a non-production config
-maxClientCnxns=0

