kafka-commits mailing list archives

From gwens...@apache.org
Subject [12/13] kafka git commit: KAFKA-2715: Removed previous system_test folder
Date Fri, 30 Oct 2015 22:13:41 GMT
http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/server_target3.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/server_target3.properties b/system_test/broker_failure/config/server_target3.properties
deleted file mode 100644
index 0d3a9ae..0000000
--- a/system_test/broker_failure/config/server_target3.properties
+++ /dev/null
@@ -1,79 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-# the id of the broker
-broker.id=3
-
-# hostname of broker. If not set, the value returned from
-# getLocalHost is used.  If there are multiple interfaces getLocalHost
-# may not be what you want.
-# host.name=
-
-# number of logical partitions on this broker
-num.partitions=1
-
-# the port the socket server runs on
-port=9083
-
-# the number of processor threads the socket server uses. Defaults to the number of cores on the machine
-num.threads=8
-
-# the directory in which to store log files
-log.dir=/tmp/kafka-target3-logs
-
-# the send buffer used by the socket server 
-socket.send.buffer.bytes=1048576
-
-# the receive buffer used by the socket server
-socket.receive.buffer.bytes=1048576
-
-# the maximum size of a log segment
-log.segment.bytes=10000000
-
-# the interval between running cleanup on the logs
-log.cleanup.interval.mins=1
-
-# the minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-#the number of messages to accept without flushing the log to disk
-log.flush.interval.messages=600
-
-#set the following properties to use zookeeper
-
-# enable connecting to zookeeper
-enable.zookeeper=true
-
-# zk connection string
-# comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
-zk.connect=localhost:2182
-
-# timeout in ms for connecting to zookeeper
-zk.connection.timeout.ms=1000000
-
-# time based topic flush intervals in ms
-#log.flush.intervals.ms.per.topic=topic:1000
-
-# default time based flush interval in ms
-log.flush.interval.ms=1000
-
-# time based topic flusher interval in ms
-log.flush.scheduler.interval.ms=1000
-
-# topic partition count map
-# topic.partition.count.map=topic1:3, topic2:4
-
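
The config above is a plain key=value properties file with "#" comments, so a
test harness can read it with a few lines of Python. A minimal sketch, with a
hypothetical load_props helper that is not part of the removed suite:

    def load_props(path):
        """Parse a Kafka-style properties file into a dict (sketch)."""
        props = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue  # skip blank lines and comments
                key, _, value = line.partition("=")
                props[key.strip()] = value.strip()
        return props

    # props = load_props("system_test/broker_failure/config/server_target3.properties")
    # props["broker.id"] == "3", props["port"] == "9083"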

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/whitelisttest.consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/whitelisttest.consumer.properties b/system_test/broker_failure/config/whitelisttest.consumer.properties
deleted file mode 100644
index dd91bd3..0000000
--- a/system_test/broker_failure/config/whitelisttest.consumer.properties
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.consumer.ConsumerConfig for more details
-
-# zk connection string
-# comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
-zk.connect=localhost:2181
-
-# timeout in ms for connecting to zookeeper
-zk.connection.timeout.ms=1000000
-
-#consumer group id
-group.id=group1
-
-mirror.topics.whitelist=test_1,test_2
-auto.offset.reset=smallest

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/zookeeper_source.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/zookeeper_source.properties b/system_test/broker_failure/config/zookeeper_source.properties
deleted file mode 100644
index 76b02a2..0000000
--- a/system_test/broker_failure/config/zookeeper_source.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# the directory where the snapshot is stored.
-dataDir=/tmp/zookeeper_source
-# the port at which the clients will connect
-clientPort=2181

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/broker_failure/config/zookeeper_target.properties
----------------------------------------------------------------------
diff --git a/system_test/broker_failure/config/zookeeper_target.properties b/system_test/broker_failure/config/zookeeper_target.properties
deleted file mode 100644
index 28561d9..0000000
--- a/system_test/broker_failure/config/zookeeper_target.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# the directory where the snapshot is stored.
-dataDir=/tmp/zookeeper_target
-# the port at which the clients will connect
-clientPort=2182

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/cluster_config.json
----------------------------------------------------------------------
diff --git a/system_test/cluster_config.json b/system_test/cluster_config.json
deleted file mode 100644
index 8ed896b..0000000
--- a/system_test/cluster_config.json
+++ /dev/null
@@ -1,58 +0,0 @@
-{
-    "cluster_config": [
-        {
-            "entity_id": "0",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name": "source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9990"
-        },
-        {
-            "entity_id": "1",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name": "source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9991"
-        },
-        {
-            "entity_id": "2",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name": "source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9992"
-        },
-        {
-            "entity_id": "3",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name": "source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9993"
-        },
-        {
-            "entity_id": "4",
-            "hostname": "localhost",
-            "role": "producer_performance",
-            "cluster_name": "source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9997"
-        },
-        {
-            "entity_id": "5",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name": "source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9998"
-        }
-    ]
-}
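
The test driver resolves entities in this file by role; the removed
mirror_maker_test.py below does the same through
system_test_utils.get_data_from_list_of_dicts(clusterConfigList, "role",
"mirror_maker", "entity_id"). A minimal sketch of that lookup, assuming only
the JSON layout shown above:

    import json

    def entity_ids_for_role(path, role):
        """Return the entity_ids of all cluster_config entries with the given role."""
        with open(path) as f:
            entries = json.load(f)["cluster_config"]
        return [e["entity_id"] for e in entries if e["role"] == role]

    # entity_ids_for_role("system_test/cluster_config.json", "broker")
    # -> ["1", "2", "3"]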

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/common/util.sh
----------------------------------------------------------------------
diff --git a/system_test/common/util.sh b/system_test/common/util.sh
deleted file mode 100644
index e3d10c6..0000000
--- a/system_test/common/util.sh
+++ /dev/null
@@ -1,182 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# =========================================
-# info - print messages with timestamp
-# =========================================
-info() {
-    echo -e "$(date +"%Y-%m-%d %H:%M:%S") $*"
-}
-
-# =========================================
-# info_no_newline - print messages with
-# timestamp without newline
-# =========================================
-info_no_newline() {
-    echo -e -n "$(date +"%Y-%m-%d %H:%M:%S") $*"
-}
-
-# =========================================
-# get_random_range - return a random number
-#     between the lower & upper bounds
-# usage:
-#     random_no=$(get_random_range $lower $upper)
-# =========================================
-get_random_range() {
-    lo=$1
-    up=$2
-    range=$(($up - $lo + 1))
-
-    echo $(($(($RANDOM % range)) + $lo))
-}
-
-# =========================================
-# kill_child_processes - terminate a
-# process and its child processes
-# =========================================
-kill_child_processes() {
-    isTopmost=$1
-    curPid=$2
-    childPids=$(ps a -o pid= -o ppid= | grep "${curPid}$" | awk '{print $1;}')
-
-    for childPid in $childPids
-    do
-        kill_child_processes 0 $childPid
-    done
-    if [ $isTopmost -eq 0 ]; then
-        kill -15 $curPid 2> /dev/null
-    fi
-}
-
-# =========================================================================
-# generate_kafka_properties_files -
-# 1. it takes the following arguments and generates server_{1..n}.properties
-#    for the total no. of kafka brokers as specified in "num_server"; the
-#    resulting properties files will be located at: 
-#      <kafka home>/system_test/<test suite>/config
-# 2. the default values in the generated properties files will be copied
-#    from the settings in config/server.properties while the brokerid and
-#    server port will be incremented accordingly
-# 3. to generate properties files with non-default values such as 
-#    "socket.send.buffer.bytes=2097152", simply add the property with new value
-#    to the array variable kafka_properties_to_replace as shown below
-# =========================================================================
-generate_kafka_properties_files() {
-
-    test_suite_full_path=$1      # e.g. <kafka home>/system_test/single_host_multi_brokers
-    num_server=$2                # total no. of brokers in the cluster
-    brokerid_to_start=$3         # this should be '0' in most cases
-    kafka_port_to_start=$4       # if 9091 is used, the rest would be 9092, 9093, ...
-
-    this_config_dir=${test_suite_full_path}/config
-
-    # info "test suite full path : $test_suite_full_path"
-    # info "broker id to start   : $brokerid_to_start"
-    # info "kafka port to start  : $kafka_port_to_start"
-    # info "num of server        : $num_server"
-    # info "config dir           : $this_config_dir"
-
-    # =============================================
-    # array to keep kafka properties statements
-    # from the file 'server.properties' that need
-    # to be changed from their default values
-    # =============================================
-    # kafka_properties_to_replace     # DO NOT uncomment this line !!
-
-    # =============================================
-    # Uncomment the following kafka properties
-    # array element as needed to change the default
-    # values. Other kafka properties can be added
-    # in a similar fashion.
-    # =============================================
-    # kafka_properties_to_replace[1]="socket.send.buffer.bytes=2097152"
-    # kafka_properties_to_replace[2]="socket.receive.buffer.bytes=2097152"
-    # kafka_properties_to_replace[3]="num.partitions=3"
-    # kafka_properties_to_replace[4]="socket.request.max.bytes=10485760"
-
-    server_properties=`cat ${this_config_dir}/server.properties`
-
-    for ((i=1; i<=$num_server; i++))
-    do
-        # ======================
-        # update misc properties
-        # ======================
-        for ((j=1; j<=${#kafka_properties_to_replace[@]}; j++))
-        do
-            keyword_to_replace=`echo ${kafka_properties_to_replace[${j}]} | awk -F '=' '{print $1}'`
-            string_to_be_replaced=`echo "$server_properties" | grep $keyword_to_replace` 
-            # info "string to be replaced : [$string_to_be_replaced]"
-            # info "string to replace     : [${kafka_properties_to_replace[${j}]}]"
-
-            echo "${server_properties}" | \
-              sed -e "s/${string_to_be_replaced}/${kafka_properties_to_replace[${j}]}/g" \
-              >${this_config_dir}/server_${i}.properties
-
-            server_properties=`cat ${this_config_dir}/server_${i}.properties`
-        done
-
-        # ======================
-        # update brokerid
-        # ======================
-        keyword_to_replace="brokerid="
-        string_to_be_replaced=`echo "$server_properties" | grep $keyword_to_replace`
-        brokerid_idx=$(( $brokerid_to_start + $i))
-        string_to_replace="${keyword_to_replace}${brokerid_idx}"
-        # info "string to be replaced : [${string_to_be_replaced}]"
-        # info "string to replace     : [${string_to_replace}]"
-
-        echo "${server_properties}" | \
-          sed -e "s/${string_to_be_replaced}/${string_to_replace}/g" \
-          >${this_config_dir}/server_${i}.properties
-
-        server_properties=`cat ${this_config_dir}/server_${i}.properties`
-
-        # ======================
-        # update kafka_port
-        # ======================
-        keyword_to_replace="port="
-        string_to_be_replaced=`echo "$server_properties" | grep $keyword_to_replace`
-        port_idx=$(( $kafka_port_to_start + $i - 1 ))
-        string_to_replace="${keyword_to_replace}${port_idx}"
-        # info "string to be replaced : [${string_to_be_replaced}]"
-        # info "string to replace     : [${string_to_replace}]"
-
-        echo "${server_properties}" | \
-          sed -e "s/${string_to_be_replaced}/${string_to_replace}/g" \
-          >${this_config_dir}/server_${i}.properties
-
-        server_properties=`cat ${this_config_dir}/server_${i}.properties`
-
-        # ======================
-        # update kafka_log dir
-        # ======================
-        keyword_to_replace="log.dir="
-        string_to_be_replaced=`echo "$server_properties" | grep $keyword_to_replace`
-        string_to_be_replaced=${string_to_be_replaced//\//\\\/}
-        string_to_replace="${keyword_to_replace}\/tmp\/kafka_server_${i}_logs"
-        # info "string to be replaced : [${string_to_be_replaced}]"
-        # info "string to replace     : [${string_to_replace}]"
-
-        echo "${server_properties}" | \
-          sed -e "s/${string_to_be_replaced}/${string_to_replace}/g" \
-          >${this_config_dir}/server_${i}.properties
-
-        server_properties=`cat ${this_config_dir}/server_${i}.properties`
-
-     done
-}
-
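
To recap the helper above: generate_kafka_properties_files clones
config/server.properties once per broker and rewrites brokerid, port and
log.dir with sed. A rough Python equivalent of that override step, as an
illustrative sketch only (not code from the suite):

    def write_broker_props(base_props, config_dir, num_server,
                           brokerid_to_start=0, kafka_port_to_start=9091):
        """Emit server_1.properties .. server_N.properties with per-broker overrides."""
        for i in range(1, num_server + 1):
            props = dict(base_props)
            props["brokerid"] = str(brokerid_to_start + i)
            props["port"] = str(kafka_port_to_start + i - 1)
            props["log.dir"] = "/tmp/kafka_server_%d_logs" % i
            with open("%s/server_%d.properties" % (config_dir, i), "w") as f:
                for key, value in props.items():
                    f.write("%s=%s\n" % (key, value))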

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/logging.conf
----------------------------------------------------------------------
diff --git a/system_test/logging.conf b/system_test/logging.conf
deleted file mode 100644
index e9e9213..0000000
--- a/system_test/logging.conf
+++ /dev/null
@@ -1,56 +0,0 @@
-# ==============================================
-# declaration - must have a 'root' logger
-# ==============================================
-[loggers]
-keys=root,namedLogger,anonymousLogger
-
-[handlers]
-keys=namedConsoleHandler,anonymousConsoleHandler
-
-[formatters]
-keys=namedFormatter,anonymousFormatter
-
-# ==============================================
-# loggers section
-# ==============================================
-[logger_root]
-level=NOTSET
-handlers=
-
-[logger_namedLogger]
-level=DEBUG
-handlers=namedConsoleHandler
-qualname=namedLogger
-propagate=0
-
-[logger_anonymousLogger]
-level=DEBUG
-handlers=anonymousConsoleHandler
-qualname=anonymousLogger
-propagate=0
-
-# ==============================================
-# handlers section
-# ** Change 'level' to INFO/DEBUG in this section
-# ==============================================
-[handler_namedConsoleHandler]
-class=StreamHandler
-level=INFO
-formatter=namedFormatter
-args=[]
-
-[handler_anonymousConsoleHandler]
-class=StreamHandler
-level=INFO
-formatter=anonymousFormatter
-args=[]
-
-# ==============================================
-# formatters section
-# ==============================================
-[formatter_namedFormatter]
-format=%(asctime)s - %(levelname)s - %(message)s %(name_of_class)s
-
-[formatter_anonymousFormatter]
-format=%(asctime)s - %(levelname)s - %(message)s
-
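
This is a standard Python logging.config file. Note that namedFormatter
references %(name_of_class)s, which is not a built-in LogRecord attribute; it
has to be supplied through a logger's "extra" argument, as the test classes do
(see the d = {'name_of_class': ...} dict in mirror_maker_test.py below). A
minimal sketch of consuming this config:

    import logging
    import logging.config

    logging.config.fileConfig("system_test/logging.conf")

    named = logging.getLogger("namedLogger")
    anon = logging.getLogger("anonymousLogger")

    # namedFormatter expects name_of_class, so pass it via extra
    named.info("starting brokers", extra={"name_of_class": "MirrorMakerTest"})
    anon.info("sleeping for 5s")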

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/metrics.json
----------------------------------------------------------------------
diff --git a/system_test/metrics.json b/system_test/metrics.json
deleted file mode 100644
index 30dabe5..0000000
--- a/system_test/metrics.json
+++ /dev/null
@@ -1,174 +0,0 @@
-{
-    "dashboards": [
-        {
-            "role": "broker",
-            "graphs": [
-               { 
-                  "graph_name": "Produce-Request-Rate",
-                  "y_label": "requests-per-sec",
-                  "bean_name": "kafka.network:type=RequestMetrics,name=Produce-RequestsPerSec",
-                  "attributes": "OneMinuteRate"
-               },
-               { 
-                  "graph_name": "Produce-Request-Time",
-                  "y_label": "ms,ms",
-                  "bean_name": "kafka.network:type=RequestMetrics,name=Produce-TotalTimeMs",
-                  "attributes": "Mean,99thPercentile"
-               },
-               { 
-                  "graph_name": "Produce-Request-Remote-Time",
-                  "y_label": "ms,ms",
-                  "bean_name": "kafka.network:type=RequestMetrics,name=Produce-RemoteTimeMs",
-                  "attributes": "Mean,99thPercentile"
-               },
-               { 
-                  "graph_name": "Fetch-Consumer-Request-Rate",
-                  "y_label": "requests-per-sec",
-                  "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Consumer-RequestsPerSec",
-                  "attributes": "OneMinuteRate"
-               },
-               { 
-                  "graph_name": "Fetch-Consumer-Request-Time",
-                  "y_label": "ms,ms",
-                  "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Consumer-TotalTimeMs",
-                  "attributes": "Mean,99thPercentile"
-               },
-               { 
-                  "graph_name": "Fetch-Consumer-Request-Remote-Time",
-                  "y_label": "ms,ms",
-                  "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Consumer-RemoteTimeMs",
-                  "attributes": "Mean,99thPercentile"
-               },
-               { 
-                  "graph_name": "Fetch-Follower-Request-Rate",
-                  "y_label": "requests-per-sec",
-                  "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Follower-RequestsPerSec",
-                  "attributes": "OneMinuteRate"
-               },
-               { 
-                  "graph_name": "Fetch-Follower-Request-Time",
-                  "y_label": "ms,ms",
-                  "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Follower-TotalTimeMs",
-                  "attributes": "Mean,99thPercentile"
-               },
-               { 
-                  "graph_name": "Fetch-Follower-Request-Remote-Time",
-                  "y_label": "ms,ms",
-                  "bean_name": "kafka.network:type=RequestMetrics,name=Fetch-Follower-RemoteTimeMs",
-                  "attributes": "Mean,99thPercentile"
-               },
-               { 
-                  "graph_name": "ProducePurgatoryExpirationRate",
-                  "y_label": "expirations-per-sec",
-                  "bean_name": "kafka.server:type=DelayedProducerRequestMetrics,name=AllExpiresPerSecond",
-                  "attributes": "OneMinuteRate"
-               },
-               { 
-                  "graph_name": "FetchConsumerPurgatoryExpirationRate",
-                  "y_label": "expirations-per-sec",
-                  "bean_name": "kafka.server:type=DelayedFetchRequestMetrics,name=ConsumerExpiresPerSecond",
-                  "attributes": "OneMinuteRate"
-               },
-               {
-                  "graph_name": "FetchFollowerPurgatoryExpirationRate",
-                  "y_label": "expirations-per-sec",
-                  "bean_name": "kafka.server:type=DelayedFetchRequestMetrics,name=FollowerExpiresPerSecond",
-                  "attributes": "OneMinuteRate"
-               },
-               {
-                  "graph_name": "ProducePurgatoryQueueSize",
-                  "y_label": "size",
-                  "bean_name": "kafka.server:type=ProducerRequestPurgatory,name=NumDelayedOperations",
-                  "attributes": "Value"
-               },
-               {
-                  "graph_name": "FetchPurgatoryQueueSize",
-                  "y_label": "size",
-                  "bean_name": "kafka.server:type=FetchRequestPurgatory,name=NumDelayedOperations",
-                  "attributes": "Value"
-               },
-               {
-                  "graph_name": "ControllerLeaderElectionRateAndTime",
-                  "y_label": "elections-per-sec,ms,ms",
-                  "bean_name": "kafka.controller:type=ControllerStat,name=LeaderElectionRateAndTimeMs",
-                  "attributes": "OneMinuteRate,Mean,99thPercentile"
-               },
-               {
-                  "graph_name": "LogFlushRateAndTime",
-                  "y_label": "flushes-per-sec,ms,ms",
-                  "bean_name": "kafka.log:type=LogFlushStats,name=LogFlushRateAndTimeMs",
-                  "attributes": "OneMinuteRate,Mean,99thPercentile"
-               },
-               {
-                  "graph_name": "AllBytesOutRate",
-                  "y_label": "bytes-per-sec",
-                  "bean_name": "kafka.server:type=BrokerTopicMetrics,name=AllTopicsBytesOutPerSec",
-                  "attributes": "OneMinuteRate"
-               },
-               {
-                  "graph_name": "AllBytesInRate",
-                  "y_label": "bytes-per-sec",
-                  "bean_name": "kafka.server:type=BrokerTopicMetrics,name=AllTopicsBytesInPerSec",
-                  "attributes": "OneMinuteRate"
-               },
-               {
-                  "graph_name": "AllMessagesInRate",
-                  "y_label": "messages-per-sec",
-                  "bean_name": "kafka.server:type=BrokerTopicMetrics,name=AllTopicsMessagesInPerSec",
-                  "attributes": "OneMinuteRate"
-               }
-             ]
-       },
-        {
-            "role": "producer_performance",
-            "graphs": [
-               {
-                  "graph_name": "ProduceRequestRateAndTime",
-                  "y_label": "requests-per-sec,ms,ms",
-                  "bean_name": "kafka.producer:type=ProducerRequestStat,name=ProduceRequestRateAndTimeMs",
-                  "attributes": "OneMinuteRate,Mean,99thPercentile"
-               },
-               {
-                  "graph_name": "ProduceRequestSize",
-                  "y_label": "bytes,bytes",
-                  "bean_name": "kafka.producer:type=ProducerRequestStat,name=ProducerRequestSize",
-                  "attributes": "Mean,99thPercentile"
-               }
-             ]
-       },
-       {
-            "role": "console_consumer",
-            "graphs": [
-               {
-                  "graph_name": "FetchRequestRateAndTime",
-                  "y_label": "requests-per-sec,ms,ms",
-                  "bean_name": "kafka.consumer:type=FetchRequestAndResponseStat,name=FetchRequestRateAndTimeMs",
-                  "attributes": "OneMinuteRate,Mean,99thPercentile"
-               },
-               {
-                  "graph_name": "FetchResponseSize",
-                  "y_label": "bytes,bytes",
-                  "bean_name": "kafka.consumer:type=FetchRequestAndResponseStat,name=FetchResponseSize",
-                  "attributes": "Mean,99thPercentile"
-               },
-               {
-                  "graph_name": "ConsumedMessageRate",
-                  "y_label": "messages-per-sec",
-                  "bean_name": "kafka.consumer:type=ConsumerTopicStat,name=AllTopicsMessagesPerSec",
-                  "attributes": "OneMinuteRate"
-               }
-             ]
-       },
-        {
-            "role": "zookeeper",
-            "graphs": [
-               {
-                  "graph_name": "ZookeeperServerStats",
-                  "y_label": "zookeeper-latency-ms",
-                  "bean_name": "org.apache.ZooKeeperService:name0=StandaloneServer_port-1",
-                  "attributes": "AvgRequestLatency"
-               }
-             ]
-       }
-    ]
-}
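
Each dashboard entry above ties an entity role to the JMX beans and attributes
to graph. A small sketch of walking that structure (illustrative only; the
suite's own consumer of this file was the metrics module imported by the test
scripts):

    import json

    with open("system_test/metrics.json") as f:
        dashboards = json.load(f)["dashboards"]

    for dashboard in dashboards:
        print("role: %s" % dashboard["role"])
        for graph in dashboard["graphs"]:
            # one bean may contribute several plotted attributes
            print("  %s <- %s [%s]"
                  % (graph["graph_name"], graph["bean_name"], graph["attributes"]))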

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/__init__.py
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/__init__.py b/system_test/mirror_maker_testsuite/__init__.py
deleted file mode 100644
index 8d1c8b6..0000000
--- a/system_test/mirror_maker_testsuite/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
- 

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/cluster_config.json
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/cluster_config.json b/system_test/mirror_maker_testsuite/cluster_config.json
deleted file mode 100644
index 5b908ff..0000000
--- a/system_test/mirror_maker_testsuite/cluster_config.json
+++ /dev/null
@@ -1,136 +0,0 @@
-{
-    "cluster_config": [
-        {
-            "entity_id": "0",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9100"
-        },
-        {
-            "entity_id": "1",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9101"
-        },
-
-        {
-            "entity_id": "2",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9102"
-        },
-        {
-            "entity_id": "3",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9103"
-        },
-
-        {
-            "entity_id": "4",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9104"
-        },
-        {
-            "entity_id": "5",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9105"
-        },
-        {
-            "entity_id": "6",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9106"
-        },
-
-        {
-            "entity_id": "7",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9107"
-        },
-        {
-            "entity_id": "8",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9108"
-        },
-        {
-            "entity_id": "9",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9109"
-        },
-
-        {
-            "entity_id": "10",
-            "hostname": "localhost",
-            "role": "producer_performance",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9110"
-        },
-        {
-            "entity_id": "11",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9111"
-        },
-
-        {
-            "entity_id": "12",
-            "hostname": "localhost",
-            "role": "mirror_maker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9112"
-        },
-
-        {
-            "entity_id": "13",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9113"
-        }
-    ]
-}

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/config/console_consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/console_consumer.properties b/system_test/mirror_maker_testsuite/config/console_consumer.properties
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/config/consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/consumer.properties b/system_test/mirror_maker_testsuite/config/consumer.properties
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/config/log4j.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/log4j.properties b/system_test/mirror_maker_testsuite/config/log4j.properties
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/config/mirror_consumer.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/mirror_consumer.properties b/system_test/mirror_maker_testsuite/config/mirror_consumer.properties
deleted file mode 100644
index e90634a..0000000
--- a/system_test/mirror_maker_testsuite/config/mirror_consumer.properties
+++ /dev/null
@@ -1,12 +0,0 @@
-zookeeper.connect=localhost:2108
-zookeeper.connection.timeout.ms=1000000
-group.id=mm_regtest_grp
-auto.commit.interval.ms=120000
-auto.offset.reset=smallest
-#fetch.message.max.bytes=1048576
-#rebalance.max.retries=4
-#rebalance.backoff.ms=2000
-socket.receive.buffer.bytes=1048576
-fetch.message.max.bytes=1048576
-zookeeper.sync.time.ms=15000
-shallow.iterator.enable=false

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/config/mirror_producer.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/mirror_producer.properties b/system_test/mirror_maker_testsuite/config/mirror_producer.properties
deleted file mode 100644
index f94bebd..0000000
--- a/system_test/mirror_maker_testsuite/config/mirror_producer.properties
+++ /dev/null
@@ -1,12 +0,0 @@
-# old producer
-metadata.broker.list=localhost:9094
-compression.codec=0
-request.retries=3
-request.required.acks=1
-
-# new producer
-block.on.buffer.full=true
-bootstrap.servers=localhost:9094
-compression.type=none
-retries=3
-acks=1

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/config/producer.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/producer.properties b/system_test/mirror_maker_testsuite/config/producer.properties
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/config/producer_performance.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/producer_performance.properties b/system_test/mirror_maker_testsuite/config/producer_performance.properties
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/config/server.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/server.properties b/system_test/mirror_maker_testsuite/config/server.properties
deleted file mode 100644
index 9717cd6..0000000
--- a/system_test/mirror_maker_testsuite/config/server.properties
+++ /dev/null
@@ -1,139 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=0
-
-# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
-# from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
-# may not be what you want.
-#host.name=
-
-
-############################# Socket Server Settings #############################
-
-# The port the socket server listens on
-port=9091
-
-# The number of threads handling network requests
-num.network.threads=2
- 
-# The number of threads doing disk I/O
-num.io.threads=2
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=1048576
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=1048576
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-############################# Log Basics #############################
-
-# The directory under which to store log files
-log.dir=/tmp/kafka_server_logs
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=5
-
-# Overrides for the default given by num.partitions on a per-topic basis
-#topic.partition.count.map=topic1:3, topic2:4
-
-############################# Log Flush Policy #############################
-
-# The following configurations control the flush of data to disk. This is the most
-# important performance knob in kafka.
-# There are a few important trade-offs here:
-#    1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
-#    2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
-#    3. Throughput: The flush is generally the most expensive operation. 
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-log.flush.interval.ms=1000
-
-# Per-topic overrides for log.flush.interval.ms
-#log.flush.intervals.ms.per.topic=topic1:1000, topic2:3000
-
-# The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
-log.flush.scheduler.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.bytes.
-#log.retention.bytes=1073741824
-log.retention.bytes=-1
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-#log.segment.size=536870912
-log.segment.bytes=102400
-
-# The interval at which log segments are checked to see if they can be deleted according 
-# to the retention policies
-log.cleanup.interval.mins=1
-
-############################# Zookeeper #############################
-
-# Enable connecting to zookeeper
-enable.zookeeper=true
-
-# Zk connection string (see zk docs for details).
-# This is a comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:2181
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.connection.timeout.ms=1000000
-
-monitoring.period.secs=1
-message.max.bytes=1000000
-queued.max.requests=500
-log.roll.hours=168
-log.index.size.max.bytes=10485760
-log.index.interval.bytes=4096
-auto.create.topics.enable=true
-controller.socket.timeout.ms=30000
-default.replication.factor=1
-replica.lag.time.max.ms=10000
-replica.lag.max.messages=4000
-replica.socket.timeout.ms=30000
-replica.socket.receive.buffer.bytes=65536
-replica.fetch.max.bytes=1048576
-replica.fetch.wait.max.ms=500
-replica.fetch.min.bytes=4096
-num.replica.fetchers=1
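
On the flush policy settings above: the broker flushes a log when either the
unflushed-message count or the age of the oldest unflushed message crosses its
threshold, checked on the scheduler interval. A toy sketch of that decision
under those assumptions (not broker code):

    def should_flush(unflushed_msgs, oldest_unflushed_age_ms,
                     interval_messages=10000, interval_ms=1000):
        """Flush when either the count or the time threshold is crossed."""
        return (unflushed_msgs >= interval_messages
                or oldest_unflushed_age_ms >= interval_ms)

    # evaluated every log.flush.scheduler.interval.ms (1000 ms in this config)
    print(should_flush(10500, 200))   # True: message threshold crossed
    print(should_flush(50, 1500))     # True: time threshold crossed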

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/config/zookeeper.properties
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/config/zookeeper.properties b/system_test/mirror_maker_testsuite/config/zookeeper.properties
deleted file mode 100644
index 5474a72..0000000
--- a/system_test/mirror_maker_testsuite/config/zookeeper.properties
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-# 
-#    http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# the directory where the snapshot is stored.
-dataDir=/tmp/zookeeper
-# the port at which the clients will connect
-clientPort=2181
-# disable the per-ip limit on the number of connections since this is a non-production config
-maxClientCnxns=0
-syncLimit=5
-initLimit=10
-tickTime=2000
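
For quick reference on the ZooKeeper timing knobs above: tickTime is the base
unit in milliseconds, so with tickTime=2000 the limits work out as follows
(plain arithmetic, not code from the suite):

    tick_time_ms = 2000
    init_limit_ticks = 10   # initLimit: time for a follower to connect and sync
    sync_limit_ticks = 5    # syncLimit: how far a follower may lag the leader

    print(init_limit_ticks * tick_time_ms)   # 20000 ms = 20 s
    print(sync_limit_ticks * tick_time_ms)   # 10000 ms = 10 s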

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/mirror_maker_test.py
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/mirror_maker_test.py b/system_test/mirror_maker_testsuite/mirror_maker_test.py
deleted file mode 100644
index 48f9ff6..0000000
--- a/system_test/mirror_maker_testsuite/mirror_maker_test.py
+++ /dev/null
@@ -1,324 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#!/usr/bin/env python
-
-# ===================================
-# mirror_maker_test.py
-# ===================================
-
-import inspect
-import logging
-import os
-import signal
-import subprocess
-import sys
-import time
-import traceback
-
-from   system_test_env    import SystemTestEnv
-sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR)
-
-from   setup_utils        import SetupUtils
-from   replication_utils  import ReplicationUtils
-import system_test_utils
-from   testcase_env       import TestcaseEnv
-
-# product specific: Kafka
-import kafka_system_test_utils
-import metrics
-
-class MirrorMakerTest(ReplicationUtils, SetupUtils):
-
-    testModuleAbsPathName = os.path.realpath(__file__)
-    testSuiteAbsPathName  = os.path.abspath(os.path.dirname(testModuleAbsPathName))
-
-    def __init__(self, systemTestEnv):
-
-        # SystemTestEnv - provides cluster level environment settings
-        #     such as entity_id, hostname, kafka_home, java_home which
-        #     are available in a list of dictionary named 
-        #     "clusterEntityConfigDictList"
-        self.systemTestEnv = systemTestEnv
-
-        super(MirrorMakerTest, self).__init__(self)
-
-        # dict to pass user-defined attributes to logger argument: "extra"
-        d = {'name_of_class': self.__class__.__name__}
-
-    def signal_handler(self, signal, frame):
-        self.log_message("Interrupt detected - User pressed Ctrl+c")
-
-        # perform the necessary cleanup here when user presses Ctrl+c and it may be product specific
-        self.log_message("stopping all entities - please wait ...")
-        kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
-        sys.exit(1) 
-
-    def runTest(self):
-
-        # ======================================================================
-        # get all testcase directories under this testsuite
-        # ======================================================================
-        testCasePathNameList = system_test_utils.get_dir_paths_with_prefix(
-            self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX)
-        testCasePathNameList.sort()
-
-        replicationUtils = ReplicationUtils(self)
-
-        # =============================================================
-        # launch each testcase one by one: testcase_1, testcase_2, ...
-        # =============================================================
-        for testCasePathName in testCasePathNameList:
-   
-            skipThisTestCase = False
-
-            try: 
-                # ======================================================================
-                # A new instance of TestcaseEnv to keep track of this testcase's env vars
-                # and initialize some env vars as testCasePathName is available now
-                # ======================================================================
-                self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self)
-                self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName
-                self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName)
-                self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"]
-
-                # ======================================================================
-                # SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json
-                # ======================================================================
-                testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"]
-
-                if self.systemTestEnv.printTestDescriptionsOnly:
-                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
-                    continue
-                elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName):
-                    self.log_message("Skipping : " + testcaseDirName)
-                    skipThisTestCase = True
-                    continue
-                else:
-                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
-                    system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName)
-
-                # ============================================================================== #
-                # ============================================================================== #
-                #                   Product Specific Testing Code Starts Here:                   #
-                # ============================================================================== #
-                # ============================================================================== #
-    
-                # initialize self.testcaseEnv with user-defined environment variables (product specific)
-                self.testcaseEnv.userDefinedEnvVarDict["zkConnectStr"] = ""
-                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"]    = False
-                self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False
-
-                # initialize signal handler
-                signal.signal(signal.SIGINT, self.signal_handler)
-
-                # TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file:
-                #   system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
-                self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data(
-                    self.testcaseEnv.testcasePropJsonPathName)
-                 
-                # clean up data directories specified in zookeeper.properties and kafka_server_<n>.properties
-                kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv)
-
-                # create "LOCAL" log directories for metrics, dashboards for each entity under this testcase
-                # for collecting logs from remote machines
-                kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv)
-
-                # TestcaseEnv - initialize producer & consumer config / log file pathnames
-                kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv)
-
-                # generate remote hosts log/config dirs if not exist
-                kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv)
-    
-                # generate properties files for zookeeper, kafka, producer, consumer and mirror-maker:
-                # 1. copy system_test/<suite_name>_testsuite/config/*.properties to 
-                #    system_test/<suite_name>_testsuite/testcase_<n>/config/
-                # 2. update all properties files in system_test/<suite_name>_testsuite/testcase_<n>/config
-                #    by overriding the settings specified in:
-                #    system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
-                kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName,
-                    self.testcaseEnv, self.systemTestEnv)
-               
-                # =============================================
-                # preparing all entities to start the test
-                # =============================================
-                self.log_message("starting zookeepers")
-                kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv)
-                self.anonLogger.info("sleeping for 2s")
-                time.sleep(2)
-
-                self.log_message("starting brokers")
-                kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv)
-                self.anonLogger.info("sleeping for 5s")
-                time.sleep(5)
-
-                self.log_message("creating topics")
-                kafka_system_test_utils.create_topic_for_producer_performance(self.systemTestEnv, self.testcaseEnv)
-                self.anonLogger.info("sleeping for 5s")
-                time.sleep(5)
-
-                
-                self.log_message("starting mirror makers")
-                kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv)
-                self.anonLogger.info("sleeping for 10s")
-                time.sleep(10)
-
-                
-                # =============================================
-                # starting producer 
-                # =============================================
-                self.log_message("starting producer in the background")
-                kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, False)
-                msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"]
-                self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages")
-                time.sleep(int(msgProducingFreeTimeSec))
-
-                # =============================================
-                # A while-loop to bounce mirror maker as specified
-                # by "num_iteration" in testcase_<n>_properties.json
-                # =============================================
-                i = 1
-                numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
-                bouncedEntityDownTimeSec = 15
-                try:
-                    bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"])
-                except:
-                    pass
-
-                while i <= numIterations:
-
-                    self.log_message("Iteration " + str(i) + " of " + str(numIterations))
-
-                    # =============================================
-                    # Bounce Mirror Maker if specified in testcase config
-                    # =============================================
-                    bounceMirrorMaker = self.testcaseEnv.testcaseArgumentsDict["bounce_mirror_maker"]
-                    self.log_message("bounce_mirror_maker flag : " + bounceMirrorMaker)
-                    if (bounceMirrorMaker.lower() == "true"):
-
-                        clusterConfigList          = self.systemTestEnv.clusterEntityConfigDictList
-                        mirrorMakerEntityIdList    = system_test_utils.get_data_from_list_of_dicts(
-                                                     clusterConfigList, "role", "mirror_maker", "entity_id")
-                        stoppedMirrorMakerEntityId = mirrorMakerEntityIdList[0]
-
-                        mirrorMakerPPid = self.testcaseEnv.entityMirrorMakerParentPidDict[stoppedMirrorMakerEntityId]
-                        self.log_message("stopping mirror maker : " + mirrorMakerPPid)
-                        kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedMirrorMakerEntityId, mirrorMakerPPid)
-                        self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec")
-                        time.sleep(bouncedEntityDownTimeSec)
-
-                        # starting previously terminated mirror maker
-                        self.log_message("starting the previously terminated mirror maker")
-                        kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv, stoppedMirrorMakerEntityId)
-
-                    self.anonLogger.info("sleeping for 15s")
-                    time.sleep(15)
-                    i += 1
-                # while loop
-
-                # =============================================
-                # tell producer to stop
-                # =============================================
-                self.testcaseEnv.lock.acquire()
-                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True
-                time.sleep(1)
-                self.testcaseEnv.lock.release()
-                time.sleep(1)
-
-                # =============================================
-                # wait for producer thread's update of
-                # "backgroundProducerStopped" to be "True"
-                # =============================================
-                while 1:
-                    self.testcaseEnv.lock.acquire()
-                    self.logger.info("status of backgroundProducerStopped : [" + \
-                        str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d)
-                    if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]:
-                        time.sleep(1)
-                        self.testcaseEnv.lock.release()
-                        self.logger.info("all producer threads completed", extra=self.d)
-                        break
-                    time.sleep(1)
-                    self.testcaseEnv.lock.release()
-                    time.sleep(2)
-
-                self.anonLogger.info("sleeping for 15s")
-                time.sleep(15)
-                self.anonLogger.info("terminate Mirror Maker")
-                cmdStr = "ps auxw | grep Mirror | grep -v grep | tr -s ' ' | cut -f2 -d ' ' | xargs kill -15"
-                subproc = system_test_utils.sys_call_return_subproc(cmdStr)
-                for line in subproc.stdout.readlines():
-                    line = line.rstrip('\n')
-                    self.anonLogger.info("#### ["+line+"]")
-                self.anonLogger.info("sleeping for 15s")
-                time.sleep(15)
-
-                # =============================================
-                # starting consumer
-                # =============================================
-                self.log_message("starting consumer in the background")
-                kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv)
-                self.anonLogger.info("sleeping for 10s")
-                time.sleep(10)
-                    
-                # =============================================
-                # this testcase is completed - stop all entities
-                # =============================================
-                self.log_message("stopping all entities")
-                for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items():
-                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
-
-                for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items():
-                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
-
-                # make sure all entities are stopped
-                kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv)
-
-                # =============================================
-                # collect logs from remote hosts
-                # =============================================
-                kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv)
-
-                # =============================================
-                # validate the data matched and checksum
-                # =============================================
-                self.log_message("validating data matched")
-                kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv, replicationUtils)
-                kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv, "source")
-                kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv, "target")
-
-                # =============================================
-                # draw graphs
-                # =============================================
-                metrics.draw_all_graphs(self.systemTestEnv.METRICS_PATHNAME, 
-                                        self.testcaseEnv, 
-                                        self.systemTestEnv.clusterEntityConfigDictList)
-                
-                # build dashboard, one for each role
-                metrics.build_all_dashboards(self.systemTestEnv.METRICS_PATHNAME,
-                                             self.testcaseEnv.testCaseDashboardsDir,
-                                             self.systemTestEnv.clusterEntityConfigDictList)
-
-            except Exception as e:
-                self.log_message("Exception while running test {0}".format(e))
-                traceback.print_exc()
-                self.testcaseEnv.validationStatusDict["Test completed"] = "FAILED"
-
-            finally:
-                if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly:
-                    self.log_message("stopping all entities - please wait ...")
-                    kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
-

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/testcase_15001/testcase_15001_properties.json
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/testcase_15001/testcase_15001_properties.json b/system_test/mirror_maker_testsuite/testcase_15001/testcase_15001_properties.json
deleted file mode 100644
index 9dd3477..0000000
--- a/system_test/mirror_maker_testsuite/testcase_15001/testcase_15001_properties.json
+++ /dev/null
@@ -1,158 +0,0 @@
-{
-  "description": {"01":"To Test : 'Replication with Mirror Maker'",
-                  "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET",
-                  "03":"Set up 2-node Zk cluster for both SOURCE & TARGET",
-                  "04":"Produce and consume messages to a single topic - single partition.",
-                  "05":"This test sends messages to 3 replicas",
-                  "06":"At the end it verifies the log size and contents",
-                  "07":"Use a consumer to verify no message loss in TARGET cluster.",
-                  "08":"Producer dimensions : mode:sync, acks:-1, comp:0",
-                  "09":"Log segment size    : 10240"
-  },
-  "testcase_args": {
-    "bounce_leader": "false",
-    "bounce_mirror_maker": "false",
-    "replica_factor": "3",
-    "num_partition": "1",
-    "num_iteration": "1",
-    "sleep_seconds_between_producer_calls": "1",
-    "message_producing_free_time_sec": "15",
-    "num_messages_to_produce_per_producer_call": "50"
-  },
-  "entities": [
-    {
-      "entity_id": "0",
-      "clientPort": "2108",
-      "dataDir": "/tmp/zookeeper_0",
-      "log_filename": "zookeeper_0.log",
-      "config_filename": "zookeeper_0.properties"
-    },
-    {
-      "entity_id": "1",
-      "clientPort": "2118",
-      "dataDir": "/tmp/zookeeper_1",
-      "log_filename": "zookeeper_1.log",
-      "config_filename": "zookeeper_1.properties"
-    },
-
-    {
-      "entity_id": "2",
-      "clientPort": "2128",
-      "dataDir": "/tmp/zookeeper_2",
-      "log_filename": "zookeeper_2.log",
-      "config_filename": "zookeeper_2.properties"
-    },
-    {
-      "entity_id": "3",
-      "clientPort": "2138",
-      "dataDir": "/tmp/zookeeper_3",
-      "log_filename": "zookeeper_3.log",
-      "config_filename": "zookeeper_3.properties"
-    },
-
-    {
-      "entity_id": "4",
-      "port": "9091",
-      "broker.id": "1",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_4_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_4.log",
-      "config_filename": "kafka_server_4.properties"
-    },
-    {
-      "entity_id": "5",
-      "port": "9092",
-      "broker.id": "2",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_5_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_5.log",
-      "config_filename": "kafka_server_5.properties"
-    },
-    {
-      "entity_id": "6",
-      "port": "9093",
-      "broker.id": "3",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_6_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_6.log",
-      "config_filename": "kafka_server_6.properties"
-    },
-    {
-      "entity_id": "7",
-      "port": "9094",
-      "broker.id": "4",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_7_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_7.log",
-      "config_filename": "kafka_server_7.properties"
-    },
-    {
-      "entity_id": "8",
-      "port": "9095",
-      "broker.id": "5",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_8_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_8.log",
-      "config_filename": "kafka_server_8.properties"
-    },
-    {
-      "entity_id": "9",
-      "port": "9096",
-      "broker.id": "6",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_9_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_9.log",
-      "config_filename": "kafka_server_9.properties"
-    },
-
-    {
-      "entity_id": "10",
-      "topic": "test_1",
-      "threads": "5",
-      "compression-codec": "0",
-      "message-size": "500",
-      "message": "500",
-      "request-num-acks": "-1",
-      "sync":"true",
-      "producer-num-retries":"5",
-      "log_filename": "producer_performance_10.log",
-      "config_filename": "producer_performance_10.properties"
-    },
-    {
-      "entity_id": "11",
-      "topic": "test_1",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_11.log",
-      "config_filename": "console_consumer_11.properties"
-    },
-
-    {
-      "entity_id": "12",
-      "log_filename": "mirror_maker_12.log",
-      "mirror_consumer_config_filename": "mirror_consumer_12.properties",
-      "mirror_producer_config_filename": "mirror_producer_12.properties"
-    },
-
-    {
-      "entity_id": "13",
-      "topic": "test_1",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_13.log",
-      "config_filename": "console_consumer_13.properties"
-    }
-   ]
-}
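
Everything in the testcase_args block above is stored as a string, including
booleans and integers, which is why the driver compares the bounce flag
against "true" with .lower() and wraps the optional
bounced_entity_downtime_sec key in a try/except with a 15-second fallback.
A minimal loader that performs the coercion in one place might look like the
following sketch; the helper name and return shape are assumptions, not part
of the removed framework.

    import json

    def load_testcase_args(path):
        # All testcase_args values are strings in these property files,
        # so coerce the typed ones on the way in.
        with open(path) as f:
            args = json.load(f)["testcase_args"]
        return {
            "bounce_mirror_maker": args["bounce_mirror_maker"].lower() == "true",
            "num_iteration": int(args["num_iteration"]),
            # optional key; 15 mirrors the driver's fallback downtime
            "bounced_entity_downtime_sec":
                int(args.get("bounced_entity_downtime_sec", "15")),
        }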

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/testcase_15002/testcase_15002_properties.json
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/testcase_15002/testcase_15002_properties.json b/system_test/mirror_maker_testsuite/testcase_15002/testcase_15002_properties.json
deleted file mode 100644
index d6495e5..0000000
--- a/system_test/mirror_maker_testsuite/testcase_15002/testcase_15002_properties.json
+++ /dev/null
@@ -1,158 +0,0 @@
-{
-  "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker",
-                  "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET",
-                  "03":"Set up 2-node Zk cluster for both SOURCE & TARGET",
-                  "04":"Produce and consume messages to a single topic - single partition.",
-                  "05":"This test sends messages to 3 replicas",
-                  "06":"At the end it verifies the log size and contents",
-                  "07":"Use a consumer to verify no message loss in TARGET cluster.",
-                  "08":"Producer dimensions : mode:sync, acks:-1, comp:0",
-                  "09":"Log segment size    : 20480"
-  },
-  "testcase_args": {
-    "bounce_leader": "false",
-    "bounce_mirror_maker": "true",
-    "replica_factor": "3",
-    "num_partition": "1",
-    "num_iteration": "1",
-    "sleep_seconds_between_producer_calls": "1",
-    "message_producing_free_time_sec": "15",
-    "num_messages_to_produce_per_producer_call": "50"
-  },
-  "entities": [
-    {
-      "entity_id": "0",
-      "clientPort": "2108",
-      "dataDir": "/tmp/zookeeper_0",
-      "log_filename": "zookeeper_0.log",
-      "config_filename": "zookeeper_0.properties"
-    },
-    {
-      "entity_id": "1",
-      "clientPort": "2118",
-      "dataDir": "/tmp/zookeeper_1",
-      "log_filename": "zookeeper_1.log",
-      "config_filename": "zookeeper_1.properties"
-    },
-
-    {
-      "entity_id": "2",
-      "clientPort": "2128",
-      "dataDir": "/tmp/zookeeper_2",
-      "log_filename": "zookeeper_2.log",
-      "config_filename": "zookeeper_2.properties"
-    },
-    {
-      "entity_id": "3",
-      "clientPort": "2138",
-      "dataDir": "/tmp/zookeeper_3",
-      "log_filename": "zookeeper_3.log",
-      "config_filename": "zookeeper_3.properties"
-    },
-
-    {
-      "entity_id": "4",
-      "port": "9091",
-      "broker.id": "1",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_4_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_4.log",
-      "config_filename": "kafka_server_4.properties"
-    },
-    {
-      "entity_id": "5",
-      "port": "9092",
-      "broker.id": "2",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_5_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_5.log",
-      "config_filename": "kafka_server_5.properties"
-    },
-    {
-      "entity_id": "6",
-      "port": "9093",
-      "broker.id": "3",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_6_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_6.log",
-      "config_filename": "kafka_server_6.properties"
-    },
-    {
-      "entity_id": "7",
-      "port": "9094",
-      "broker.id": "4",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_7_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_7.log",
-      "config_filename": "kafka_server_7.properties"
-    },
-    {
-      "entity_id": "8",
-      "port": "9095",
-      "broker.id": "5",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_8_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_8.log",
-      "config_filename": "kafka_server_8.properties"
-    },
-    {
-      "entity_id": "9",
-      "port": "9096",
-      "broker.id": "6",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_9_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_9.log",
-      "config_filename": "kafka_server_9.properties"
-    },
-
-    {
-      "entity_id": "10",
-      "topic": "test_1",
-      "threads": "5",
-      "compression-codec": "0",
-      "message-size": "500",
-      "message": "100",
-      "request-num-acks": "-1",
-      "sync":"true",
-      "producer-num-retries":"5",
-      "log_filename": "producer_performance_10.log",
-      "config_filename": "producer_performance_10.properties"
-    },
-    {
-      "entity_id": "11",
-      "topic": "test_1",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_11.log",
-      "config_filename": "console_consumer_11.properties"
-    },
-
-    {
-      "entity_id": "12",
-      "log_filename": "mirror_maker_12.log",
-      "mirror_consumer_config_filename": "mirror_consumer_12.properties",
-      "mirror_producer_config_filename": "mirror_producer_12.properties"
-    },
-
-    {
-      "entity_id": "13",
-      "topic": "test_1",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_13.log",
-      "config_filename": "console_consumer_13.properties"
-    }
-   ]
-}
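
Testcase 15002 reuses the 15001 topology verbatim; the meaningful deltas are
bounce_mirror_maker flipping to "true" and entity 10 producing 100 messages
per call instead of 500. When auditing a matrix of near-identical cases like
this, a small diff over the args block surfaces the intended test dimensions
quickly. The helper below is hypothetical (and only covers testcase_args;
entity-level fields such as the per-call message count would need a similar
pass):

    import json

    def diff_testcase_args(path_a, path_b):
        """Print the testcase_args keys whose values differ between two cases."""
        with open(path_a) as fa, open(path_b) as fb:
            a = json.load(fa)["testcase_args"]
            b = json.load(fb)["testcase_args"]
        for key in sorted(set(a) | set(b)):
            if a.get(key) != b.get(key):
                print("%s: %r -> %r" % (key, a.get(key), b.get(key)))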

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/testcase_15003/cluster_config.json
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/testcase_15003/cluster_config.json b/system_test/mirror_maker_testsuite/testcase_15003/cluster_config.json
deleted file mode 100644
index f6fe867..0000000
--- a/system_test/mirror_maker_testsuite/testcase_15003/cluster_config.json
+++ /dev/null
@@ -1,135 +0,0 @@
-{
-    "cluster_config": [
-        {
-            "entity_id": "0",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9100"
-        },
-        {
-            "entity_id": "1",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9101"
-        },
-
-        {
-            "entity_id": "2",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9102"
-        },
-        {
-            "entity_id": "3",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9103"
-        },
-
-        {
-            "entity_id": "4",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9104"
-        },
-        {
-            "entity_id": "5",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9105"
-        },
-        {
-            "entity_id": "6",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9106"
-        },
-
-        {
-            "entity_id": "7",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9107"
-        },
-        {
-            "entity_id": "8",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9108"
-        },
-        {
-            "entity_id": "9",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9109"
-        },
-
-        {
-            "entity_id": "10",
-            "hostname": "localhost",
-            "role": "producer_performance",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9110"
-        },
-        {
-            "entity_id": "11",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9111"
-        },
-
-        {
-            "entity_id": "12",
-            "hostname": "localhost",
-            "role": "mirror_maker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9112"
-        },
-        {
-            "entity_id": "13",
-            "hostname": "localhost",
-            "role": "mirror_maker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9113"
-        }
-    ]
-}
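
The bounce logic in the driver locates its victim by calling
get_data_from_list_of_dicts over exactly this kind of cluster_config list
(match "role" == "mirror_maker", extract "entity_id") and then taking the
first hit. In plain Python the lookup reduces to a list comprehension; the
sketch below reimplements the behavior the driver relies on and is not the
removed helper itself.

    import json

    def get_field_by_role(cluster_config, role, field):
        # e.g. get_field_by_role(cfg, "mirror_maker", "entity_id") -> ["12", "13"]
        return [e[field] for e in cluster_config if e.get("role") == role]

    with open("cluster_config.json") as f:  # path is illustrative
        cfg = json.load(f)["cluster_config"]

    mirror_maker_ids = get_field_by_role(cfg, "mirror_maker", "entity_id")
    stopped_id = mirror_maker_ids[0]  # the driver always bounces the first one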

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/testcase_15003/testcase_15003_properties.json
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/testcase_15003/testcase_15003_properties.json b/system_test/mirror_maker_testsuite/testcase_15003/testcase_15003_properties.json
deleted file mode 100644
index 842c70e..0000000
--- a/system_test/mirror_maker_testsuite/testcase_15003/testcase_15003_properties.json
+++ /dev/null
@@ -1,156 +0,0 @@
-{
-  "description": {"01":"Replication with Mirror Maker => Bounce Mirror Maker",
-                  "02":"Set up 2 clusters such as : SOURCE => MirrorMaker => TARGET",
-                  "03":"Set up 2-node Zk cluster for both SOURCE & TARGET",
-                  "04":"Produce and consume messages to a single topic - single partition.",
-                  "05":"This test sends messages to 3 replicas",
-                  "06":"At the end it verifies the log size and contents",
-                  "07":"Use a consumer to verify no message loss in TARGET cluster.",
-                  "08":"Producer dimensions : mode:async, acks:-1, comp:1",
-                  "09":"Log segment size    : 20480"
-  },
-  "testcase_args": {
-    "bounce_leader": "false",
-    "bounce_mirror_maker": "true",
-    "bounced_entity_downtime_sec": "30",
-    "replica_factor": "3",
-    "num_partition": "1",
-    "num_iteration": "1",
-    "sleep_seconds_between_producer_calls": "1",
-    "message_producing_free_time_sec": "15",
-    "num_messages_to_produce_per_producer_call": "50"
-  },
-  "entities": [
-    {
-      "entity_id": "0",
-      "clientPort": "2108",
-      "dataDir": "/tmp/zookeeper_0",
-      "log_filename": "zookeeper_0.log",
-      "config_filename": "zookeeper_0.properties"
-    },
-    {
-      "entity_id": "1",
-      "clientPort": "2118",
-      "dataDir": "/tmp/zookeeper_1",
-      "log_filename": "zookeeper_1.log",
-      "config_filename": "zookeeper_1.properties"
-    },
-
-    {
-      "entity_id": "2",
-      "clientPort": "2128",
-      "dataDir": "/tmp/zookeeper_2",
-      "log_filename": "zookeeper_2.log",
-      "config_filename": "zookeeper_2.properties"
-    },
-    {
-      "entity_id": "3",
-      "clientPort": "2138",
-      "dataDir": "/tmp/zookeeper_3",
-      "log_filename": "zookeeper_3.log",
-      "config_filename": "zookeeper_3.properties"
-    },
-
-    {
-      "entity_id": "4",
-      "port": "9091",
-      "broker.id": "1",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_4_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_4.log",
-      "config_filename": "kafka_server_4.properties"
-    },
-    {
-      "entity_id": "5",
-      "port": "9092",
-      "broker.id": "2",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_5_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_5.log",
-      "config_filename": "kafka_server_5.properties"
-    },
-    {
-      "entity_id": "6",
-      "port": "9093",
-      "broker.id": "3",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_6_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_6.log",
-      "config_filename": "kafka_server_6.properties"
-    },
-    {
-      "entity_id": "7",
-      "port": "9094",
-      "broker.id": "4",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_7_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_7.log",
-      "config_filename": "kafka_server_7.properties"
-    },
-    {
-      "entity_id": "8",
-      "port": "9095",
-      "broker.id": "5",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_8_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_8.log",
-      "config_filename": "kafka_server_8.properties"
-    },
-    {
-      "entity_id": "9",
-      "port": "9096",
-      "broker.id": "6",
-      "log.segment.bytes": "20480",
-      "log.dir": "/tmp/kafka_server_9_logs",
-      "default.replication.factor": "3",
-      "num.partitions": "5",
-      "log_filename": "kafka_server_9.log",
-      "config_filename": "kafka_server_9.properties"
-    },
-
-    {
-      "entity_id": "10",
-      "topic": "test_1",
-      "threads": "5",
-      "compression-codec": "2",
-      "message-size": "500",
-      "message": "100",
-      "request-num-acks": "-1",
-      "sync":"false",
-      "producer-num-retries":"5",
-      "log_filename": "producer_performance_10.log",
-      "config_filename": "producer_performance_10.properties"
-    },
-    {
-      "entity_id": "11",
-      "topic": "test_1",
-      "group.id": "mytestgroup",
-      "consumer-timeout-ms": "10000",
-      "log_filename": "console_consumer_11.log",
-      "config_filename": "console_consumer_11.properties"
-    },
-
-    {
-      "entity_id": "12",
-      "log_filename": "mirror_maker_12.log",
-      "mirror_consumer_config_filename": "mirror_consumer_12.properties",
-      "mirror_producer_config_filename": "mirror_producer_12.properties"
-    },
-    {
-      "entity_id": "13",
-      "log_filename": "mirror_maker_13.log",
-      "mirror_consumer_config_filename": "mirror_consumer_13.properties",
-      "mirror_producer_config_filename": "mirror_producer_13.properties"
-    }
-   ]
-}
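
With four ZooKeeper nodes, six brokers, and the client entities all sharing
localhost, each testcase hand-assigns clientPort, port, and jmx_port values
that must stay globally unique. A quick uniqueness check over a case's
property and cluster files catches the copy-paste collisions this kind of
hand-edited JSON invites; the helper below is a sketch under that assumption,
not part of the deleted suite.

    import json
    from collections import Counter

    def find_port_collisions(*json_paths):
        """Collect every port-like value across files and report duplicates."""
        seen = []
        for path in json_paths:
            with open(path) as f:
                data = json.load(f)
            for section in ("entities", "cluster_config"):
                for entity in data.get(section, []):
                    for key in ("port", "clientPort", "jmx_port"):
                        if key in entity:
                            seen.append((entity[key], key, entity["entity_id"]))
        dupes = set(v for v, n in Counter(v for v, _, _ in seen).items() if n > 1)
        return [t for t in seen if t[0] in dupes]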

http://git-wip-us.apache.org/repos/asf/kafka/blob/d50499a0/system_test/mirror_maker_testsuite/testcase_15004/cluster_config.json
----------------------------------------------------------------------
diff --git a/system_test/mirror_maker_testsuite/testcase_15004/cluster_config.json b/system_test/mirror_maker_testsuite/testcase_15004/cluster_config.json
deleted file mode 100644
index f6fe867..0000000
--- a/system_test/mirror_maker_testsuite/testcase_15004/cluster_config.json
+++ /dev/null
@@ -1,135 +0,0 @@
-{
-    "cluster_config": [
-        {
-            "entity_id": "0",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9100"
-        },
-        {
-            "entity_id": "1",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9101"
-        },
-
-        {
-            "entity_id": "2",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9102"
-        },
-        {
-            "entity_id": "3",
-            "hostname": "localhost",
-            "role": "zookeeper",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9103"
-        },
-
-        {
-            "entity_id": "4",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9104"
-        },
-        {
-            "entity_id": "5",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9105"
-        },
-        {
-            "entity_id": "6",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9106"
-        },
-
-        {
-            "entity_id": "7",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9107"
-        },
-        {
-            "entity_id": "8",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9108"
-        },
-        {
-            "entity_id": "9",
-            "hostname": "localhost",
-            "role": "broker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9109"
-        },
-
-        {
-            "entity_id": "10",
-            "hostname": "localhost",
-            "role": "producer_performance",
-            "cluster_name":"source",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9110"
-        },
-        {
-            "entity_id": "11",
-            "hostname": "localhost",
-            "role": "console_consumer",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9111"
-        },
-
-        {
-            "entity_id": "12",
-            "hostname": "localhost",
-            "role": "mirror_maker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9112"
-        },
-        {
-            "entity_id": "13",
-            "hostname": "localhost",
-            "role": "mirror_maker",
-            "cluster_name":"target",
-            "kafka_home": "default",
-            "java_home": "default",
-            "jmx_port": "9113"
-        }
-    ]
-}
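
Unlike 15001 and 15002, testcases 15003 and 15004 run two mirror makers
(entities 12 and 13) so that bouncing one leaves the mirroring path alive.
Grouping a cluster_config by (cluster_name, role) makes that redundancy
visible at a glance; again a sketch, with the summarize name assumed.

    import json
    from collections import defaultdict

    def summarize(cluster_config_path):
        with open(cluster_config_path) as f:
            entities = json.load(f)["cluster_config"]
        counts = defaultdict(int)
        for e in entities:
            counts[(e["cluster_name"], e["role"])] += 1
        for (cluster, role), n in sorted(counts.items()):
            print("%-7s %-20s x%d" % (cluster, role, n))

Run against the cluster_config above, the target cluster reports
mirror_maker x2, which is exactly the redundancy the bounce test depends on.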

