kafka-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jkr...@apache.org
Subject svn commit: r1188024 - /incubator/kafka/trunk/config/server.properties
Date Mon, 24 Oct 2011 05:01:40 GMT
Author: jkreps
Date: Mon Oct 24 05:01:40 2011
New Revision: 1188024

URL: http://svn.apache.org/viewvc?rev=1188024&view=rev
Log:
KAFKA-164 Fix up configuration with more docs.


Modified:
    incubator/kafka/trunk/config/server.properties

Modified: incubator/kafka/trunk/config/server.properties
URL: http://svn.apache.org/viewvc/incubator/kafka/trunk/config/server.properties?rev=1188024&r1=1188023&r2=1188024&view=diff
==============================================================================
--- incubator/kafka/trunk/config/server.properties (original)
+++ incubator/kafka/trunk/config/server.properties Mon Oct 24 05:01:40 2011
@@ -14,65 +14,103 @@
 # limitations under the License.
 # see kafka.server.KafkaConfig for additional details and defaults
 
-# the id of the broker
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
 brokerid=0
 
-# hostname of broker. If not set, will pick up from the value returned
-# from getLocalHost.  If there are multiple interfaces getLocalHost
+# Hostname the broker will advertise to consumers. If not set, kafka will use the value returned
+# from InetAddress.getLocalHost().  If there are multiple interfaces getLocalHost
 # may not be what you want.
-# hostname=
+#hostname=
 
-# number of logical partitions on this broker
-num.partitions=1
 
-# the port the socket server runs on
-port=9092
+############################# Socket Server Settings #############################
 
-# the number of processor threads the socket server uses. Defaults to the number of cores on the machine
-num.threads=8
+# The port the socket server listens on
+port=9093
 
-# the directory in which to store log files
-log.dir=/tmp/kafka-logs
+# The number of processor threads the socket server uses for receiving and answering requests.
+# Defaults to the number of cores on the machine
+num.threads=8
 
-# the send buffer used by the socket server 
+# The send buffer (SO_SNDBUF) used by the socket server
 socket.send.buffer=1048576
 
-# the receive buffer used by the socket server
+# The receive buffer (SO_RCVBUF) used by the socket server
 socket.receive.buffer=1048576
 
-# the maximum size of a log segment
-log.file.size=536870912
+# The maximum size of a request that the socket server will accept (protection against OOM)
+max.socket.request.bytes=104857600
 
-# the interval between running cleanup on the logs
-log.cleanup.interval.mins=1
 
-# the minimum age of a log file to eligible for deletion
-log.retention.hours=168
+############################# Log Basics #############################
 
-#the number of messages to accept without flushing the log to disk
-log.flush.interval=1
+# The directory under which to store log files
+log.dir=/tmp/kafka-logs
 
-#set the following properties to use zookeeper
+# The number of logical partitions per topic per server. More partitions allow greater parallelism
+# for consumption, but also mean more files.
+num.partitions=1
 
-# enable connecting to zookeeper
-enable.zookeeper=true
+# Overrides for for the default given by num.partitions on a per-topic basis
+#topic.partition.count.map=topic1:3, topic2:4
 
-# zk connection string
-# comma separated host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002"
-zk.connect=localhost:2181
+############################# Log Flush Policy #############################
 
-# timeout in ms for connecting to zookeeper
-zk.connectiontimeout.ms=1000000
+# The following configurations control the flush of data to disk. This is the most
+# important performance knob in kafka.
+# There are a few important trade-offs here:
+#    1. Durability: Unflushed data is at greater risk of loss in the event of a crash.
+#    2. Latency: Data is not made available to consumers until it is flushed (which adds latency).
+#    3. Throughput: The flush is generally the most expensive operation. 
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+log.flush.interval=10000
 
-# time based topic flush intervals in ms
-#topic.flush.intervals.ms=topic:1000
+# The maximum amount of time a message can sit in a log before we force a flush
+log.default.flush.interval.ms=1000
 
-# default time based flush interval in ms
-log.default.flush.interval.ms=2000
+# Per-topic overrides for log.default.flush.interval.ms
+#topic.flush.intervals.ms=topic1:1000, topic2:3000
 
-# the interval (in ms) at which logs are checked to see if they need to be flushed to disk.
+# The interval (in ms) at which logs are checked to see if they need to be flushed to disk.
 log.default.flush.scheduler.interval.ms=1000
 
-# topic partition count map
-# topic.partition.count.map=topic1:3, topic2:4
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
+# segments don't drop below log.retention.size.
+#log.retention.size=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+log.file.size=536870912
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.cleanup.interval.mins=1
+
+############################# Zookeeper #############################
+
+# Enable connecting to zookeeper
+enable.zookeeper=true
+
+# Zk connection string (see zk docs for details).
+# This is a comma separated host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zk.connect=localhost:2181
+
+# Timeout in ms for connecting to zookeeper
+zk.connectiontimeout.ms=1000000



Mime
View raw message