kafka-commits mailing list archives

From ewe...@apache.org
Subject [kafka] branch trunk updated: MINOR: Fix some typos
Date Sun, 21 Oct 2018 02:41:09 GMT
This is an automated email from the ASF dual-hosted git repository.

ewencp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 83c3996  MINOR: Fix some typos
83c3996 is described below

commit 83c39969745dc7076e3756439f6842e7431a8c55
Author: John Eismeier <john.eismeier@gmail.com>
AuthorDate: Sat Oct 20 19:40:53 2018 -0700

    MINOR: Fix some typos
    
    Just a doc change
    
    Author: John Eismeier <john.eismeier@gmail.com>
    
    Reviewers: Ewen Cheslack-Postava <ewen@confluent.io>
    
    Closes #4573 from jeis2497052/trunk
---
 .../apache/kafka/common/record/FileRecordsTest.java  |  2 +-
 core/src/main/scala/kafka/utils/Mx4jLoader.scala     |  4 ++--
 core/src/test/scala/unit/kafka/admin/AdminTest.scala |  2 +-
 .../test/scala/unit/kafka/zk/AdminZkClientTest.scala |  2 +-
 docs/design.html                                     |  2 +-
 docs/security.html                                   | 12 ++++++------
 release.py                                           | 20 ++++++++++----------
 .../java/org/apache/kafka/streams/TopologyTest.java  |  2 +-
 .../internals/InternalTopologyBuilderTest.java       |  4 ++--
 9 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java
index 4b2b361..637da93 100644
--- a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java
+++ b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java
@@ -220,7 +220,7 @@ public class FileRecordsTest {
         position += message2Size + batches.get(2).sizeInBytes();
 
         int message4Size = batches.get(3).sizeInBytes();
-        assertEquals("Should be able to find fourth message from a non-existant offset",
+        assertEquals("Should be able to find fourth message from a non-existent offset",
                 new FileRecords.LogOffsetPosition(50L, position, message4Size),
                 fileRecords.searchForOffsetWithSize(3, position));
         assertEquals("Should be able to find fourth message by correct offset",
diff --git a/core/src/main/scala/kafka/utils/Mx4jLoader.scala b/core/src/main/scala/kafka/utils/Mx4jLoader.scala
index d9d1cb4..f2c8644 100644
--- a/core/src/main/scala/kafka/utils/Mx4jLoader.scala
+++ b/core/src/main/scala/kafka/utils/Mx4jLoader.scala
@@ -57,11 +57,11 @@ object Mx4jLoader extends Logging {
      httpAdaptorClass.getMethod("setProcessor", Class.forName("mx4j.tools.adaptor.http.ProcessorMBean")).invoke(httpAdaptor, xsltProcessor.asInstanceOf[AnyRef])
       mbs.registerMBean(xsltProcessor, processorName)
       httpAdaptorClass.getMethod("start").invoke(httpAdaptor)
-      info("mx4j successfuly loaded")
+      info("mx4j successfully loaded")
       return true
     }
     catch {
-	  case _: ClassNotFoundException =>
+      case _: ClassNotFoundException =>
         info("Will not load MX4J, mx4j-tools.jar is not in the classpath")
       case e: Throwable =>
         warn("Could not start register mbean in JMX", e)
diff --git a/core/src/test/scala/unit/kafka/admin/AdminTest.scala b/core/src/test/scala/unit/kafka/admin/AdminTest.scala
index a1c317e..88aff62 100755
--- a/core/src/test/scala/unit/kafka/admin/AdminTest.scala
+++ b/core/src/test/scala/unit/kafka/admin/AdminTest.scala
@@ -169,7 +169,7 @@ class AdminTest extends ZooKeeperTestHarness with Logging with RackAwareTest {
     zkUtils.updatePersistentPath(ConfigEntityZNode.path(ConfigType.Client, clientId), Json.encodeAsString(map.asJava))
 
     val configInZk: Map[String, Properties] = AdminUtils.fetchAllEntityConfigs(zkUtils, ConfigType.Client)
-    assertEquals("Must have 1 overriden client config", 1, configInZk.size)
+    assertEquals("Must have 1 overridden client config", 1, configInZk.size)
     assertEquals(props, configInZk(clientId))
 
     // Test that the existing clientId overrides are read
diff --git a/core/src/test/scala/unit/kafka/zk/AdminZkClientTest.scala b/core/src/test/scala/unit/kafka/zk/AdminZkClientTest.scala
index 81d938b..9f81c18 100644
--- a/core/src/test/scala/unit/kafka/zk/AdminZkClientTest.scala
+++ b/core/src/test/scala/unit/kafka/zk/AdminZkClientTest.scala
@@ -307,7 +307,7 @@ class AdminZkClientTest extends ZooKeeperTestHarness with Logging with RackAware
     zkClient.setOrCreateEntityConfigs(ConfigType.Client, clientId, props)
 
     val configInZk: Map[String, Properties] = adminZkClient.fetchAllEntityConfigs(ConfigType.Client)
-    assertEquals("Must have 1 overriden client config", 1, configInZk.size)
+    assertEquals("Must have 1 overridden client config", 1, configInZk.size)
     assertEquals(props, configInZk(clientId))
 
     // Test that the existing clientId overrides are read
diff --git a/docs/design.html b/docs/design.html
index 0061a53..88e737a 100644
--- a/docs/design.html
+++ b/docs/design.html
@@ -277,7 +277,7 @@
     offsets are both updated or neither is. We follow similar patterns for many other data systems which require these stronger semantics and for which the messages do not have a primary key to allow for deduplication.
     <p>
     So effectively Kafka supports exactly-once delivery in <a href="https://kafka.apache.org/documentation/streams">Kafka Streams</a>, and the transactional producer/consumer can be used generally to provide
-    exactly-once delivery when transfering and processing data between Kafka topics. Exactly-once delivery for other destination systems generally requires cooperation with such systems, but Kafka provides the
+    exactly-once delivery when transferring and processing data between Kafka topics. Exactly-once delivery for other destination systems generally requires cooperation with such systems, but Kafka provides the
     offset which makes implementing this feasible (see also <a href="https://kafka.apache.org/documentation/#connect">Kafka Connect</a>). Otherwise, Kafka guarantees at-least-once delivery by default, and allows
     the user to implement at-most-once delivery by disabling retries on the producer and committing offsets in the consumer prior to processing a batch of messages.
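
The doc text in this hunk describes Kafka's consume-transform-produce pattern: a transactional producer writes the output records and the consumed offsets atomically, which is what makes a transfer between topics exactly-once. As a rough illustration only (it is not part of this commit; the topic names, group id, and bootstrap address are invented placeholders), a minimal Java sketch of that loop might look like:

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.TopicPartition;
    import java.time.Duration;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Properties;

    public class ExactlyOnceRelay {
        public static void main(String[] args) {
            Properties pp = new Properties();
            pp.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            // A transactional.id turns on idempotence and transactions.
            pp.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "relay-1");
            pp.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                   "org.apache.kafka.common.serialization.StringSerializer");
            pp.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                   "org.apache.kafka.common.serialization.StringSerializer");

            Properties cp = new Properties();
            cp.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            cp.put(ConsumerConfig.GROUP_ID_CONFIG, "relay-group");
            // Offsets are committed inside the transaction, not by the consumer.
            cp.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
            cp.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
            cp.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                   "org.apache.kafka.common.serialization.StringDeserializer");
            cp.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                   "org.apache.kafka.common.serialization.StringDeserializer");

            try (KafkaProducer<String, String> producer = new KafkaProducer<>(pp);
                 KafkaConsumer<String, String> consumer = new KafkaConsumer<>(cp)) {
                consumer.subscribe(Collections.singletonList("input-topic"));
                producer.initTransactions();
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                    if (records.isEmpty())
                        continue;
                    producer.beginTransaction();
                    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                    for (ConsumerRecord<String, String> rec : records) {
                        producer.send(new ProducerRecord<>("output-topic", rec.key(), rec.value()));
                        offsets.put(new TopicPartition(rec.topic(), rec.partition()),
                                    new OffsetAndMetadata(rec.offset() + 1));
                    }
                    // Output records and consumed offsets commit atomically;
                    // real code would abortTransaction() on failure.
                    producer.sendOffsetsToTransaction(offsets, "relay-group");
                    producer.commitTransaction();
                }
            }
        }
    }

Setting isolation.level=read_committed on the downstream consumer is what keeps records from aborted transactions invisible.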
 
diff --git a/docs/security.html b/docs/security.html
index b018334..6ff9eba 100644
--- a/docs/security.html
+++ b/docs/security.html
@@ -220,7 +220,7 @@
             ssl.keystore.location=/var/private/ssl/client.keystore.jks
             ssl.keystore.password=test1234
             ssl.key.password=test1234</pre>
-			
+
             Other configuration settings that may also be needed depending on our requirements and the broker configuration:
                 <ol>
                     <li>ssl.provider (Optional). The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.</li>
@@ -342,7 +342,7 @@
                 </li>
                 <li>Pass the JAAS config file location as JVM parameter to each client JVM. For example:
                     <pre class="brush: bash;">    -Djava.security.auth.login.config=/etc/kafka/kafka_client_jaas.conf</pre></li>
-	</ol>
+                </ol>
                 </li>
             </ol>
             </li>
@@ -455,7 +455,7 @@
                     Clients (producers, consumers, connect workers, etc) will authenticate to the cluster with their
                     own principal (usually with the same name as the user running the client), so obtain or create
                     these principals as needed. Then configure the JAAS configuration property for each client.
-                    Different clients within a JVM may run as different users by specifiying different principals.
+                    Different clients within a JVM may run as different users by specifying different principals.
                     The property <code>sasl.jaas.config</code> in producer.properties or consumer.properties describes
                     how clients like producer and consumer can connect to the Kafka Broker. The following is an example
                     configuration for a client using a keytab (recommended for long-running processes):
@@ -621,9 +621,9 @@
        <li><h5><a id="security_sasl_scram_clientconfig" href="#security_sasl_scram_clientconfig">Configuring Kafka Clients</a></h5>
             To configure SASL authentication on the clients:
             <ol>
-	    <li>Configure the JAAS configuration property for each client in producer.properties or consumer.properties.
+            <li>Configure the JAAS configuration property for each client in producer.properties or consumer.properties.
                The login module describes how the clients like producer and consumer can connect to the Kafka Broker.
-	        The following is an example configuration for a client for the SCRAM mechanisms:
+                The following is an example configuration for a client for the SCRAM mechanisms:
                 <pre class="brush: text;">
    sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
         username="alice" \
@@ -973,7 +973,7 @@
                 <ol>
                    <li>Configure the JAAS configuration property for each client in producer.properties or consumer.properties.
                The login module describes how the clients like producer and consumer can connect to the Kafka Broker.
-	        The following is an example configuration for a client for the token authentication:
+                The following is an example configuration for a client for the token authentication:
                 <pre class="brush: text;">
    sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
         username="tokenID123" \
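
The security.html text in the hunks above explains that each client carries its own JAAS configuration through the sasl.jaas.config property, so different clients within one JVM can run as different principals. A minimal sketch (not from this commit; the broker address is invented and the SCRAM credentials are the doc's own placeholders) of setting that property programmatically:

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.common.config.SaslConfigs;
    import org.apache.kafka.common.serialization.StringSerializer;
    import java.util.Properties;

    public class ScramClient {
        public static KafkaProducer<String, String> newProducer() {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9093");
            props.put("security.protocol", "SASL_SSL");
            props.put(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-256");
            // Per-client JAAS config: another producer or consumer in the same
            // JVM may use a different principal by setting its own value here.
            props.put(SaslConfigs.SASL_JAAS_CONFIG,
                "org.apache.kafka.common.security.scram.ScramLoginModule required "
                + "username=\"alice\" password=\"alice-secret\";");
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            return new KafkaProducer<>(props);
        }
    }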
diff --git a/release.py b/release.py
index 3573a7f..1cf54c4 100755
--- a/release.py
+++ b/release.py
@@ -285,13 +285,13 @@ if not user_ok("""Requirements:
       signing.keyId=your-gpgkeyId
       signing.password=your-gpg-passphrase
       signing.secretKeyRingFile=/Users/your-id/.gnupg/secring.gpg (if you are using GPG 2.1 and beyond, then this file will no longer exist anymore, and you have to manually create it from the new private key directory with "gpg --export-secret-keys -o ~/.gnupg/secring.gpg")
-8. ~/.m2/settings.xml configured for pgp signing and uploading to apache release maven, i.e., 
+8. ~/.m2/settings.xml configured for pgp signing and uploading to apache release maven, i.e.,
        <server>
           <id>apache.releases.https</id>
           <username>your-apache-id</username>
           <password>your-apache-passwd</password>
         </server>
-	<server>
+        <server>
             <id>your-gpgkeyId</id>
             <passphrase>your-gpg-passphase</passphrase>
         </server>
@@ -299,18 +299,18 @@ if not user_ok("""Requirements:
             <id>gpg-signing</id>
             <properties>
                 <gpg.keyname>your-gpgkeyId</gpg.keyname>
-        	<gpg.passphraseServerId>your-gpgkeyId</gpg.passphraseServerId>
+                <gpg.passphraseServerId>your-gpgkeyId</gpg.passphraseServerId>
             </properties>
         </profile>
 9. You may also need to update some gnupgp configs:
-	~/.gnupg/gpg-agent.conf
-	allow-loopback-pinentry
+        ~/.gnupg/gpg-agent.conf
+        allow-loopback-pinentry
 
-	~/.gnupg/gpg.conf
-	use-agent
-	pinentry-mode loopback
+        ~/.gnupg/gpg.conf
+        use-agent
+        pinentry-mode loopback
 
-	echo RELOADAGENT | gpg-connect-agent
+        echo RELOADAGENT | gpg-connect-agent
 
 If any of these are missing, see https://cwiki.apache.org/confluence/display/KAFKA/Release+Process for instructions on setting them up.
 
@@ -404,7 +404,7 @@ cmd("remove backup pom.xml", "rm streams/quickstart/pom.xml.orig")
 cmd("remove backup java pom.xml", "rm streams/quickstart/java/pom.xml.orig")
 cmd("remove backup java pom.xml", "rm streams/quickstart/java/src/main/resources/archetype-resources/pom.xml.orig")
 # Command in explicit list due to messages with spaces
-cmd("Commiting version number updates", ["git", "commit", "-a", "-m", "Bump version to %s"
% release_version])
+cmd("Committing version number updates", ["git", "commit", "-a", "-m", "Bump version to %s"
% release_version])
 # Command in explicit list due to messages with spaces
 cmd("Tagging release candidate %s" % rc_tag, ["git", "tag", "-a", rc_tag, "-m", rc_tag])
 rc_githash = cmd_output("git show-ref --hash " + rc_tag)
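
release.py's comment above ("Command in explicit list due to messages with spaces") names a standard fix for shell word splitting: when argv is an explicit list, a commit message containing spaces stays a single argument. The same idea sketched in Java (the version string is an invented placeholder):

    import java.io.IOException;
    import java.util.Arrays;

    public class GitCommit {
        public static void main(String[] args) throws IOException, InterruptedException {
            String message = "Bump version to 2.2.0-SNAPSHOT"; // spaces, but still one argv entry
            // ProcessBuilder passes each list element to git verbatim:
            // no shell is involved, so no quoting or word splitting applies.
            Process p = new ProcessBuilder(
                    Arrays.asList("git", "commit", "-a", "-m", message))
                    .inheritIO()
                    .start();
            System.exit(p.waitFor());
        }
    }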
diff --git a/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java b/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java
index 289cdf0..de856bb 100644
--- a/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/TopologyTest.java
@@ -258,7 +258,7 @@ public class TopologyTest {
     public void shouldNotAllowToAddStateStoreToNonExistingProcessor() {
         mockStoreBuilder();
         EasyMock.replay(storeBuilder);
-        topology.addStateStore(storeBuilder, "no-such-processsor");
+        topology.addStateStore(storeBuilder, "no-such-processor");
     }
 
     @Test
diff --git a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java
index 8ddb0b5..d49dd9d 100644
--- a/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java
+++ b/streams/src/test/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilderTest.java
@@ -103,7 +103,7 @@ public class InternalTopologyBuilderTest {
     @Test
     public void shouldAddPatternSourceWithoutOffsetReset() {
         final Pattern expectedPattern = Pattern.compile("test-.*");
-        
+
         builder.addSource(null, "source", null, stringSerde.deserializer(), stringSerde.deserializer(), Pattern.compile("test-.*"));
 
         assertEquals(expectedPattern.pattern(), builder.sourceTopicPattern().pattern());
@@ -286,7 +286,7 @@ public class InternalTopologyBuilderTest {
 
     @Test(expected = TopologyException.class)
     public void testAddStateStoreWithNonExistingProcessor() {
-        builder.addStateStore(storeBuilder, "no-such-processsor");
+        builder.addStateStore(storeBuilder, "no-such-processor");
     }
 
     @Test
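
Both renamed test strings above exercise the same contract: Topology#addStateStore (and the internal builder behind it) throws a TopologyException when the named processor does not exist yet. A minimal sketch of the valid ordering, with invented topic, processor, and store names:

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.Topology;
    import org.apache.kafka.streams.processor.AbstractProcessor;
    import org.apache.kafka.streams.state.Stores;

    public class StoreWiring {
        public static void main(String[] args) {
            Topology topology = new Topology();
            topology.addSource("source", "input-topic");
            // The processor must be added first so the store can reference it by name.
            topology.addProcessor("proc", () -> new AbstractProcessor<String, String>() {
                @Override
                public void process(String key, String value) { } // no-op for the sketch
            }, "source");
            // Referencing a missing name here (e.g. "no-such-processor")
            // would throw a TopologyException instead.
            topology.addStateStore(
                Stores.keyValueStoreBuilder(
                    Stores.inMemoryKeyValueStore("counts"),
                    Serdes.String(), Serdes.Long()),
                "proc");
            System.out.println(topology.describe());
        }
    }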

