This is an automated email from the ASF dual-hosted git repository.

rhauch pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git

The following commit(s) were added to refs/heads/asf-site by this push:
     new d50b34d  Add documentation and JavaDoc for 2.6.0 release
d50b34d is described below

commit d50b34d69a7b98fc748e33473ee39ea2e0a66b35
Author:     Randall Hauch
AuthorDate: Tue Jul 14 17:10:33 2020 -0500

    Add documentation and JavaDoc for 2.6.0 release
---
 26/api.html | 120 + 26/configuration.html | 293 + 26/connect.html | 745 + 26/design.html | 660 + 26/documentation.html | 83 + 26/documentation/index.html | 18 + 26/documentation/streams/architecture.html | 19 + 26/documentation/streams/core-concepts.html | 19 + .../streams/developer-guide/app-reset-tool.html | 19 + .../streams/developer-guide/config-streams.html | 19 + .../streams/developer-guide/datatypes.html | 19 + .../streams/developer-guide/dsl-api.html | 19 + .../developer-guide/dsl-topology-naming.html | 19 + .../streams/developer-guide/index.html | 19 + .../developer-guide/interactive-queries.html | 19 + .../streams/developer-guide/manage-topics.html | 19 + .../streams/developer-guide/memory-mgmt.html | 19 + .../streams/developer-guide/processor-api.html | 19 + .../streams/developer-guide/running-app.html | 19 + .../streams/developer-guide/security.html | 19 + .../streams/developer-guide/testing.html | 19 + .../streams/developer-guide/write-streams.html | 19 + 26/documentation/streams/index.html | 19 + 26/documentation/streams/quickstart.html | 19 + 26/documentation/streams/tutorial.html | 19 + 26/documentation/streams/upgrade-guide.html | 19 + 26/ecosystem.html | 18 + 26/generated/admin_client_config.html | 493 + 26/generated/connect_config.html | 933 ++ 26/generated/connect_metrics.html | 202 + 26/generated/connect_predicates.html | 44 + 26/generated/connect_transforms.html | 354 + 26/generated/consumer_config.html | 703 + 26/generated/consumer_metrics.html | 81 + 26/generated/kafka_config.html | 2291 +++ 26/generated/producer_config.html | 653 + 26/generated/producer_metrics.html | 78 + 26/generated/protocol_api_keys.html | 105 + 26/generated/protocol_errors.html | 98 + 26/generated/protocol_messages.html | 11443 ++++++++++++++ 26/generated/protocol_types.html | 27 + 26/generated/sink_connector_config.html | 193 + 26/generated/source_connector_config.html | 153 + 26/generated/streams_config.html | 433 + 26/generated/topic_config.html | 289 + 26/images/consumer-groups.png | Bin 0 -> 26820 bytes 26/images/icons/NYT.jpg | Bin 0 -> 12605 bytes 26/images/icons/architecture--white.png | Bin 0 -> 812 bytes 26/images/icons/architecture.png | Bin 0 -> 818 bytes 26/images/icons/documentation--white.png | Bin 0 -> 1758 bytes 26/images/icons/documentation.png | Bin 0 -> 1642 bytes 26/images/icons/line.png | Bin 0 -> 676 bytes 26/images/icons/new-york.png | Bin 0 -> 3381 bytes 26/images/icons/rabobank.png | Bin 0 -> 3593 bytes 26/images/icons/tutorials--white.png | Bin 0 -> 1047 bytes 26/images/icons/tutorials.png | Bin 0 -> 985 bytes 26/images/icons/zalando.png | Bin 0 -> 2716 bytes 26/images/kafka-apis.png | Bin 0 -> 86640 bytes 26/images/kafka_log.png | Bin 0 -> 134321 bytes 26/images/kafka_multidc.png | Bin 0 -> 33959 bytes 26/images/kafka_multidc_complex.png | Bin 0 -> 38559 bytes 26/images/log_anatomy.png | Bin 0 -> 19579 bytes 26/images/log_cleaner_anatomy.png | Bin 0 -> 18638 bytes 26/images/log_compaction.png | Bin 0 -> 41414 bytes 26/images/log_consumer.png | Bin 0 -> 139658 bytes
26/images/mirror-maker.png | Bin 0 -> 6579 bytes 26/images/producer_consumer.png | Bin 0 -> 8691 bytes 26/images/streams-architecture-overview.jpg | Bin 0 -> 420929 bytes 26/images/streams-architecture-states.jpg | Bin 0 -> 147338 bytes 26/images/streams-architecture-tasks.jpg | Bin 0 -> 130435 bytes 26/images/streams-architecture-threads.jpg | Bin 0 -> 153622 bytes 26/images/streams-architecture-topology.jpg | Bin 0 -> 182199 bytes 26/images/streams-cache-and-commit-interval.png | Bin 0 -> 38648 bytes 26/images/streams-concepts-topology.jpg | Bin 0 -> 136983 bytes 26/images/streams-elastic-scaling-1.png | Bin 0 -> 88673 bytes 26/images/streams-elastic-scaling-2.png | Bin 0 -> 91141 bytes 26/images/streams-elastic-scaling-3.png | Bin 0 -> 88604 bytes 26/images/streams-interactive-queries-01.png | Bin 0 -> 80976 bytes 26/images/streams-interactive-queries-02.png | Bin 0 -> 73218 bytes 26/images/streams-interactive-queries-03.png | Bin 0 -> 79879 bytes 26/images/streams-interactive-queries-api-01.png | Bin 0 -> 84438 bytes 26/images/streams-interactive-queries-api-02.png | Bin 0 -> 100725 bytes 26/images/streams-session-windows-01.png | Bin 0 -> 49003 bytes 26/images/streams-session-windows-02.png | Bin 0 -> 55956 bytes 26/images/streams-stateful_operations.png | Bin 0 -> 123213 bytes 26/images/streams-table-duality-01.png | Bin 0 -> 14534 bytes 26/images/streams-table-duality-02.png | Bin 0 -> 56736 bytes 26/images/streams-table-duality-03.png | Bin 0 -> 91331 bytes 26/images/streams-table-updates-01.png | Bin 0 -> 78069 bytes 26/images/streams-table-updates-02.png | Bin 0 -> 91880 bytes 26/images/streams-time-windows-hopping.png | Bin 0 -> 110392 bytes 26/images/streams-time-windows-tumbling.png | Bin 0 -> 63888 bytes 26/images/streams-welcome.png | Bin 0 -> 80530 bytes 26/images/tracking_high_level.png | Bin 0 -> 82759 bytes 26/implementation.html | 313 + 26/introduction.html | 215 + 26/javadoc/allclasses-frame.html | 648 + 26/javadoc/allclasses-noframe.html | 648 + 26/javadoc/constant-values.html | 3110 ++++ 26/javadoc/deprecated-list.html | 787 + 26/javadoc/help-doc.html | 222 + 26/javadoc/index-all.html | 15120 +++++++++++++++++++ 26/javadoc/index.html | 75 + .../kafka/clients/admin/AbstractOptions.html | 331 + .../org/apache/kafka/clients/admin/Admin.html | 2421 +++ .../apache/kafka/clients/admin/AdminClient.html | 324 + .../kafka/clients/admin/AdminClientConfig.html | 744 + .../clients/admin/AlterClientQuotasOptions.html | 318 + .../clients/admin/AlterClientQuotasResult.html | 298 + .../kafka/clients/admin/AlterConfigOp.OpType.html | 411 + .../apache/kafka/clients/admin/AlterConfigOp.html | 357 + .../kafka/clients/admin/AlterConfigsOptions.html | 340 + .../kafka/clients/admin/AlterConfigsResult.html | 257 + .../admin/AlterConsumerGroupOffsetsOptions.html | 265 + .../admin/AlterConsumerGroupOffsetsResult.html | 257 + .../admin/AlterPartitionReassignmentsOptions.html | 265 + .../admin/AlterPartitionReassignmentsResult.html | 263 + .../clients/admin/AlterReplicaLogDirsOptions.html | 263 + .../clients/admin/AlterReplicaLogDirsResult.html | 279 + .../org/apache/kafka/clients/admin/Config.html | 345 + .../clients/admin/ConfigEntry.ConfigSource.html | 403 + .../clients/admin/ConfigEntry.ConfigSynonym.html | 325 + .../clients/admin/ConfigEntry.ConfigType.html | 439 + .../apache/kafka/clients/admin/ConfigEntry.html | 536 + .../clients/admin/ConsumerGroupDescription.html | 429 + .../kafka/clients/admin/ConsumerGroupListing.html | 390 + .../kafka/clients/admin/CreateAclsOptions.html | 308 + 
.../kafka/clients/admin/CreateAclsResult.html | 259 + .../admin/CreateDelegationTokenOptions.html | 338 + .../clients/admin/CreateDelegationTokenResult.html | 241 + .../clients/admin/CreatePartitionsOptions.html | 318 + .../clients/admin/CreatePartitionsResult.html | 259 + .../kafka/clients/admin/CreateTopicsOptions.html | 340 + .../CreateTopicsResult.TopicMetadataAndConfig.html | 264 + .../kafka/clients/admin/CreateTopicsResult.html | 380 + .../kafka/clients/admin/DeleteAclsOptions.html | 308 + .../admin/DeleteAclsResult.FilterResult.html | 258 + .../admin/DeleteAclsResult.FilterResults.html | 242 + .../kafka/clients/admin/DeleteAclsResult.html | 287 + .../admin/DeleteConsumerGroupOffsetsOptions.html | 265 + .../admin/DeleteConsumerGroupOffsetsResult.html | 258 + .../clients/admin/DeleteConsumerGroupsOptions.html | 265 + .../clients/admin/DeleteConsumerGroupsResult.html | 259 + .../kafka/clients/admin/DeleteRecordsOptions.html | 265 + .../kafka/clients/admin/DeleteRecordsResult.html | 293 + .../kafka/clients/admin/DeleteTopicsOptions.html | 308 + .../kafka/clients/admin/DeleteTopicsResult.html | 259 + .../apache/kafka/clients/admin/DeletedRecords.html | 282 + .../kafka/clients/admin/DescribeAclsOptions.html | 308 + .../kafka/clients/admin/DescribeAclsResult.html | 241 + .../clients/admin/DescribeClientQuotasOptions.html | 265 + .../clients/admin/DescribeClientQuotasResult.html | 283 + .../clients/admin/DescribeClusterOptions.html | 338 + .../kafka/clients/admin/DescribeClusterResult.html | 292 + .../clients/admin/DescribeConfigsOptions.html | 372 + .../kafka/clients/admin/DescribeConfigsResult.html | 259 + .../admin/DescribeConsumerGroupsOptions.html | 312 + .../admin/DescribeConsumerGroupsResult.html | 291 + .../admin/DescribeDelegationTokenOptions.html | 323 + .../admin/DescribeDelegationTokenResult.html | 241 + .../clients/admin/DescribeLogDirsOptions.html | 265 + .../kafka/clients/admin/DescribeLogDirsResult.html | 257 + .../admin/DescribeReplicaLogDirsOptions.html | 265 + ...ribeReplicaLogDirsResult.ReplicaLogDirInfo.html | 294 + .../admin/DescribeReplicaLogDirsResult.html | 276 + .../kafka/clients/admin/DescribeTopicsOptions.html | 334 + .../kafka/clients/admin/DescribeTopicsResult.html | 295 + .../kafka/clients/admin/ElectLeadersOptions.html | 265 + .../kafka/clients/admin/ElectLeadersResult.html | 259 + .../admin/ElectPreferredLeadersOptions.html | 272 + .../clients/admin/ElectPreferredLeadersResult.html | 292 + .../admin/ExpireDelegationTokenOptions.html | 312 + .../clients/admin/ExpireDelegationTokenResult.html | 241 + .../kafka/clients/admin/KafkaAdminClient.html | 1399 ++ .../admin/ListConsumerGroupOffsetsOptions.html | 325 + .../admin/ListConsumerGroupOffsetsResult.html | 242 + .../clients/admin/ListConsumerGroupsOptions.html | 321 + .../clients/admin/ListConsumerGroupsResult.html | 290 + .../kafka/clients/admin/ListOffsetsOptions.html | 311 + .../ListOffsetsResult.ListOffsetsResultInfo.html | 319 + .../kafka/clients/admin/ListOffsetsResult.html | 312 + .../admin/ListPartitionReassignmentsOptions.html | 265 + .../admin/ListPartitionReassignmentsResult.html | 240 + .../kafka/clients/admin/ListTopicsOptions.html | 347 + .../kafka/clients/admin/ListTopicsResult.html | 273 + .../kafka/clients/admin/MemberAssignment.html | 330 + .../kafka/clients/admin/MemberDescription.html | 413 + .../apache/kafka/clients/admin/MemberToRemove.html | 303 + .../clients/admin/NewPartitionReassignment.html | 273 + .../apache/kafka/clients/admin/NewPartitions.html | 337 + 
.../org/apache/kafka/clients/admin/NewTopic.html | 463 + .../clients/admin/OffsetSpec.EarliestSpec.html | 265 + .../kafka/clients/admin/OffsetSpec.LatestSpec.html | 265 + .../clients/admin/OffsetSpec.TimestampSpec.html | 225 + .../org/apache/kafka/clients/admin/OffsetSpec.html | 341 + .../kafka/clients/admin/PartitionReassignment.html | 327 + .../kafka/clients/admin/RecordsToDelete.html | 312 + .../RemoveMembersFromConsumerGroupOptions.html | 325 + .../RemoveMembersFromConsumerGroupResult.html | 258 + .../clients/admin/RenewDelegationTokenOptions.html | 312 + .../clients/admin/RenewDelegationTokenResult.html | 241 + .../kafka/clients/admin/TopicDescription.html | 417 + .../apache/kafka/clients/admin/TopicListing.html | 316 + .../apache/kafka/clients/admin/package-frame.html | 124 + .../kafka/clients/admin/package-summary.html | 753 + .../apache/kafka/clients/admin/package-tree.html | 264 + .../clients/consumer/CommitFailedException.html | 285 + .../apache/kafka/clients/consumer/Consumer.html | 1066 ++ .../kafka/clients/consumer/ConsumerConfig.html | 1373 ++ .../clients/consumer/ConsumerGroupMetadata.html | 378 + .../clients/consumer/ConsumerInterceptor.html | 315 + .../ConsumerPartitionAssignor.Assignment.html | 316 + .../ConsumerPartitionAssignor.GroupAssignment.html | 272 + ...onsumerPartitionAssignor.GroupSubscription.html | 272 + ...onsumerPartitionAssignor.RebalanceProtocol.html | 383 + .../ConsumerPartitionAssignor.Subscription.html | 354 + .../consumer/ConsumerPartitionAssignor.html | 388 + .../consumer/ConsumerRebalanceListener.html | 420 + .../kafka/clients/consumer/ConsumerRecord.html | 743 + .../kafka/clients/consumer/ConsumerRecords.html | 421 + .../consumer/CooperativeStickyAssignor.html | 424 + .../clients/consumer/InvalidOffsetException.html | 311 + .../kafka/clients/consumer/KafkaConsumer.html | 2671 ++++ .../clients/consumer/LogTruncationException.html | 339 + .../kafka/clients/consumer/MockConsumer.html | 1406 ++ .../consumer/NoOffsetForPartitionException.html | 355 + .../kafka/clients/consumer/OffsetAndMetadata.html | 421 + .../kafka/clients/consumer/OffsetAndTimestamp.html | 372 + .../clients/consumer/OffsetCommitCallback.html | 246 + .../consumer/OffsetOutOfRangeException.html | 348 + .../clients/consumer/OffsetResetStrategy.html | 350 + .../kafka/clients/consumer/RangeAssignor.html | 378 + .../consumer/RetriableCommitFailedException.html | 312 + .../kafka/clients/consumer/RoundRobinAssignor.html | 408 + .../kafka/clients/consumer/StickyAssignor.html | 562 + .../kafka/clients/consumer/package-frame.html | 56 + .../kafka/clients/consumer/package-summary.html | 338 + .../kafka/clients/consumer/package-tree.html | 231 + .../clients/producer/BufferExhaustedException.html | 296 + .../apache/kafka/clients/producer/Callback.html | 253 + .../kafka/clients/producer/KafkaProducer.html | 1124 ++ .../kafka/clients/producer/MockProducer.html | 1042 ++ .../apache/kafka/clients/producer/Partitioner.html | 306 + .../apache/kafka/clients/producer/Producer.html | 484 + .../kafka/clients/producer/ProducerConfig.html | 1192 ++ .../clients/producer/ProducerInterceptor.html | 332 + .../kafka/clients/producer/ProducerRecord.html | 603 + .../kafka/clients/producer/RecordMetadata.html | 506 + .../clients/producer/RoundRobinPartitioner.html | 355 + .../clients/producer/UniformStickyPartitioner.html | 381 + .../kafka/clients/producer/package-frame.html | 36 + .../kafka/clients/producer/package-summary.html | 237 + .../kafka/clients/producer/package-tree.html | 193 + 
26/javadoc/org/apache/kafka/common/Cluster.html | 733 + .../org/apache/kafka/common/ClusterResource.html | 333 + .../kafka/common/ClusterResourceListener.html | 252 + .../org/apache/kafka/common/Configurable.html | 230 + .../apache/kafka/common/ConsumerGroupState.html | 420 + .../org/apache/kafka/common/ElectionType.html | 391 + 26/javadoc/org/apache/kafka/common/Endpoint.html | 379 + .../kafka/common/InvalidRecordException.html | 295 + .../org/apache/kafka/common/IsolationLevel.html | 364 + .../org/apache/kafka/common/KafkaException.html | 307 + .../kafka/common/KafkaFuture.BaseFunction.html | 229 + .../kafka/common/KafkaFuture.BiConsumer.html | 227 + .../apache/kafka/common/KafkaFuture.Function.html | 254 + .../org/apache/kafka/common/KafkaFuture.html | 622 + 26/javadoc/org/apache/kafka/common/Metric.html | 258 + 26/javadoc/org/apache/kafka/common/MetricName.html | 413 + .../apache/kafka/common/MetricNameTemplate.html | 436 + 26/javadoc/org/apache/kafka/common/Node.html | 461 + .../org/apache/kafka/common/PartitionInfo.html | 401 + .../org/apache/kafka/common/Reconfigurable.html | 280 + .../org/apache/kafka/common/TopicPartition.html | 344 + .../apache/kafka/common/TopicPartitionInfo.html | 391 + .../apache/kafka/common/TopicPartitionReplica.html | 359 + .../kafka/common/acl/AccessControlEntry.html | 422 + .../kafka/common/acl/AccessControlEntryFilter.html | 497 + .../org/apache/kafka/common/acl/AclBinding.html | 413 + .../apache/kafka/common/acl/AclBindingFilter.html | 485 + .../org/apache/kafka/common/acl/AclOperation.html | 601 + .../apache/kafka/common/acl/AclPermissionType.html | 450 + .../org/apache/kafka/common/acl/package-frame.html | 27 + .../apache/kafka/common/acl/package-summary.html | 182 + .../org/apache/kafka/common/acl/package-tree.html | 150 + .../annotation/InterfaceStability.Evolving.html | 163 + .../annotation/InterfaceStability.Stable.html | 168 + .../annotation/InterfaceStability.Unstable.html | 163 + .../common/annotation/InterfaceStability.html | 271 + .../kafka/common/annotation/package-frame.html | 25 + .../kafka/common/annotation/package-summary.html | 171 + .../kafka/common/annotation/package-tree.html | 140 + .../apache/kafka/common/config/AbstractConfig.html | 919 ++ .../org/apache/kafka/common/config/Config.html | 268 + .../kafka/common/config/ConfigChangeCallback.html | 229 + .../org/apache/kafka/common/config/ConfigData.html | 325 + .../ConfigDef.CaseInsensitiveValidString.html | 286 + .../config/ConfigDef.CompositeValidator.html | 286 + .../kafka/common/config/ConfigDef.ConfigKey.html | 501 + .../kafka/common/config/ConfigDef.Importance.html | 355 + .../common/config/ConfigDef.LambdaValidator.html | 288 + .../common/config/ConfigDef.NonEmptyString.html | 307 + ...onfigDef.NonEmptyStringWithoutControlChars.html | 320 + .../common/config/ConfigDef.NonNullValidator.html | 307 + .../kafka/common/config/ConfigDef.Range.html | 312 + .../kafka/common/config/ConfigDef.Recommender.html | 264 + .../apache/kafka/common/config/ConfigDef.Type.html | 427 + .../kafka/common/config/ConfigDef.ValidList.html | 286 + .../kafka/common/config/ConfigDef.ValidString.html | 286 + .../kafka/common/config/ConfigDef.Validator.html | 239 + .../kafka/common/config/ConfigDef.Width.html | 367 + .../org/apache/kafka/common/config/ConfigDef.html | 1610 ++ .../kafka/common/config/ConfigException.html | 300 + .../kafka/common/config/ConfigResource.Type.html | 393 + .../apache/kafka/common/config/ConfigResource.html | 387 + .../kafka/common/config/ConfigTransformer.html | 344 + 
.../common/config/ConfigTransformerResult.html | 311 + .../apache/kafka/common/config/ConfigValue.html | 441 + .../apache/kafka/common/config/LogLevelConfig.html | 401 + .../apache/kafka/common/config/SaslConfigs.html | 1070 ++ .../apache/kafka/common/config/SecurityConfig.html | 292 + .../apache/kafka/common/config/SslClientAuth.html | 400 + .../org/apache/kafka/common/config/SslConfigs.html | 1077 ++ .../apache/kafka/common/config/TopicConfig.html | 1113 ++ .../apache/kafka/common/config/package-frame.html | 59 + .../kafka/common/config/package-summary.html | 336 + .../apache/kafka/common/config/package-tree.html | 195 + .../common/config/provider/ConfigProvider.html | 353 + .../common/config/provider/FileConfigProvider.html | 378 + .../common/config/provider/package-frame.html | 23 + .../common/config/provider/package-summary.html | 158 + .../kafka/common/config/provider/package-tree.html | 151 + .../apache/kafka/common/errors/ApiException.html | 351 + .../common/errors/AuthenticationException.html | 323 + .../common/errors/AuthorizationException.html | 299 + .../common/errors/BrokerNotAvailableException.html | 295 + .../errors/ClusterAuthorizationException.html | 300 + .../errors/ConcurrentTransactionsException.html | 281 + .../common/errors/ControllerMovedException.html | 295 + .../errors/CoordinatorLoadInProgressException.html | 306 + .../errors/CoordinatorNotAvailableException.html | 341 + .../common/errors/CorruptRecordException.html | 326 + .../DelegationTokenAuthorizationException.html | 300 + .../errors/DelegationTokenDisabledException.html | 295 + .../errors/DelegationTokenExpiredException.html | 295 + .../errors/DelegationTokenNotFoundException.html | 295 + .../DelegationTokenOwnerMismatchException.html | 295 + .../kafka/common/errors/DisconnectException.html | 361 + .../common/errors/DuplicateSequenceException.html | 281 + .../common/errors/ElectionNotNeededException.html | 305 + .../EligibleLeadersNotAvailableException.html | 305 + .../common/errors/FencedInstanceIdException.html | 295 + .../common/errors/FencedLeaderEpochException.html | 308 + .../errors/FetchSessionIdNotFoundException.html | 298 + .../common/errors/GroupAuthorizationException.html | 355 + .../common/errors/GroupIdNotFoundException.html | 281 + .../errors/GroupMaxSizeReachedException.html | 282 + .../common/errors/GroupNotEmptyException.html | 281 + .../errors/GroupSubscribedToTopicException.html | 281 + .../common/errors/IllegalGenerationException.html | 319 + .../common/errors/IllegalSaslStateException.html | 303 + .../errors/InconsistentGroupProtocolException.html | 295 + .../kafka/common/errors/InterruptException.html | 296 + .../errors/InvalidCommitOffsetSizeException.html | 295 + .../errors/InvalidConfigurationException.html | 295 + .../errors/InvalidFetchSessionEpochException.html | 298 + .../common/errors/InvalidFetchSizeException.html | 295 + .../common/errors/InvalidGroupIdException.html | 295 + .../common/errors/InvalidMetadataException.html | 329 + .../common/errors/InvalidOffsetException.html | 302 + .../common/errors/InvalidPartitionsException.html | 295 + .../common/errors/InvalidPidMappingException.html | 281 + .../errors/InvalidPrincipalTypeException.html | 295 + .../errors/InvalidReplicaAssignmentException.html | 295 + .../errors/InvalidReplicationFactorException.html | 295 + .../common/errors/InvalidRequestException.html | 298 + .../errors/InvalidRequiredAcksException.html | 281 + .../errors/InvalidSessionTimeoutException.html | 295 + .../common/errors/InvalidTimestampException.html | 296 
+ .../kafka/common/errors/InvalidTopicException.html | 369 + .../common/errors/InvalidTxnStateException.html | 281 + .../common/errors/InvalidTxnTimeoutException.html | 297 + .../kafka/common/errors/KafkaStorageException.html | 338 + .../common/errors/LeaderNotAvailableException.html | 307 + .../common/errors/ListenerNotFoundException.html | 310 + .../common/errors/LogDirNotFoundException.html | 308 + .../common/errors/MemberIdRequiredException.html | 295 + .../kafka/common/errors/NetworkException.html | 331 + .../errors/NoReassignmentInProgressException.html | 296 + .../common/errors/NotControllerException.html | 300 + .../common/errors/NotCoordinatorException.html | 305 + .../NotEnoughReplicasAfterAppendException.html | 288 + .../common/errors/NotEnoughReplicasException.html | 325 + .../errors/NotLeaderForPartitionException.html | 330 + .../common/errors/OffsetMetadataTooLarge.html | 320 + .../common/errors/OffsetNotAvailableException.html | 288 + .../common/errors/OffsetOutOfRangeException.html | 302 + .../errors/OperationNotAttemptedException.html | 283 + .../common/errors/OutOfOrderSequenceException.html | 291 + .../common/errors/PolicyViolationException.html | 296 + .../PreferredLeaderNotAvailableException.html | 305 + .../common/errors/ProducerFencedException.html | 285 + .../errors/ReassignmentInProgressException.html | 296 + .../errors/RebalanceInProgressException.html | 319 + .../errors/RecordBatchTooLargeException.html | 320 + .../common/errors/RecordTooLargeException.html | 368 + .../errors/ReplicaNotAvailableException.html | 307 + .../kafka/common/errors/RetriableException.html | 324 + .../common/errors/SaslAuthenticationException.html | 313 + .../common/errors/SecurityDisabledException.html | 296 + .../common/errors/SerializationException.html | 346 + .../common/errors/SslAuthenticationException.html | 309 + .../common/errors/StaleBrokerEpochException.html | 295 + .../kafka/common/errors/TimeoutException.html | 329 + .../common/errors/TopicAuthorizationException.html | 354 + .../errors/TopicDeletionDisabledException.html | 293 + .../kafka/common/errors/TopicExistsException.html | 295 + .../TransactionCoordinatorFencedException.html | 295 + .../TransactionalIdAuthorizationException.html | 286 + .../common/errors/UnknownLeaderEpochException.html | 303 + .../common/errors/UnknownMemberIdException.html | 319 + .../common/errors/UnknownProducerIdException.html | 290 + .../common/errors/UnknownServerException.html | 321 + .../errors/UnknownTopicOrPartitionException.html | 333 + .../errors/UnstableOffsetCommitException.html | 287 + .../UnsupportedByAuthenticationException.html | 296 + .../UnsupportedCompressionTypeException.html | 296 + .../UnsupportedForMessageFormatException.html | 297 + .../errors/UnsupportedSaslMechanismException.html | 302 + .../common/errors/UnsupportedVersionException.html | 303 + .../kafka/common/errors/WakeupException.html | 273 + .../apache/kafka/common/errors/package-frame.html | 117 + .../kafka/common/errors/package-summary.html | 654 + .../apache/kafka/common/errors/package-tree.html | 269 + .../org/apache/kafka/common/header/Header.html | 231 + .../org/apache/kafka/common/header/Headers.html | 359 + .../apache/kafka/common/header/package-frame.html | 20 + .../kafka/common/header/package-summary.html | 143 + .../apache/kafka/common/header/package-tree.html | 135 + .../org/apache/kafka/common/package-frame.html | 50 + .../org/apache/kafka/common/package-summary.html | 302 + .../org/apache/kafka/common/package-tree.html | 192 + 
.../apache/kafka/common/resource/PatternType.html | 488 + .../org/apache/kafka/common/resource/Resource.html | 443 + .../kafka/common/resource/ResourceFilter.html | 455 + .../kafka/common/resource/ResourcePattern.html | 452 + .../common/resource/ResourcePatternFilter.html | 490 + .../apache/kafka/common/resource/ResourceType.html | 498 + .../kafka/common/resource/package-frame.html | 27 + .../kafka/common/resource/package-summary.html | 183 + .../apache/kafka/common/resource/package-tree.html | 150 + .../security/auth/AuthenticateCallbackHandler.html | 277 + .../security/auth/AuthenticationContext.html | 260 + .../security/auth/DefaultPrincipalBuilder.html | 347 + .../kafka/common/security/auth/KafkaPrincipal.html | 480 + .../security/auth/KafkaPrincipalBuilder.html | 235 + .../apache/kafka/common/security/auth/Login.html | 312 + .../auth/PlaintextAuthenticationContext.html | 325 + .../common/security/auth/PrincipalBuilder.html | 291 + .../security/auth/SaslAuthenticationContext.html | 342 + .../kafka/common/security/auth/SaslExtensions.html | 362 + .../security/auth/SaslExtensionsCallback.html | 304 + .../common/security/auth/SecurityProtocol.html | 471 + .../security/auth/SecurityProviderCreator.html | 250 + .../security/auth/SslAuthenticationContext.html | 340 + .../common/security/auth/SslEngineFactory.html | 367 + .../kafka/common/security/auth/package-frame.html | 39 + .../common/security/auth/package-summary.html | 239 + .../kafka/common/security/auth/package-tree.html | 179 + .../OAuthBearerExtensionsValidatorCallback.html | 405 + .../oauthbearer/OAuthBearerLoginModule.html | 593 + .../security/oauthbearer/OAuthBearerToken.html | 359 + .../oauthbearer/OAuthBearerTokenCallback.html | 416 + .../oauthbearer/OAuthBearerValidatorCallback.html | 456 + .../common/security/oauthbearer/package-frame.html | 26 + .../security/oauthbearer/package-summary.html | 185 + .../common/security/oauthbearer/package-tree.html | 141 + .../security/plain/PlainAuthenticateCallback.html | 321 + .../common/security/plain/PlainLoginModule.html | 351 + .../kafka/common/security/plain/package-frame.html | 20 + .../common/security/plain/package-summary.html | 143 + .../kafka/common/security/plain/package-tree.html | 135 + .../common/security/scram/ScramCredential.html | 331 + .../security/scram/ScramCredentialCallback.html | 293 + .../security/scram/ScramExtensionsCallback.html | 297 + .../common/security/scram/ScramLoginModule.html | 391 + .../kafka/common/security/scram/package-frame.html | 22 + .../common/security/scram/package-summary.html | 159 + .../kafka/common/security/scram/package-tree.html | 137 + .../security/token/delegation/DelegationToken.html | 349 + .../token/delegation/TokenInformation.html | 448 + .../security/token/delegation/package-frame.html | 20 + .../security/token/delegation/package-summary.html | 147 + .../security/token/delegation/package-tree.html | 135 + .../serialization/ByteArrayDeserializer.html | 295 + .../common/serialization/ByteArraySerializer.html | 295 + .../serialization/ByteBufferDeserializer.html | 295 + .../common/serialization/ByteBufferSerializer.html | 295 + .../common/serialization/BytesDeserializer.html | 295 + .../common/serialization/BytesSerializer.html | 295 + .../kafka/common/serialization/Deserializer.html | 327 + .../common/serialization/DoubleDeserializer.html | 295 + .../common/serialization/DoubleSerializer.html | 295 + .../ExtendedDeserializer.Wrapper.html | 406 + .../common/serialization/ExtendedDeserializer.html | 289 + 
.../serialization/ExtendedSerializer.Wrapper.html | 410 + .../common/serialization/ExtendedSerializer.html | 291 + .../common/serialization/FloatDeserializer.html | 295 + .../common/serialization/FloatSerializer.html | 295 + .../common/serialization/IntegerDeserializer.html | 295 + .../common/serialization/IntegerSerializer.html | 295 + .../common/serialization/LongDeserializer.html | 295 + .../kafka/common/serialization/LongSerializer.html | 295 + .../apache/kafka/common/serialization/Serde.html | 294 + .../serialization/Serdes.ByteArraySerde.html | 254 + .../serialization/Serdes.ByteBufferSerde.html | 254 + .../common/serialization/Serdes.BytesSerde.html | 254 + .../common/serialization/Serdes.DoubleSerde.html | 254 + .../common/serialization/Serdes.FloatSerde.html | 254 + .../common/serialization/Serdes.IntegerSerde.html | 254 + .../common/serialization/Serdes.LongSerde.html | 254 + .../common/serialization/Serdes.ShortSerde.html | 254 + .../common/serialization/Serdes.StringSerde.html | 254 + .../common/serialization/Serdes.UUIDSerde.html | 254 + .../common/serialization/Serdes.VoidSerde.html | 254 + .../common/serialization/Serdes.WrapperSerde.html | 357 + .../apache/kafka/common/serialization/Serdes.html | 498 + .../kafka/common/serialization/Serializer.html | 331 + .../common/serialization/ShortDeserializer.html | 295 + .../common/serialization/ShortSerializer.html | 295 + .../common/serialization/StringDeserializer.html | 323 + .../common/serialization/StringSerializer.html | 323 + .../common/serialization/UUIDDeserializer.html | 323 + .../kafka/common/serialization/UUIDSerializer.html | 323 + .../common/serialization/VoidDeserializer.html | 295 + .../kafka/common/serialization/VoidSerializer.html | 295 + .../kafka/common/serialization/package-frame.html | 63 + .../common/serialization/package-summary.html | 336 + .../kafka/common/serialization/package-tree.html | 195 + .../apache/kafka/connect/components/Versioned.html | 234 + .../kafka/connect/components/package-frame.html | 19 + .../kafka/connect/components/package-summary.html | 141 + .../kafka/connect/components/package-tree.html | 130 + .../kafka/connect/connector/ConnectRecord.html | 550 + .../apache/kafka/connect/connector/Connector.html | 543 + .../kafka/connect/connector/ConnectorContext.html | 249 + .../org/apache/kafka/connect/connector/Task.html | 276 + .../kafka/connect/connector/package-frame.html | 25 + .../kafka/connect/connector/package-summary.html | 174 + .../kafka/connect/connector/package-tree.html | 140 + .../ConnectorClientConfigOverridePolicy.html | 258 + .../ConnectorClientConfigRequest.ClientType.html | 354 + .../policy/ConnectorClientConfigRequest.html | 392 + .../connect/connector/policy/package-frame.html | 27 + .../connect/connector/policy/package-summary.html | 171 + .../connect/connector/policy/package-tree.html | 159 + .../apache/kafka/connect/data/ConnectSchema.html | 745 + 26/javadoc/org/apache/kafka/connect/data/Date.html | 374 + .../org/apache/kafka/connect/data/Decimal.html | 401 + .../org/apache/kafka/connect/data/Field.html | 373 + .../org/apache/kafka/connect/data/Schema.Type.html | 546 + .../org/apache/kafka/connect/data/Schema.html | 730 + .../apache/kafka/connect/data/SchemaAndValue.html | 370 + .../apache/kafka/connect/data/SchemaBuilder.html | 1029 ++ .../apache/kafka/connect/data/SchemaProjector.html | 290 + .../org/apache/kafka/connect/data/Struct.html | 680 + 26/javadoc/org/apache/kafka/connect/data/Time.html | 374 + .../org/apache/kafka/connect/data/Timestamp.html | 372 + 
.../apache/kafka/connect/data/Values.Parser.html | 419 + .../kafka/connect/data/Values.SchemaDetector.html | 285 + .../org/apache/kafka/connect/data/Values.html | 1018 ++ .../apache/kafka/connect/data/package-frame.html | 39 + .../apache/kafka/connect/data/package-summary.html | 249 + .../apache/kafka/connect/data/package-tree.html | 162 + .../connect/errors/AlreadyExistsException.html | 301 + .../kafka/connect/errors/ConnectException.html | 300 + .../apache/kafka/connect/errors/DataException.html | 305 + .../errors/IllegalWorkerStateException.html | 301 + .../kafka/connect/errors/NotFoundException.html | 301 + .../kafka/connect/errors/RetriableException.html | 301 + .../connect/errors/SchemaBuilderException.html | 305 + .../connect/errors/SchemaProjectorException.html | 305 + .../apache/kafka/connect/errors/package-frame.html | 26 + .../kafka/connect/errors/package-summary.html | 179 + .../apache/kafka/connect/errors/package-tree.html | 163 + .../kafka/connect/header/ConnectHeaders.html | 1186 ++ .../org/apache/kafka/connect/header/Header.html | 316 + .../connect/header/Headers.HeaderTransform.html | 233 + .../org/apache/kafka/connect/header/Headers.html | 969 ++ .../apache/kafka/connect/header/package-frame.html | 25 + .../kafka/connect/header/package-summary.html | 170 + .../apache/kafka/connect/header/package-tree.html | 144 + .../apache/kafka/connect/health/AbstractState.html | 367 + .../connect/health/ConnectClusterDetails.html | 227 + .../kafka/connect/health/ConnectClusterState.html | 303 + .../kafka/connect/health/ConnectorHealth.html | 393 + .../kafka/connect/health/ConnectorState.html | 298 + .../apache/kafka/connect/health/ConnectorType.html | 377 + .../org/apache/kafka/connect/health/TaskState.html | 355 + .../apache/kafka/connect/health/package-frame.html | 31 + .../kafka/connect/health/package-summary.html | 200 + .../apache/kafka/connect/health/package-tree.html | 157 + .../apache/kafka/connect/mirror/Checkpoint.html | 570 + .../connect/mirror/DefaultReplicationPolicy.html | 417 + .../org/apache/kafka/connect/mirror/Heartbeat.html | 476 + .../apache/kafka/connect/mirror/MirrorClient.html | 474 + .../kafka/connect/mirror/MirrorClientConfig.html | 457 + .../kafka/connect/mirror/RemoteClusterUtils.html | 366 + .../kafka/connect/mirror/ReplicationPolicy.html | 300 + .../kafka/connect/mirror/SourceAndTarget.html | 335 + .../apache/kafka/connect/mirror/package-frame.html | 29 + .../kafka/connect/mirror/package-summary.html | 194 + .../apache/kafka/connect/mirror/package-tree.html | 148 + .../kafka/connect/rest/ConnectRestExtension.html | 270 + .../connect/rest/ConnectRestExtensionContext.html | 248 + .../apache/kafka/connect/rest/package-frame.html | 20 + .../apache/kafka/connect/rest/package-summary.html | 148 + .../apache/kafka/connect/rest/package-tree.html | 149 + .../kafka/connect/sink/ErrantRecordReporter.html | 254 + .../apache/kafka/connect/sink/SinkConnector.html | 359 + .../kafka/connect/sink/SinkConnectorContext.html | 191 + .../org/apache/kafka/connect/sink/SinkRecord.html | 511 + .../org/apache/kafka/connect/sink/SinkTask.html | 605 + .../apache/kafka/connect/sink/SinkTaskContext.html | 422 + .../apache/kafka/connect/sink/package-frame.html | 27 + .../apache/kafka/connect/sink/package-summary.html | 183 + .../apache/kafka/connect/sink/package-tree.html | 154 + .../kafka/connect/source/SourceConnector.html | 318 + .../connect/source/SourceConnectorContext.html | 238 + .../apache/kafka/connect/source/SourceRecord.html | 589 + 
.../apache/kafka/connect/source/SourceTask.html | 512 + .../kafka/connect/source/SourceTaskContext.html | 243 + .../apache/kafka/connect/source/package-frame.html | 26 + .../kafka/connect/source/package-summary.html | 180 + .../apache/kafka/connect/source/package-tree.html | 153 + .../apache/kafka/connect/storage/Converter.html | 360 + .../kafka/connect/storage/ConverterConfig.html | 363 + .../kafka/connect/storage/ConverterType.html | 386 + .../kafka/connect/storage/HeaderConverter.html | 307 + .../kafka/connect/storage/OffsetStorageReader.html | 271 + .../connect/storage/SimpleHeaderConverter.html | 393 + .../kafka/connect/storage/StringConverter.html | 490 + .../connect/storage/StringConverterConfig.html | 384 + .../kafka/connect/storage/package-frame.html | 32 + .../kafka/connect/storage/package-summary.html | 206 + .../apache/kafka/connect/storage/package-tree.html | 175 + .../kafka/connect/transforms/Transformation.html | 280 + .../kafka/connect/transforms/package-frame.html | 19 + .../kafka/connect/transforms/package-summary.html | 141 + .../kafka/connect/transforms/package-tree.html | 143 + .../connect/transforms/predicates/Predicate.html | 289 + .../transforms/predicates/package-frame.html | 19 + .../transforms/predicates/package-summary.html | 141 + .../transforms/predicates/package-tree.html | 139 + .../apache/kafka/connect/util/ConnectorUtils.html | 287 + .../apache/kafka/connect/util/package-frame.html | 19 + .../apache/kafka/connect/util/package-summary.html | 141 + .../apache/kafka/connect/util/package-tree.html | 134 + .../kafka/server/authorizer/AclCreateResult.html | 308 + .../AclDeleteResult.AclBindingDeleteResult.html | 308 + .../kafka/server/authorizer/AclDeleteResult.html | 323 + .../org/apache/kafka/server/authorizer/Action.html | 407 + .../authorizer/AuthorizableRequestContext.html | 337 + .../server/authorizer/AuthorizationResult.html | 339 + .../apache/kafka/server/authorizer/Authorizer.html | 405 + .../server/authorizer/AuthorizerServerInfo.html | 273 + .../kafka/server/authorizer/package-frame.html | 32 + .../kafka/server/authorizer/package-summary.html | 198 + .../kafka/server/authorizer/package-tree.html | 168 + .../policy/AlterConfigPolicy.RequestMetadata.html | 313 + .../kafka/server/policy/AlterConfigPolicy.html | 282 + .../policy/CreateTopicPolicy.RequestMetadata.html | 383 + .../kafka/server/policy/CreateTopicPolicy.html | 281 + .../apache/kafka/server/policy/package-frame.html | 25 + .../kafka/server/policy/package-summary.html | 170 + .../apache/kafka/server/policy/package-tree.html | 150 + .../kafka/server/quota/ClientQuotaCallback.html | 402 + .../quota/ClientQuotaEntity.ConfigEntity.html | 245 + .../quota/ClientQuotaEntity.ConfigEntityType.html | 367 + .../kafka/server/quota/ClientQuotaEntity.html | 253 + .../apache/kafka/server/quota/ClientQuotaType.html | 351 + .../apache/kafka/server/quota/package-frame.html | 26 + .../apache/kafka/server/quota/package-summary.html | 176 + .../apache/kafka/server/quota/package-tree.html | 149 + .../apache/kafka/streams/KafkaClientSupplier.html | 350 + .../apache/kafka/streams/KafkaStreams.State.html | 456 + .../kafka/streams/KafkaStreams.StateListener.html | 233 + .../org/apache/kafka/streams/KafkaStreams.html | 1144 ++ .../org/apache/kafka/streams/KeyQueryMetadata.html | 415 + 26/javadoc/org/apache/kafka/streams/KeyValue.html | 410 + 26/javadoc/org/apache/kafka/streams/LagInfo.html | 336 + .../apache/kafka/streams/StoreQueryParameters.html | 412 + .../org/apache/kafka/streams/StreamsBuilder.html | 930 ++ 
.../streams/StreamsConfig.InternalConfig.html | 397 + .../org/apache/kafka/streams/StreamsConfig.html | 2320 +++ .../org/apache/kafka/streams/StreamsMetrics.html | 584 + .../org/apache/kafka/streams/TestInputTopic.html | 561 + .../org/apache/kafka/streams/TestOutputTopic.html | 455 + .../kafka/streams/Topology.AutoOffsetReset.html | 345 + 26/javadoc/org/apache/kafka/streams/Topology.html | 1430 ++ .../streams/TopologyDescription.GlobalStore.html | 268 + .../kafka/streams/TopologyDescription.Node.html | 278 + .../streams/TopologyDescription.Processor.html | 242 + .../kafka/streams/TopologyDescription.Sink.html | 264 + .../kafka/streams/TopologyDescription.Source.html | 286 + .../streams/TopologyDescription.Subtopology.html | 254 + .../apache/kafka/streams/TopologyDescription.html | 305 + .../apache/kafka/streams/TopologyTestDriver.html | 1000 ++ .../streams/errors/BrokerNotFoundException.html | 303 + .../errors/DefaultProductionExceptionHandler.html | 326 + ...tionHandler.DeserializationHandlerResponse.html | 398 + .../errors/DeserializationExceptionHandler.html | 270 + .../streams/errors/InvalidStateStoreException.html | 307 + .../apache/kafka/streams/errors/LockException.html | 302 + .../errors/LogAndContinueExceptionHandler.html | 327 + .../streams/errors/LogAndFailExceptionHandler.html | 327 + .../streams/errors/ProcessorStateException.html | 302 + ...Handler.ProductionExceptionHandlerResponse.html | 397 + .../streams/errors/ProductionExceptionHandler.html | 267 + .../kafka/streams/errors/StreamsException.html | 300 + .../streams/errors/TaskAssignmentException.html | 303 + .../streams/errors/TaskCorruptedException.html | 328 + .../streams/errors/TaskIdFormatException.html | 303 + .../streams/errors/TaskMigratedException.html | 291 + .../kafka/streams/errors/TopologyException.html | 302 + .../apache/kafka/streams/errors/package-frame.html | 44 + .../kafka/streams/errors/package-summary.html | 280 + .../apache/kafka/streams/errors/package-tree.html | 187 + .../apache/kafka/streams/kstream/Aggregator.html | 258 + .../kafka/streams/kstream/CogroupedKStream.html | 569 + .../org/apache/kafka/streams/kstream/Consumed.html | 660 + .../kafka/streams/kstream/ForeachAction.html | 245 + .../apache/kafka/streams/kstream/GlobalKTable.html | 271 + .../org/apache/kafka/streams/kstream/Grouped.html | 535 + .../apache/kafka/streams/kstream/Initializer.html | 241 + .../apache/kafka/streams/kstream/JoinWindows.html | 728 + .../org/apache/kafka/streams/kstream/Joined.html | 703 + .../kafka/streams/kstream/KGroupedStream.html | 931 ++ .../kafka/streams/kstream/KGroupedTable.html | 1049 ++ .../org/apache/kafka/streams/kstream/KStream.html | 5468 +++++++ .../org/apache/kafka/streams/kstream/KTable.html | 3320 ++++ .../kafka/streams/kstream/KeyValueMapper.html | 261 + .../apache/kafka/streams/kstream/Materialized.html | 719 + .../org/apache/kafka/streams/kstream/Merger.html | 241 + .../org/apache/kafka/streams/kstream/Named.html | 365 + .../apache/kafka/streams/kstream/Predicate.html | 248 + .../org/apache/kafka/streams/kstream/Printed.html | 477 + .../org/apache/kafka/streams/kstream/Produced.html | 642 + .../org/apache/kafka/streams/kstream/Reducer.html | 253 + .../kafka/streams/kstream/Repartitioned.html | 581 + .../apache/kafka/streams/kstream/Serialized.html | 411 + .../kstream/SessionWindowedCogroupedKStream.html | 507 + .../kstream/SessionWindowedDeserializer.html | 364 + .../streams/kstream/SessionWindowedKStream.html | 1013 ++ .../streams/kstream/SessionWindowedSerializer.html | 383 + 
.../kafka/streams/kstream/SessionWindows.html | 489 + .../apache/kafka/streams/kstream/StreamJoined.html | 656 + .../streams/kstream/Suppressed.BufferConfig.html | 421 + .../kstream/Suppressed.EagerBufferConfig.html | 197 + .../kstream/Suppressed.StrictBufferConfig.html | 196 + .../apache/kafka/streams/kstream/Suppressed.html | 335 + .../kstream/TimeWindowedCogroupedKStream.html | 486 + .../streams/kstream/TimeWindowedDeserializer.html | 404 + .../kafka/streams/kstream/TimeWindowedKStream.html | 1000 ++ .../streams/kstream/TimeWindowedSerializer.html | 383 + .../apache/kafka/streams/kstream/TimeWindows.html | 667 + .../apache/kafka/streams/kstream/Transformer.html | 319 + .../kafka/streams/kstream/TransformerSupplier.html | 253 + .../kafka/streams/kstream/UnlimitedWindows.html | 548 + .../apache/kafka/streams/kstream/ValueJoiner.html | 258 + .../apache/kafka/streams/kstream/ValueMapper.html | 252 + .../kafka/streams/kstream/ValueMapperWithKey.html | 257 + .../kafka/streams/kstream/ValueTransformer.html | 317 + .../streams/kstream/ValueTransformerSupplier.html | 254 + .../streams/kstream/ValueTransformerWithKey.html | 321 + .../kstream/ValueTransformerWithKeySupplier.html | 246 + .../org/apache/kafka/streams/kstream/Window.html | 485 + .../org/apache/kafka/streams/kstream/Windowed.html | 365 + .../WindowedSerdes.SessionWindowedSerde.html | 266 + .../kstream/WindowedSerdes.TimeWindowedSerde.html | 314 + .../kafka/streams/kstream/WindowedSerdes.html | 330 + .../org/apache/kafka/streams/kstream/Windows.html | 470 + .../kafka/streams/kstream/package-frame.html | 75 + .../kafka/streams/kstream/package-summary.html | 481 + .../apache/kafka/streams/kstream/package-tree.html | 208 + .../org/apache/kafka/streams/package-frame.html | 48 + .../org/apache/kafka/streams/package-summary.html | 303 + .../org/apache/kafka/streams/package-tree.html | 178 + .../kafka/streams/processor/AbstractProcessor.html | 348 + .../processor/BatchingStateRestoreCallback.html | 254 + .../kafka/streams/processor/Cancellable.html | 226 + .../streams/processor/ConnectedStoreProvider.html | 306 + .../streams/processor/DefaultPartitionGrouper.html | 323 + .../streams/processor/FailOnInvalidTimestamp.html | 340 + .../processor/LogAndSkipOnInvalidTimestamp.html | 341 + .../MockProcessorContext.CapturedForward.html | 302 + .../MockProcessorContext.CapturedPunctuator.html | 291 + .../streams/processor/MockProcessorContext.html | 1262 ++ .../kafka/streams/processor/PartitionGrouper.html | 254 + .../apache/kafka/streams/processor/Processor.html | 286 + .../kafka/streams/processor/ProcessorContext.html | 793 + .../kafka/streams/processor/ProcessorSupplier.html | 247 + .../kafka/streams/processor/PunctuationType.html | 348 + .../apache/kafka/streams/processor/Punctuator.html | 230 + .../kafka/streams/processor/RecordContext.html | 297 + .../streams/processor/StateRestoreCallback.html | 226 + .../streams/processor/StateRestoreListener.html | 313 + .../apache/kafka/streams/processor/StateStore.html | 356 + .../kafka/streams/processor/StreamPartitioner.html | 270 + .../org/apache/kafka/streams/processor/TaskId.html | 465 + .../kafka/streams/processor/TaskMetadata.html | 335 + .../kafka/streams/processor/ThreadMetadata.html | 425 + .../streams/processor/TimestampExtractor.html | 250 + .../org/apache/kafka/streams/processor/To.html | 422 + .../streams/processor/TopicNameExtractor.html | 237 + .../UsePartitionTimeOnInvalidTimestamp.html | 340 + .../UsePreviousTimeOnInvalidTimestamp.html | 349 + .../processor/WallclockTimestampExtractor.html 
| 299 + .../kafka/streams/processor/package-frame.html | 54 + .../kafka/streams/processor/package-summary.html | 347 + .../kafka/streams/processor/package-tree.html | 183 + .../org/apache/kafka/streams/state/HostInfo.html | 383 + .../streams/state/KeyValueBytesStoreSupplier.html | 196 + .../kafka/streams/state/KeyValueIterator.html | 270 + .../apache/kafka/streams/state/KeyValueStore.html | 341 + .../kafka/streams/state/QueryableStoreType.html | 270 + .../QueryableStoreTypes.KeyValueStoreType.html | 258 + .../QueryableStoreTypes.SessionStoreType.html | 258 + .../state/QueryableStoreTypes.WindowStoreType.html | 258 + .../kafka/streams/state/QueryableStoreTypes.html | 400 + .../kafka/streams/state/ReadOnlyKeyValueStore.html | 330 + .../kafka/streams/state/ReadOnlySessionStore.html | 280 + .../kafka/streams/state/ReadOnlyWindowStore.html | 530 + .../kafka/streams/state/RocksDBConfigSetter.html | 303 + .../streams/state/SessionBytesStoreSupplier.html | 266 + .../apache/kafka/streams/state/SessionStore.html | 397 + .../apache/kafka/streams/state/StateSerdes.html | 545 + .../apache/kafka/streams/state/StoreBuilder.html | 376 + .../apache/kafka/streams/state/StoreSupplier.html | 275 + .../org/apache/kafka/streams/state/Stores.html | 830 + .../kafka/streams/state/StreamsMetadata.html | 487 + .../kafka/streams/state/TimestampedBytesStore.html | 218 + .../streams/state/TimestampedKeyValueStore.html | 210 + .../streams/state/TimestampedWindowStore.html | 217 + .../kafka/streams/state/ValueAndTimestamp.html | 359 + .../streams/state/WindowBytesStoreSupplier.html | 333 + .../apache/kafka/streams/state/WindowStore.html | 558 + .../kafka/streams/state/WindowStoreIterator.html | 254 + .../apache/kafka/streams/state/package-frame.html | 48 + .../kafka/streams/state/package-summary.html | 304 + .../apache/kafka/streams/state/package-tree.html | 217 + .../kafka/streams/test/ConsumerRecordFactory.html | 1164 ++ .../apache/kafka/streams/test/OutputVerifier.html | 785 + .../org/apache/kafka/streams/test/TestRecord.html | 630 + .../apache/kafka/streams/test/package-frame.html | 21 + .../apache/kafka/streams/test/package-summary.html | 153 + .../apache/kafka/streams/test/package-tree.html | 136 + 26/javadoc/overview-frame.html | 60 + 26/javadoc/overview-summary.html | 295 + 26/javadoc/overview-tree.html | 1022 ++ 26/javadoc/package-list | 41 + 26/javadoc/script.js | 30 + 26/javadoc/serialized-form.html | 1350 ++ 26/javadoc/stylesheet.css | 574 + 26/js/templateData.js | 24 + 26/migration.html | 34 + 26/ops.html | 2255 +++ 26/protocol.html | 228 + 26/quickstart.html | 300 + 26/security.html | 2212 +++ 26/streams/architecture.html | 186 + 26/streams/core-concepts.html | 306 + 26/streams/developer-guide/app-reset-tool.html | 200 + 26/streams/developer-guide/config-streams.html | 904 ++ 26/streams/developer-guide/datatypes.html | 239 + 26/streams/developer-guide/dsl-api.html | 3929 +++++ .../developer-guide/dsl-topology-naming.html | 370 + 26/streams/developer-guide/index.html | 106 + .../developer-guide/interactive-queries.html | 529 + 26/streams/developer-guide/manage-topics.html | 128 + 26/streams/developer-guide/memory-mgmt.html | 289 + 26/streams/developer-guide/processor-api.html | 494 + 26/streams/developer-guide/running-app.html | 178 + 26/streams/developer-guide/security.html | 196 + 26/streams/developer-guide/testing.html | 439 + 26/streams/developer-guide/write-streams.html | 263 + 26/streams/index.html | 370 + 26/streams/quickstart.html | 391 + 26/streams/tutorial.html | 667 + 26/streams/upgrade-guide.html 
| 973 ++ 26/toc.html | 168 + 26/upgrade.html | 1406 ++ 26/uses.html | 81 +
 893 files changed, 331250 insertions(+)

diff --git a/26/api.html b/26/api.html
new file mode 100644
index 0000000..b6ab1fa
--- /dev/null
+++ b/26/api.html
@@ -0,0 +1,120 @@
diff --git a/26/configuration.html b/26/configuration.html
new file mode 100644
index 0000000..9e913a2
--- /dev/null
+++ b/26/configuration.html
@@ -0,0 +1,293 @@

diff --git a/26/connect.html b/26/connect.html
new file mode 100644
index 0000000..9777b17
--- /dev/null
+++ b/26/connect.html
@@ -0,0 +1,745 @@

diff --git a/26/design.html b/26/design.html
new file mode 100644
index 0000000..3745ab5
--- /dev/null
+++ b/26/design.html
@@ -0,0 +1,660 @@
diff --git a/26/documentation.html b/26/documentation.html
new file mode 100644
index 0000000..ee914f2
--- /dev/null
+++ b/26/documentation.html
@@ -0,0 +1,83 @@
Documentation

Kafka 2.6 Documentation

Prior releases: 0.7.x, 0.8.0, 0.8.1.X, 0.8.2.X, 0.9.0.X, 0.10.0.X, 0.10.1.X, 0.10.2.X, 0.11.0.X, 1.0.X,

1. Getting Started

1.1 Introduction

1.2 Use Cases

1.3 Quick Start

1.4 Ecosystem

1.5 Upgrading From Previous Versions

2. APIs

3. Configuration

4. Design

5. Implementation

6. Operations

7. Security

8. Kafka Connect

9. Kafka Streams

Kafka Streams is a client library for processing and analyzing data stored in Kafka. It builds upon important stream processing concepts such as properly distinguishing between event time and processing time, windowing support, exactly-once processing semantics, and simple yet efficient management of application state.

Kafka Streams has a low barrier to entry: you can quickly write and run a small-scale proof-of-concept on a single machine, and you only need to run additional instances of your application on multiple machines to scale up to high-volume production workloads. Kafka Streams transparently handles the load balancing of multiple instances of the same application by leveraging Kafka's parallelism model.
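For illustration, a minimal Streams application against the 2.6 API might look like the sketch below. The broker address (localhost:9092), the application id, and the topic names are assumptions for the example, not values taken from this release.

    import java.util.Properties;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.KafkaStreams;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.StreamsConfig;
    import org.apache.kafka.streams.kstream.KStream;

    public class StreamsSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            // application.id identifies all instances of this app; Kafka balances
            // partitions across instances sharing the same id, which is how
            // running extra copies scales the workload out.
            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-sketch");    // hypothetical id
            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
            props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
            props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

            // Trivial topology: read "input-topic", upper-case each value, and
            // write the result to "output-topic" (both topic names illustrative).
            StreamsBuilder builder = new StreamsBuilder();
            KStream<String, String> source = builder.stream("input-topic");
            source.mapValues(v -> v.toUpperCase()).to("output-topic");

            KafkaStreams streams = new KafkaStreams(builder.build(), props);
            streams.start();
            Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
        }
    }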
To learn more about Kafka Streams, read this section.
diff --git a/26/documentation/index.html b/26/documentation/index.html
new file mode 100644
index 0000000..1d7507f
--- /dev/null
+++ b/26/documentation/index.html
@@ -0,0 +1,18 @@

diff --git a/26/documentation/streams/architecture.html b/26/documentation/streams/architecture.html
new file mode 100644
index 0000000..ad7b323
--- /dev/null
+++ b/26/documentation/streams/architecture.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/core-concepts.html b/26/documentation/streams/core-concepts.html
new file mode 100644
index 0000000..d699b79
--- /dev/null
+++ b/26/documentation/streams/core-concepts.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/app-reset-tool.html b/26/documentation/streams/developer-guide/app-reset-tool.html
new file mode 100644
index 0000000..64a43aa
--- /dev/null
+++ b/26/documentation/streams/developer-guide/app-reset-tool.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/config-streams.html b/26/documentation/streams/developer-guide/config-streams.html
new file mode 100644
index 0000000..979f66d
--- /dev/null
+++ b/26/documentation/streams/developer-guide/config-streams.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/datatypes.html b/26/documentation/streams/developer-guide/datatypes.html
new file mode 100644
index 0000000..98dd3a1
--- /dev/null
+++ b/26/documentation/streams/developer-guide/datatypes.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/dsl-api.html b/26/documentation/streams/developer-guide/dsl-api.html
new file mode 100644
index 0000000..1bbc06d
--- /dev/null
+++ b/26/documentation/streams/developer-guide/dsl-api.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/dsl-topology-naming.html b/26/documentation/streams/developer-guide/dsl-topology-naming.html
new file mode 100644
index 0000000..db5eee3
--- /dev/null
+++ b/26/documentation/streams/developer-guide/dsl-topology-naming.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/index.html b/26/documentation/streams/developer-guide/index.html
new file mode 100644
index 0000000..3a61247
--- /dev/null
+++ b/26/documentation/streams/developer-guide/index.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/interactive-queries.html b/26/documentation/streams/developer-guide/interactive-queries.html
new file mode 100644
index 0000000..0506012
--- /dev/null
+++ b/26/documentation/streams/developer-guide/interactive-queries.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/manage-topics.html b/26/documentation/streams/developer-guide/manage-topics.html
new file mode 100644
index 0000000..f422554
--- /dev/null
+++ b/26/documentation/streams/developer-guide/manage-topics.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/memory-mgmt.html b/26/documentation/streams/developer-guide/memory-mgmt.html
new file mode 100644
index 0000000..024e137
--- /dev/null
+++ b/26/documentation/streams/developer-guide/memory-mgmt.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/processor-api.html b/26/documentation/streams/developer-guide/processor-api.html
new file mode 100644
index 0000000..9e9ab91
--- /dev/null
+++ b/26/documentation/streams/developer-guide/processor-api.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/running-app.html b/26/documentation/streams/developer-guide/running-app.html
new file mode 100644
index 0000000..05d5f0b
--- /dev/null
+++ b/26/documentation/streams/developer-guide/running-app.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/security.html b/26/documentation/streams/developer-guide/security.html
new file mode 100644
index 0000000..5d6e5f0
--- /dev/null
+++ b/26/documentation/streams/developer-guide/security.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/testing.html b/26/documentation/streams/developer-guide/testing.html
new file mode 100644
index 0000000..4753e66
--- /dev/null
+++ b/26/documentation/streams/developer-guide/testing.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/developer-guide/write-streams.html b/26/documentation/streams/developer-guide/write-streams.html
new file mode 100644
index 0000000..976c6fe
--- /dev/null
+++ b/26/documentation/streams/developer-guide/write-streams.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/index.html b/26/documentation/streams/index.html
new file mode 100644
index 0000000..5ff3b3b
--- /dev/null
+++ b/26/documentation/streams/index.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/quickstart.html b/26/documentation/streams/quickstart.html
new file mode 100644
index 0000000..efb0234
--- /dev/null
+++ b/26/documentation/streams/quickstart.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/tutorial.html b/26/documentation/streams/tutorial.html
new file mode 100644
index 0000000..e2cf401
--- /dev/null
+++ b/26/documentation/streams/tutorial.html
@@ -0,0 +1,19 @@

diff --git a/26/documentation/streams/upgrade-guide.html b/26/documentation/streams/upgrade-guide.html
new file mode 100644
index 0000000..b1b3200
--- /dev/null
+++ b/26/documentation/streams/upgrade-guide.html
@@ -0,0 +1,19 @@

diff --git a/26/ecosystem.html b/26/ecosystem.html
new file mode 100644
index 0000000..5fbcec5
--- /dev/null
+++ b/26/ecosystem.html
@@ -0,0 +1,18 @@

There are a plethora of tools that integrate with Kafka outside the main distribution. The ecosystem page lists many of these, including stream processing systems, Hadoop integration, monitoring, and deployment tools.

diff --git a/26/generated/admin_client_config.html b/26/generated/admin_client_config.html
new file mode 100644
index 0000000..ef4b090
--- /dev/null
+++ b/26/generated/admin_client_config.html
@@ -0,0 +1,493 @@
    +
  • +

    bootstrap.servers

    +

    A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynam [...] + + + + + +
    Type:list
    Default:
    Valid Values:
    Importance:high
    +
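    For illustration, a minimal sketch of an admin client that sets nothing but bootstrap.servers (the broker host names below are placeholders):

        import java.util.Properties;
        import org.apache.kafka.clients.admin.Admin;
        import org.apache.kafka.clients.admin.AdminClientConfig;

        public final class AdminBootstrapSketch {
            public static void main(String[] args) throws Exception {
                Properties props = new Properties();
                // Two entries are enough for discovery; the client learns the
                // full cluster from whichever broker answers first.
                props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                          "broker1:9092,broker2:9092");
                try (Admin admin = Admin.create(props)) {
                    System.out.println(admin.listTopics().names().get());
                }
            }
        }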

  • +
  • +

    ssl.key.password

    +

    The password of the private key in the key store file. This is optional for clients.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.location

    +

    The location of the key store file. This is optional for clients and can be used for two-way client authentication.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.password

    +

    The store password for the key store file. This is optional for clients and only needed if ssl.keystore.location is configured.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.location

    +

    The location of the trust store file.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.password

    +

    The password for the trust store file. If a password is not set, access to the truststore is still available, but integrity checking is disabled.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
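    The ssl.* settings above combine as follows. A sketch for a TLS-enabled client, assuming placeholder store paths and passwords; the keystore lines are needed only for two-way authentication:

        import java.util.Properties;
        import org.apache.kafka.clients.CommonClientConfigs;
        import org.apache.kafka.common.config.SslConfigs;

        final class SslClientSketch {
            static Properties sslProps() {
                Properties p = new Properties();
                p.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
                // Trust store: used to verify the brokers' certificates.
                p.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/etc/kafka/client.truststore.jks");
                p.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "changeit");
                // Key store and key password: only for two-way (mutual) TLS.
                p.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "/etc/kafka/client.keystore.jks");
                p.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "changeit");
                p.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "changeit");
                return p;
            }
        }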
  • +
  • +

    client.dns.lookup

    +

    Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again (both the JVM and the OS cache DNS name lookups, however). If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical n [...] + + + + + +
    Type:string
    Default:use_all_dns_ips
    Valid Values:[default, use_all_dns_ips, resolve_canonical_bootstrap_servers_only]
    Importance:medium
    +

  • +
  • +

    client.id

    +

    An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:medium
    +
  • +
  • +

    connections.max.idle.ms

    +

    Close idle connections after the number of milliseconds specified by this config.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    default.api.timeout.ms

    +

    Specifies the timeout (in milliseconds) for client APIs. This configuration is used as the default timeout for all client operations that do not specify a timeout parameter.

    + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    receive.buffer.bytes

    +

    The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:65536 (64 kibibytes)
    Valid Values:[-1,...]
    Importance:medium
    +
  • +
  • +

    request.timeout.ms

    +

    The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.

    + + + + + +
    Type:int
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    sasl.client.callback.handler.class

    +

    The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.jaas.config

    +

    JAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described here. The format for the value is: 'loginModuleClass controlFlag (optionName=optionValue)*;'. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.nam [...] + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +
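    For example, a sketch of a SASL/PLAIN client configuration with placeholder credentials; note the single login-module entry terminated by a semicolon, as the format above requires:

        import java.util.Properties;
        import org.apache.kafka.clients.CommonClientConfigs;
        import org.apache.kafka.common.config.SaslConfigs;

        final class SaslPlainSketch {
            static Properties saslProps() {
                Properties p = new Properties();
                p.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
                p.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
                p.put(SaslConfigs.SASL_JAAS_CONFIG,
                      "org.apache.kafka.common.security.plain.PlainLoginModule required "
                      + "username=\"alice\" password=\"alice-secret\";");
                return p;
            }
        }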

  • +
  • +

    sasl.kerberos.service.name

    +

    The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.callback.handler.class

    +

    The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.class

    +

    The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.mechanism

    +

    SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.

    + + + + + +
    Type:string
    Default:GSSAPI
    Valid Values:
    Importance:medium
    +
  • +
  • +

    security.protocol

    +

    Protocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.

    + + + + + +
    Type:string
    Default:PLAINTEXT
    Valid Values:
    Importance:medium
    +
  • +
  • +

    send.buffer.bytes

    +

    The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:131072 (128 kibibytes)
    Valid Values:[-1,...]
    Importance:medium
    +
  • +
  • +

    ssl.enabled.protocols

    +

    The list of protocols enabled for SSL connections. The default is 'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. With the default value for Java 11, clients and servers will prefer TLSv1.3 if both support it and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most cases. Also see the config documentation for `ssl.protocol`.

    + + + + + +
    Type:list
    Default:TLSv1.2
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.keystore.type

    +

    The file format of the key store file. This is optional for clients.

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.protocol

    +

    The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. This value should be fine for most use cases. Allowed values in recent JVMs are 'TLSv1.2' and 'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL', 'SSLv2' and 'SSLv3' may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. With the default value for this config and 'ssl.enabled.protocols', clients will downgrade to 'TLSv1.2' i [...] + + + + + +
    Type:string
    Default:TLSv1.2
    Valid Values:
    Importance:medium
    +

  • +
  • +

    ssl.provider

    +

    The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.truststore.type

    +

    The file format of the trust store file.

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    metadata.max.age.ms

    +

    The period of time in milliseconds after which we force a refresh of metadata, even if we haven't seen any partition leadership changes, in order to proactively discover new brokers or partitions.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metric.reporters

    +

    A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.

    + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    metrics.num.samples

    +

    The number of samples maintained to compute metrics.

    + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:low
    +
  • +
  • +

    metrics.recording.level

    +

    The highest recording level for metrics.

    + + + + + +
    Type:string
    Default:INFO
    Valid Values:[INFO, DEBUG]
    Importance:low
    +
  • +
  • +

    metrics.sample.window.ms

    +

    The window of time a metrics sample is computed over.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.max.ms

    +

    The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.ms

    +

    The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker.

    + + + + + +
    Type:long
    Default:50
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    retries

    +

    Setting a value greater than zero will cause the client to resend any request that fails with a potentially transient error.

    + + + + + +
    Type:int
    Default:2147483647
    Valid Values:[0,...,2147483647]
    Importance:low
    +
  • +
  • +

    retry.backoff.ms

    +

    The amount of time to wait before attempting to retry a failed request. This avoids repeatedly sending requests in a tight loop under some failure scenarios.

    + + + + + +
    Type:long
    Default:100
    Valid Values:[0,...]
    Importance:low
    +
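    A sketch of how retries and retry.backoff.ms interact (illustrative values): the admin client also stops retrying once default.api.timeout.ms is exhausted, so the timeout bounds the total retry budget regardless of the retries setting.

        import java.util.Properties;
        import org.apache.kafka.clients.admin.AdminClientConfig;

        final class RetrySketch {
            static Properties retryProps() {
                Properties p = new Properties();
                p.put(AdminClientConfig.RETRIES_CONFIG, "5");                    // cap on resend attempts
                p.put(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "200");         // pause between attempts
                p.put(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, "30000"); // overall time budget
                return p;
            }
        }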
  • +
  • +

    sasl.kerberos.kinit.cmd

    +

    Kerberos kinit command path.

    + + + + + +
    Type:string
    Default:/usr/bin/kinit
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.min.time.before.relogin

    +

    Login thread sleep time between refresh attempts.

    + + + + + +
    Type:long
    Default:60000
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.jitter

    +

    Percentage of random jitter added to the renewal time.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.window.factor

    +

    Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.buffer.seconds

    +

    The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum excee [...] + + + + + +
    Type:short
    Default:300
    Valid Values:[0,...,3600]
    Importance:low
    +

  • +
  • +

    sasl.login.refresh.min.period.seconds

    +

    The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:short
    Default:60
    Valid Values:[0,...,900]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.factor

    +

    Login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:[0.5,...,1.0]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.jitter

    +

    The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:[0.0,...,0.25]
    Importance:low
    +
  • +
  • +

    security.providers

    +

    A list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the org.apache.kafka.common.security.auth.SecurityProviderCreator interface.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.cipher.suites

    +

    A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.endpoint.identification.algorithm

    +

    The endpoint identification algorithm to validate server hostname using server certificate.

    + + + + + +
    Type:string
    Default:https
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.engine.factory.class

    +

    The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.keymanager.algorithm

    +

    The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:SunX509
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.secure.random.implementation

    +

    The SecureRandom PRNG implementation to use for SSL cryptography operations.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.trustmanager.algorithm

    +

    The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:PKIX
    Valid Values:
    Importance:low
    +
  • +
+ diff --git a/26/generated/connect_config.html b/26/generated/connect_config.html new file mode 100644 index 0000000..3dab64c --- /dev/null +++ b/26/generated/connect_config.html @@ -0,0 +1,933 @@ +
    +
  • +

    config.storage.topic

    +

    The name of the Kafka topic where connector configurations are stored

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    group.id

    +

    A unique string that identifies the Connect cluster group this worker belongs to.

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    key.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.

    + + + + + +
    Type:class
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    offset.storage.topic

    +

    The name of the Kafka topic where connector offsets are stored

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    status.storage.topic

    +

    The name of the Kafka topic where connector and task status are stored

    + + + + + +
    Type:string
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    value.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.

    + + + + + +
    Type:class
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    bootstrap.servers

    +

    A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynam [...] + + + + + +
    Type:list
    Default:localhost:9092
    Valid Values:
    Importance:high
    +
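    The settings above (the three storage topics, group.id, the two converters, and bootstrap.servers) form the core of a distributed worker configuration. A minimal sketch, expressed as Java properties for illustration; these keys normally live in a connect-distributed.properties file, and the topic names and servers are placeholders:

        import java.util.Properties;

        final class DistributedWorkerSketch {
            static Properties workerProps() {
                Properties p = new Properties();
                p.put("bootstrap.servers", "broker1:9092,broker2:9092");
                p.put("group.id", "connect-cluster");             // one value per Connect cluster
                p.put("config.storage.topic", "connect-configs"); // compacted, single partition
                p.put("offset.storage.topic", "connect-offsets"); // compacted
                p.put("status.storage.topic", "connect-status");  // compacted
                p.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
                p.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
                return p;
            }
        }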

  • +
  • +

    heartbeat.interval.ms

    +

    The expected time between heartbeats to the group coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the worker's session stays active and to facilitate rebalancing when new members join or leave the group. The value must be set lower than session.timeout.ms, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.

    + + + + + +
    Type:int
    Default:3000 (3 seconds)
    Valid Values:
    Importance:high
    +
  • +
  • +

    rebalance.timeout.ms

    +

    The maximum allowed time for each worker to join the group once a rebalance has begun. This is basically a limit on the amount of time needed for all tasks to flush any pending data and commit offsets. If the timeout is exceeded, then the worker will be removed from the group, which will cause offset commit failures.

    + + + + + +
    Type:int
    Default:60000 (1 minute)
    Valid Values:
    Importance:high
    +
  • +
  • +

    session.timeout.ms

    +

    The timeout used to detect worker failures. The worker sends periodic heartbeats to indicate its liveness to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, then the broker will remove the worker from the group and initiate a rebalance. Note that the value must be in the allowable range as configured in the broker configuration by group.min.session.timeout.ms and group.max.session.timeout.ms.

    + + + + + +
    Type:int
    Default:10000 (10 seconds)
    Valid Values:
    Importance:high
    +
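    A sizing rule implied by the three descriptions above: keep heartbeat.interval.ms at or below one third of session.timeout.ms, and give rebalance.timeout.ms enough room for tasks to flush and commit. A sketch with illustrative values:

        import java.util.Properties;

        final class WorkerTimingSketch {
            static Properties timingProps() {
                Properties p = new Properties();
                p.put("session.timeout.ms", "10000");   // failure-detection window
                p.put("heartbeat.interval.ms", "3000"); // one third of the session timeout
                p.put("rebalance.timeout.ms", "60000"); // time for tasks to flush and commit offsets
                return p;
            }
        }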
  • +
  • +

    ssl.key.password

    +

    The password of the private key in the key store file. This is optional for clients.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.location

    +

    The location of the key store file. This is optional for clients and can be used for two-way client authentication.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.keystore.password

    +

    The store password for the key store file. This is optional for clients and only needed if ssl.keystore.location is configured.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.location

    +

    The location of the trust store file.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    ssl.truststore.password

    +

    The password for the trust store file. If a password is not set, access to the truststore is still available, but integrity checking is disabled.

    + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:high
    +
  • +
  • +

    client.dns.lookup

    +

    Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again (both the JVM and the OS cache DNS name lookups, however). If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical n [...] + + + + + +
    Type:string
    Default:use_all_dns_ips
    Valid Values:[default, use_all_dns_ips, resolve_canonical_bootstrap_servers_only]
    Importance:medium
    +

  • +
  • +

    connections.max.idle.ms

    +

    Close idle connections after the number of milliseconds specified by this config.

    + + + + + +
    Type:long
    Default:540000 (9 minutes)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    connector.client.config.override.policy

    +

    Class name or alias of an implementation of ConnectorClientConfigOverridePolicy. Defines which client configurations can be overridden by the connector. The default implementation is `None`. The other possible policies in the framework include `All` and `Principal`.

    + + + + + +
    Type:string
    Default:None
    Valid Values:
    Importance:medium
    +
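    When the policy permits it (for example `All`), an individual connector configuration can override worker-level client settings through prefixed keys such as producer.override.*. A sketch, with a hypothetical connector name and class:

        import java.util.HashMap;
        import java.util.Map;

        final class ClientOverrideSketch {
            // Connector config as it would be submitted to the Connect REST API;
            // prefixed keys are routed to the connector's own producer.
            static Map<String, String> connectorConfig() {
                Map<String, String> cfg = new HashMap<>();
                cfg.put("name", "example-source");                                // hypothetical
                cfg.put("connector.class", "com.example.ExampleSourceConnector"); // hypothetical
                cfg.put("producer.override.compression.type", "lz4");
                cfg.put("producer.override.linger.ms", "50");
                return cfg;
            }
        }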
  • +
  • +

    receive.buffer.bytes

    +

    The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:32768 (32 kibibytes)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    request.timeout.ms

    +

    The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.

    + + + + + +
    Type:int
    Default:40000 (40 seconds)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    sasl.client.callback.handler.class

    +

    The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.jaas.config

    +

    JAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described here. The format for the value is: 'loginModuleClass controlFlag (optionName=optionValue)*;'. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.nam [...] + + + + + +
    Type:password
    Default:null
    Valid Values:
    Importance:medium
    +

  • +
  • +

    sasl.kerberos.service.name

    +

    The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.callback.handler.class

    +

    The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.login.class

    +

    The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    sasl.mechanism

    +

    SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.

    + + + + + +
    Type:string
    Default:GSSAPI
    Valid Values:
    Importance:medium
    +
  • +
  • +

    security.protocol

    +

    Protocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.

    + + + + + +
    Type:string
    Default:PLAINTEXT
    Valid Values:
    Importance:medium
    +
  • +
  • +

    send.buffer.bytes

    +

    The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.

    + + + + + +
    Type:int
    Default:131072 (128 kibibytes)
    Valid Values:[0,...]
    Importance:medium
    +
  • +
  • +

    ssl.enabled.protocols

    +

    The list of protocols enabled for SSL connections. The default is 'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. With the default value for Java 11, clients and servers will prefer TLSv1.3 if both support it and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most cases. Also see the config documentation for `ssl.protocol`.

    + + + + + +
    Type:list
    Default:TLSv1.2
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.keystore.type

    +

    The file format of the key store file. This is optional for clients.

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.protocol

    +

    The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. This value should be fine for most use cases. Allowed values in recent JVMs are 'TLSv1.2' and 'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL', 'SSLv2' and 'SSLv3' may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. With the default value for this config and 'ssl.enabled.protocols', clients will downgrade to 'TLSv1.2' i [...] + + + + + +
    Type:string
    Default:TLSv1.2
    Valid Values:
    Importance:medium
    +

  • +
  • +

    ssl.provider

    +

    The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:medium
    +
  • +
  • +

    ssl.truststore.type

    +

    The file format of the trust store file.

    + + + + + +
    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    +
  • +
  • +

    worker.sync.timeout.ms

    +

    When the worker is out of sync with other workers and needs to resynchronize configurations, wait up to this amount of time before giving up, leaving the group, and waiting a backoff period before rejoining.

    + + + + + +
    Type:int
    Default:3000 (3 seconds)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    worker.unsync.backoff.ms

    +

    When the worker is out of sync with other workers and fails to catch up within worker.sync.timeout.ms, leave the Connect cluster for this long before rejoining.

    + + + + + +
    Type:int
    Default:300000 (5 minutes)
    Valid Values:
    Importance:medium
    +
  • +
  • +

    access.control.allow.methods

    +

    Sets the methods supported for cross origin requests by setting the Access-Control-Allow-Methods header. The default value of the Access-Control-Allow-Methods header allows cross origin requests for GET, POST and HEAD.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    access.control.allow.origin

    +

    Value to set the Access-Control-Allow-Origin header to for REST API requests. To enable cross-origin access, set this to the domain of the application that should be permitted to access the API, or '*' to allow access from any domain. The default value only allows access from the domain of the REST API.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    admin.listeners

    +

    List of comma-separated URIs the Admin REST API will listen on. The supported protocols are HTTP and HTTPS. An empty or blank string will disable this feature. The default behavior is to use the regular listener (specified by the 'listeners' property).

    + + + + + +
    Type:list
    Default:null
    Valid Values:org.apache.kafka.connect.runtime.WorkerConfig$AdminListenersValidator@7b1d7fff
    Importance:low
    +
  • +
  • +

    client.id

    +

    An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.

    + + + + + +
    Type:string
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    config.providers

    +

    Comma-separated names of ConfigProvider classes, loaded and used in the order specified. Implementing the interface ConfigProvider allows you to replace variable references in connector configurations, such as for externalized secrets.

    + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    config.storage.replication.factor

    +

    Replication factor used when creating the configuration storage topic

    + + + + + +
    Type:short
    Default:3
    Valid Values:Positive number not larger than the number of brokers in the Kafka cluster, or -1 to use the broker's default
    Importance:low
    +
  • +
  • +

    connect.protocol

    +

    Compatibility mode for Kafka Connect Protocol

    + + + + + +
    Type:string
    Default:sessioned
    Valid Values:[eager, compatible, sessioned]
    Importance:low
    +
  • +
  • +

    header.converter

    +

    HeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the header values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. By default, the SimpleHeaderConverter is used to serialize header values to strings and deserialize them by inferring the sche [...] + + + + + +
    Type:class
    Default:org.apache.kafka.connect.storage.SimpleHeaderConverter
    Valid Values:
    Importance:low
    +

  • +
  • +

    inter.worker.key.generation.algorithm

    +

    The algorithm to use for generating internal request keys

    + + + + + +
    Type:string
    Default:HmacSHA256
    Valid Values:Any KeyGenerator algorithm supported by the worker JVM
    Importance:low
    +
  • +
  • +

    inter.worker.key.size

    +

    The size of the key to use for signing internal requests, in bits. If null, the default key size for the key generation algorithm will be used.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    inter.worker.key.ttl.ms

    +

    The TTL of generated session keys used for internal request validation (in milliseconds)

    + + + + + +
    Type:int
    Default:3600000 (1 hour)
    Valid Values:[0,...,2147483647]
    Importance:low
    +
  • +
  • +

    inter.worker.signature.algorithm

    +

    The algorithm used to sign internal requests

    + + + + + +
    Type:string
    Default:HmacSHA256
    Valid Values:Any MAC algorithm supported by the worker JVM
    Importance:low
    +
  • +
  • +

    inter.worker.verification.algorithms

    +

    A list of permitted algorithms for verifying internal requests

    + + + + + +
    Type:list
    Default:HmacSHA256
    Valid Values:A list of one or more MAC algorithms, each supported by the worker JVM
    Importance:low
    +
  • +
  • +

    internal.key.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. This setting controls the format used for internal bookkeeping data used by the framework, such as configs and offsets, so users can typicall [...] + + + + + +
    Type:class
    Default:org.apache.kafka.connect.json.JsonConverter
    Valid Values:
    Importance:low
    +

  • +
  • +

    internal.value.converter

    +

    Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro. This setting controls the format used for internal bookkeeping data used by the framework, such as configs and offsets, so users can typica [...] + + + + + +
    Type:class
    Default:org.apache.kafka.connect.json.JsonConverter
    Valid Values:
    Importance:low
    +

  • +
  • +

    listeners

    +

    List of comma-separated URIs the REST API will listen on. The supported protocols are HTTP and HTTPS.
    Specify hostname as 0.0.0.0 to bind to all interfaces.
    Leave hostname empty to bind to default interface.
    Examples of legal listener lists: HTTP://myhost:8083,HTTPS://myhost:8084

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    metadata.max.age.ms

    +

    The period of time in milliseconds after which we force a refresh of metadata, even if we haven't seen any partition leadership changes, in order to proactively discover new brokers or partitions.

    + + + + + +
    Type:long
    Default:300000 (5 minutes)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    metric.reporters

    +

    A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.

    + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    metrics.num.samples

    +

    The number of samples maintained to compute metrics.

    + + + + + +
    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:low
    +
  • +
  • +

    metrics.recording.level

    +

    The highest recording level for metrics.

    + + + + + +
    Type:string
    Default:INFO
    Valid Values:[INFO, DEBUG]
    Importance:low
    +
  • +
  • +

    metrics.sample.window.ms

    +

    The window of time a metrics sample is computed over.

    + + + + + +
    Type:long
    Default:30000 (30 seconds)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    offset.flush.interval.ms

    +

    Interval at which to try committing offsets for tasks.

    + + + + + +
    Type:long
    Default:60000 (1 minute)
    Valid Values:
    Importance:low
    +
  • +
  • +

    offset.flush.timeout.ms

    +

    Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt.

    + + + + + +
    Type:long
    Default:5000 (5 seconds)
    Valid Values:
    Importance:low
    +
  • +
  • +

    offset.storage.partitions

    +

    The number of partitions used when creating the offset storage topic

    + + + + + +
    Type:int
    Default:25
    Valid Values:Positive number, or -1 to use the broker's default
    Importance:low
    +
  • +
  • +

    offset.storage.replication.factor

    +

    Replication factor used when creating the offset storage topic

    + + + + + +
    Type:short
    Default:3
    Valid Values:Positive number not larger than the number of brokers in the Kafka cluster, or -1 to use the broker's default
    Importance:low
    +
  • +
  • +

    plugin.path

    +

    List of paths separated by commas (,) that contain plugins (connectors, converters, transformations). The list should consist of top level directories that include any combination of:
    a) directories immediately containing jars with plugins and their dependencies
    b) uber-jars with plugins and their dependencies
    c) directories immediately containing the package directory structure of classes of plugins and their dependencies
    Note: symlinks will be followed to discover depen [...] + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
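    As a hypothetical layout, /opt/connect/plugins might hold one subdirectory per connector (its jar plus dependencies) next to standalone uber-jars; the worker would then point at both locations:

        import java.util.Properties;

        final class PluginPathSketch {
            static Properties pluginPathProps() {
                Properties p = new Properties();
                // Comma-separated list of top-level directories and/or uber-jars;
                // the paths are placeholders.
                p.put("plugin.path", "/opt/connect/plugins,/opt/connect/extra-plugin.jar");
                return p;
            }
        }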

  • +
  • +

    reconnect.backoff.max.ms

    +

    The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.

    + + + + + +
    Type:long
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    reconnect.backoff.ms

    +

    The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker.

    + + + + + +
    Type:long
    Default:50
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    response.http.headers.config

    +

    Rules for REST API HTTP response headers

    + + + + + +
    Type:string
    Default:""
    Valid Values:Comma-separated header rules, where each header rule is of the form '[action] [header name]:[header value]' and optionally surrounded by double quotes if any part of a header rule contains a comma
    Importance:low
    +
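    For example, two illustrative rules in the '[action] [header name]:[header value]' form, the second double-quoted because its value contains commas:

        import java.util.Properties;

        final class RestHeadersSketch {
            static Properties headerProps() {
                Properties p = new Properties();
                p.put("response.http.headers.config",
                      "add X-Frame-Options:DENY, "
                      + "\"add Cache-Control: no-cache, no-store, must-revalidate\"");
                return p;
            }
        }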
  • +
  • +

    rest.advertised.host.name

    +

    If this is set, this is the hostname that will be given out to other workers to connect to.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    rest.advertised.listener

    +

    Sets the advertised listener (HTTP or HTTPS) which will be given to other workers to use.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    rest.advertised.port

    +

    If this is set, this is the port that will be given out to other workers to connect to.

    + + + + + +
    Type:int
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    rest.extension.classes

    +

    Comma-separated names of ConnectRestExtension classes, loaded and called in the order specified. Implementing the ConnectRestExtension interface allows you to inject user-defined resources, such as filters, into Connect's REST API. Typically used to add custom capabilities like logging and security.

    + + + + + +
    Type:list
    Default:""
    Valid Values:
    Importance:low
    +
  • +
  • +

    rest.host.name

    +

    Hostname for the REST API. If this is set, it will only bind to this interface.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    rest.port

    +

    Port for the REST API to listen on.

    + + + + + +
    Type:int
    Default:8083
    Valid Values:
    Importance:low
    +
  • +
  • +

    retry.backoff.ms

    +

    The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.

    + + + + + +
    Type:long
    Default:100
    Valid Values:[0,...]
    Importance:low
    +
  • +
  • +

    sasl.kerberos.kinit.cmd

    +

    Kerberos kinit command path.

    + + + + + +
    Type:string
    Default:/usr/bin/kinit
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.min.time.before.relogin

    +

    Login thread sleep time between refresh attempts.

    + + + + + +
    Type:long
    Default:60000
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.jitter

    +

    Percentage of random jitter added to the renewal time.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.kerberos.ticket.renew.window.factor

    +

    Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.buffer.seconds

    +

    The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum excee [...] + + + + + +
    Type:short
    Default:300
    Valid Values:[0,...,3600]
    Importance:low
    +

  • +
  • +

    sasl.login.refresh.min.period.seconds

    +

    The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:short
    Default:60
    Valid Values:[0,...,900]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.factor

    +

    Login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.8
    Valid Values:[0.5,...,1.0]
    Importance:low
    +
  • +
  • +

    sasl.login.refresh.window.jitter

    +

    The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    + + + + + +
    Type:double
    Default:0.05
    Valid Values:[0.0,...,0.25]
    Importance:low
    +
  • +
  • +

    scheduled.rebalance.max.delay.ms

    +

    The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned.

    + + + + + +
    Type:int
    Default:300000 (5 minutes)
    Valid Values:[0,...,2147483647]
    Importance:low
    +
  • +
  • +

    ssl.cipher.suites

    +

    A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.

    + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.client.auth

    +

    Configures kafka broker to request client authentication. The following settings are common:

    • ssl.client.auth=required If set to required, client authentication is required.
    • ssl.client.auth=requested This means client authentication is optional. Unlike required, with this option a client can choose not to provide authentication information about itself.
    • ssl.client.auth=none This means client authentication is not needed.

    + + + + + +
    Type:string
    Default:none
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.endpoint.identification.algorithm

    +

    The endpoint identification algorithm to validate server hostname using server certificate.

    + + + + + +
    Type:string
    Default:https
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.engine.factory.class

    +

    The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory

    + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.keymanager.algorithm

    +

    The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:SunX509
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.secure.random.implementation

    +

    The SecureRandom PRNG implementation to use for SSL cryptography operations.

    + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    +
  • +
  • +

    ssl.trustmanager.algorithm

    +

    The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.

    + + + + + +
    Type:string
    Default:PKIX
    Valid Values:
    Importance:low
    +
  • +
  • +

    status.storage.partitions

    +

    The number of partitions used when creating the status storage topic

    + + + + + +
    Type:int
    Default:5
    Valid Values:Positive number, or -1 to use the broker's default
    Importance:low
    +
  • +
  • +

    status.storage.replication.factor

    +

    Replication factor used when creating the status storage topic

    + + + + + +
    Type:short
    Default:3
    Valid Values:Positive number not larger than the number of brokers in the Kafka cluster, or -1 to use the broker's default
    Importance:low
    +
  • +
  • +

    task.shutdown.graceful.timeout.ms

    +

    Amount of time to wait for tasks to shut down gracefully. This is the total amount of time, not per task. Shutdown is triggered for all tasks, which are then waited on sequentially.

    + + + + + +
    Type:long
    Default:5000 (5 seconds)
    Valid Values:
    Importance:low
    +
  • +
  • +

    topic.creation.enable

    +

    Whether to allow automatic creation of topics used by source connectors, when source connectors are configured with `topic.creation.` properties. Each task will use an admin client to create its topics and will not depend on the Kafka brokers to create topics automatically.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
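    When this is enabled, a source connector can describe the topics it needs through topic.creation.* properties (new in this release, KIP-158). A sketch with illustrative values:

        import java.util.HashMap;
        import java.util.Map;

        final class TopicCreationSketch {
            // Fragment of a source connector configuration; only takes effect
            // while the worker has topic.creation.enable=true.
            static Map<String, String> topicCreation() {
                Map<String, String> cfg = new HashMap<>();
                cfg.put("topic.creation.default.replication.factor", "3");
                cfg.put("topic.creation.default.partitions", "10");
                return cfg;
            }
        }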
  • +
  • +

    topic.tracking.allow.reset

    +

    If set to true, it allows user requests to reset the set of active topics per connector.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
  • +

    topic.tracking.enable

    +

    Enable tracking the set of active topics per connector during runtime.

    + + + + + +
    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    +
  • +
diff --git a/26/generated/connect_metrics.html b/26/generated/connect_metrics.html new file mode 100644 index 0000000..7359fe9 --- /dev/null +++ b/26/generated/connect_metrics.html @@ -0,0 +1,202 @@
kafka.connect:type=connect-worker-metrics
  connector-count: The number of connectors run in this worker.
  connector-startup-attempts-total: The total number of connector startups that this worker has attempted.
  connector-startup-failure-percentage: The average percentage of this worker's connector starts that failed.
  connector-startup-failure-total: The total number of connector starts that failed.
  connector-startup-success-percentage: The average percentage of this worker's connector starts that succeeded.
  connector-startup-success-total: The total number of connector starts that succeeded.
  task-count: The number of tasks run in this worker.
  task-startup-attempts-total: The total number of task startups that this worker has attempted.
  task-startup-failure-percentage: The average percentage of this worker's task starts that failed.
  task-startup-failure-total: The total number of task starts that failed.
  task-startup-success-percentage: The average percentage of this worker's task starts that succeeded.
  task-startup-success-total: The total number of task starts that succeeded.

kafka.connect:type=connect-worker-metrics,connector="{connector}"
  connector-destroyed-task-count: The number of destroyed tasks of the connector on the worker.
  connector-failed-task-count: The number of failed tasks of the connector on the worker.
  connector-paused-task-count: The number of paused tasks of the connector on the worker.
  connector-running-task-count: The number of running tasks of the connector on the worker.
  connector-total-task-count: The number of tasks of the connector on the worker.
  connector-unassigned-task-count: The number of unassigned tasks of the connector on the worker.

kafka.connect:type=connect-worker-rebalance-metrics
  completed-rebalances-total: The total number of rebalances completed by this worker.
  connect-protocol: The Connect protocol used by this cluster.
  epoch: The epoch or generation number of this worker.
  leader-name: The name of the group leader.
  rebalance-avg-time-ms: The average time in milliseconds spent by this worker to rebalance.
  rebalance-max-time-ms: The maximum time in milliseconds spent by this worker to rebalance.
  rebalancing: Whether this worker is currently rebalancing.
  time-since-last-rebalance-ms: The time in milliseconds since this worker completed the most recent rebalance.

kafka.connect:type=connector-metrics,connector="{connector}"
  connector-class: The name of the connector class.
  connector-type: The type of the connector. One of 'source' or 'sink'.
  connector-version: The version of the connector class, as reported by the connector.
  status: The status of the connector. One of 'unassigned', 'running', 'paused', 'failed', or 'destroyed'.

kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
  batch-size-avg: The average size of the batches processed by the connector.
  batch-size-max: The maximum size of the batches processed by the connector.
  offset-commit-avg-time-ms: The average time in milliseconds taken by this task to commit offsets.
  offset-commit-failure-percentage: The average percentage of this task's offset commit attempts that failed.
  offset-commit-max-time-ms: The maximum time in milliseconds taken by this task to commit offsets.
  offset-commit-success-percentage: The average percentage of this task's offset commit attempts that succeeded.
  pause-ratio: The fraction of time this task has spent in the pause state.
  running-ratio: The fraction of time this task has spent in the running state.
  status: The status of the connector task. One of 'unassigned', 'running', 'paused', 'failed', or 'destroyed'.

kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
  offset-commit-completion-rate: The average per-second number of offset commit completions that were completed successfully.
  offset-commit-completion-total: The total number of offset commit completions that were completed successfully.
  offset-commit-seq-no: The current sequence number for offset commits.
  offset-commit-skip-rate: The average per-second number of offset commit completions that were received too late and skipped/ignored.
  offset-commit-skip-total: The total number of offset commit completions that were received too late and skipped/ignored.
  partition-count: The number of topic partitions assigned to this task belonging to the named sink connector in this worker.
  put-batch-avg-time-ms: The average time taken by this task to put a batch of sink records.
  put-batch-max-time-ms: The maximum time taken by this task to put a batch of sink records.
  sink-record-active-count: The number of records that have been read from Kafka but not yet completely committed/flushed/acknowledged by the sink task.
  sink-record-active-count-avg: The average number of records that have been read from Kafka but not yet completely committed/flushed/acknowledged by the sink task.
  sink-record-active-count-max: The maximum number of records that have been read from Kafka but not yet completely committed/flushed/acknowledged by the sink task.
  sink-record-lag-max: The maximum lag in terms of number of records that the sink task is behind the consumer's position for any topic partitions.
  sink-record-read-rate: The average per-second number of records read from Kafka for this task belonging to the named sink connector in this worker. This is before transformations are applied.
  sink-record-read-total: The total number of records read from Kafka by this task belonging to the named sink connector in this worker, since the task was last restarted.
  sink-record-send-rate: The average per-second number of records output from the transformations and sent/put to this task belonging to the named sink connector in this worker. This is after transformations are applied and excludes any records filtered out by the transformations.
  sink-record-send-total: The total number of records output from the transformations and sent/put to this task belonging to the named sink connector in this worker, since the task was last restarted.

kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
  poll-batch-avg-time-ms: The average time in milliseconds taken by this task to poll for a batch of source records.
  poll-batch-max-time-ms: The maximum time in milliseconds taken by this task to poll for a batch of source records.
  source-record-active-count: The number of records that have been produced by this task but not yet completely written to Kafka.
  source-record-active-count-avg: The average number of records that have been produced by this task but not yet completely written to Kafka.
  source-record-active-count-max: The maximum number of records that have been produced by this task but not yet completely written to Kafka.
  source-record-poll-rate: The average per-second number of records produced/polled (before transformation) by this task belonging to the named source connector in this worker.
  source-record-poll-total: The total number of records produced/polled (before transformation) by this task belonging to the named source connector in this worker.
  source-record-write-rate: The average per-second number of records output from the transformations and written to Kafka for this task belonging to the named source connector in this worker. This is after transformations are applied and excludes any records filtered out by the transformations.
  source-record-write-total: The number of records output from the transformations and written to Kafka for this task belonging to the named source connector in this worker, since the task was last restarted.

kafka.connect:type=task-error-metrics,connector="{connector}",task="{task}"
  deadletterqueue-produce-failures: The number of failed writes to the dead letter queue.
  deadletterqueue-produce-requests: The number of attempted writes to the dead letter queue.
  last-error-timestamp: The epoch timestamp when this task last encountered an error.
  total-errors-logged: The number of errors that were logged.
  total-record-errors: The number of record processing errors in this task.
  total-record-failures: The number of record processing failures in this task.
  total-records-skipped: The number of records skipped due to errors.
  total-retries: The number of operations retried.
diff --git a/26/generated/connect_predicates.html b/26/generated/connect_predicates.html
new file mode 100644
index 0000000..85a7675
--- /dev/null
+++ b/26/generated/connect_predicates.html
@@ -0,0 +1,44 @@

org.apache.kafka.connect.transforms.predicates.HasHeaderKey
A predicate which is true for records with at least one header with the configured name.

    name
    The header name.
    Type: string | Default: | Valid Values: non-empty string | Importance: medium

org.apache.kafka.connect.transforms.predicates.RecordIsTombstone
A predicate which is true for records which are tombstones (i.e. have a null value).

org.apache.kafka.connect.transforms.predicates.TopicNameMatches
A predicate which is true for records with a topic name that matches the configured regular expression.

    pattern
    A Java regular expression for matching against the name of a record's topic.
    Type: string | Default: | Valid Values: non-empty string, valid regex | Importance: medium
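A predicate only takes effect when a transformation references it through its predicate (and optionally negate) property. Below is a minimal sketch in Java of connector properties that drop tombstone records via Filter guarded by RecordIsTombstone; the connector name and topic pattern are hypothetical placeholders:

    import java.util.HashMap;
    import java.util.Map;

    public class PredicateConfigExample {
        public static Map<String, String> connectorProps() {
            Map<String, String> props = new HashMap<>();
            props.put("name", "example-sink");                       // hypothetical connector name
            props.put("topics.regex", "metrics-.*");                 // hypothetical topic pattern
            // Apply the Filter transformation only to records matched by the predicate.
            props.put("transforms", "dropTombstones");
            props.put("transforms.dropTombstones.type",
                      "org.apache.kafka.connect.transforms.Filter");
            props.put("transforms.dropTombstones.predicate", "isTombstone");
            // Setting "transforms.dropTombstones.negate" to "true" would invert the match.
            props.put("predicates", "isTombstone");
            props.put("predicates.isTombstone.type",
                      "org.apache.kafka.connect.transforms.predicates.RecordIsTombstone");
            return props;
        }
    }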
diff --git a/26/generated/connect_transforms.html b/26/generated/connect_transforms.html
new file mode 100644
index 0000000..bec41ba
--- /dev/null
+++ b/26/generated/connect_transforms.html
@@ -0,0 +1,354 @@

org.apache.kafka.connect.transforms.InsertField
Insert field(s) using attributes from the record metadata or a configured static value.
Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.InsertField$Key) or value (org.apache.kafka.connect.transforms.InsertField$Value).

    offset.field
    Field name for the Kafka offset - only applicable to sink connectors. Suffix with ! to make this a required field, or ? to keep it optional (the default).
    Type: string | Default: null | Valid Values: | Importance: medium

    partition.field
    Field name for the Kafka partition. Suffix with ! to make this a required field, or ? to keep it optional (the default).
    Type: string | Default: null | Valid Values: | Importance: medium

    static.field
    Field name for the static data field. Suffix with ! to make this a required field, or ? to keep it optional (the default).
    Type: string | Default: null | Valid Values: | Importance: medium

    static.value
    Static field value, if the field name is configured.
    Type: string | Default: null | Valid Values: | Importance: medium

    timestamp.field
    Field name for the record timestamp. Suffix with ! to make this a required field, or ? to keep it optional (the default).
    Type: string | Default: null | Valid Values: | Importance: medium

    topic.field
    Field name for the Kafka topic. Suffix with ! to make this a required field, or ? to keep it optional (the default).
    Type: string | Default: null | Valid Values: | Importance: medium
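As a worked illustration of the ! / ? suffix convention above, here is a sketch of an InsertField$Value configuration expressed in Java; the field names and the static value are hypothetical:

    import java.util.HashMap;
    import java.util.Map;

    public class InsertFieldExample {
        public static Map<String, String> transformProps() {
            Map<String, String> props = new HashMap<>();
            props.put("transforms", "insertMeta");
            props.put("transforms.insertMeta.type",
                      "org.apache.kafka.connect.transforms.InsertField$Value");
            // '!' marks the inserted field as required; '?' (or no suffix) keeps it optional.
            props.put("transforms.insertMeta.topic.field", "origin_topic!");
            props.put("transforms.insertMeta.timestamp.field", "event_ts?");
            // A static field/value pair added to every record.
            props.put("transforms.insertMeta.static.field", "data_center");
            props.put("transforms.insertMeta.static.value", "us-east-1");
            return props;
        }
    }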
org.apache.kafka.connect.transforms.ReplaceField
Filter or rename fields.
Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.ReplaceField$Key) or value (org.apache.kafka.connect.transforms.ReplaceField$Value).

    blacklist
    Fields to exclude. This takes precedence over the whitelist.
    Type: list | Default: "" | Valid Values: | Importance: medium

    renames
    Field rename mappings.
    Type: list | Default: "" | Valid Values: list of colon-delimited pairs, e.g. foo:bar,abc:xyz | Importance: medium

    whitelist
    Fields to include. If specified, only these fields will be used.
    Type: list | Default: "" | Valid Values: | Importance: medium

org.apache.kafka.connect.transforms.MaskField
Mask specified fields with a valid null value for the field type (i.e. 0, false, an empty string, and so on).
For numeric and string fields, an optional replacement value can be specified that is converted to the correct type.
Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.MaskField$Key) or value (org.apache.kafka.connect.transforms.MaskField$Value).

    fields
    Names of fields to mask.
    Type: list | Default: | Valid Values: non-empty list | Importance: high

    replacement
    Custom replacement value that will be applied to all 'fields' values (numeric or non-empty string values only).
    Type: string | Default: null | Valid Values: non-empty string | Importance: low

org.apache.kafka.connect.transforms.ValueToKey
Replace the record key with a new key formed from a subset of fields in the record value.

    fields
    Field names on the record value to extract as the record key.
    Type: list | Default: | Valid Values: non-empty list | Importance: high

org.apache.kafka.connect.transforms.HoistField
Wrap data using the specified field name in a Struct when a schema is present, or a Map in the case of schemaless data.
Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.HoistField$Key) or value (org.apache.kafka.connect.transforms.HoistField$Value).

    field
    Field name for the single field that will be created in the resulting Struct or Map.
    Type: string | Default: | Valid Values: | Importance: medium

org.apache.kafka.connect.transforms.ExtractField
Extract the specified field from a Struct when a schema is present, or a Map in the case of schemaless data. Any null values are passed through unmodified.
Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.ExtractField$Key) or value (org.apache.kafka.connect.transforms.ExtractField$Value).

    field
    Field name to extract.
    Type: string | Default: | Valid Values: | Importance: medium

org.apache.kafka.connect.transforms.SetSchemaMetadata
Set the schema name, version, or both on the record's key (org.apache.kafka.connect.transforms.SetSchemaMetadata$Key) or value (org.apache.kafka.connect.transforms.SetSchemaMetadata$Value) schema.

    schema.name
    Schema name to set.
    Type: string | Default: null | Valid Values: | Importance: high

    schema.version
    Schema version to set.
    Type: int | Default: null | Valid Values: | Importance: high

org.apache.kafka.connect.transforms.TimestampRouter
Update the record's topic field as a function of the original topic value and the record timestamp.
This is mainly useful for sink connectors, since the topic field is often used to determine the equivalent entity name in the destination system (e.g. a database table or search index name).

    timestamp.format
    Format string for the timestamp that is compatible with java.text.SimpleDateFormat.
    Type: string | Default: yyyyMMdd | Valid Values: | Importance: high

    topic.format
    Format string which can contain ${topic} and ${timestamp} as placeholders for the topic and timestamp, respectively.
    Type: string | Default: ${topic}-${timestamp} | Valid Values: | Importance: high

org.apache.kafka.connect.transforms.RegexRouter
Update the record topic using the configured regular expression and replacement string.
Under the hood, the regex is compiled to a java.util.regex.Pattern. If the pattern matches the input topic, java.util.regex.Matcher#replaceFirst() is used with the replacement string to obtain the new topic.

    regex
    Regular expression to use for matching.
    Type: string | Default: | Valid Values: valid regex | Importance: high

    replacement
    Replacement string.
    Type: string | Default: | Valid Values: | Importance: high

org.apache.kafka.connect.transforms.Flatten
Flatten a nested data structure, generating names for each field by concatenating the field names at each level with a configurable delimiter character. Applies to a Struct when a schema is present, or a Map in the case of schemaless data. The default delimiter is '.'.
Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.Flatten$Key) or value (org.apache.kafka.connect.transforms.Flatten$Value).

    delimiter
    Delimiter to insert between field names from the input record when generating field names for the output record.
    Type: string | Default: . | Valid Values: | Importance: medium
org.apache.kafka.connect.transforms.Cast
Cast fields or the entire key or value to a specific type, e.g. to force an integer field to a smaller width. Only simple primitive types are supported -- integers, floats, boolean, and string.
Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.Cast$Key) or value (org.apache.kafka.connect.transforms.Cast$Value).

    spec
    List of fields and the type to cast them to, of the form field1:type,field2:type to cast fields of Maps or Structs. A single type to cast the entire value. Valid types are int8, int16, int32, int64, float32, float64, boolean, and string.
    Type: list | Default: | Valid Values: list of colon-delimited pairs, e.g. foo:bar,abc:xyz | Importance: high

org.apache.kafka.connect.transforms.TimestampConverter
Convert timestamps between different formats such as Unix epoch, strings, and Connect Date/Timestamp types. Applies to individual fields or to the entire value.
Use the concrete transformation type designed for the record key (org.apache.kafka.connect.transforms.TimestampConverter$Key) or value (org.apache.kafka.connect.transforms.TimestampConverter$Value).

    target.type
    The desired timestamp representation: string, unix, Date, Time, or Timestamp.
    Type: string | Default: | Valid Values: | Importance: high

    field
    The field containing the timestamp, or empty if the entire value is a timestamp.
    Type: string | Default: "" | Valid Values: | Importance: high

    format
    A SimpleDateFormat-compatible format for the timestamp. Used to generate the output when type=string, or used to parse the input if the input is a string.
    Type: string | Default: "" | Valid Values: | Importance: medium

org.apache.kafka.connect.transforms.Filter
Drops all records, filtering them from subsequent transformations in the chain. This is intended to be used conditionally to filter out records matching (or not matching) a particular Predicate.
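To show how several of the transformations above compose, here is a sketch of a transform chain (ReplaceField, Cast, TimestampConverter) expressed as connector properties in Java. All field names are hypothetical; transforms run in the order listed:

    import java.util.HashMap;
    import java.util.Map;

    public class TransformChainExample {
        public static Map<String, String> transformProps() {
            Map<String, String> props = new HashMap<>();
            // Transforms are applied in the order given here.
            props.put("transforms", "rename,cast,toIso");
            // 1. Rename fields using colon-delimited pairs.
            props.put("transforms.rename.type",
                      "org.apache.kafka.connect.transforms.ReplaceField$Value");
            props.put("transforms.rename.renames", "cust_id:customerId,amt:amount");
            // 2. Cast individual fields using the field:type spec format.
            props.put("transforms.cast.type",
                      "org.apache.kafka.connect.transforms.Cast$Value");
            props.put("transforms.cast.spec", "amount:float64,active:boolean");
            // 3. Render a timestamp field as an ISO-8601 string.
            props.put("transforms.toIso.type",
                      "org.apache.kafka.connect.transforms.TimestampConverter$Value");
            props.put("transforms.toIso.target.type", "string");
            props.put("transforms.toIso.field", "event_ts");
            props.put("transforms.toIso.format", "yyyy-MM-dd'T'HH:mm:ssX");
            return props;
        }
    }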
diff --git a/26/generated/consumer_config.html b/26/generated/consumer_config.html
new file mode 100644
index 0000000..2ef6030
--- /dev/null
+++ b/26/generated/consumer_config.html
@@ -0,0 +1,703 @@

key.deserializer
    Deserializer class for key that implements the org.apache.kafka.common.serialization.Deserializer interface.
    Type: class | Default: | Valid Values: | Importance: high

value.deserializer
    Deserializer class for value that implements the org.apache.kafka.common.serialization.Deserializer interface.
    Type: class | Default: | Valid Values: | Importance: high

bootstrap.servers
    A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping -- this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynam [...]
    Type: list | Default: "" | Valid Values: non-null string | Importance: high

fetch.min.bytes
    The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. The default setting of 1 byte means that fetch requests are answered as soon as a single byte of data is available or the fetch request times out waiting for data to arrive. Setting this to something greater than 1 will cause the server to wait for larger amounts of data to accumulate, which can i [...]
    Type: int | Default: 1 | Valid Values: [0,...] | Importance: high

group.id
    A unique string that identifies the consumer group this consumer belongs to. This property is required if the consumer uses either the group management functionality by using subscribe(topic) or the Kafka-based offset management strategy.
    Type: string | Default: null | Valid Values: | Importance: high
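The configs above are the minimum needed for a subscribing consumer. A minimal sketch, assuming a broker on localhost:9092 and a hypothetical topic and group name:

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public class MinimalConsumer {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
            props.put("group.id", "example-group");           // hypothetical group
            props.put("key.deserializer",
                      "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer",
                      "org.apache.kafka.common.serialization.StringDeserializer");

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Collections.singletonList("example-topic"));
                // One poll for illustration; real applications poll in a loop.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("%s-%d@%d: %s%n",
                            record.topic(), record.partition(), record.offset(), record.value());
                }
            }
        }
    }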
heartbeat.interval.ms
    The expected time between heartbeats to the consumer coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than session.timeout.ms, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.
    Type: int | Default: 3000 (3 seconds) | Valid Values: | Importance: high

max.partition.fetch.bytes
    The maximum amount of data per partition the server will return. Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. The maximum record batch size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config). See fetch.max.bytes for limiting [...]
    Type: int | Default: 1048576 (1 mebibyte) | Valid Values: [0,...] | Importance: high

session.timeout.ms
    The timeout used to detect client failures when using Kafka's group management facility. The client sends periodic heartbeats to indicate its liveness to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, then the broker will remove this client from the group and initiate a rebalance. Note that the value must be in the allowable range as configured in the broker configuration by group.min.session.timeout.ms and gro [...]
    Type: int | Default: 10000 (10 seconds) | Valid Values: | Importance: high

ssl.key.password
    The password of the private key in the key store file. This is optional for clients.
    Type: password | Default: null | Valid Values: | Importance: high

ssl.keystore.location
    The location of the key store file. This is optional for clients and can be used for two-way client authentication.
    Type: string | Default: null | Valid Values: | Importance: high

ssl.keystore.password
    The store password for the key store file. This is optional for clients and only needed if ssl.keystore.location is configured.
    Type: password | Default: null | Valid Values: | Importance: high

ssl.truststore.location
    The location of the trust store file.
    Type: string | Default: null | Valid Values: | Importance: high

ssl.truststore.password
    The password for the trust store file. If a password is not set, access to the truststore is still available, but integrity checking is disabled.
    Type: password | Default: null | Valid Values: | Importance: high

allow.auto.create.topics
    Allow automatic topic creation on the broker when subscribing to or assigning a topic. A topic being subscribed to will be automatically created only if the broker allows for it using the `auto.create.topics.enable` broker configuration. This configuration must be set to `false` when using brokers older than 0.11.0.
    Type: boolean | Default: true | Valid Values: | Importance: medium

auto.offset.reset
    What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server (e.g. because that data has been deleted):
    - earliest: automatically reset the offset to the earliest offset
    - latest: automatically reset the offset to the latest offset
    - none: throw an exception to the consumer if no previous offset is found for the consumer's group
    - anything else: throw an exception to the consumer
    Type: string | Default: latest | Valid Values: [latest, earliest, none] | Importance: medium
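With auto.offset.reset=none, the missing-offset case surfaces as a NoOffsetForPartitionException, which the application can handle by seeking explicitly. A sketch (group and topic names hypothetical, broker assumed on localhost:9092):

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.NoOffsetForPartitionException;

    public class OffsetResetExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // assumed
            props.put("group.id", "audit-group");             // hypothetical group
            props.put("key.deserializer",
                      "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer",
                      "org.apache.kafka.common.serialization.StringDeserializer");
            // 'none' surfaces the missing-offset case instead of silently resetting.
            props.put("auto.offset.reset", "none");

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Collections.singletonList("audit-topic"));
                try {
                    consumer.poll(Duration.ofSeconds(1));
                } catch (NoOffsetForPartitionException e) {
                    // No committed offset for these partitions; pick a position explicitly.
                    consumer.seekToBeginning(e.partitions());
                }
            }
        }
    }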
client.dns.lookup
    Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again (both the JVM and the OS cache DNS name lookups, however). If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical n [...]
    Type: string | Default: use_all_dns_ips | Valid Values: [default, use_all_dns_ips, resolve_canonical_bootstrap_servers_only] | Importance: medium

connections.max.idle.ms
    Close idle connections after the number of milliseconds specified by this config.
    Type: long | Default: 540000 (9 minutes) | Valid Values: | Importance: medium

default.api.timeout.ms
    Specifies the timeout (in milliseconds) for client APIs. This configuration is used as the default timeout for all client operations that do not specify a timeout parameter.
    Type: int | Default: 60000 (1 minute) | Valid Values: [0,...] | Importance: medium

enable.auto.commit
    If true, the consumer's offset will be periodically committed in the background.
    Type: boolean | Default: true | Valid Values: | Importance: medium
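When enable.auto.commit is disabled, offsets must be committed explicitly; a common pattern is to commit synchronously after each processed batch. A sketch (topic name and processing step hypothetical):

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public class ManualCommitConsumer {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // assumed
            props.put("group.id", "manual-commit-group");     // hypothetical group
            props.put("key.deserializer",
                      "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer",
                      "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("enable.auto.commit", "false");         // take over commit responsibility

            try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Collections.singletonList("example-topic"));
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                    for (ConsumerRecord<String, String> record : records) {
                        handle(record); // hypothetical processing step
                    }
                    // Commit only after the whole batch is processed, so a crash
                    // re-delivers (at-least-once) rather than skips records.
                    consumer.commitSync();
                }
            }
        }

        private static void handle(ConsumerRecord<String, String> record) {
            System.out.printf("%s@%d: %s%n", record.topic(), record.offset(), record.value());
        }
    }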
exclude.internal.topics
    Whether internal topics matching a subscribed pattern should be excluded from the subscription. It is always possible to explicitly subscribe to an internal topic.
    Type: boolean | Default: true | Valid Values: | Importance: medium

fetch.max.bytes
    The maximum amount of data the server should return for a fetch request. Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum. The maximum record batch size accepted by the broker is defined via message.max.bytes (broker config) or max.message.b [...]
    Type: int | Default: 52428800 (50 mebibytes) | Valid Values: [0,...] | Importance: medium

group.instance.id
    A unique identifier of the consumer instance provided by the end user. Only non-empty strings are permitted. If set, the consumer is treated as a static member, which means that only one instance with this ID is allowed in the consumer group at any time. This can be used in combination with a larger session timeout to avoid group rebalances caused by transient unavailability (e.g. process restarts). If not set, the consumer will join the group as a dynamic member, which is the traditi [...]
    Type: string | Default: null | Valid Values: | Importance: medium

isolation.level
    Controls how to read messages written transactionally. If set to read_committed, consumer.poll() will only return transactional messages which have been committed. If set to read_uncommitted (the default), consumer.poll() will return all messages, even transactional messages which have been aborted. Non-transactional messages will be returned unconditionally in either mode.
    Messages will always be returned in offset order. Hence, in read_committed [...]
    Type: string | Default: read_uncommitted | Valid Values: [read_committed, read_uncommitted] | Importance: medium
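A small helper sketch for the read_committed setting; with it, poll() returns only messages from committed transactions (non-transactional messages are returned in either mode):

    import java.util.Properties;

    public class ReadCommittedConfig {
        // Layers the transactional-read setting on top of an existing set of
        // consumer properties (the base properties are assumed to be complete).
        public static Properties readCommittedProps(Properties base) {
            Properties props = new Properties();
            props.putAll(base);
            props.put("isolation.level", "read_committed");
            return props;
        }
    }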
max.poll.interval.ms
    The maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member. For consumers using a non-null group.instance.id which reach this timeout, partitions will not be i [...]
    Type: int | Default: 300000 (5 minutes) | Valid Values: [1,...] | Importance: medium

max.poll.records
    The maximum number of records returned in a single call to poll().
    Type: int | Default: 500 | Valid Values: [1,...] | Importance: medium

partition.assignment.strategy
    A list of class names or class types, ordered by preference, of supported partition assignment strategies that the client will use to distribute partition ownership amongst consumer instances when group management is used.
    In addition to the default class specified below, you can use the org.apache.kafka.clients.consumer.RoundRobinAssignor class for round robin assignments of partitions to consumers.
    Implementing the org.apache.kafka.clients.consumer.Consume [...]
    Type: list | Default: class org.apache.kafka.clients.consumer.RangeAssignor | Valid Values: non-null string | Importance: medium
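Assignors are configured by class name, in preference order. A sketch that prefers the cooperative sticky assignor and lists round robin as a fallback (both classes ship with the client):

    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;
    import org.apache.kafka.clients.consumer.RoundRobinAssignor;

    public class AssignorConfig {
        // Layers the assignment strategy on top of existing consumer properties.
        public static Properties withAssignors(Properties base) {
            Properties props = new Properties();
            props.putAll(base);
            props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
                      CooperativeStickyAssignor.class.getName() + ","
                              + RoundRobinAssignor.class.getName());
            return props;
        }
    }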
receive.buffer.bytes
    The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
    Type: int | Default: 65536 (64 kibibytes) | Valid Values: [-1,...] | Importance: medium

request.timeout.ms
    The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary or fail the request if retries are exhausted.
    Type: int | Default: 30000 (30 seconds) | Valid Values: [0,...] | Importance: medium

sasl.client.callback.handler.class
    The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.
    Type: class | Default: null | Valid Values: | Importance: medium

sasl.jaas.config
    JAAS login context parameters for SASL connections in the format used by JAAS configuration files. The JAAS configuration file format is described here. The format for the value is: 'loginModuleClass controlFlag (optionName=optionValue)*;'. For brokers, the config must be prefixed with the listener prefix and SASL mechanism name in lower-case. For example, listener.nam [...]
    Type: password | Default: null | Valid Values: | Importance: medium

sasl.kerberos.service.name
    The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.
    Type: string | Default: null | Valid Values: | Importance: medium

sasl.login.callback.handler.class
    The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, the login callback handler config must be prefixed with the listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler
    Type: class | Default: null | Valid Values: | Importance: medium

sasl.login.class
    The fully qualified name of a class that implements the Login interface. For brokers, the login config must be prefixed with the listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin
    Type: class | Default: null | Valid Values: | Importance: medium

sasl.mechanism
    SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.
    Type: string | Default: GSSAPI | Valid Values: | Importance: medium

security.protocol
    Protocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
    Type: string | Default: PLAINTEXT | Valid Values: | Importance: medium

send.buffer.bytes
    The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.
    Type: int | Default: 131072 (128 kibibytes) | Valid Values: [-1,...] | Importance: medium

ssl.enabled.protocols
    The list of protocols enabled for SSL connections. The default is 'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. With the default value for Java 11, clients and servers will prefer TLSv1.3 if both support it and fall back to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most cases. Also see the config documentation for `ssl.protocol`.
    Type: list | Default: TLSv1.2 | Valid Values: | Importance: medium

ssl.keystore.type
    The file format of the key store file. This is optional for clients.
    Type: string | Default: JKS | Valid Values: | Importance: medium

ssl.protocol
    The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. This value should be fine for most use cases. Allowed values in recent JVMs are 'TLSv1.2' and 'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL', 'SSLv2' and 'SSLv3' may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. With the default value for this config and 'ssl.enabled.protocols', clients will downgrade to 'TLSv1.2' i [...]
    Type: string | Default: TLSv1.2 | Valid Values: | Importance: medium

ssl.provider
    The name of the security provider used for SSL connections. The default value is the default security provider of the JVM.
    Type: string | Default: null | Valid Values: | Importance: medium

ssl.truststore.type
    The file format of the trust store file.
    Type: string | Default: JKS | Valid Values: | Importance: medium

auto.commit.interval.ms
    The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true.
    Type: int | Default: 5000 (5 seconds) | Valid Values: [0,...] | Importance: low

check.crcs
    Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.
    Type: boolean | Default: true | Valid Values: | Importance: low

client.id
    An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
    Type: string | Default: "" | Valid Values: | Importance: low

client.rack
    A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config 'broker.rack'.
    Type: string | Default: "" | Valid Values: | Importance: low

fetch.max.wait.ms
    The maximum amount of time the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy the requirement given by fetch.min.bytes.
    Type: int | Default: 500 | Valid Values: [0,...] | Importance: low

interceptor.classes
    A list of classes to use as interceptors. Implementing the org.apache.kafka.clients.consumer.ConsumerInterceptor interface allows you to intercept (and possibly mutate) records received by the consumer. By default, there are no interceptors.
    Type: list | Default: "" | Valid Values: non-null string | Importance: low

metadata.max.age.ms
    The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes, to proactively discover any new brokers or partitions.
    Type: long | Default: 300000 (5 minutes) | Valid Values: [0,...] | Importance: low

metric.reporters
    A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.
    Type: list | Default: "" | Valid Values: non-null string | Importance: low

metrics.num.samples
    The number of samples maintained to compute metrics.
    Type: int | Default: 2 | Valid Values: [1,...] | Importance: low

metrics.recording.level
    The highest recording level for metrics.
    Type: string | Default: INFO | Valid Values: [INFO, DEBUG] | Importance: low

metrics.sample.window.ms
    The window of time a metrics sample is computed over.
    Type: long | Default: 30000 (30 seconds) | Valid Values: [0,...] | Importance: low

reconnect.backoff.max.ms
    The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.
    Type: long | Default: 1000 (1 second) | Valid Values: [0,...] | Importance: low

reconnect.backoff.ms
    The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker.
    Type: long | Default: 50 | Valid Values: [0,...] | Importance: low

retry.backoff.ms
    The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
    Type: long | Default: 100 | Valid Values: [0,...] | Importance: low

sasl.kerberos.kinit.cmd
    Kerberos kinit command path.
    Type: string | Default: /usr/bin/kinit | Valid Values: | Importance: low

sasl.kerberos.min.time.before.relogin
    Login thread sleep time between refresh attempts.
    Type: long | Default: 60000 | Valid Values: | Importance: low

sasl.kerberos.ticket.renew.jitter
    Percentage of random jitter added to the renewal time.
    Type: double | Default: 0.05 | Valid Values: | Importance: low

sasl.kerberos.ticket.renew.window.factor
    The login thread will sleep until the specified window factor of time from the last refresh to the ticket's expiry has been reached, at which time it will try to renew the ticket.
    Type: double | Default: 0.8 | Valid Values: | Importance: low

sasl.login.refresh.buffer.seconds
    The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds, then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum excee [...]
    Type: short | Default: 300 | Valid Values: [0,...,3600] | Importance: low

sasl.login.refresh.min.period.seconds
    The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.
    Type: short | Default: 60 | Valid Values: [0,...,900] | Importance: low

sasl.login.refresh.window.factor
    The login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.
    Type: double | Default: 0.8 | Valid Values: [0.5,...,1.0] | Importance: low

sasl.login.refresh.window.jitter
    The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.
    Type: double | Default: 0.05 | Valid Values: [0.0,...,0.25] | Importance: low

security.providers
    A list of configurable creator classes, each returning a provider implementing security algorithms. These classes should implement the org.apache.kafka.common.security.auth.SecurityProviderCreator interface.
    Type: string | Default: null | Valid Values: | Importance: low

ssl.cipher.suites
    A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithms used to negotiate the security settings for a network connection using the TLS or SSL network protocol. By default all the available cipher suites are supported.
    Type: list | Default: null | Valid Values: | Importance: low

ssl.endpoint.identification.algorithm
    The endpoint identification algorithm to validate the server hostname using the server certificate.
    Type: string | Default: https | Valid Values: | Importance: low

ssl.engine.factory.class
    The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. The default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory.
    Type: class | Default: null | Valid Values: | Importance: low

ssl.keymanager.algorithm
    The algorithm used by the key manager factory for SSL connections. The default value is the key manager factory algorithm configured for the Java Virtual Machine.
    Type: string | Default: SunX509 | Valid Values: | Importance: low

ssl.secure.random.implementation
    The SecureRandom PRNG implementation to use for SSL cryptography operations.
    Type: string | Default: null | Valid Values: | Importance: low

ssl.trustmanager.algorithm
    The algorithm used by the trust manager factory for SSL connections. The default value is the trust manager factory algorithm configured for the Java Virtual Machine.
    Type: string | Default: PKIX | Valid Values: | Importance: low
diff --git a/26/generated/consumer_metrics.html b/26/generated/consumer_metrics.html
new file mode 100644
index 0000000..d9ae7af
--- /dev/null
+++ b/26/generated/consumer_metrics.html
@@ -0,0 +1,81 @@
kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}"
bytes-consumed-rate: The average number of bytes consumed per second.
bytes-consumed-total: The total number of bytes consumed.
fetch-latency-avg: The average time taken for a fetch request.
fetch-latency-max: The max time taken for any fetch request.
fetch-rate: The number of fetch requests per second.
fetch-size-avg: The average number of bytes fetched per request.
fetch-size-max: The maximum number of bytes fetched per request.
fetch-throttle-time-avg: The average throttle time in ms.
fetch-throttle-time-max: The maximum throttle time in ms.
fetch-total: The total number of fetch requests.
records-consumed-rate: The average number of records consumed per second.
records-consumed-total: The total number of records consumed.
records-lag-max: The maximum lag in terms of number of records for any partition in this window.
records-lead-min: The minimum lead in terms of number of records for any partition in this window.
records-per-request-avg: The average number of records in each request.

kafka.consumer:type=consumer-fetch-manager-metrics,client-id="{client-id}",topic="{topic}"
bytes-consumed-rate: The average number of bytes consumed per second for a topic.
bytes-consumed-total: The total number of bytes consumed for a topic.
fetch-size-avg: The average number of bytes fetched per request for a topic.
fetch-size-max: The maximum number of bytes fetched per request for a topic.
records-consumed-rate: The average number of records consumed per second for a topic.
records-consumed-total: The total number of records consumed for a topic.
records-per-request-avg: The average number of records in each request for a topic.

kafka.consumer:type=consumer-fetch-manager-metrics,partition="{partition}",topic="{topic}",client-id="{client-id}"
preferred-read-replica: The current read replica for the partition, or -1 if reading from the leader.
records-lag: The latest lag of the partition.
records-lag-avg: The average lag of the partition.
records-lag-max: The max lag of the partition.
records-lead: The latest lead of the partition.
records-lead-avg: The average lead of the partition.
records-lead-min: The min lead of the partition.
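These attributes are also reachable programmatically via the consumer's metrics() method, without going through JMX. A sketch that prints the client-level fetch-manager metrics of an already-constructed consumer:

    import java.util.Map;

    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.common.Metric;
    import org.apache.kafka.common.MetricName;

    public class FetchMetricsDump {
        // Filters the full metric registry down to the fetch-manager group
        // documented above and prints each attribute's current value.
        public static void dumpFetchMetrics(Consumer<?, ?> consumer) {
            for (Map.Entry<MetricName, ? extends Metric> entry : consumer.metrics().entrySet()) {
                MetricName name = entry.getKey();
                if ("consumer-fetch-manager-metrics".equals(name.group())) {
                    System.out.printf("%s = %s%n", name.name(), entry.getValue().metricValue());
                }
            }
        }
    }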
diff --git a/26/generated/kafka_config.html b/26/generated/kafka_config.html
new file mode 100644
index 0000000..26b6d77
--- /dev/null
+++ b/26/generated/kafka_config.html
@@ -0,0 +1,2291 @@
zookeeper.connect
    Specifies the ZooKeeper connection string in the form hostname:port, where host and port are the host and port of a ZooKeeper server. To allow connecting through other ZooKeeper nodes when that ZooKeeper machine is down, you can also specify multiple hosts in the form hostname1:port1,hostname2:port2,hostname3:port3.
    The server can also have a ZooKeeper chroot path as part of its ZooKeeper connection string which puts its data under some path in the global Zo [...]
    Type: string | Default: | Valid Values: | Importance: high | Update Mode: read-only

advertised.host.name
    DEPRECATED: only used when advertised.listeners or listeners are not set. Use advertised.listeners instead.
    Hostname to publish to ZooKeeper for clients to use. In IaaS environments, this may need to be different from the interface to which the broker binds. If this is not set, it will use the value for host.name if configured. Otherwise it will use the value returned from java.net.InetAddress.getCanonicalHostName().
    Type: string | Default: null | Valid Values: | Importance: high | Update Mode: read-only

advertised.listeners
    Listeners to publish to ZooKeeper for clients to use, if different than the listeners config property. In IaaS environments, this may need to be different from the interface to which the broker binds. If this is not set, the value for listeners will be used. Unlike listeners, it is not valid to advertise the 0.0.0.0 meta-address.
    Type: string | Default: null | Valid Values: | Importance: high | Update Mode: per-broker

advertised.port
    DEPRECATED: only used when advertised.listeners or listeners are not set. Use advertised.listeners instead.
    The port to publish to ZooKeeper for clients to use. In IaaS environments, this may need to be different from the port to which the broker binds. If this is not set, it will publish the same port that the broker binds to.
    Type: int | Default: null | Valid Values: | Importance: high | Update Mode: read-only

auto.create.topics.enable
    Enable auto creation of topics on the server.
    Type: boolean | Default: true | Valid Values: | Importance: high | Update Mode: read-only

auto.leader.rebalance.enable
    Enables auto leader balancing. A background thread checks the distribution of partition leaders at regular intervals, configurable by `leader.imbalance.check.interval.seconds`. If the leader imbalance exceeds `leader.imbalance.per.broker.percentage`, a leader rebalance to the preferred leader for partitions is triggered.
    Type: boolean | Default: true | Valid Values: | Importance: high | Update Mode: read-only

background.threads
    The number of threads to use for various background processing tasks.
    Type: int | Default: 10 | Valid Values: [1,...] | Importance: high | Update Mode: cluster-wide

broker.id
    The broker id for this server. If unset, a unique broker id will be generated. To avoid conflicts between ZooKeeper-generated broker ids and user-configured broker ids, generated broker ids start from reserved.broker.max.id + 1.
    Type: int | Default: -1 | Valid Values: | Importance: high | Update Mode: read-only

compression.type
    Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed', which is equivalent to no compression; and 'producer', which means retain the original compression codec set by the producer.
    Type: string | Default: producer | Valid Values: | Importance: high | Update Mode: cluster-wide

control.plane.listener.name
    Name of the listener used for communication between the controller and brokers. A broker will use the control.plane.listener.name to locate the endpoint in the listeners list, to listen for connections from the controller. For example, if a broker's config is:
    listeners = INTERNAL://192.1.1.8:9092, EXTERNAL://10.1.1.5:9093, CONTROLLER://192.1.1.8:9094
    listener.security.protocol.map = INTERNAL:PLAINTEXT, EXTERNAL:SSL, CONTROLLER:SSL
    control.plane.listener.name = CONTROLLER
    On startup, t [...]
    Type: string | Default: null | Valid Values: | Importance: high | Update Mode: read-only

delete.topic.enable
    Enables topic deletion. Deleting topics through the admin tool will have no effect if this config is turned off.
    Type: boolean | Default: true | Valid Values: | Importance: high | Update Mode: read-only

host.name
    DEPRECATED: only used when listeners is not set. Use listeners instead.
    Hostname of the broker. If this is set, it will only bind to this address. If this is not set, it will bind to all interfaces.
    Type: string | Default: "" | Valid Values: | Importance: high | Update Mode: read-only

leader.imbalance.check.interval.seconds
    The frequency with which the partition rebalance check is triggered by the controller.
    Type: long | Default: 300 | Valid Values: | Importance: high | Update Mode: read-only

leader.imbalance.per.broker.percentage
    The ratio of leader imbalance allowed per broker. The controller will trigger a leader rebalance if it goes above this value per broker. The value is specified in percent.
    Type: int | Default: 10 | Valid Values: | Importance: high | Update Mode: read-only

listeners
    Listener list - comma-separated list of URIs we will listen on and the listener names. If the listener name is not a security protocol, listener.security.protocol.map must also be set.
    Specify hostname as 0.0.0.0 to bind to all interfaces.
    Leave hostname empty to bind to the default interface.
    Examples of legal listener lists:
    PLAINTEXT://myhost:9092,SSL://:9091
    CLIENT://0.0.0.0:9092,REPLICATION://localhost:9093
    Type: string | Default: null | Valid Values: | Importance: high | Update Mode: per-broker

log.dir
    The directory in which the log data is kept (supplemental for the log.dirs property).
    Type: string | Default: /tmp/kafka-logs | Valid Values: | Importance: high | Update Mode: read-only

log.dirs
    The directories in which the log data is kept. If not set, the value in log.dir is used.
    Type: string | Default: null | Valid Values: | Importance: high | Update Mode: read-only

log.flush.interval.messages
    The number of messages accumulated on a log partition before messages are flushed to disk.
    Type: long | Default: 9223372036854775807 | Valid Values: [1,...] | Importance: high | Update Mode: cluster-wide

log.flush.interval.ms
    The maximum time in ms that a message in any topic is kept in memory before being flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
    Type: long | Default: null | Valid Values: | Importance: high | Update Mode: cluster-wide

log.flush.offset.checkpoint.interval.ms
    The frequency with which we update the persistent record of the last flush, which acts as the log recovery point.
    Type: int | Default: 60000 (1 minute) | Valid Values: [0,...] | Importance: high | Update Mode: read-only

log.flush.scheduler.interval.ms
    The frequency in ms that the log flusher checks whether any log needs to be flushed to disk.
    Type: long | Default: 9223372036854775807 | Valid Values: | Importance: high | Update Mode: read-only

log.flush.start.offset.checkpoint.interval.ms
    The frequency with which we update the persistent record of the log start offset.
    Type: int | Default: 60000 (1 minute) | Valid Values: [0,...] | Importance: high | Update Mode: read-only

log.retention.bytes
    The maximum size of the log before deleting it.
    Type: long | Default: -1 | Valid Values: | Importance: high | Update Mode: cluster-wide

log.retention.hours
    The number of hours to keep a log file before deleting it, tertiary to the log.retention.ms property.
    Type: int | Default: 168 | Valid Values: | Importance: high | Update Mode: read-only

log.retention.minutes
    The number of minutes to keep a log file before deleting it, secondary to the log.retention.ms property. If not set, the value in log.retention.hours is used.
    Type: int | Default: null | Valid Values: | Importance: high | Update Mode: read-only

log.retention.ms
    The number of milliseconds to keep a log file before deleting it. If not set, the value in log.retention.minutes is used. If set to -1, no time limit is applied.
    Type: long | Default: null | Valid Values: | Importance: high | Update Mode: cluster-wide

log.roll.hours
    The maximum time before a new log segment is rolled out (in hours), secondary to the log.roll.ms property.
    Type: int | Default: 168 | Valid Values: [1,...] | Importance: high | Update Mode: read-only

log.roll.jitter.hours
    The maximum jitter to subtract from logRollTimeMillis (in hours), secondary to the log.roll.jitter.ms property.
    Type: int | Default: 0 | Valid Values: [0,...] | Importance: high | Update Mode: read-only

log.roll.jitter.ms
    The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in log.roll.jitter.hours is used.
    Type: long | Default: null | Valid Values: | Importance: high | Update Mode: cluster-wide

log.roll.ms
    The maximum time before a new log segment is rolled out (in milliseconds). If not set, the value in log.roll.hours is used.
    Type: long | Default: null | Valid Values: | Importance: high | Update Mode: cluster-wide

log.segment.bytes
    The maximum size of a single log file.
    Type: int | Default: 1073741824 (1 gibibyte) | Valid Values: [14,...] | Importance: high | Update Mode: cluster-wide

log.segment.delete.delay.ms
    The amount of time to wait before deleting a file from the filesystem.
    Type: long | Default: 60000 (1 minute) | Valid Values: [0,...] | Importance: high | Update Mode: cluster-wide

message.max.bytes
    The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in t [...]
    Type: int | Default: 1048588 | Valid Values: [0,...] | Importance: high | Update Mode: cluster-wide

min.insync.replicas
    When a producer sets acks to "all" (or "-1"), min.insync.replicas specifies the minimum number of replicas that must acknowledge a write for the write to be considered successful. If this minimum cannot be met, then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend).
    When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical scenario would be to create a topic with a replication factor [...]
    Type: int | Default: 1 | Valid Values: [1,...] | Importance: high | Update Mode: cluster-wide
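min.insync.replicas is commonly set per topic together with acks=all on the producer. A sketch, assuming a local broker and a hypothetical topic name, that creates such a topic using the Admin API shipped in this release:

    import java.util.Collections;
    import java.util.Properties;
    import java.util.concurrent.ExecutionException;

    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.NewTopic;

    public class DurableTopicSetup {
        public static void main(String[] args) throws ExecutionException, InterruptedException {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
            try (Admin admin = Admin.create(props)) {
                // Replication factor 3 with min.insync.replicas=2 tolerates one
                // replica being down while still accepting acks=all writes.
                NewTopic topic = new NewTopic("durable-events", 6, (short) 3)
                        .configs(Collections.singletonMap("min.insync.replicas", "2"));
                admin.createTopics(Collections.singleton(topic)).all().get();
            }
        }
    }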
  • +

    num.io.threads

    +

    The number of threads that the server uses for processing requests, which may include disk I/O

    + + + + + + +
    Type:int
    Default:8
    Valid Values:[1,...]
    Importance:high
    Update Mode:cluster-wide
    +
  • +
  • +

    num.network.threads

    +

    The number of threads that the server uses for receiving requests from the network and sending responses to the network

    + + + + + + +
    Type:int
    Default:3
    Valid Values:[1,...]
    Importance:high
    Update Mode:cluster-wide

    num.recovery.threads.per.data.dir

    The number of threads per data directory to be used for log recovery at startup and flushing at shutdown

    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:high
    Update Mode:cluster-wide

    num.replica.alter.log.dirs.threads

    The number of threads that can move replicas between log directories, which may include disk I/O

    Type:int
    Default:null
    Valid Values:
    Importance:high
    Update Mode:read-only

    num.replica.fetchers

    Number of fetcher threads used to replicate messages from a source broker. Increasing this value can increase the degree of I/O parallelism in the follower broker.

    Type:int
    Default:1
    Valid Values:
    Importance:high
    Update Mode:cluster-wide

    offset.metadata.max.bytes

    The maximum size for a metadata entry associated with an offset commit

    Type:int
    Default:4096 (4 kibibytes)
    Valid Values:
    Importance:high
    Update Mode:read-only

    offsets.commit.required.acks

    The required acks before the commit can be accepted. In general, the default (-1) should not be overridden

    Type:short
    Default:-1
    Valid Values:
    Importance:high
    Update Mode:read-only

    offsets.commit.timeout.ms

    Offset commit will be delayed until all replicas for the offsets topic receive the commit or this timeout is reached. This is similar to the producer request timeout.

    Type:int
    Default:5000 (5 seconds)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    offsets.load.buffer.size

    Batch size for reading from the offsets segments when loading offsets into the cache (soft-limit, overridden if records are too large).

    Type:int
    Default:5242880
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    offsets.retention.check.interval.ms

    Frequency at which to check for stale offsets

    Type:long
    Default:600000 (10 minutes)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    offsets.retention.minutes

    After a consumer group loses all its consumers (i.e. becomes empty) its offsets will be kept for this retention period before getting discarded. For standalone consumers (using manual assignment), offsets will be expired after the time of last commit plus this retention period.

    Type:int
    Default:10080
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    offsets.topic.compression.codec

    Compression codec for the offsets topic - compression may be used to achieve "atomic" commits

    Type:int
    Default:0
    Valid Values:
    Importance:high
    Update Mode:read-only

    offsets.topic.num.partitions

    The number of partitions for the offset commit topic (should not change after deployment)

    Type:int
    Default:50
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    offsets.topic.replication.factor

    The replication factor for the offsets topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this replication factor requirement.

    Type:short
    Default:3
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only
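
    Because internal topic creation fails until enough brokers are available, single-broker development clusters (a sketch, not suitable for production) commonly lower this:

        offsets.topic.replication.factor=1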

    offsets.topic.segment.bytes

    The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads

    Type:int
    Default:104857600 (100 mebibytes)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    port

    DEPRECATED: only used when listeners is not set. Use listeners instead. The port to listen and accept connections on.

    Type:int
    Default:9092
    Valid Values:
    Importance:high
    Update Mode:read-only

    queued.max.requests

    The number of queued requests allowed for the data plane before blocking the network threads

    Type:int
    Default:500
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    quota.consumer.default

    DEPRECATED: Used only when dynamic default quotas are not configured for <user>, <client-id> or <user, client-id> in Zookeeper. Any consumer distinguished by clientId/consumer group will get throttled if it fetches more bytes than this value per-second

    Type:long
    Default:9223372036854775807
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    quota.producer.default

    DEPRECATED: Used only when dynamic default quotas are not configured for <user>, <client-id> or <user, client-id> in Zookeeper. Any producer distinguished by clientId will get throttled if it produces more bytes than this value per-second

    Type:long
    Default:9223372036854775807
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    replica.fetch.min.bytes

    Minimum bytes expected for each fetch response. If not enough bytes are available, wait up to replica.fetch.wait.max.ms (broker config).

    Type:int
    Default:1
    Valid Values:
    Importance:high
    Update Mode:read-only

    replica.fetch.wait.max.ms

    The maximum wait time for each fetcher request issued by follower replicas. This value should always be less than replica.lag.time.max.ms to prevent frequent shrinking of the ISR for low-throughput topics

    Type:int
    Default:500
    Valid Values:
    Importance:high
    Update Mode:read-only

    replica.high.watermark.checkpoint.interval.ms

    The frequency with which the high watermark is saved out to disk

    Type:long
    Default:5000 (5 seconds)
    Valid Values:
    Importance:high
    Update Mode:read-only

    replica.lag.time.max.ms

    If a follower hasn't sent any fetch requests or hasn't consumed up to the leader's log end offset for at least this time, the leader will remove the follower from the ISR

    Type:long
    Default:30000 (30 seconds)
    Valid Values:
    Importance:high
    Update Mode:read-only

    replica.socket.receive.buffer.bytes

    The socket receive buffer for network requests

    Type:int
    Default:65536 (64 kibibytes)
    Valid Values:
    Importance:high
    Update Mode:read-only

    replica.socket.timeout.ms

    The socket timeout for network requests. Its value should be at least replica.fetch.wait.max.ms

    Type:int
    Default:30000 (30 seconds)
    Valid Values:
    Importance:high
    Update Mode:read-only

    request.timeout.ms

    This configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary or fail the request if retries are exhausted.

    Type:int
    Default:30000 (30 seconds)
    Valid Values:
    Importance:high
    Update Mode:read-only

    socket.receive.buffer.bytes

    The SO_RCVBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.

    Type:int
    Default:102400 (100 kibibytes)
    Valid Values:
    Importance:high
    Update Mode:read-only

    socket.request.max.bytes

    The maximum number of bytes in a socket request

    Type:int
    Default:104857600 (100 mebibytes)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    socket.send.buffer.bytes

    The SO_SNDBUF buffer of the socket server sockets. If the value is -1, the OS default will be used.

    Type:int
    Default:102400 (100 kibibytes)
    Valid Values:
    Importance:high
    Update Mode:read-only

    transaction.max.timeout.ms

    The maximum allowed timeout for transactions. If a client's requested transaction time exceeds this, then the broker will return an error in InitProducerIdRequest. This prevents a client from using too large a timeout, which can stall consumers reading from topics included in the transaction.

    Type:int
    Default:900000 (15 minutes)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    transaction.state.log.load.buffer.size

    Batch size for reading from the transaction log segments when loading producer ids and transactions into the cache (soft-limit, overridden if records are too large).

    Type:int
    Default:5242880
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    transaction.state.log.min.isr

    Overridden min.insync.replicas config for the transaction topic.

    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    transaction.state.log.num.partitions

    The number of partitions for the transaction topic (should not change after deployment).

    Type:int
    Default:50
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    transaction.state.log.replication.factor

    The replication factor for the transaction topic (set higher to ensure availability). Internal topic creation will fail until the cluster size meets this replication factor requirement.

    Type:short
    Default:3
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    transaction.state.log.segment.bytes

    The transaction topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads

    Type:int
    Default:104857600 (100 mebibytes)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    transactional.id.expiration.ms

    The time in ms that the transaction coordinator will wait without receiving any transaction status updates for the current transaction before expiring its transactional id. This setting also influences producer id expiration - producer ids are expired once this time has elapsed after the last write with the given producer id. Note that producer ids may expire sooner if the last write from the producer id is deleted due to the topic's retention settings.

    Type:int
    Default:604800000 (7 days)
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    unclean.leader.election.enable

    Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss

    Type:boolean
    Default:false
    Valid Values:
    Importance:high
    Update Mode:cluster-wide

    zookeeper.connection.timeout.ms

    The max time that the client waits to establish a connection to zookeeper. If not set, the value in zookeeper.session.timeout.ms is used

    Type:int
    Default:null
    Valid Values:
    Importance:high
    Update Mode:read-only

    zookeeper.max.in.flight.requests

    The maximum number of unacknowledged requests the client will send to Zookeeper before blocking.

    Type:int
    Default:10
    Valid Values:[1,...]
    Importance:high
    Update Mode:read-only

    zookeeper.session.timeout.ms

    Zookeeper session timeout

    Type:int
    Default:18000 (18 seconds)
    Valid Values:
    Importance:high
    Update Mode:read-only

    zookeeper.set.acl

    Set client to use secure ACLs

    Type:boolean
    Default:false
    Valid Values:
    Importance:high
    Update Mode:read-only

    broker.id.generation.enable

    Enable automatic broker id generation on the server. When enabled the value configured for reserved.broker.max.id should be reviewed.

    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    Update Mode:read-only

    broker.rack

    Rack of the broker. This will be used in rack aware replication assignment for fault tolerance. Examples: `RACK1`, `us-east-1d`

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    connections.max.idle.ms

    Idle connections timeout: the server socket processor threads close the connections that idle more than this

    Type:long
    Default:600000 (10 minutes)
    Valid Values:
    Importance:medium
    Update Mode:read-only

    connections.max.reauth.ms

    When explicitly set to a positive number (the default is 0, not a positive number), a session lifetime that will not exceed the configured value will be communicated to v2.2.0 or later clients when they authenticate. The broker will disconnect any such connection that is not re-authenticated within the session lifetime and that is then subsequently used for any purpose other than re-authentication. Configuration names can optionally be prefixed with listener prefix and SASL mechanism [...]

    Type:long
    Default:0
    Valid Values:
    Importance:medium
    Update Mode:read-only
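
    A sketch of the listener- and mechanism-prefixed form mentioned above (the listener name and one-hour value are illustrative):

        listener.name.sasl_ssl.oauthbearer.connections.max.reauth.ms=3600000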

    controlled.shutdown.enable

    Enable controlled shutdown of the server

    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    Update Mode:read-only

    controlled.shutdown.max.retries

    Controlled shutdown can fail for multiple reasons. This determines the number of retries when such a failure happens

    Type:int
    Default:3
    Valid Values:
    Importance:medium
    Update Mode:read-only

    controlled.shutdown.retry.backoff.ms

    Before each retry, the system needs time to recover from the state that caused the previous failure (controller failover, replica lag, etc.). This config determines the amount of time to wait before retrying.

    Type:long
    Default:5000 (5 seconds)
    Valid Values:
    Importance:medium
    Update Mode:read-only

    controller.socket.timeout.ms

    The socket timeout for controller-to-broker channels

    Type:int
    Default:30000 (30 seconds)
    Valid Values:
    Importance:medium
    Update Mode:read-only

    default.replication.factor

    The default replication factor for automatically created topics

    Type:int
    Default:1
    Valid Values:
    Importance:medium
    Update Mode:read-only

    delegation.token.expiry.time.ms

    The token validity time in milliseconds before the token needs to be renewed. The default value is 1 day.

    Type:long
    Default:86400000 (1 day)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only

    delegation.token.master.key

    Master/secret key to generate and verify delegation tokens. The same key must be configured across all the brokers. If the key is not set or set to an empty string, brokers will disable delegation token support.

    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    delegation.token.max.lifetime.ms

    The token has a maximum lifetime beyond which it cannot be renewed anymore. The default value is 7 days.

    Type:long
    Default:604800000 (7 days)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only

    delete.records.purgatory.purge.interval.requests

    The purge interval (in number of requests) of the delete records request purgatory

    Type:int
    Default:1
    Valid Values:
    Importance:medium
    Update Mode:read-only

    fetch.max.bytes

    The maximum number of bytes we will return for a fetch request. Must be at least 1024.

    Type:int
    Default:57671680 (55 mebibytes)
    Valid Values:[1024,...]
    Importance:medium
    Update Mode:read-only

    fetch.purgatory.purge.interval.requests

    The purge interval (in number of requests) of the fetch request purgatory

    Type:int
    Default:1000
    Valid Values:
    Importance:medium
    Update Mode:read-only

    group.initial.rebalance.delay.ms

    The amount of time the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins.

    Type:int
    Default:3000 (3 seconds)
    Valid Values:
    Importance:medium
    Update Mode:read-only

    group.max.session.timeout.ms

    The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.

    Type:int
    Default:1800000 (30 minutes)
    Valid Values:
    Importance:medium
    Update Mode:read-only

    group.max.size

    The maximum number of consumers that a single consumer group can accommodate.

    Type:int
    Default:2147483647
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only

    group.min.session.timeout.ms

    The minimum allowed session timeout for registered consumers. Shorter timeouts result in quicker failure detection at the cost of more frequent consumer heartbeating, which can overwhelm broker resources.

    Type:int
    Default:6000 (6 seconds)
    Valid Values:
    Importance:medium
    Update Mode:read-only

    inter.broker.listener.name

    Name of listener used for communication between brokers. If this is unset, the listener name is defined by security.inter.broker.protocol. It is an error to set this and security.inter.broker.protocol properties at the same time.

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    inter.broker.protocol.version

    Specify which version of the inter-broker protocol will be used. This is typically bumped after all brokers were upgraded to a new version. Examples of some valid values are: 0.8.0, 0.8.1, 0.8.1.1, 0.8.2, 0.8.2.0, 0.8.2.1, 0.9.0.0, 0.9.0.1. Check ApiVersion for the full list.

    Type:string
    Default:2.7-IV0
    Valid Values:[0.8.0, 0.8.1, 0.8.2, 0.9.0, 0.10.0-IV0, 0.10.0-IV1, 0.10.1-IV0, 0.10.1-IV1, 0.10.1-IV2, 0.10.2-IV0, 0.11.0-IV0, 0.11.0-IV1, 0.11.0-IV2, 1.0-IV0, 1.1-IV0, 2.0-IV0, 2.0-IV1, 2.1-IV0, 2.1-IV1, 2.1-IV2, 2.2-IV0, 2.2-IV1, 2.3-IV0, 2.3-IV1, 2.4-IV0, 2.4-IV1, 2.5-IV0, 2.6-IV0, 2.7-IV0]
    Importance:medium
    Update Mode:read-only

    log.cleaner.backoff.ms

    The amount of time to sleep when there are no logs to clean

    Type:long
    Default:15000 (15 seconds)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide

    log.cleaner.dedupe.buffer.size

    The total memory used for log deduplication across all cleaner threads

    Type:long
    Default:134217728
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide

    log.cleaner.delete.retention.ms

    How long are delete records retained?

    Type:long
    Default:86400000 (1 day)
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide

    log.cleaner.enable

    Enable the log cleaner process to run on the server. Should be enabled if using any topics with cleanup.policy=compact, including the internal offsets topic. If disabled, those topics will not be compacted and will continually grow in size.

    Type:boolean
    Default:true
    Valid Values:
    Importance:medium
    Update Mode:read-only

    log.cleaner.io.buffer.load.factor

    Log cleaner dedupe buffer load factor. The percentage full the dedupe buffer can become. A higher value will allow more log to be cleaned at once but will lead to more hash collisions

    Type:double
    Default:0.9
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide

    log.cleaner.io.buffer.size

    The total memory used for log cleaner I/O buffers across all cleaner threads

    Type:int
    Default:524288
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide

    log.cleaner.io.max.bytes.per.second

    The log cleaner will be throttled so that the sum of its read and write I/O will be less than this value on average

    Type:double
    Default:1.7976931348623157E308
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide

    log.cleaner.max.compaction.lag.ms

    The maximum time a message will remain ineligible for compaction in the log. Only applicable for logs that are being compacted.

    Type:long
    Default:9223372036854775807
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide

    log.cleaner.min.cleanable.ratio

    The minimum ratio of dirty log to total log for a log to be eligible for cleaning. If the log.cleaner.max.compaction.lag.ms or the log.cleaner.min.compaction.lag.ms configurations are also specified, then the log compactor considers the log eligible for compaction as soon as either: (i) the dirty ratio threshold has been met and the log has had dirty (uncompacted) records for at least the log.cleaner.min.compaction.lag.ms duration, or (ii) if the log has had dirty (uncompacted) records f [...]

    Type:double
    Default:0.5
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide

    log.cleaner.min.compaction.lag.ms

    The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.

    Type:long
    Default:0
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide

    log.cleaner.threads

    The number of background threads to use for log cleaning

    Type:int
    Default:1
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide

    log.cleanup.policy

    The default cleanup policy for segments beyond the retention window. A comma separated list of valid policies. Valid policies are: "delete" and "compact"

    Type:list
    Default:delete
    Valid Values:[compact, delete]
    Importance:medium
    Update Mode:cluster-wide
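
    For example, to both compact and eventually delete old segments by default:

        log.cleanup.policy=compact,delete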

    log.index.interval.bytes

    The interval with which we add an entry to the offset index

    Type:int
    Default:4096 (4 kibibytes)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide

    log.index.size.max.bytes

    The maximum size in bytes of the offset index

    Type:int
    Default:10485760 (10 mebibytes)
    Valid Values:[4,...]
    Importance:medium
    Update Mode:cluster-wide

    log.message.format.version

    Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the user is certifying that all the existing messages on disk are smaller or equal to the specified version. Setting this value incorrectly will cause consumers with older versions to break as they will receive messages with a format [...]

    Type:string
    Default:2.7-IV0
    Valid Values:[0.8.0, 0.8.1, 0.8.2, 0.9.0, 0.10.0-IV0, 0.10.0-IV1, 0.10.1-IV0, 0.10.1-IV1, 0.10.1-IV2, 0.10.2-IV0, 0.11.0-IV0, 0.11.0-IV1, 0.11.0-IV2, 1.0-IV0, 1.1-IV0, 2.0-IV0, 2.0-IV1, 2.1-IV0, 2.1-IV1, 2.1-IV2, 2.2-IV0, 2.2-IV1, 2.3-IV0, 2.3-IV1, 2.4-IV0, 2.4-IV1, 2.5-IV0, 2.6-IV0, 2.7-IV0]
    Importance:medium
    Update Mode:read-only

    log.message.timestamp.difference.max.ms

    The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message. If log.message.timestamp.type=CreateTime, a message will be rejected if the difference in timestamp exceeds this threshold. This configuration is ignored if log.message.timestamp.type=LogAppendTime. The maximum timestamp difference allowed should be no greater than log.retention.ms to avoid unnecessarily frequent log rolling.

    Type:long
    Default:9223372036854775807
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide

    log.message.timestamp.type

    Define whether the timestamp in the message is message create time or log append time. The value should be either `CreateTime` or `LogAppendTime`

    Type:string
    Default:CreateTime
    Valid Values:[CreateTime, LogAppendTime]
    Importance:medium
    Update Mode:cluster-wide

    log.preallocate

    Whether to preallocate the file when creating a new segment. If you are using Kafka on Windows, you probably need to set this to true.

    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide

    log.retention.check.interval.ms

    The frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion

    Type:long
    Default:300000 (5 minutes)
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only

    max.connections

    The maximum number of connections we allow in the broker at any time. This limit is applied in addition to any per-ip limits configured using max.connections.per.ip. Listener-level limits may also be configured by prefixing the config name with the listener prefix, for example, listener.name.internal.max.connections. Broker-wide limit should be configured based on broker capacity while listener limits should be configured based on application requirements. New connections [...]

    Type:int
    Default:2147483647
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide
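
    A sketch combining the broker-wide and listener-level limits described above (the listener name and values are illustrative):

        max.connections=10000
        listener.name.internal.max.connections=2000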

    max.connections.per.ip

    The maximum number of connections we allow from each IP address. This can be set to 0 if there are overrides configured using the max.connections.per.ip.overrides property. New connections from the IP address are dropped if the limit is reached.

    Type:int
    Default:2147483647
    Valid Values:[0,...]
    Importance:medium
    Update Mode:cluster-wide

    max.connections.per.ip.overrides

    A comma-separated list of per-ip or hostname overrides to the default maximum number of connections. An example value is "hostName:100,127.0.0.1:200"

    Type:string
    Default:""
    Valid Values:
    Importance:medium
    Update Mode:cluster-wide
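
    Using the example value from the description, an override that grants a trusted host and loopback limits different from the per-IP default:

        max.connections.per.ip.overrides=hostName:100,127.0.0.1:200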

    max.incremental.fetch.session.cache.slots

    The maximum number of incremental fetch sessions that we will maintain.

    Type:int
    Default:1000
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only

    num.partitions

    The default number of log partitions per topic

    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:medium
    Update Mode:read-only

    password.encoder.old.secret

    The old secret that was used for encoding dynamically configured passwords. This is required only when the secret is updated. If specified, all dynamically encoded passwords are decoded using this old secret and re-encoded using password.encoder.secret when broker starts up.

    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    password.encoder.secret

    The secret used for encoding dynamically configured passwords for this broker.

    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    principal.builder.class

    The fully qualified name of a class that implements the KafkaPrincipalBuilder interface, which is used to build the KafkaPrincipal object used during authorization. This config also supports the deprecated PrincipalBuilder interface which was previously used for client authentication over SSL. If no principal builder is defined, the default behavior depends on the security protocol in use. For SSL authentication, the principal will be derived using the rules defined by ssl.prin [...]

    Type:class
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    producer.purgatory.purge.interval.requests

    The purge interval (in number of requests) of the producer request purgatory

    Type:int
    Default:1000
    Valid Values:
    Importance:medium
    Update Mode:read-only

    queued.max.request.bytes

    The number of queued bytes allowed before no more requests are read

    Type:long
    Default:-1
    Valid Values:
    Importance:medium
    Update Mode:read-only

    replica.fetch.backoff.ms

    The amount of time to sleep when a partition fetch error occurs.

    Type:int
    Default:1000 (1 second)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only

    replica.fetch.max.bytes

    The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum; if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. The maximum record batch size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config).

    Type:int
    Default:1048576 (1 mebibyte)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only

    replica.fetch.response.max.bytes

    Maximum bytes expected for the entire fetch response. Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum. The maximum record batch size accepted by the broker is defined via message.max.bytes (broker config) or max.message.bytes (topic config).

    Type:int
    Default:10485760 (10 mebibytes)
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only

    replica.selector.class

    The fully qualified class name that implements ReplicaSelector. This is used by the broker to find the preferred read replica. By default, we use an implementation that returns the leader.

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    reserved.broker.max.id

    Max number that can be used for a broker.id

    Type:int
    Default:1000
    Valid Values:[0,...]
    Importance:medium
    Update Mode:read-only

    sasl.client.callback.handler.class

    The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.

    Type:class
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    sasl.enabled.mechanisms

    The list of SASL mechanisms enabled in the Kafka server. The list may contain any mechanism for which a security provider is available. Only GSSAPI is enabled by default.

    Type:list
    Default:GSSAPI
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.jaas.config

    JAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described here. The format for the value is: 'loginModuleClass controlFlag (optionName=optionValue)*;'. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.nam [...]

    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker
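
    A minimal sketch of the 'loginModuleClass controlFlag (optionName=optionValue)*;' format for a broker-side PLAIN listener (the listener name and credentials are placeholders):

        listener.name.sasl_ssl.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";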

    sasl.kerberos.kinit.cmd

    Kerberos kinit command path.

    Type:string
    Default:/usr/bin/kinit
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.kerberos.min.time.before.relogin

    Login thread sleep time between refresh attempts.

    Type:long
    Default:60000
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.kerberos.principal.to.local.rules

    A list of rules for mapping from principal names to short names (typically operating system usernames). The rules are evaluated in order and the first rule that matches a principal name is used to map it to a short name. Any later rules in the list are ignored. By default, principal names of the form {username}/{hostname}@{REALM} are mapped to {username}. For more details on the format please see security authorization and acls. Note that this configurat [...]

    Type:list
    Default:DEFAULT
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.kerberos.service.name

    The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.kerberos.ticket.renew.jitter

    Percentage of random jitter added to the renewal time.

    Type:double
    Default:0.05
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.kerberos.ticket.renew.window.factor

    Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.

    Type:double
    Default:0.8
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.login.callback.handler.class

    The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler

    Type:class
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    sasl.login.class

    The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin

    Type:class
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    sasl.login.refresh.buffer.seconds

    The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum excee [...]

    Type:short
    Default:300
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.login.refresh.min.period.seconds

    The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

    Type:short
    Default:60
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.login.refresh.window.factor

    Login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    Type:double
    Default:0.8
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.login.refresh.window.jitter

    The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.

    Type:double
    Default:0.05
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.mechanism.inter.broker.protocol

    SASL mechanism used for inter-broker communication. Default is GSSAPI.

    Type:string
    Default:GSSAPI
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    sasl.server.callback.handler.class

    The fully qualified name of a SASL server callback handler class that implements the AuthenticateCallbackHandler interface. Server callback handlers must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.plain.sasl.server.callback.handler.class=com.example.CustomPlainCallbackHandler.

    Type:class
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    security.inter.broker.protocol

    Security protocol used to communicate between brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. It is an error to set this and inter.broker.listener.name properties at the same time.

    Type:string
    Default:PLAINTEXT
    Valid Values:
    Importance:medium
    Update Mode:read-only

    ssl.cipher.suites

    A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.

    Type:list
    Default:""
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    ssl.client.auth

    Configures the Kafka broker to request client authentication. The following settings are common:

  • ssl.client.auth=required If set to required, client authentication is required.
  • ssl.client.auth=requested This means client authentication is optional. Unlike required, if this option is set the client can choose not to provide authentication information about itself.
  • ssl.client.auth=none This means client authentication is not needed.

    Type:string
    Default:none
    Valid Values:[required, requested, none]
    Importance:medium
    Update Mode:per-broker

    ssl.enabled.protocols

    The list of protocols enabled for SSL connections. The default is 'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. With the default value for Java 11, clients and servers will prefer TLSv1.3 if both support it and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most cases. Also see the config documentation for `ssl.protocol`.

    Type:list
    Default:TLSv1.2
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    ssl.key.password

    The password of the private key in the key store file. This is optional for client.

    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    ssl.keymanager.algorithm

    The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.

    Type:string
    Default:SunX509
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    ssl.keystore.location

    The location of the key store file. This is optional for client and can be used for two-way authentication for client.

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    ssl.keystore.password

    The store password for the key store file. This is optional for client and only needed if ssl.keystore.location is configured.

    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    ssl.keystore.type

    The file format of the key store file. This is optional for client.

    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    Update Mode:per-broker
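
    Pulling the preceding keystore settings together, a sketch of a broker's SSL key material (the path and passwords are placeholders):

        ssl.keystore.location=/var/private/ssl/kafka.server.keystore.jks
        ssl.keystore.password=keystore-password
        ssl.key.password=key-password
        ssl.keystore.type=JKS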

    ssl.protocol

    The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. This value should be fine for most use cases. Allowed values in recent JVMs are 'TLSv1.2' and 'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL', 'SSLv2' and 'SSLv3' may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. With the default value for this config and 'ssl.enabled.protocols', clients will downgrade to 'TLSv1.2' i [...]

    Type:string
    Default:TLSv1.2
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    ssl.provider

    The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    ssl.trustmanager.algorithm

    The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.

    Type:string
    Default:PKIX
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    ssl.truststore.location

    The location of the trust store file.

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    ssl.truststore.password

    The password for the trust store file. If a password is not set, access to the truststore is still available, but integrity checking is disabled.

    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    ssl.truststore.type

    The file format of the trust store file.

    Type:string
    Default:JKS
    Valid Values:
    Importance:medium
    Update Mode:per-broker

    zookeeper.clientCnxnSocket

    Typically set to org.apache.zookeeper.ClientCnxnSocketNetty when using TLS connectivity to ZooKeeper. Overrides any explicit value set via the same-named zookeeper.clientCnxnSocket system property.

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    zookeeper.ssl.client.enable

    Set client to use TLS when connecting to ZooKeeper. An explicit value overrides any value set via the zookeeper.client.secure system property (note the different name). Defaults to false if neither is set; when true, zookeeper.clientCnxnSocket must be set (typically to org.apache.zookeeper.ClientCnxnSocketNetty); other values to set may include zookeeper.ssl.cipher.suites, zookeeper.ssl.crl.enable, zookeeper.ssl.en [...]

    Type:boolean
    Default:false
    Valid Values:
    Importance:medium
    Update Mode:read-only
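
    A sketch of the minimum settings named above for TLS connectivity to ZooKeeper (the truststore path and password are placeholders):

        zookeeper.ssl.client.enable=true
        zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
        zookeeper.ssl.truststore.location=/path/to/zk-truststore.jks
        zookeeper.ssl.truststore.password=truststore-password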

    zookeeper.ssl.keystore.location

    Keystore location when using a client-side certificate with TLS connectivity to ZooKeeper. Overrides any explicit value set via the zookeeper.ssl.keyStore.location system property (note the camelCase).

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    zookeeper.ssl.keystore.password

    Keystore password when using a client-side certificate with TLS connectivity to ZooKeeper. Overrides any explicit value set via the zookeeper.ssl.keyStore.password system property (note the camelCase). Note that ZooKeeper does not support a key password different from the keystore password, so be sure to set the key password in the keystore to be identical to the keystore password; otherwise the connection attempt to Zookeeper will fail.

    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    zookeeper.ssl.keystore.type

    Keystore type when using a client-side certificate with TLS connectivity to ZooKeeper. Overrides any explicit value set via the zookeeper.ssl.keyStore.type system property (note the camelCase). The default value of null means the type will be auto-detected based on the filename extension of the keystore.

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    zookeeper.ssl.truststore.location

    Truststore location when using TLS connectivity to ZooKeeper. Overrides any explicit value set via the zookeeper.ssl.trustStore.location system property (note the camelCase).

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    zookeeper.ssl.truststore.password

    Truststore password when using TLS connectivity to ZooKeeper. Overrides any explicit value set via the zookeeper.ssl.trustStore.password system property (note the camelCase).

    Type:password
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    zookeeper.ssl.truststore.type

    Truststore type when using TLS connectivity to ZooKeeper. Overrides any explicit value set via the zookeeper.ssl.trustStore.type system property (note the camelCase). The default value of null means the type will be auto-detected based on the filename extension of the truststore.

    Type:string
    Default:null
    Valid Values:
    Importance:medium
    Update Mode:read-only

    alter.config.policy.class.name

    The alter configs policy class that should be used for validation. The class should implement the org.apache.kafka.server.policy.AlterConfigPolicy interface.

    Type:class
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only

    alter.log.dirs.replication.quota.window.num

    The number of samples to retain in memory for alter log dirs replication quotas

    Type:int
    Default:11
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only

    alter.log.dirs.replication.quota.window.size.seconds

    The time span of each sample for alter log dirs replication quotas

    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only

    authorizer.class.name

    The fully qualified name of a class that implements the org.apache.kafka.server.authorizer.Authorizer interface, which is used by the broker for authorization. This config also supports authorizers that implement the deprecated kafka.security.auth.Authorizer trait which was previously used for authorization.

    Type:string
    Default:""
    Valid Values:
    Importance:low
    Update Mode:read-only

    client.quota.callback.class

    The fully qualified name of a class that implements the ClientQuotaCallback interface, which is used to determine quota limits applied to client requests. By default, <user>, <client-id> or <user, client-id> quotas stored in ZooKeeper are applied. For any given request, the most specific quota that matches the user principal of the session and the client-id of the request is applied.

    Type:class
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only

    connection.failed.authentication.delay.ms

    Connection close delay on failed authentication: this is the time (in milliseconds) by which connection close will be delayed on authentication failure. This must be configured to be less than connections.max.idle.ms to prevent connection timeout.

    Type:int
    Default:100
    Valid Values:[0,...]
    Importance:low
    Update Mode:read-only

    create.topic.policy.class.name

    The create topic policy class that should be used for validation. The class should implement the org.apache.kafka.server.policy.CreateTopicPolicy interface.

    Type:class
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only

    delegation.token.expiry.check.interval.ms

    Scan interval to remove expired delegation tokens.

    Type:long
    Default:3600000 (1 hour)
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only

    kafka.metrics.polling.interval.secs

    The metrics polling interval (in seconds) which can be used in kafka.metrics.reporters implementations.

    Type:int
    Default:10
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only

    kafka.metrics.reporters

    A list of classes to use as Yammer metrics custom reporters. The reporters should implement kafka.metrics.KafkaMetricsReporter trait. If a client wants to expose JMX operations on a custom reporter, the custom reporter needs to additionally implement an MBean trait that extends kafka.metrics.KafkaMetricsReporterMBean trait so that the registered MBean is compliant with the standard MBean convention.

    Type:list
    Default:""
    Valid Values:
    Importance:low
    Update Mode:read-only

    listener.security.protocol.map

    Map between listener names and security protocols. This must be defined for the same security protocol to be usable in more than one port or IP. For example, internal and external traffic can be separated even if SSL is required for both. Concretely, the user could define listeners with names INTERNAL and EXTERNAL and this property as: `INTERNAL:SSL,EXTERNAL:SSL`. As shown, key and value are separated by a colon and map entries are separated by commas. Each listener name should only a [...]

    Type:string
    Default:PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
    Valid Values:
    Importance:low
    Update Mode:per-broker
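
    A sketch of the INTERNAL/EXTERNAL example above, wired to listeners and inter-broker traffic (hosts and ports are illustrative):

        listeners=INTERNAL://0.0.0.0:9092,EXTERNAL://0.0.0.0:9093
        listener.security.protocol.map=INTERNAL:SSL,EXTERNAL:SSL
        inter.broker.listener.name=INTERNAL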

    log.message.downconversion.enable

    This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. When set to false, the broker will not perform down-conversion for consumers expecting an older message format. The broker responds with UNSUPPORTED_VERSION error for consume requests from such older clients. This configuration does not apply to any message format conversion that might be required for replication to followers.

    Type:boolean
    Default:true
    Valid Values:
    Importance:low
    Update Mode:cluster-wide

    metric.reporters

    A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.

    Type:list
    Default:""
    Valid Values:
    Importance:low
    Update Mode:cluster-wide

    metrics.num.samples

    The number of samples maintained to compute metrics.

    Type:int
    Default:2
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only

    metrics.recording.level

    The highest recording level for metrics.

    Type:string
    Default:INFO
    Valid Values:
    Importance:low
    Update Mode:read-only

    metrics.sample.window.ms

    The window of time a metrics sample is computed over.

    Type:long
    Default:30000 (30 seconds)
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only

    password.encoder.cipher.algorithm

    The Cipher algorithm used for encoding dynamically configured passwords.

    Type:string
    Default:AES/CBC/PKCS5Padding
    Valid Values:
    Importance:low
    Update Mode:read-only

    password.encoder.iterations

    The iteration count used for encoding dynamically configured passwords.

    Type:int
    Default:4096
    Valid Values:[1024,...]
    Importance:low
    Update Mode:read-only

    password.encoder.key.length

    The key length used for encoding dynamically configured passwords.

    Type:int
    Default:128
    Valid Values:[8,...]
    Importance:low
    Update Mode:read-only

    password.encoder.keyfactory.algorithm

    The SecretKeyFactory algorithm used for encoding dynamically configured passwords. Default is PBKDF2WithHmacSHA512 if available and PBKDF2WithHmacSHA1 otherwise.

    Type:string
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    quota.window.num

    +

    The number of samples to retain in memory for client quotas

    + + + + + + +
    Type:int
    Default:11
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    quota.window.size.seconds

    +

    The time span of each sample for client quotas

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    replication.quota.window.num

    +

    The number of samples to retain in memory for replication quotas

    + + + + + + +
    Type:int
    Default:11
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    replication.quota.window.size.seconds

    +

    The time span of each sample for replication quotas

    + + + + + + +
    Type:int
    Default:1
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    security.providers

    +

    A list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the org.apache.kafka.common.security.auth.SecurityProviderCreator interface.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    ssl.endpoint.identification.algorithm

    +

    The endpoint identification algorithm to validate server hostname using server certificate.

    + + + + + + +
    Type:string
    Default:https
    Valid Values:
    Importance:low
    Update Mode:per-broker
    +
  • +
  • +

    ssl.engine.factory.class

    +

    The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory

    + + + + + + +
    Type:class
    Default:null
    Valid Values:
    Importance:low
    Update Mode:per-broker
    +
  • +
  • +

    ssl.principal.mapping.rules

    +

A list of rules for mapping the distinguished name from the client certificate to a short name. The rules are evaluated in order, and the first rule that matches a principal name is used to map it to a short name; any later rules in the list are ignored. By default, the distinguished name of the X.500 certificate will be the principal. For more details on the format please see security authorization and acls. Note that this configuration is ignored if an exten [...] + + + + + + +
    Type:string
    Default:DEFAULT
    Valid Values:
    Importance:low
    Update Mode:read-only
    +

  • +
  • +

    ssl.secure.random.implementation

    +

    The SecureRandom PRNG implementation to use for SSL cryptography operations.

    + + + + + + +
    Type:string
    Default:null
    Valid Values:
    Importance:low
    Update Mode:per-broker
    +
  • +
  • +

    transaction.abort.timed.out.transaction.cleanup.interval.ms

    +

The interval at which to roll back transactions that have timed out

    + + + + + + +
    Type:int
    Default:10000 (10 seconds)
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    transaction.remove.expired.transaction.cleanup.interval.ms

    +

    The interval at which to remove transactions that have expired due to transactional.id.expiration.ms passing

    + + + + + + +
    Type:int
    Default:3600000 (1 hour)
    Valid Values:[1,...]
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    zookeeper.ssl.cipher.suites

    +

    Specifies the enabled cipher suites to be used in ZooKeeper TLS negotiation (csv). Overrides any explicit value set via the zookeeper.ssl.ciphersuites system property (note the single word "ciphersuites"). The default value of null means the list of enabled cipher suites is determined by the Java runtime being used.

    + + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    zookeeper.ssl.crl.enable

    +

    Specifies whether to enable Certificate Revocation List in the ZooKeeper TLS protocols. Overrides any explicit value set via the zookeeper.ssl.crl system property (note the shorter name).

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    zookeeper.ssl.enabled.protocols

    +

    Specifies the enabled protocol(s) in ZooKeeper TLS negotiation (csv). Overrides any explicit value set via the zookeeper.ssl.enabledProtocols system property (note the camelCase). The default value of null means the enabled protocol will be the value of the zookeeper.ssl.protocol configuration property.

    + + + + + + +
    Type:list
    Default:null
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    zookeeper.ssl.endpoint.identification.algorithm

    +

    Specifies whether to enable hostname verification in the ZooKeeper TLS negotiation process, with (case-insensitively) "https" meaning ZooKeeper hostname verification is enabled and an explicit blank value meaning it is disabled (disabling it is only recommended for testing purposes). An explicit value overrides any "true" or "false" value set via the zookeeper.ssl.hostnameVerification system property (note the different name and values; true implies https and false implie [...] + + + + + + +
    Type:string
    Default:HTTPS
    Valid Values:
    Importance:low
    Update Mode:read-only
    +

  • +
  • +

    zookeeper.ssl.ocsp.enable

    +

    Specifies whether to enable Online Certificate Status Protocol in the ZooKeeper TLS protocols. Overrides any explicit value set via the zookeeper.ssl.ocsp system property (note the shorter name).

    + + + + + + +
    Type:boolean
    Default:false
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    zookeeper.ssl.protocol

    +

    Specifies the protocol to be used in ZooKeeper TLS negotiation. An explicit value overrides any value set via the same-named zookeeper.ssl.protocol system property.

    + + + + + + +
    Type:string
    Default:TLSv1.2
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
  • +

    zookeeper.sync.time.ms

    +

    How far a ZK follower can be behind a ZK leader

    + + + + + + +
    Type:int
    Default:2000 (2 seconds)
    Valid Values:
    Importance:low
    Update Mode:read-only
    +
  • +
+ diff --git a/26/generated/producer_config.html b/26/generated/producer_config.html new file mode 100644 index 0000000..e27eb74 --- /dev/null +++ b/26/generated/producer_config.html @@ -0,0 +1,653 @@ +
    +
  • +

    key.serializer

    +

    Serializer class for key that implements the org.apache.kafka.common.serialization.Serializer interface.

    + + + + + +
    Type:class
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    value.serializer

    +

    Serializer class for value that implements the org.apache.kafka.common.serialization.Serializer interface.

    + + + + + +
    Type:class
    Default:
    Valid Values:
    Importance:high
    +
  • +
  • +

    acks

    +

    The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The following settings are allowed:

    • acks=0 If set to zero then the producer will not wait for any acknowledgment from the server at all. The record will be immediately added to the socket buffer and considered sent. No guarantee can be made that the server has received the record in this ca [...] + + + + + +
      Type:string
      Default:1
      Valid Values:[all, -1, 0, 1]
      Importance:high
      +
    • +
    • +

      bootstrap.servers

      +

      A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynam [...] + + + + + +
      Type:list
      Default:""
      Valid Values:non-null string
      Importance:high
      +
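Taken together, the entries above (key.serializer, value.serializer, acks, bootstrap.servers) are enough for a working producer. A minimal sketch, assuming hypothetical broker addresses and topic name:

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    public class MinimalProducer {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "broker1:9092,broker2:9092"); // hypothetical hosts
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("acks", "all"); // wait for the full ISR for the strongest durability

            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("my-topic", "key", "value"));
            } // close() flushes any buffered records before returning
        }
    }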

    • +
    • +

      buffer.memory

      +

      The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server the producer will block for max.block.ms after which it will throw an exception.

      This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since not all memory the producer uses is used for buffering. Some additional memory will be used for compression (if co [...] + + + + + +
      Type:long
      Default:33554432
      Valid Values:[0,...]
      Importance:high
      +

    • +
    • +

      compression.type

      +

      The compression type for all data generated by the producer. The default is none (i.e. no compression). Valid values are none, gzip, snappy, lz4, or zstd. Compression is of full batches of data, so the efficacy of batching will also impact the compression ratio (more batching means better compression).

      + + + + + +
      Type:string
      Default:none
      Valid Values:
      Importance:high
      +
    • +
    • +

      retries

      +

      Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. Note that this retry is no different than if the client resent the record upon receiving the error. Allowing retries without setting max.in.flight.requests.per.connection to 1 will potentially change the ordering of records because if two batches are sent to a single partition, and the first fails and is retried but the second succeeds, then th [...] + + + + + +
      Type:int
      Default:2147483647
      Valid Values:[0,...,2147483647]
      Importance:high
      +

    • +
    • +

      ssl.key.password

      +

The password of the private key in the key store file. This is optional for clients.

      + + + + + +
      Type:password
      Default:null
      Valid Values:
      Importance:high
      +
    • +
    • +

      ssl.keystore.location

      +

The location of the key store file. This is optional for clients and can be used for two-way client authentication.

      + + + + + +
      Type:string
      Default:null
      Valid Values:
      Importance:high
      +
    • +
    • +

      ssl.keystore.password

      +

The store password for the key store file. This is optional for clients and only needed if ssl.keystore.location is configured.

      + + + + + +
      Type:password
      Default:null
      Valid Values:
      Importance:high
      +
    • +
    • +

      ssl.truststore.location

      +

      The location of the trust store file.

      + + + + + +
      Type:string
      Default:null
      Valid Values:
      Importance:high
      +
    • +
    • +

      ssl.truststore.password

      +

The password for the trust store file. If a password is not set, access to the truststore is still available, but integrity checking is disabled.

      + + + + + +
      Type:password
      Default:null
      Valid Values:
      Importance:high
      +
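A sketch of how the SSL settings above typically combine on a client, extending the props object from the earlier producer sketch; the paths and passwords are placeholders, and the keystore lines apply only when the broker requires two-way (mutual) TLS:

    props.put("security.protocol", "SSL");
    props.put("ssl.truststore.location", "/etc/kafka/client.truststore.jks"); // placeholder path
    props.put("ssl.truststore.password", "changeit");                         // placeholder
    // Only needed when the broker authenticates the client (mutual TLS):
    props.put("ssl.keystore.location", "/etc/kafka/client.keystore.jks");
    props.put("ssl.keystore.password", "changeit");
    props.put("ssl.key.password", "changeit");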
    • +
    • +

      batch.size

      +

      The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes.

      No attempt will be made to batch records larger than this size.

      Requests sent to brokers will contain multiple batches, one for each partition with data available to be sent.

      A small batch size will make batching less common [...] + + + + + +
      Type:int
      Default:16384
      Valid Values:[0,...]
      Importance:medium
      +

    • +
    • +

      client.dns.lookup

      +

      Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again (both the JVM and the OS cache DNS name lookups, however). If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical n [...] + + + + + +
      Type:string
      Default:use_all_dns_ips
      Valid Values:[default, use_all_dns_ips, resolve_canonical_bootstrap_servers_only]
      Importance:medium
      +

    • +
    • +

      client.id

      +

      An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.

      + + + + + +
      Type:string
      Default:""
      Valid Values:
      Importance:medium
      +
    • +
    • +

      connections.max.idle.ms

      +

      Close idle connections after the number of milliseconds specified by this config.

      + + + + + +
      Type:long
      Default:540000 (9 minutes)
      Valid Values:
      Importance:medium
      +
    • +
    • +

      delivery.timeout.ms

      +

      An upper bound on the time to report success or failure after a call to send() returns. This limits the total time that a record will be delayed prior to sending, the time to await acknowledgement from the broker (if expected), and the time allowed for retriable send failures. The producer may report failure to send a record earlier than this config if either an unrecoverable error is encountered, the retries have been exhausted, or the record is added to a batch which re [...] + + + + + +
      Type:int
      Default:120000 (2 minutes)
      Valid Values:[0,...]
      Importance:medium
      +

    • +
    • +

      linger.ms

      +

      The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay—that is, rather than immediately sending out a record the producer will wait for up to the [...] + + + + + +
      Type:long
      Default:0
      Valid Values:[0,...]
      Importance:medium
      +
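batch.size and linger.ms work together: the producer sends a batch as soon as it is full or the linger time expires, whichever comes first. A throughput-oriented sketch with illustrative values, again extending the earlier props object:

    props.put("batch.size", "65536");      // allow up to 64 KiB per partition batch
    props.put("linger.ms", "10");          // wait up to 10 ms for more records to arrive
    props.put("compression.type", "lz4");  // fuller batches usually compress better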

    • +
    • +

      max.block.ms

      +

The configuration controls how long KafkaProducer.send() and KafkaProducer.partitionsFor() will block. These methods can be blocked either because the buffer is full or because metadata is unavailable. Blocking in user-supplied serializers or the partitioner will not be counted against this timeout.

      + + + + + +
      Type:long
      Default:60000 (1 minute)
      Valid Values:[0,...]
      Importance:medium
      +
    • +
    • +

      max.request.size

      +

      The maximum size of a request in bytes. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests. This is also effectively a cap on the maximum uncompressed record batch size. Note that the server has its own cap on the record batch size (after compression if compression is enabled) which may be different from this.

      + + + + + +
      Type:int
      Default:1048576
      Valid Values:[0,...]
      Importance:medium
      +
    • +
    • +

      partitioner.class

      +

      Partitioner class that implements the org.apache.kafka.clients.producer.Partitioner interface.

      + + + + + +
      Type:class
      Default:org.apache.kafka.clients.producer.internals.DefaultPartitioner
      Valid Values:
      Importance:medium
      +
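A sketch of a custom implementation; the class and its routing policy are hypothetical, and it would be enabled with partitioner.class=com.example.KeyOrZeroPartitioner:

    import java.util.Map;
    import org.apache.kafka.clients.producer.Partitioner;
    import org.apache.kafka.common.Cluster;
    import org.apache.kafka.common.utils.Utils;

    // Hypothetical policy: records with a null key go to partition 0,
    // keyed records are hashed over the remaining partitions.
    public class KeyOrZeroPartitioner implements Partitioner {
        @Override public void configure(Map<String, ?> configs) { }

        @Override
        public int partition(String topic, Object key, byte[] keyBytes,
                             Object value, byte[] valueBytes, Cluster cluster) {
            int numPartitions = cluster.partitionsForTopic(topic).size();
            if (keyBytes == null || numPartitions == 1)
                return 0;
            return 1 + (Utils.murmur2(keyBytes) & 0x7fffffff) % (numPartitions - 1);
        }

        @Override public void close() { }
    }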
    • +
    • +

      receive.buffer.bytes

      +

      The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.

      + + + + + +
      Type:int
      Default:32768 (32 kibibytes)
      Valid Values:[-1,...]
      Importance:medium
      +
    • +
    • +

      request.timeout.ms

      +

      The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. This should be larger than replica.lag.time.max.ms (a broker configuration) to reduce the possibility of message duplication due to unnecessary producer retries.

      + + + + + +
      Type:int
      Default:30000 (30 seconds)
      Valid Values:[0,...]
      Importance:medium
      +
    • +
    • +

      sasl.client.callback.handler.class

      +

      The fully qualified name of a SASL client callback handler class that implements the AuthenticateCallbackHandler interface.

      + + + + + +
      Type:class
      Default:null
      Valid Values:
      Importance:medium
      +
    • +
    • +

      sasl.jaas.config

      +

      JAAS login context parameters for SASL connections in the format used by JAAS configuration files. JAAS configuration file format is described here. The format for the value is: 'loginModuleClass controlFlag (optionName=optionValue)*;'. For brokers, the config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.nam [...] + + + + + +
      Type:password
      Default:null
      Valid Values:
      Importance:medium
      +
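For example, a client using SASL/PLAIN might set the value inline as below; the mechanism choice and credentials are placeholders for illustration:

    props.put("security.protocol", "SASL_SSL");
    props.put("sasl.mechanism", "PLAIN");
    props.put("sasl.jaas.config",
        "org.apache.kafka.common.security.plain.PlainLoginModule required "
        + "username=\"alice\" password=\"alice-secret\";"); // placeholder credentials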

    • +
    • +

      sasl.kerberos.service.name

      +

      The Kerberos principal name that Kafka runs as. This can be defined either in Kafka's JAAS config or in Kafka's config.

      + + + + + +
      Type:string
      Default:null
      Valid Values:
      Importance:medium
      +
    • +
    • +

      sasl.login.callback.handler.class

      +

      The fully qualified name of a SASL login callback handler class that implements the AuthenticateCallbackHandler interface. For brokers, login callback handler config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler

      + + + + + +
      Type:class
      Default:null
      Valid Values:
      Importance:medium
      +
    • +
    • +

      sasl.login.class

      +

      The fully qualified name of a class that implements the Login interface. For brokers, login config must be prefixed with listener prefix and SASL mechanism name in lower-case. For example, listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin

      + + + + + +
      Type:class
      Default:null
      Valid Values:
      Importance:medium
      +
    • +
    • +

      sasl.mechanism

      +

      SASL mechanism used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.

      + + + + + +
      Type:string
      Default:GSSAPI
      Valid Values:
      Importance:medium
      +
    • +
    • +

      security.protocol

      +

      Protocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.

      + + + + + +
      Type:string
      Default:PLAINTEXT
      Valid Values:
      Importance:medium
      +
    • +
    • +

      send.buffer.bytes

      +

      The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.

      + + + + + +
      Type:int
      Default:131072 (128 kibibytes)
      Valid Values:[-1,...]
      Importance:medium
      +
    • +
    • +

      ssl.enabled.protocols

      +

The list of protocols enabled for SSL connections. The default is 'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. With the default value for Java 11, clients and servers will prefer TLSv1.3 if both support it and fall back to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most cases. Also see the config documentation for `ssl.protocol`.

      + + + + + +
      Type:list
      Default:TLSv1.2
      Valid Values:
      Importance:medium
      +
    • +
    • +

      ssl.keystore.type

      +

The file format of the key store file. This is optional for clients.

      + + + + + +
      Type:string
      Default:JKS
      Valid Values:
      Importance:medium
      +
    • +
    • +

      ssl.protocol

      +

      The SSL protocol used to generate the SSLContext. The default is 'TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. This value should be fine for most use cases. Allowed values in recent JVMs are 'TLSv1.2' and 'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL', 'SSLv2' and 'SSLv3' may be supported in older JVMs, but their usage is discouraged due to known security vulnerabilities. With the default value for this config and 'ssl.enabled.protocols', clients will downgrade to 'TLSv1.2' i [...] + + + + + +
      Type:string
      Default:TLSv1.2
      Valid Values:
      Importance:medium
      +

    • +
    • +

      ssl.provider

      +

      The name of the security provider used for SSL connections. Default value is the default security provider of the JVM.

      + + + + + +
      Type:string
      Default:null
      Valid Values:
      Importance:medium
      +
    • +
    • +

      ssl.truststore.type

      +

      The file format of the trust store file.

      + + + + + +
      Type:string
      Default:JKS
      Valid Values:
      Importance:medium
      +
    • +
    • +

      enable.idempotence

      +

      When set to 'true', the producer will ensure that exactly one copy of each message is written in the stream. If 'false', producer retries due to broker failures, etc., may write duplicates of the retried message in the stream. Note that enabling idempotence requires max.in.flight.requests.per.connection to be less than or equal to 5, retries to be greater than 0 and acks must be 'all'. If these values are not explicitly set by the user, suitable [...] + + + + + +
      Type:boolean
      Default:false
      Valid Values:
      Importance:low
      +
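A sketch of enabling idempotence explicitly; the two settings after it are the compatible values the producer would otherwise choose by default, shown here only for clarity:

    props.put("enable.idempotence", "true");
    props.put("acks", "all");                                 // required for idempotence
    props.put("max.in.flight.requests.per.connection", "5"); // must be 5 or less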

    • +
    • +

      interceptor.classes

      +

      A list of classes to use as interceptors. Implementing the org.apache.kafka.clients.producer.ProducerInterceptor interface allows you to intercept (and possibly mutate) the records received by the producer before they are published to the Kafka cluster. By default, there are no interceptors.

      + + + + + +
      Type:list
      Default:""
      Valid Values:non-null string
      Importance:low
      +
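A minimal sketch of an interceptor (the class name and counting behavior are illustrative), enabled with interceptor.classes=com.example.CountingInterceptor:

    import java.util.Map;
    import org.apache.kafka.clients.producer.ProducerInterceptor;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.clients.producer.RecordMetadata;

    // Hypothetical interceptor that counts sends and failures.
    public class CountingInterceptor implements ProducerInterceptor<String, String> {
        private long sent;
        private long failed;

        @Override public void configure(Map<String, ?> configs) { }

        @Override
        public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
            sent++;
            return record; // may also return a mutated copy of the record
        }

        @Override
        public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
            if (exception != null) failed++;
        }

        @Override public void close() {
            System.out.printf("sent=%d failed=%d%n", sent, failed);
        }
    }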
    • +
    • +

      max.in.flight.requests.per.connection

      +

      The maximum number of unacknowledged requests the client will send on a single connection before blocking. Note that if this setting is set to be greater than 1 and there are failed sends, there is a risk of message re-ordering due to retries (i.e., if retries are enabled).

      + + + + + +
      Type:int
      Default:5
      Valid Values:[1,...]
      Importance:low
      +
    • +
    • +

      metadata.max.age.ms

      +

      The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.

      + + + + + +
      Type:long
      Default:300000 (5 minutes)
      Valid Values:[0,...]
      Importance:low
      +
    • +
    • +

      metadata.max.idle.ms

      +

      Controls how long the producer will cache metadata for a topic that's idle. If the elapsed time since a topic was last produced to exceeds the metadata idle duration, then the topic's metadata is forgotten and the next access to it will force a metadata fetch request.

      + + + + + +
      Type:long
      Default:300000 (5 minutes)
      Valid Values:[5000,...]
      Importance:low
      +
    • +
    • +

      metric.reporters

      +

      A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.

      + + + + + +
      Type:list
      Default:""
      Valid Values:non-null string
      Importance:low
      +
    • +
    • +

      metrics.num.samples

      +

      The number of samples maintained to compute metrics.

      + + + + + +
      Type:int
      Default:2
      Valid Values:[1,...]
      Importance:low
      +
    • +
    • +

      metrics.recording.level

      +

      The highest recording level for metrics.

      + + + + + +
      Type:string
      Default:INFO
      Valid Values:[INFO, DEBUG]
      Importance:low
      +
    • +
    • +

      metrics.sample.window.ms

      +

      The window of time a metrics sample is computed over.

      + + + + + +
      Type:long
      Default:30000 (30 seconds)
      Valid Values:[0,...]
      Importance:low
      +
    • +
    • +

      reconnect.backoff.max.ms

      +

      The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.

      + + + + + +
      Type:long
      Default:1000 (1 second)
      Valid Values:[0,...]
      Importance:low
      +
    • +
    • +

      reconnect.backoff.ms

      +

      The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker.

      + + + + + +
      Type:long
      Default:50
      Valid Values:[0,...]
      Importance:low
      +
    • +
    • +

      retry.backoff.ms

      +

      The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.

      + + + + + +
      Type:long
      Default:100
      Valid Values:[0,...]
      Importance:low
      +
    • +
    • +

      sasl.kerberos.kinit.cmd

      +

      Kerberos kinit command path.

      + + + + + +
      Type:string
      Default:/usr/bin/kinit
      Valid Values:
      Importance:low
      +
    • +
    • +

      sasl.kerberos.min.time.before.relogin

      +

      Login thread sleep time between refresh attempts.

      + + + + + +
      Type:long
      Default:60000
      Valid Values:
      Importance:low
      +
    • +
    • +

      sasl.kerberos.ticket.renew.jitter

      +

      Percentage of random jitter added to the renewal time.

      + + + + + +
      Type:double
      Default:0.05
      Valid Values:
      Importance:low
      +
    • +
    • +

      sasl.kerberos.ticket.renew.window.factor

      +

      Login thread will sleep until the specified window factor of time from last refresh to ticket's expiry has been reached, at which time it will try to renew the ticket.

      + + + + + +
      Type:double
      Default:0.8
      Valid Values:
      Importance:low
      +
    • +
    • +

      sasl.login.refresh.buffer.seconds

      +

      The amount of buffer time before credential expiration to maintain when refreshing a credential, in seconds. If a refresh would otherwise occur closer to expiration than the number of buffer seconds then the refresh will be moved up to maintain as much of the buffer time as possible. Legal values are between 0 and 3600 (1 hour); a default value of 300 (5 minutes) is used if no value is specified. This value and sasl.login.refresh.min.period.seconds are both ignored if their sum excee [...] + + + + + +
      Type:short
      Default:300
      Valid Values:[0,...,3600]
      Importance:low
      +

    • +
    • +

      sasl.login.refresh.min.period.seconds

      +

      The desired minimum time for the login refresh thread to wait before refreshing a credential, in seconds. Legal values are between 0 and 900 (15 minutes); a default value of 60 (1 minute) is used if no value is specified. This value and sasl.login.refresh.buffer.seconds are both ignored if their sum exceeds the remaining lifetime of a credential. Currently applies only to OAUTHBEARER.

      + + + + + +
      Type:short
      Default:60
      Valid Values:[0,...,900]
      Importance:low
      +
    • +
    • +

      sasl.login.refresh.window.factor

      +

      Login refresh thread will sleep until the specified window factor relative to the credential's lifetime has been reached, at which time it will try to refresh the credential. Legal values are between 0.5 (50%) and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no value is specified. Currently applies only to OAUTHBEARER.

      + + + + + +
      Type:double
      Default:0.8
      Valid Values:[0.5,...,1.0]
      Importance:low
      +
    • +
    • +

      sasl.login.refresh.window.jitter

      +

      The maximum amount of random jitter relative to the credential's lifetime that is added to the login refresh thread's sleep time. Legal values are between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used if no value is specified. Currently applies only to OAUTHBEARER.

      + + + + + +
      Type:double
      Default:0.05
      Valid Values:[0.0,...,0.25]
      Importance:low
      +
    • +
    • +

      security.providers

      +

      A list of configurable creator classes each returning a provider implementing security algorithms. These classes should implement the org.apache.kafka.common.security.auth.SecurityProviderCreator interface.

      + + + + + +
      Type:string
      Default:null
      Valid Values:
      Importance:low
      +
    • +
    • +

      ssl.cipher.suites

      +

      A list of cipher suites. This is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. By default all the available cipher suites are supported.

      + + + + + +
      Type:list
      Default:null
      Valid Values:
      Importance:low
      +
    • +
    • +

      ssl.endpoint.identification.algorithm

      +

      The endpoint identification algorithm to validate server hostname using server certificate.

      + + + + + +
      Type:string
      Default:https
      Valid Values:
      Importance:low
      +
    • +
    • +

      ssl.engine.factory.class

      +

      The class of type org.apache.kafka.common.security.auth.SslEngineFactory to provide SSLEngine objects. Default value is org.apache.kafka.common.security.ssl.DefaultSslEngineFactory

      + + + + + +
      Type:class
      Default:null
      Valid Values:
      Importance:low
      +
    • +
    • +

      ssl.keymanager.algorithm

      +

      The algorithm used by key manager factory for SSL connections. Default value is the key manager factory algorithm configured for the Java Virtual Machine.

      + + + + + +
      Type:string
      Default:SunX509
      Valid Values:
      Importance:low
      +
    • +
    • +

      ssl.secure.random.implementation

      +

      The SecureRandom PRNG implementation to use for SSL cryptography operations.

      + + + + + +
      Type:string
      Default:null
      Valid Values:
      Importance:low
      +
    • +
    • +

      ssl.trustmanager.algorithm

      +

      The algorithm used by trust manager factory for SSL connections. Default value is the trust manager factory algorithm configured for the Java Virtual Machine.

      + + + + + +
      Type:string
      Default:PKIX
      Valid Values:
      Importance:low
      +
    • +
    • +

      transaction.timeout.ms

      +

The maximum amount of time in ms that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. If this value is larger than the transaction.max.timeout.ms setting in the broker, the request will fail with an InvalidTxnTimeoutException error.

      + + + + + +
      Type:int
      Default:60000 (1 minute)
      Valid Values:
      Importance:low
      +
    • +
    • +

      transactional.id

      +

      The TransactionalId to use for transactional delivery. This enables reliability semantics which span multiple producer sessions since it allows the client to guarantee that transactions using the same TransactionalId have been completed prior to starting any new transactions. If no TransactionalId is provided, then the producer is limited to idempotent delivery. If a TransactionalId is configured, enable.idempotence is implied. By default the TransactionId is not configur [...] + + + + + +
      Type:string
      Default:null
      Valid Values:non-empty string
      Importance:low
      +
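A sketch of the resulting send loop, assuming the props object from the earlier sketches and a hypothetical transactional id; the error handling mirrors the KafkaProducer contract, where a fenced producer must close but other Kafka exceptions can abort and retry:

    import org.apache.kafka.common.KafkaException;
    import org.apache.kafka.common.errors.ProducerFencedException;

    props.put("transactional.id", "orders-app-1"); // hypothetical id; implies idempotence
    KafkaProducer<String, String> producer = new KafkaProducer<>(props);
    producer.initTransactions(); // fences older producers with the same transactional.id
    try {
        producer.beginTransaction();
        producer.send(new ProducerRecord<>("orders", "k", "v"));
        producer.commitTransaction();
    } catch (ProducerFencedException e) {
        producer.close(); // fatal: another producer took over this transactional.id
    } catch (KafkaException e) {
        producer.abortTransaction(); // recoverable: roll back and retry the work
    }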

    • +
+ diff --git a/26/generated/producer_metrics.html b/26/generated/producer_metrics.html new file mode 100644 index 0000000..a02defa --- /dev/null +++ b/26/generated/producer_metrics.html @@ -0,0 +1,78 @@
    kafka.producer:type=producer-metrics,client-id="{client-id}"
Attribute name | Description
batch-size-avg | The average number of bytes sent per partition per-request.
batch-size-max | The max number of bytes sent per partition per-request.
batch-split-rate | The average number of batch splits per second
batch-split-total | The total number of batch splits
compression-rate-avg | The average compression rate of record batches, defined as the average ratio of the compressed batch size over the uncompressed size.
metadata-age | The age in seconds of the current producer metadata being used.
produce-throttle-time-avg | The average time in ms a request was throttled by a broker
produce-throttle-time-max | The maximum time in ms a request was throttled by a broker
record-error-rate | The average per-second number of record sends that resulted in errors
record-error-total | The total number of record sends that resulted in errors
record-queue-time-avg | The average time in ms record batches spent in the send buffer.
record-queue-time-max | The maximum time in ms record batches spent in the send buffer.
record-retry-rate | The average per-second number of retried record sends
record-retry-total | The total number of retried record sends
record-send-rate | The average number of records sent per second.
record-send-total | The total number of records sent.
record-size-avg | The average record size
record-size-max | The maximum record size
records-per-request-avg | The average number of records per request.
request-latency-avg | The average request latency in ms
request-latency-max | The maximum request latency in ms
requests-in-flight | The current number of in-flight requests awaiting a response.
    kafka.producer:type=producer-topic-metrics,client-id="{client-id}",topic="{topic}"
Attribute name | Description
byte-rate | The average number of bytes sent per second for a topic.
byte-total | The total number of bytes sent for a topic.
compression-rate | The average compression rate of record batches for a topic, defined as the average ratio of the compressed batch size over the uncompressed size.
record-error-rate | The average per-second number of record sends that resulted in errors for a topic
record-error-total | The total number of record sends that resulted in errors for a topic
record-retry-rate | The average per-second number of retried record sends for a topic
record-retry-total | The total number of retried record sends for a topic
record-send-rate | The average number of records sent per second for a topic.
record-send-total | The total number of records sent for a topic.
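These MBeans are also visible programmatically through the client. A small sketch, reusing the producer from the earlier examples, that prints one of the attributes listed above:

    import java.util.Map;
    import org.apache.kafka.common.Metric;
    import org.apache.kafka.common.MetricName;

    Map<MetricName, ? extends Metric> all = producer.metrics();
    all.forEach((name, metric) -> {
        if ("producer-metrics".equals(name.group()) && "record-send-rate".equals(name.name()))
            System.out.println(name.name() + " = " + metric.metricValue());
    });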
diff --git a/26/generated/protocol_api_keys.html b/26/generated/protocol_api_keys.html new file mode 100644 index 0000000..fb3ed3e --- /dev/null +++ b/26/generated/protocol_api_keys.html @@ -0,0 +1,105 @@
Name | Key
Produce | 0
Fetch | 1
ListOffsets | 2
Metadata | 3
LeaderAndIsr | 4
StopReplica | 5
UpdateMetadata | 6
ControlledShutdown | 7
OffsetCommit | 8
OffsetFetch | 9
FindCoordinator | 10
JoinGroup | 11
Heartbeat | 12
LeaveGroup | 13
SyncGroup | 14
DescribeGroups | 15
ListGroups | 16
SaslHandshake | 17
ApiVersions | 18
CreateTopics | 19
DeleteTopics | 20
DeleteRecords | 21
InitProducerId | 22
OffsetForLeaderEpoch | 23
AddPartitionsToTxn | 24
AddOffsetsToTxn | 25
EndTxn | 26
WriteTxnMarkers | 27
TxnOffsetCommit | 28
DescribeAcls | 29
CreateAcls | 30
DeleteAcls | 31
DescribeConfigs | 32
AlterConfigs | 33
AlterReplicaLogDirs | 34
DescribeLogDirs | 35
SaslAuthenticate | 36
CreatePartitions | 37
CreateDelegationToken | 38
RenewDelegationToken | 39
ExpireDelegationToken | 40
DescribeDelegationToken | 41
DeleteGroups | 42
ElectLeaders | 43
IncrementalAlterConfigs | 44
AlterPartitionReassignments | 45
ListPartitionReassignments | 46
OffsetDelete | 47
DescribeClientQuotas | 48
AlterClientQuotas | 49
+ diff --git a/26/generated/protocol_errors.html b/26/generated/protocol_errors.html new file mode 100644 index 0000000..00c9a2b --- /dev/null +++ b/26/generated/protocol_errors.html @@ -0,0 +1,98 @@
Error | Code | Retriable | Description
UNKNOWN_SERVER_ERROR | -1 | False | The server experienced an unexpected error when processing the request.
NONE | 0 | False |
OFFSET_OUT_OF_RANGE | 1 | False | The requested offset is not within the range of offsets maintained by the server.
CORRUPT_MESSAGE | 2 | True | This message has failed its CRC checksum, exceeds the valid size, has a null key for a compacted topic, or is otherwise corrupt.
UNKNOWN_TOPIC_OR_PARTITION | 3 | True | This server does not host this topic-partition.
INVALID_FETCH_SIZE | 4 | False | The requested fetch size is invalid.
LEADER_NOT_AVAILABLE | 5 | True | There is no leader for this topic-partition as we are in the middle of a leadership election.
NOT_LEADER_FOR_PARTITION | 6 | True | This server is not the leader for that topic-partition.
REQUEST_TIMED_OUT | 7 | True | The request timed out.
BROKER_NOT_AVAILABLE | 8 | False | The broker is not available.
REPLICA_NOT_AVAILABLE | 9 | False | The replica is not available for the requested topic-partition.
MESSAGE_TOO_LARGE | 10 | False | The request included a message larger than the max message size the server will accept.
STALE_CONTROLLER_EPOCH | 11 | False | The controller moved to another broker.
OFFSET_METADATA_TOO_LARGE | 12 | False | The metadata field of the offset request was too large.
NETWORK_EXCEPTION | 13 | True | The server disconnected before a response was received.
COORDINATOR_LOAD_IN_PROGRESS | 14 | True | The coordinator is loading and hence can't process requests.
COORDINATOR_NOT_AVAILABLE | 15 | True | The coordinator is not available.
NOT_COORDINATOR | 16 | True | This is not the correct coordinator.
INVALID_TOPIC_EXCEPTION | 17 | False | The request attempted to perform an operation on an invalid topic.
RECORD_LIST_TOO_LARGE | 18 | False | The request included message batch larger than the configured segment size on the server.
NOT_ENOUGH_REPLICAS | 19 | True | Messages are rejected since there are fewer in-sync replicas than required.
NOT_ENOUGH_REPLICAS_AFTER_APPEND | 20 | True | Messages are written to the log, but to fewer in-sync replicas than required.
INVALID_REQUIRED_ACKS | 21 | False | Produce request specified an invalid value for required acks.
ILLEGAL_GENERATION | 22 | False | Specified group generation id is not valid.
INCONSISTENT_GROUP_PROTOCOL | 23 | False | The group member's supported protocols are incompatible with those of existing members or first group member tried to join with empty protocol type or empty protocol list.
INVALID_GROUP_ID | 24 | False | The configured groupId is invalid.
UNKNOWN_MEMBER_ID | 25 | False | The coordinator is not aware of this member.
INVALID_SESSION_TIMEOUT | 26 | False | The session timeout is not within the range allowed by the broker (as configured by group.min.session.timeout.ms and group.max.session.timeout.ms).
REBALANCE_IN_PROGRESS | 27 | False | The group is rebalancing, so a rejoin is needed.
INVALID_COMMIT_OFFSET_SIZE | 28 | False | The committing offset data size is not valid.
TOPIC_AUTHORIZATION_FAILED | 29 | False | Topic authorization failed.
GROUP_AUTHORIZATION_FAILED | 30 | False | Group authorization failed.
CLUSTER_AUTHORIZATION_FAILED | 31 | False | Cluster authorization failed.
INVALID_TIMESTAMP | 32 | False | The timestamp of the message is out of acceptable range.
UNSUPPORTED_SASL_MECHANISM | 33 | False | The broker does not support the requested SASL mechanism.
ILLEGAL_SASL_STATE | 34 | False | Request is not valid given the current SASL state.
UNSUPPORTED_VERSION | 35 | False | The version of API is not supported.
TOPIC_ALREADY_EXISTS | 36 | False | Topic with this name already exists.
INVALID_PARTITIONS | 37 | False | Number of partitions is below 1.
INVALID_REPLICATION_FACTOR | 38 | False | Replication factor is below 1 or larger than the number of available brokers.
INVALID_REPLICA_ASSIGNMENT | 39 | False | Replica assignment is invalid.
INVALID_CONFIG | 40 | False | Configuration is invalid.
NOT_CONTROLLER | 41 | True | This is not the correct controller for this cluster.
INVALID_REQUEST | 42 | False | This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details.
UNSUPPORTED_FOR_MESSAGE_FORMAT | 43 | False | The message format version on the broker does not support the request.
POLICY_VIOLATION | 44 | False | Request parameters do not satisfy the configured policy.
OUT_OF_ORDER_SEQUENCE_NUMBER | 45 | False | The broker received an out of order sequence number.
DUPLICATE_SEQUENCE_NUMBER | 46 | False | The broker received a duplicate sequence number.
INVALID_PRODUCER_EPOCH | 47 | False | Producer attempted an operation with an old epoch. Either there is a newer producer with the same transactionalId, or the producer's transaction has been expired by the broker.
INVALID_TXN_STATE | 48 | False | The producer attempted a transactional operation in an invalid state.
INVALID_PRODUCER_ID_MAPPING | 49 | False | The producer attempted to use a producer id which is not currently assigned to its transactional id.
INVALID_TRANSACTION_TIMEOUT | 50 | False | The transaction timeout is larger than the maximum value allowed by the broker (as configured by transaction.max.timeout.ms).
CONCURRENT_TRANSACTIONS | 51 | False | The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing.
TRANSACTION_COORDINATOR_FENCED | 52 | False | Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer.
TRANSACTIONAL_ID_AUTHORIZATION_FAILED | 53 | False | Transactional Id authorization failed.
SECURITY_DISABLED | 54 | False | Security features are disabled.
OPERATION_NOT_ATTEMPTED | 55 | False | The broker did not attempt to execute this operation. This may happen for batched RPCs where some operations in the batch failed, causing the broker to respond without trying the rest.
KAFKA_STORAGE_ERROR | 56 | True | Disk error when trying to access log file on the disk.
LOG_DIR_NOT_FOUND | 57 | False | The user-specified log directory is not found in the broker config.
SASL_AUTHENTICATION_FAILED | 58 | False | SASL Authentication failed.
UNKNOWN_PRODUCER_ID | 59 | False | This exception is raised by the broker if it could not locate the producer metadata associated with the producerId in question. This could happen if, for instance, the producer's records were deleted because their retention time had elapsed. Once the last records of the producerId are removed, the producer's metadata is removed from the broker, and future appends by the producer will return this exception.
REASSIGNMENT_IN_PROGRESS | 60 | False | A partition reassignment is in progress.
DELEGATION_TOKEN_AUTH_DISABLED | 61 | False | Delegation Token feature is not enabled.
DELEGATION_TOKEN_NOT_FOUND | 62 | False | Delegation Token is not found on server.
DELEGATION_TOKEN_OWNER_MISMATCH | 63 | False | Specified Principal is not valid Owner/Renewer.
DELEGATION_TOKEN_REQUEST_NOT_ALLOWED | 64 | False | Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels.
DELEGATION_TOKEN_AUTHORIZATION_FAILED | 65 | False | Delegation Token authorization failed.
DELEGATION_TOKEN_EXPIRED | 66 | False | Delegation Token is expired.
INVALID_PRINCIPAL_TYPE | 67 | False | Supplied principalType is not supported.
NON_EMPTY_GROUP | 68 | False | The group is not empty.
GROUP_ID_NOT_FOUND | 69 | False | The group id does not exist.
FETCH_SESSION_ID_NOT_FOUND | 70 | True | The fetch session ID was not found.
INVALID_FETCH_SESSION_EPOCH | 71 | True | The fetch session epoch is invalid.
LISTENER_NOT_FOUND | 72 | True | There is no listener on the leader broker that matches the listener on which metadata request was processed.
TOPIC_DELETION_DISABLED | 73 | False | Topic deletion is disabled.
FENCED_LEADER_EPOCH | 74 | True | The leader epoch in the request is older than the epoch on the broker.
UNKNOWN_LEADER_EPOCH | 75 | True | The leader epoch in the request is newer than the epoch on the broker.
UNSUPPORTED_COMPRESSION_TYPE | 76 | False | The requesting client does not support the compression type of given partition.
STALE_BROKER_EPOCH | 77 | False | Broker epoch has changed.
OFFSET_NOT_AVAILABLE | 78 | True | The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing.
MEMBER_ID_REQUIRED | 79 | False | The group member needs to have a valid member id before actually entering a consumer group.
PREFERRED_LEADER_NOT_AVAILABLE | 80 | True | The preferred leader was not available.
GROUP_MAX_SIZE_REACHED | 81 | False | The consumer group has reached its max size.
FENCED_INSTANCE_ID | 82 | False | The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id.
ELIGIBLE_LEADERS_NOT_AVAILABLE | 83 | True | Eligible topic partition leaders are not available.
ELECTION_NOT_NEEDED | 84 | True | Leader election not needed for topic partition.
NO_REASSIGNMENT_IN_PROGRESS | 85 | False | No partition reassignment is in progress.
GROUP_SUBSCRIBED_TO_TOPIC | 86 | False | Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it.
INVALID_RECORD | 87 | False | This record has failed the validation on broker and hence will be rejected.
UNSTABLE_OFFSET_COMMIT | 88 | True | There are unstable offsets that need to be cleared.
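The Retriable column matters in client code: retriable errors are usually retried automatically by the clients (within delivery.timeout.ms for the producer), so a send callback mostly needs to distinguish the two classes of failure. A sketch, reusing the producer and record from the earlier examples:

    import org.apache.kafka.common.errors.RetriableException;

    producer.send(record, (metadata, exception) -> {
        if (exception == null) {
            // delivered
        } else if (exception instanceof RetriableException) {
            // e.g. NOT_LEADER_FOR_PARTITION: automatic retries were already
            // exhausted by the time the callback fires
        } else {
            // e.g. MESSAGE_TOO_LARGE: retrying the same record cannot succeed
        }
    });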
    + diff --git a/26/generated/protocol_messages.html b/26/generated/protocol_messages.html new file mode 100644 index 0000000..a0db4f1 --- /dev/null +++ b/26/generated/protocol_messages.html @@ -0,0 +1,11443 @@ +
    Headers:
    +
    Request Header v0 => request_api_key request_api_version correlation_id 
    +  request_api_key => INT16
    +  request_api_version => INT16
    +  correlation_id => INT32
    +
Field | Description
request_api_key | The API key of this request.
request_api_version | The API version of this request.
correlation_id | The correlation ID of this request.
    +
    Request Header v1 => request_api_key request_api_version correlation_id client_id 
    +  request_api_key => INT16
    +  request_api_version => INT16
    +  correlation_id => INT32
    +  client_id => NULLABLE_STRING
    +
Field | Description
request_api_key | The API key of this request.
request_api_version | The API version of this request.
correlation_id | The correlation ID of this request.
client_id | The client ID string.
    +
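The primitive types above have fixed encodings: all integers are big-endian, and a NULLABLE_STRING is an INT16 length (-1 for null) followed by that many UTF-8 bytes. A sketch of encoding a v1 request header by hand, using only that much of the spec:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    static ByteBuffer encodeRequestHeaderV1(short apiKey, short apiVersion,
                                            int correlationId, String clientId) {
        byte[] id = clientId == null ? new byte[0] : clientId.getBytes(StandardCharsets.UTF_8);
        ByteBuffer buf = ByteBuffer.allocate(2 + 2 + 4 + 2 + id.length); // big-endian by default
        buf.putShort(apiKey);       // request_api_key => INT16
        buf.putShort(apiVersion);   // request_api_version => INT16
        buf.putInt(correlationId);  // correlation_id => INT32
        buf.putShort((short) (clientId == null ? -1 : id.length)); // NULLABLE_STRING length
        buf.put(id);                // UTF-8 bytes (absent when null)
        buf.flip();
        return buf;
    }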
    Request Header v2 => request_api_key request_api_version correlation_id client_id TAG_BUFFER 
    +  request_api_key => INT16
    +  request_api_version => INT16
    +  correlation_id => INT32
    +  client_id => NULLABLE_STRING
    +
Field | Description
request_api_key | The API key of this request.
request_api_version | The API version of this request.
correlation_id | The correlation ID of this request.
client_id | The client ID string.
_tagged_fields | The tagged fields
    +
    Response Header v0 => correlation_id 
    +  correlation_id => INT32
    +
Field | Description
correlation_id | The correlation ID of this response.
    +
    Response Header v1 => correlation_id TAG_BUFFER 
    +  correlation_id => INT32
    +
Field | Description
correlation_id | The correlation ID of this response.
_tagged_fields | The tagged fields
    +
    Produce API (Key: 0):
+ Requests:
    +

    Produce Request (Version: 0) => acks timeout [topic_data] 
    +  acks => INT16
    +  timeout => INT32
    +  topic_data => topic [data] 
    +    topic => STRING
    +    data => partition record_set 
    +      partition => INT32
    +      record_set => RECORDS
    +
Field | Description
acks | The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout | The time to await a response in ms.
topic_data | null
topic | Name of topic
data | null
partition | Topic partition id
record_set | null
    +

    +

    Produce Request (Version: 1) => acks timeout [topic_data] 
    +  acks => INT16
    +  timeout => INT32
    +  topic_data => topic [data] 
    +    topic => STRING
    +    data => partition record_set 
    +      partition => INT32
    +      record_set => RECORDS
    +
Field | Description
acks | The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout | The time to await a response in ms.
topic_data | null
topic | Name of topic
data | null
partition | Topic partition id
record_set | null
    +

    +

    Produce Request (Version: 2) => acks timeout [topic_data] 
    +  acks => INT16
    +  timeout => INT32
    +  topic_data => topic [data] 
    +    topic => STRING
    +    data => partition record_set 
    +      partition => INT32
    +      record_set => RECORDS
    +
Field | Description
acks | The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout | The time to await a response in ms.
topic_data | null
topic | Name of topic
data | null
partition | Topic partition id
record_set | null
    +

    +

    Produce Request (Version: 3) => transactional_id acks timeout [topic_data] 
    +  transactional_id => NULLABLE_STRING
    +  acks => INT16
    +  timeout => INT32
    +  topic_data => topic [data] 
    +    topic => STRING
    +    data => partition record_set 
    +      partition => INT32
    +      record_set => RECORDS
    +
Field | Description
transactional_id | The transactional id or null if the producer is not transactional
acks | The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout | The time to await a response in ms.
topic_data | null
topic | Name of topic
data | null
partition | Topic partition id
record_set | null
    +

    +

    Produce Request (Version: 4) => transactional_id acks timeout [topic_data] 
    +  transactional_id => NULLABLE_STRING
    +  acks => INT16
    +  timeout => INT32
    +  topic_data => topic [data] 
    +    topic => STRING
    +    data => partition record_set 
    +      partition => INT32
    +      record_set => RECORDS
    +
Field | Description
transactional_id | The transactional id or null if the producer is not transactional
acks | The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout | The time to await a response in ms.
topic_data | null
topic | Name of topic
data | null
partition | Topic partition id
record_set | null
    +

    +

    Produce Request (Version: 5) => transactional_id acks timeout [topic_data] 
    +  transactional_id => NULLABLE_STRING
    +  acks => INT16
    +  timeout => INT32
    +  topic_data => topic [data] 
    +    topic => STRING
    +    data => partition record_set 
    +      partition => INT32
    +      record_set => RECORDS
    +
Field | Description
transactional_id | The transactional id or null if the producer is not transactional
acks | The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout | The time to await a response in ms.
topic_data | null
topic | Name of topic
data | null
partition | Topic partition id
record_set | null
    +

    +

    Produce Request (Version: 6) => transactional_id acks timeout [topic_data] 
    +  transactional_id => NULLABLE_STRING
    +  acks => INT16
    +  timeout => INT32
    +  topic_data => topic [data] 
    +    topic => STRING
    +    data => partition record_set 
    +      partition => INT32
    +      record_set => RECORDS
    +
Field | Description
transactional_id | The transactional id or null if the producer is not transactional
acks | The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout | The time to await a response in ms.
topic_data | null
topic | Name of topic
data | null
partition | Topic partition id
record_set | null
    +

    +

    Produce Request (Version: 7) => transactional_id acks timeout [topic_data] 
    +  transactional_id => NULLABLE_STRING
    +  acks => INT16
    +  timeout => INT32
    +  topic_data => topic [data] 
    +    topic => STRING
    +    data => partition record_set 
    +      partition => INT32
    +      record_set => RECORDS
    +
Field | Description
transactional_id | The transactional id or null if the producer is not transactional
acks | The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout | The time to await a response in ms.
topic_data | null
topic | Name of topic
data | null
partition | Topic partition id
record_set | null
    +

    +

    Produce Request (Version: 8) => transactional_id acks timeout [topic_data] 
    +  transactional_id => NULLABLE_STRING
    +  acks => INT16
    +  timeout => INT32
    +  topic_data => topic [data] 
    +    topic => STRING
    +    data => partition record_set 
    +      partition => INT32
    +      record_set => RECORDS
    +
Field | Description
transactional_id | The transactional id or null if the producer is not transactional
acks | The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR.
timeout | The time to await a response in ms.
topic_data | null
topic | Name of topic
data | null
partition | Topic partition id
record_set | null
    +
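    Every Produce Request version shown above carries the same four top-level fields, and each one maps onto a Java producer setting: acks and transactional_id come from producer configuration, and the wire timeout field is taken from request.timeout.ms. A minimal sketch of a transactional producer exercising these fields (the broker address, topic name, and transactional id are placeholders):

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    public class ProduceRequestFields {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            // Wire-protocol `acks` field: "all" (-1) waits for the full ISR.
            props.put(ProducerConfig.ACKS_CONFIG, "all");
            // Wire-protocol `timeout` field: how long the broker may wait for the acks.
            props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000");
            // Wire-protocol `transactional_id` field (sent as null when this config is unset).
            props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "example-txn-id"); // placeholder id

            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                producer.initTransactions();
                producer.beginTransaction();
                producer.send(new ProducerRecord<>("my-topic", "key", "value")); // placeholder topic
                producer.commitTransaction();
            }
        }
    }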

    +Responses:
    +

    Produce Response (Version: 0) => [responses] 
    +  responses => topic [partition_responses] 
    +    topic => STRING
    +    partition_responses => partition error_code base_offset 
    +      partition => INT32
    +      error_code => INT16
    +      base_offset => INT64
    +
    +  Field                Description
    +  responses            null
    +  topic                Name of topic
    +  partition_responses  null
    +  partition            Topic partition id
    +  error_code           Response error code
    +  base_offset          null
    +
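    In the Java client, the per-partition base_offset from the response surfaces through RecordMetadata: the offset reported for each record is derived from the batch's base_offset plus the record's position within the batch, while a non-zero error_code arrives as an exception. A sketch of a send callback, assuming an already-configured KafkaProducer named producer and a placeholder topic:

    import org.apache.kafka.clients.producer.ProducerRecord;

    // `producer` is an already-configured KafkaProducer<String, String>.
    producer.send(new ProducerRecord<>("my-topic", "key", "value"), (metadata, exception) -> {
        if (exception != null) {
            // A failed response `error_code` is surfaced here as an exception.
            exception.printStackTrace();
        } else {
            // offset() = the response's base_offset plus this record's
            // position within its batch.
            System.out.printf("partition=%d offset=%d%n", metadata.partition(), metadata.offset());
        }
    });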

    +

    Produce Response (Version: 1) => [responses] throttle_time_ms 
    +  responses => topic [partition_responses] 
    +    topic => STRING
    +    partition_responses => partition error_code base_offset 
    +      partition => INT32
    +      error_code => INT16
    +      base_offset => INT64
    +  throttle_time_ms => INT32
    +
    +  Field                Description
    +  responses            null
    +  topic                Name of topic
    +  partition_responses  null
    +  partition            Topic partition id
    +  error_code           Response error code
    +  base_offset          null
    +  throttle_time_ms     Duration in milliseconds for which the request was throttled due to quota violation (zero if the request did not violate any quota).
    +
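    Clients rarely read throttle_time_ms off the wire directly; the Java producer aggregates it into its metrics, so quota throttling can be observed from the application. A sketch, again assuming a configured producer (the metric names used here, produce-throttle-time-avg and produce-throttle-time-max, come from the producer's metrics group):

    import java.util.Map;
    import org.apache.kafka.common.Metric;
    import org.apache.kafka.common.MetricName;

    // Inspect how long requests have been throttled due to quota violations.
    for (Map.Entry<MetricName, ? extends Metric> entry : producer.metrics().entrySet()) {
        String name = entry.getKey().name();
        if (name.equals("produce-throttle-time-avg") || name.equals("produce-throttle-time-max")) {
            System.out.printf("%s = %s%n", name, entry.getValue().metricValue());
        }
    }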

    +

    Produce Response (Version: 2) => [responses] throttle_time_ms 
    +  responses => topic [partition_responses] 
    +    topic => STRING
    +    partition_responses => partition error_code base_offset log_append_time 
    +      partition => INT32
    +      error_code => INT16
    +      base_offset => INT64
    +      log_append_time => INT64
    +  throttle_time_ms => INT32
    +
    +  Field                Description
    +  responses            null
    +  topic                Name of topic
    +  partition_responses  null
    +  partition            Topic partition id
    +  error_code           Response error code
    +  base_offset          null
    +  log_append_time      The timestamp returned by the broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker's local time when the messages are appended.
    +  throttle_time_ms     Duration in milliseconds for which the request was throttled due to quota violation (zero if the request did not violate any quota).
    +
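    Which of the two timestamp behaviors applies is controlled by the topic's message.timestamp.type setting, and the resulting timestamp is visible to the producer through RecordMetadata.timestamp(). A sketch, assuming a configured producer and a placeholder topic configured with message.timestamp.type=LogAppendTime (exception handling omitted):

    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.clients.producer.RecordMetadata;

    RecordMetadata metadata =
        producer.send(new ProducerRecord<>("my-topic", "key", "value")).get(); // placeholder topic

    // With message.timestamp.type=LogAppendTime this is the broker-assigned
    // append time (the wire field log_append_time); with CreateTime it is the
    // producer-supplied timestamp and log_append_time is returned as -1.
    System.out.println("timestamp = " + metadata.timestamp());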

    +

    Produce Response (Version: 3) => [responses] throttle_time_ms 
    +  responses => topic [partition_responses] 
    +    topic => STRING
    +    partition_responses => partition error_code base_offset log_append_time 
    +      partition => INT32
    +      error_code => INT16
    +      base_offset => INT64
    +      log_append_time => INT64
    +  throttle_time_ms => INT32
    +
    +  Field                Description
    +  responses            null
    +  topic                Name of topic
    +  partition_responses  null
    +  partition            Topic partition id
    +  error_code           Response error code
    +  base_offset          null
    +  log_append_time      The timestamp returned by the broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker's local time when the messages are appended.
    +  throttle_time_ms     Duration in milliseconds for which the request was throttled due to quota violation (zero if the request did not violate any quota).
    +

    +

    Produce Response (Version: 4) => [responses] throttle_time_ms 
    +  responses => topic [partition_responses] 
    +    topic => STRING
    +    partition_responses => partition error_code base_offset log_append_time 
    +      partition => INT32
    +      error_code => INT16
    +      base_offset => INT64
    +      log_append_time => INT64
    +  throttle_time_ms => INT32
    +
    +  Field                Description
    +  responses            null
    +  topic                Name of topic
    +  partition_responses  null
    +  partition            Topic partition id
    +  error_code           Response error code
    +  base_offset          null
    +  log_append_time      The timestamp returned by the broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker's local time when the messages are appended.
    +  throttle_time_ms     Duration in milliseconds for which the request was throttled due to quota violation (zero if the request did not violate any quota).
    +

    +

    Produce Response (Version: 5) => [responses] throttle_time_ms 
    +  responses => topic [partition_responses] 
    +    topic => STRING
    +    partition_responses => partition error_code base_offset log_append_time log_start_offset 
    +      partition => INT32
    +      error_code => INT16
    +      base_offset => INT64
    +      log_append_time => INT64
    +      log_start_offset => INT64
    +  throttle_time_ms => INT32
    +
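    Version 5 is the first response version to report log_start_offset, the earliest offset still present in the partition's log, which can help a producer notice when records it wrote have since been removed by retention or truncation. The same value can also be queried out of band with the Admin client's listOffsets, where OffsetSpec.earliest() resolves to the log start offset; a sketch (broker address and topic are placeholders):

    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.ListOffsetsResult;
    import org.apache.kafka.clients.admin.OffsetSpec;
    import org.apache.kafka.common.TopicPartition;

    public class LogStartOffsetLookup {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
            try (Admin admin = Admin.create(props)) {
                TopicPartition tp = new TopicPartition("my-topic", 0); // placeholder topic
                // OffsetSpec.earliest() resolves to the partition's log start offset.
                ListOffsetsResult result =
                    admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.earliest()));
                long logStartOffset = result.partitionResult(tp).get().offset();
                System.out.println("log start offset = " + logStartOffset);
            }
        }
    }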
    ... 327417 lines suppressed ...

    +  Field                Description
    +  responses            null
    +  topic                Name of topic
    +  partition_responses  null
    +  partition            Topic partition id