Class: Kafka::TopicProducer
- Inherits: Object
- Defined in: lib/fluent/plugin/kafka_producer_ext.rb
Instance Method Summary
- #abort_transaction ⇒ Object
- #assign_partitions! ⇒ Object
- #begin_transaction ⇒ Object
- #buffer_bytesize ⇒ Object
- #buffer_messages ⇒ Object
- #buffer_size ⇒ Integer
  Returns the number of messages currently held in the buffer.
- #clear_buffer ⇒ nil
  Deletes all buffered messages.
- #commit_transaction ⇒ Object
- #deliver_messages ⇒ Object
- #deliver_messages_with_retries ⇒ Object
- #init_transactions ⇒ Object
- #initialize(topic, cluster:, transaction_manager:, logger:, instrumenter:, compressor:, ack_timeout:, required_acks:, max_retries:, retry_backoff:, max_buffer_size:, max_buffer_bytesize:) ⇒ TopicProducer (constructor)
  A new instance of TopicProducer.
- #produce(value, key: nil, partition: nil, partition_key: nil) ⇒ Object
- #shutdown ⇒ nil
  Closes all connections to the brokers.
- #transaction ⇒ Object
Constructor Details
#initialize(topic, cluster:, transaction_manager:, logger:, instrumenter:, compressor:, ack_timeout:, required_acks:, max_retries:, retry_backoff:, max_buffer_size:, max_buffer_bytesize:) ⇒ TopicProducer
Returns a new instance of TopicProducer.
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 79

def initialize(topic, cluster:, transaction_manager:, logger:, instrumenter:, compressor:, ack_timeout:, required_acks:, max_retries:, retry_backoff:, max_buffer_size:, max_buffer_bytesize:)
  @cluster = cluster
  @transaction_manager = transaction_manager
  @logger = logger
  @instrumenter = instrumenter
  @required_acks = required_acks == :all ? -1 : required_acks
  @ack_timeout = ack_timeout
  @max_retries = max_retries
  @retry_backoff = retry_backoff
  @max_buffer_size = max_buffer_size
  @max_buffer_bytesize = max_buffer_bytesize
  @compressor = compressor

  @topic = topic
  @cluster.add_target_topics(Set.new([topic]))

  # A buffer organized by topic/partition.
  @buffer = MessageBuffer.new

  # Messages added by `#produce` but not yet assigned a partition.
  @pending_message_queue = PendingMessageQueue.new
end
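For orientation, a hedged construction sketch: in fluent-plugin-kafka this class is normally obtained through the Kafka::Client#topic_producer helper defined in the same file, which wires up the cluster, transaction manager, and compressor for you. The broker address, client id, topic, and option values below are placeholders.

require "kafka"
require "fluent/plugin/kafka_producer_ext"

# Placeholder broker and topic; topic_producer forwards its options to
# TopicProducer.new along with the client's internal collaborators.
kafka = Kafka.new(["localhost:9092"], client_id: "fluentd")
producer = kafka.topic_producer("my-topic",
                                required_acks: :all,
                                max_retries: 2,
                                retry_backoff: 1)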
Instance Method Details
#abort_transaction ⇒ Object
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 172

def abort_transaction
  @transaction_manager.abort_transaction
end
#assign_partitions! ⇒ Object
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 248

def assign_partitions!
  failed_messages = []
  partition_count = @cluster.partitions_for(@topic).count

  @pending_message_queue.each do |message|
    partition = message.partition

    begin
      if partition.nil?
        partition = Partitioner.partition_for_key(partition_count, message)
      end

      @buffer.write(
        value: message.value,
        key: message.key,
        headers: message.headers,
        topic: message.topic,
        partition: partition,
        create_time: message.create_time,
      )
    rescue Kafka::Error => e
      failed_messages << message
    end
  end

  if failed_messages.any?
    failed_messages.group_by(&:topic).each do |topic, messages|
      @logger.error "Failed to assign partitions to #{messages.count} messages in #{topic}"
    end

    @cluster.mark_as_stale!
  end

  @pending_message_queue.replace(failed_messages)
end
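To make the key-to-partition mapping concrete, here is a standalone sketch of the default scheme ruby-kafka's Partitioner uses: CRC32 of the key modulo the partition count, with a random partition for keyless messages. Treat the exact algorithm as an assumption about the installed ruby-kafka version; this is not the plugin's own code.

require "zlib"

# A minimal sketch of default key-based partition assignment.
def partition_for_key(partition_count, key)
  key.nil? ? rand(partition_count) : Zlib.crc32(key) % partition_count
end

partition_for_key(6, "user-42") # => the same partition on every call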
#begin_transaction ⇒ Object
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 164

def begin_transaction
  @transaction_manager.begin_transaction
end
#buffer_bytesize ⇒ Object
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 140

def buffer_bytesize
  @pending_message_queue.bytesize + @buffer.bytesize
end
#buffer_messages ⇒ Object
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 284

def buffer_messages
  messages = []

  @pending_message_queue.each do |message|
    messages << message
  end

  @buffer.each do |topic, partition, messages_for_partition|
    messages_for_partition.each do |message|
      messages << PendingMessage.new(
        value: message.value,
        key: message.key,
        headers: message.headers,
        topic: topic,
        partition: partition,
        partition_key: nil,
        create_time: message.create_time
      )
    end
  end

  messages
end
#buffer_size ⇒ Integer
Returns the number of messages currently held in the buffer.
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 136

def buffer_size
  @pending_message_queue.size + @buffer.size
end
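Since buffer_size and buffer_bytesize both cover the pending queue plus the partition buffer, a caller can use them for a simple size-based flush policy. A hedged sketch; the thresholds are placeholders, not values the class enforces.

# Hypothetical flush policy: flush once 1,000 messages or ~1 MiB accumulate.
if producer.buffer_size >= 1_000 || producer.buffer_bytesize >= 1_048_576
  producer.deliver_messages
end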
#clear_buffer ⇒ nil
Deletes all buffered messages.
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 147

def clear_buffer
  @buffer.clear
  @pending_message_queue.clear
end
#commit_transaction ⇒ Object
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 168

def commit_transaction
  @transaction_manager.commit_transaction
end
#deliver_messages ⇒ Object
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 126

def deliver_messages
  # There's no need to do anything if the buffer is empty.
  return if buffer_size == 0

  deliver_messages_with_retries
end
#deliver_messages_with_retries ⇒ Object
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 188

def deliver_messages_with_retries
  attempt = 0

  #@cluster.add_target_topics(@target_topics)

  operation = ProduceOperation.new(
    cluster: @cluster,
    transaction_manager: @transaction_manager,
    buffer: @buffer,
    required_acks: @required_acks,
    ack_timeout: @ack_timeout,
    compressor: @compressor,
    logger: @logger,
    instrumenter: @instrumenter,
  )

  loop do
    attempt += 1

    begin
      @cluster.refresh_metadata_if_necessary!
    rescue ConnectionError => e
      raise DeliveryFailed.new(e, buffer_messages)
    end

    assign_partitions!
    operation.execute

    if @required_acks.zero?
      # No response is returned by the brokers, so we can't know which messages
      # have been successfully written. Our only option is to assume that they all
      # have.
      @buffer.clear
    end

    if buffer_size.zero?
      break
    elsif attempt <= @max_retries
      @logger.warn "Failed to send all messages; attempting retry #{attempt} of #{@max_retries} after #{@retry_backoff}s"

      sleep @retry_backoff
    else
      @logger.error "Failed to send all messages; keeping remaining messages in buffer"
      break
    end
  end

  unless @pending_message_queue.empty?
    # Mark the cluster as stale in order to force a cluster metadata refresh.
    @cluster.mark_as_stale!
    raise DeliveryFailed.new("Failed to assign partitions to #{@pending_message_queue.size} messages", buffer_messages)
  end

  unless @buffer.empty?
    partitions = @buffer.map {|topic, partition, _| "#{topic}/#{partition}" }.join(", ")

    raise DeliveryFailed.new("Failed to send messages to #{partitions}", buffer_messages)
  end
end
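Callers normally reach this method through #deliver_messages; once retries are exhausted it raises Kafka::DeliveryFailed carrying the snapshot built by #buffer_messages. A hedged caller-side sketch, assuming ruby-kafka's DeliveryFailed#failed_messages accessor; the logger is a placeholder.

begin
  producer.deliver_messages
rescue Kafka::DeliveryFailed => e
  # The undelivered messages remain buffered; the caller decides their fate.
  logger.error "could not deliver #{e.failed_messages.size} messages"
  producer.clear_buffer # or keep them and retry later
end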
#init_transactions ⇒ Object
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 160

def init_transactions
  @transaction_manager.init_transactions
end
#produce(value, key: nil, partition: nil, partition_key: nil) ⇒ Object
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 102

def produce(value, key: nil, partition: nil, partition_key: nil)
  create_time = Time.now

  message = PendingMessage.new(
    value: value,
    key: key,
    headers: EMPTY_HEADER,
    topic: @topic,
    partition: partition,
    partition_key: partition_key,
    create_time: create_time
  )

  # If the producer is in transactional mode, messages may only be
  # produced while a transaction is in progress.
  if @transaction_manager.transactional? && !@transaction_manager.in_transaction?
    raise 'You must trigger begin_transaction before producing messages'
  end

  @pending_message_queue.write(message)

  nil
end
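A minimal produce-and-flush sketch, assuming producer was built as in the constructor example above; the payloads and keys are placeholders. Note the topic is fixed at construction, so no topic argument is passed here.

producer.produce('{"event":"login"}', key: "user-42")
producer.produce('{"event":"logout"}', partition_key: "user-42")

# Nothing is sent to the brokers until the buffer is flushed.
producer.deliver_messages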
#shutdown ⇒ nil
Closes all connections to the brokers.
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 155

def shutdown
  @transaction_manager.close
  @cluster.disconnect
end
#transaction ⇒ Object
# File 'lib/fluent/plugin/kafka_producer_ext.rb', line 176

def transaction
  raise 'This method requires a block' unless block_given?
  begin_transaction
  yield
  commit_transaction
rescue Kafka::Producer::AbortTransaction
  abort_transaction
rescue
  abort_transaction
  raise
end
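A hedged end-to-end transactional sketch. It assumes the producer was created with transactional options (e.g. transactional: true and a transactional_id, names taken from ruby-kafka) so that init_transactions succeeds.

producer.init_transactions

producer.transaction do
  producer.produce('{"event":"purchase"}', key: "order-7")
  producer.deliver_messages

  # Raising Kafka::Producer::AbortTransaction rolls the transaction back
  # without re-raising; any other error aborts and then propagates.
end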