Class: Fluent::DatadogOutput

Inherits:
Plugin::Output
Defined in:
lib/fluent/plugin/out_datadog.rb

Defined Under Namespace

Classes: DatadogClient, DatadogHTTPClient, DatadogTCPClient, RetryableError

Constant Summary

# Max limits for the transport, regardless of the Fluentd buffer, per
# https://docs.datadoghq.com/api/?lang=bash#logs
DD_MAX_BATCH_LENGTH = 500
DD_MAX_BATCH_SIZE = 5000000

DD_TRUNCATION_SUFFIX = "...TRUNCATED..."
DD_DEFAULT_HTTP_ENDPOINT = "http-intake.logs.datadoghq.com"
DD_DEFAULT_TCP_ENDPOINT = "intake.logs.datadoghq.com"
DEFAULT_BUFFER_TYPE = "memory"

Instance Method Summary

Constructor Details

#initialize ⇒ DatadogOutput

Returns a new instance of DatadogOutput.



# File 'lib/fluent/plugin/out_datadog.rb', line 73

def initialize
  super
end

Instance Method Details

#batch_http_events(encoded_events, max_batch_length, max_request_size) ⇒ Object

Group encoded HTTP events into batches, capping each batch at max_batch_length events and max_request_size bytes



# File 'lib/fluent/plugin/out_datadog.rb', line 177

def batch_http_events(encoded_events, max_batch_length, max_request_size)
  batches = []
  current_batch = []
  current_batch_size = 0
  encoded_events.each_with_index do |encoded_event, i|
    current_event_size = encoded_event.bytesize
    # If a single event is bigger than the max request size, truncate it
    if current_event_size > max_request_size
      encoded_event = truncate(encoded_event, max_request_size)
      current_event_size = encoded_event.bytesize
    end

    # Flush the current batch once it holds max_batch_length events, or when
    # adding this event would push it past max_request_size bytes
    if (i > 0 and i % max_batch_length == 0) or (current_batch_size + current_event_size > max_request_size)
      batches << current_batch
      current_batch = []
      current_batch_size = 0
    end

    current_batch_size += encoded_event.bytesize
    current_batch << encoded_event
  end
  batches << current_batch
  batches
end
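For illustration, here is a minimal standalone sketch of the same batching rules with toy limits (max_batch_length = 2 events, max_request_size = 100 bytes); it is not part of the plugin:

events = ["a" * 40, "b" * 40, "c" * 40]

batches = []
current_batch = []
current_batch_size = 0
events.each_with_index do |e, i|
  # Flush every 2 events, or when the next event would exceed 100 bytes
  if (i > 0 and i % 2 == 0) or (current_batch_size + e.bytesize > 100)
    batches << current_batch
    current_batch = []
    current_batch_size = 0
  end
  current_batch_size += e.bytesize
  current_batch << e
end
batches << current_batch

p batches.map(&:length)  # => [2, 1]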

#configure(conf) ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 77

def configure(conf)
  compat_parameters_convert(conf, :buffer)
  super
  return if @dd_hostname

  # Fall back to the TCP endpoint when HTTP is disabled but the host was left
  # at the HTTP default
  if not @use_http and @host == DD_DEFAULT_HTTP_ENDPOINT
    @host = DD_DEFAULT_TCP_ENDPOINT
  end

  # Set dd_hostname if it is not already set (it can be set explicitly when
  # using Fluentd as an aggregator); prefer `hostname -f`, falling back to
  # Socket.gethostname
  @dd_hostname = %x[hostname -f 2> /dev/null].strip
  @dd_hostname = Socket.gethostname if @dd_hostname.empty?

  @timestamp_key = nil if nilish?(@timestamp_key)
end
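A standalone sketch of the endpoint fallback and hostname resolution performed above (constants reproduced here only for illustration):

DD_DEFAULT_HTTP_ENDPOINT = "http-intake.logs.datadoghq.com"
DD_DEFAULT_TCP_ENDPOINT = "intake.logs.datadoghq.com"

# A TCP (non-HTTP) setup that kept the default HTTP host is pointed at the
# TCP intake instead
use_http = false
host = DD_DEFAULT_HTTP_ENDPOINT
host = DD_DEFAULT_TCP_ENDPOINT if !use_http && host == DD_DEFAULT_HTTP_ENDPOINT
puts host  # => "intake.logs.datadoghq.com"

# Hostname resolution mirrors the method body
require "socket"
dd_hostname = `hostname -f 2> /dev/null`.strip
dd_hostname = Socket.gethostname if dd_hostname.empty?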

#enrich_record(tag, time, record) ⇒ Object

Enrich the record with metadata such as service, tags, or source



# File 'lib/fluent/plugin/out_datadog.rb', line 222

def enrich_record(tag, time, record)
  if @dd_sourcecategory
    record["ddsourcecategory"] ||= @dd_sourcecategory
  end
  if @dd_source
    record["ddsource"] ||= @dd_source
  end
  if @dd_tags
    record["ddtags"] ||= @dd_tags
  end
  if @service
    record["service"] ||= @service
  end
  if @dd_hostname
    # set the record hostname to the configured dd_hostname only if the
    # record doesn't already carry one, so a hostname is always present
    record["hostname"] ||= @dd_hostname
  end

  if @include_tag_key
    record[@tag_key] = tag
  end
  # If @timestamp_key already exists, we don't overwrite it.
  if @timestamp_key and record[@timestamp_key].nil? and time
    record[@timestamp_key] = Time.at(time).utc.iso8601(3)
  end

  container_tags = get_container_tags(record)
  unless container_tags.empty?
    if record["ddtags"].nil? || record["ddtags"].empty?
      record["ddtags"] = container_tags
    else
      record["ddtags"] = record["ddtags"] + "," + container_tags
    end
  end

  if @delete_extracted_tag_attributes
    record.delete('kubernetes')
    record.delete('docker')
  end

  record
end
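As an illustration, assuming hypothetical configured values dd_source = "nginx", dd_tags = "env:prod" and service = "web", a bare record gains the corresponding fields. This is a sketch of the ||= merges above, not the full method:

require "time"

record = { "message" => "GET / 200" }
dd_source = "nginx"   # hypothetical configured values
dd_tags = "env:prod"
service = "web"

record["ddsource"] ||= dd_source
record["ddtags"] ||= dd_tags
record["service"] ||= service
record["timestamp"] ||= Time.at(1_700_000_000.0).utc.iso8601(3)  # hypothetical timestamp_key
p record
# => {"message"=>"GET / 200", "ddsource"=>"nginx", "ddtags"=>"env:prod",
#     "service"=>"web", "timestamp"=>"2023-11-14T22:13:20.000Z"}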

#format(tag, time, record) ⇒ Object

This method is called when an event reaches Fluentd.



# File 'lib/fluent/plugin/out_datadog.rb', line 116

def format(tag, time, record)
  # When Fluent::EventTime is msgpack'ed it gets converted to int with seconds
  # precision only. We explicitly convert it to floating point number, which
  # is compatible with Time.at below.
  record = enrich_record(tag, time.to_f, record)
  if @use_http
    record = JSON.generate(record)
  else
    if @use_json
      record = "#{api_key} #{JSON.generate(record)}"
    else
      record = "#{api_key} #{record}"
    end
  end
  [record].to_msgpack
end
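In HTTP mode the result is a one-element msgpack array holding the JSON-serialized, enriched record. A round-trip sketch (uses the msgpack gem, which Fluentd depends on):

require "json"
require "msgpack"

record = { "message" => "hello", "ddsource" => "demo" }
packed = [JSON.generate(record)].to_msgpack
p MessagePack.unpack(packed)
# => ["{\"message\":\"hello\",\"ddsource\":\"demo\"}"]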

#format_http_event_batch(events) ⇒ Object

Format a batch of HTTP events as a JSON array



# File 'lib/fluent/plugin/out_datadog.rb', line 217

def format_http_event_batch(events)
  "[#{events.join(',')}]"
end

#formatted_to_msgpack_binary? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/fluent/plugin/out_datadog.rb', line 97

def formatted_to_msgpack_binary?
  true
end

#get_container_tags(record) ⇒ Object

Collect Docker and Kubernetes tags for your logs using the fluent-plugin-kubernetes_metadata_filter plugin. For more information about the attribute names, see: https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter/blob/master/lib/fluent/plugin/filter_kubernetes_metadata.rb#L265



# File 'lib/fluent/plugin/out_datadog.rb', line 425

def get_container_tags(record)
  [
      get_kubernetes_tags(record),
      get_docker_tags(record)
  ].compact.join(",")
end

#get_docker_tags(record) ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 446

def get_docker_tags(record)
  if record.key?('docker') and not record.fetch('docker').nil?
    docker = record['docker']
    tags = Array.new
    tags.push("container_id:" + docker['container_id']) unless docker['container_id'].nil?
    return tags.join(",")
  end
  nil
end

#get_kubernetes_tags(record) ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 432

def get_kubernetes_tags(record)
  if record.key?('kubernetes') and not record.fetch('kubernetes').nil?
    kubernetes = record['kubernetes']
    tags = Array.new
    tags.push("image_name:" + kubernetes['container_image']) unless kubernetes['container_image'].nil?
    tags.push("container_name:" + kubernetes['container_name']) unless kubernetes['container_name'].nil?
    tags.push("kube_namespace:" + kubernetes['namespace_name']) unless kubernetes['namespace_name'].nil?
    tags.push("pod_name:" + kubernetes['pod_name']) unless kubernetes['pod_name'].nil?
    tags.push("container_id:" + kubernetes['docker_id']) unless kubernetes['docker_id'].nil?
    return tags.join(",")
  end
  nil
end
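For example, a record enriched by fluent-plugin-kubernetes_metadata_filter (illustrative values) would yield:

record = {
  "kubernetes" => {
    "container_image" => "nginx:1.25",
    "container_name" => "nginx",
    "namespace_name" => "web",
    "pod_name" => "nginx-abc123",
    "docker_id" => "f00d"
  }
}
# get_kubernetes_tags(record)
# => "image_name:nginx:1.25,container_name:nginx,kube_namespace:web,pod_name:nginx-abc123,container_id:f00d"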

#gzip_compress(payload, compression_level) ⇒ Object

Compress the payload with gzip at the given compression level



# File 'lib/fluent/plugin/out_datadog.rb', line 268

def gzip_compress(payload, compression_level)
  gz = StringIO.new
  gz.set_encoding("BINARY")
  z = Zlib::GzipWriter.new(gz, compression_level)
  begin
    z.write(payload)
  ensure
    z.close
  end
  gz.string
end
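A round-trip sketch: payloads compressed this way can be read back with Zlib::GzipReader (standard library):

require "stringio"
require "zlib"

payload = '[{"message":"hello"}]'
gz = StringIO.new
gz.set_encoding("BINARY")
writer = Zlib::GzipWriter.new(gz, Zlib::DEFAULT_COMPRESSION)
begin
  writer.write(payload)
ensure
  writer.close
end
compressed = gz.string

puts Zlib::GzipReader.new(StringIO.new(compressed)).read
# => [{"message":"hello"}]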

#max(a, b) ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 212

def max(a, b)
  a > b ? a : b
end

#multi_workers_ready? ⇒ Boolean

Returns:

  • (Boolean)


# File 'lib/fluent/plugin/out_datadog.rb', line 93

def multi_workers_ready?
  true
end

#new_client(logger, api_key, use_http, use_ssl, no_ssl_validation, host, ssl_port, port, http_proxy, custom_headers, use_compression, force_v1_routes) ⇒ Object

Build a new transport client



# File 'lib/fluent/plugin/out_datadog.rb', line 281

def new_client(logger, api_key, use_http, use_ssl, no_ssl_validation, host, ssl_port, port, http_proxy, custom_headers, use_compression, force_v1_routes)
  if use_http
    DatadogHTTPClient.new logger, use_ssl, no_ssl_validation, host, ssl_port, port, http_proxy, custom_headers, use_compression, api_key, force_v1_routes
  else
    DatadogTCPClient.new logger, use_ssl, no_ssl_validation, host, ssl_port, port
  end
end

#process_http_events(events, use_compression, compression_level, max_retries, max_backoff, max_batch_length, max_batch_size) ⇒ Object

Process and send a set of HTTP events, breaking the set into smaller batches when needed



# File 'lib/fluent/plugin/out_datadog.rb', line 157

def process_http_events(events, use_compression, compression_level, max_retries, max_backoff, max_batch_length, max_batch_size)
  batches = batch_http_events(events, max_batch_length, max_batch_size)
  batches.each do |batched_event|
    formatted_events = format_http_event_batch(batched_event)
    if use_compression
      formatted_events = gzip_compress(formatted_events, compression_level)
    end
    @client.send_retries(formatted_events, max_retries, max_backoff)
  end
end

#process_tcp_event(event, max_retries, max_backoff, max_batch_size) ⇒ Object

Process and send a single TCP event



# File 'lib/fluent/plugin/out_datadog.rb', line 169

def process_tcp_event(event, max_retries, max_backoff, max_batch_size)
  if event.bytesize > max_batch_size
    event = truncate(event, max_batch_size)
  end
  @client.send_retries(event, max_retries, max_backoff)
end

#shutdown ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 106

def shutdown
  super
end

#start ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 101

def start
  super
  @client = new_client(log, @api_key, @use_http, @use_ssl, @no_ssl_validation, @host, @ssl_port, @port, @http_proxy, @custom_headers, @use_compression, @force_v1_routes)
end

#terminate ⇒ Object



# File 'lib/fluent/plugin/out_datadog.rb', line 110

def terminate
  super
  @client.close if @client
end

#truncate(event, max_length) ⇒ Object

Truncate events over the provided max length, overwriting the tail with a truncation marker so the result stays within max_length



# File 'lib/fluent/plugin/out_datadog.rb', line 203

def truncate(event, max_length)
  if event.length > max_length
    event = event[0..max_length - 1]
    # Overwrite the tail (rather than append) so the result is exactly max_length
    event[max(0, max_length - DD_TRUNCATION_SUFFIX.length)..max_length - 1] = DD_TRUNCATION_SUFFIX
    return event
  end
  event
end
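A quick illustration of the in-place tail replacement (the 15-character marker overwrites the last 15 characters, so the result is exactly max_length long):

suffix = "...TRUNCATED..."     # DD_TRUNCATION_SUFFIX, 15 characters
event = "x" * 30
max_length = 20

truncated = event[0..max_length - 1]
truncated[[0, max_length - suffix.length].max..max_length - 1] = suffix
p truncated         # => "xxxxx...TRUNCATED..."
p truncated.length  # => 20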

#write(chunk) ⇒ Object

NOTE! This method is called by an internal thread, not Fluentd's main thread. 'chunk' is a buffer chunk that includes multiple formatted events.



# File 'lib/fluent/plugin/out_datadog.rb', line 136

def write(chunk)
  begin
    if @use_http
      events = Array.new
      chunk.msgpack_each do |record|
        next if record.empty?
        events.push record[0]
      end
      process_http_events(events, @use_compression, @compression_level, @max_retries, @max_backoff, DD_MAX_BATCH_LENGTH, DD_MAX_BATCH_SIZE)
    else
      chunk.msgpack_each do |record|
        next if record.empty?
        process_tcp_event(record[0], @max_retries, @max_backoff, DD_MAX_BATCH_SIZE)
      end
    end
  rescue Exception => e
    log.error("Uncaught processing exception in datadog forwarder #{e.message}")
  end
end
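A sketch of the chunk layout this method consumes: #format wraps each event in a one-element msgpack array, so msgpack_each yields arrays whose first element is the payload. Simulated here with the msgpack gem:

require "stringio"
require "msgpack"

chunk = ["event one"].to_msgpack + ["event two"].to_msgpack

events = []
MessagePack::Unpacker.new(StringIO.new(chunk)).each do |record|
  next if record.empty?
  events << record[0]
end
p events  # => ["event one", "event two"]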