Class: StatsD::Instrument::Aggregator

Inherits: Object
Defined in: lib/statsd/instrument/aggregator.rb

Constant Summary

DEFAULT_MAX_CONTEXT_SIZE = 250


Constructor Details

#initialize(sink, datagram_builder_class, prefix, default_tags, flush_interval: 5.0, max_values: DEFAULT_MAX_CONTEXT_SIZE) ⇒ Aggregator

Returns a new instance of Aggregator.

Parameters:

  • sink (#<<)

    The sink to write the aggregated metrics to.

  • datagram_builder_class (Class)

    The class to use for building datagrams.

  • prefix (String)

    The prefix to add to all metrics.

  • default_tags (Array<String>)

    The tags to add to all metrics.

  • flush_interval (Float) (defaults to: 5.0)

    The interval, in seconds, at which the background thread flushes the aggregated metrics.

  • max_values (Integer) (defaults to: DEFAULT_MAX_CONTEXT_SIZE)

    The maximum number of values to aggregate for a single metric context before forcing an early flush.



# File 'lib/statsd/instrument/aggregator.rb', line 85

def initialize(
  sink,
  datagram_builder_class,
  prefix,
  default_tags,
  flush_interval: 5.0,
  max_values: DEFAULT_MAX_CONTEXT_SIZE
)
  @sink = sink
  @datagram_builder_class = datagram_builder_class
  @metric_prefix = prefix
  @default_tags = default_tags
  @datagram_builders = {
    true: nil,
    false: nil,
  }
  @max_values = max_values

  # Mutex protects the aggregation_state and flush_thread from concurrent access
  @mutex = Mutex.new
  @aggregation_state = {}

  @pid = Process.pid
  @flush_interval = flush_interval
  start_flush_thread

  ObjectSpace.define_finalizer(
    self,
    self.class.finalize(@aggregation_state, @sink, @datagram_builders, @datagram_builder_class, @default_tags),
  )
end
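
A minimal construction sketch. Any object that responds to #<< can serve as the sink (see the sink parameter above), so a plain Array is used here for illustration; in practice the sink is one of this gem's socket-backed sinks. StatsD::Instrument::DatagramBuilder is assumed here to be a compatible builder class.

sink = [] # collects rendered datagram strings via #<<

aggregator = StatsD::Instrument::Aggregator.new(
  sink,
  StatsD::Instrument::DatagramBuilder,
  "myapp",             # prefix prepended to all metric names
  ["env:production"],  # default tags added to all metrics
  flush_interval: 1.0, # flush every second instead of the 5.0 second default
)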

Class Method Details

.finalize(aggregation_state, sink, datagram_builders, datagram_builder_class, default_tags) ⇒ Object



# File 'lib/statsd/instrument/aggregator.rb', line 39

def finalize(aggregation_state, sink, datagram_builders, datagram_builder_class, default_tags)
  proc do
    aggregation_state.each do |key, agg_value|
      no_prefix = key.no_prefix
      datagram_builders[no_prefix] ||= datagram_builder_class.new(
        prefix: no_prefix ? nil : @metric_prefix,
        default_tags: default_tags,
      )
      case key.type
      when COUNT
        sink << datagram_builders[no_prefix].c(
          key.name,
          agg_value,
          CONST_SAMPLE_RATE,
          key.tags,
        )
      when DISTRIBUTION, MEASURE, HISTOGRAM
        sink << datagram_builders[no_prefix].timing_value_packed(
          key.name,
          key.type.to_s,
          agg_value,
          key.sample_rate,
          key.tags,
        )
      when GAUGE
        sink << datagram_builders[no_prefix].g(
          key.name,
          agg_value,
          CONST_SAMPLE_RATE,
          key.tags,
        )
      else
        StatsD.logger.error { "[#{self.class.name}] Unknown aggregation type: #{key.type}" }
      end
    end
    aggregation_state.clear
  end
end
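
Note that the finalizer proc is built in a class method from explicitly passed state rather than in the instance, so it does not close over self; a finalizer that captured the instance would keep it reachable and prevent it from ever being garbage collected. A standalone sketch of the same pattern, using a hypothetical Resource class:

class Resource
  def initialize(buffer)
    @buffer = buffer
    # Pass state into the class method instead of capturing self.
    ObjectSpace.define_finalizer(self, self.class.finalize(@buffer))
  end

  def self.finalize(buffer)
    proc { buffer.clear } # runs after the instance is garbage collected
  end
end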

Instance Method Details

#aggregate_timing(name, value, tags: [], no_prefix: false, type: DISTRIBUTION, sample_rate: CONST_SAMPLE_RATE) ⇒ Object



# File 'lib/statsd/instrument/aggregator.rb', line 138

def aggregate_timing(name, value, tags: [], no_prefix: false, type: DISTRIBUTION, sample_rate: CONST_SAMPLE_RATE)
  unless thread_healthcheck
    @sink << datagram_builder(no_prefix: no_prefix).timing_value_packed(
      name, type.to_s, [value], sample_rate, tags
    )
    return
  end

  tags = tags_sorted(tags)
  key = packet_key(name, tags, no_prefix, type, sample_rate: sample_rate)

  @mutex.synchronize do
    values = @aggregation_state[key] ||= []
    if values.size + 1 >= @max_values
      do_flush
    end
    values << value
  end
end
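
Timing-type samples (distributions, measures, histograms) are buffered per aggregation key, i.e. per unique combination of name, tags, type, sample rate, and prefix setting, and a full flush is triggered early once any single key accumulates max_values samples. An illustrative usage, assuming an aggregator constructed as in the sketch above:

aggregator.aggregate_timing("http.request.duration", 52.4, tags: ["route:home"])
aggregator.aggregate_timing("http.request.duration", 48.1, tags: ["route:home"])
# Both samples share one aggregation key and are emitted together in a
# single packed datagram on the next flush.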

#flush ⇒ Object



# File 'lib/statsd/instrument/aggregator.rb', line 172

def flush
  @mutex.synchronize { do_flush }
end
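
flush synchronizes on the mutex and drains all aggregated state to the sink. A hypothetical shutdown hook that drains buffered metrics before the process exits:

at_exit { aggregator.flush }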

#gauge(name, value, tags: [], no_prefix: false) ⇒ Object



# File 'lib/statsd/instrument/aggregator.rb', line 158

def gauge(name, value, tags: [], no_prefix: false)
  unless thread_healthcheck
    @sink << datagram_builder(no_prefix: no_prefix).g(name, value, CONST_SAMPLE_RATE, tags)
    return
  end

  tags = tags_sorted(tags)
  key = packet_key(name, tags, no_prefix, GAUGE)

  @mutex.synchronize do
    @aggregation_state[key] = value
  end
end
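
Gauges are aggregated last-write-wins: the aggregation state stores only the most recent value per key, so values recorded between flushes overwrite each other. For example:

aggregator.gauge("queue.depth", 120, tags: ["queue:default"])
aggregator.gauge("queue.depth", 75, tags: ["queue:default"])
# Only the final value (75) survives to the next flush.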

#increment(name, value = 1, tags: [], no_prefix: false) ⇒ void

This method returns an undefined value.

Increment a counter by a given value and save it for later flushing.

Parameters:

  • name (String)

    The name of the counter.

  • value (Integer) (defaults to: 1)

    The value to increment the counter by.

  • tags (Hash{String, Symbol => String}, Array<String>) (defaults to: [])

    The tags to attach to the counter.

  • no_prefix (Boolean) (defaults to: false)

    If true, the metric will not be prefixed.



# File 'lib/statsd/instrument/aggregator.rb', line 123

def increment(name, value = 1, tags: [], no_prefix: false)
  unless thread_healthcheck
    @sink << datagram_builder(no_prefix: no_prefix).c(name, value, CONST_SAMPLE_RATE, tags)
    return
  end

  tags = tags_sorted(tags)
  key = packet_key(name, tags, no_prefix, COUNT)

  @mutex.synchronize do
    @aggregation_state[key] ||= 0
    @aggregation_state[key] += value
  end
end
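
Counter increments accumulate into a single integer per aggregation key, so repeated calls collapse into one datagram at flush time. For example:

aggregator.increment("jobs.processed", tags: ["worker:1"])
aggregator.increment("jobs.processed", 3, tags: ["worker:1"])
# The next flush emits one counter datagram with value 4.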