[cache]
# Configure carbon directories.
#
# OS environment variables can be used to tell carbon where graphite is
# installed, where to read configuration from and where to write data.
#
# GRAPHITE_ROOT - Root directory of the graphite installation.
# Defaults to ../
# GRAPHITE_CONF_DIR - Configuration directory (where this file lives).
# Defaults to $GRAPHITE_ROOT/conf/
# GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files.
# Defaults to $GRAPHITE_ROOT/storage/
#
# To change other directory paths, add settings to this file. The following
# configuration variables are available with these default values:
#
# STORAGE_DIR = $GRAPHITE_STORAGE_DIR
# LOCAL_DATA_DIR = %(STORAGE_DIR)s/whisper/
# WHITELISTS_DIR = %(STORAGE_DIR)s/lists/
# CONF_DIR = %(STORAGE_DIR)s/conf/
# LOG_DIR = %(STORAGE_DIR)s/log/
# PID_DIR = %(STORAGE_DIR)s/
#
# For FHS style directory structures, use:
#
# STORAGE_DIR = /var/lib/carbon/
# CONF_DIR = /etc/carbon/
# LOG_DIR = /var/log/carbon/
# PID_DIR = /var/run/
#
#LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
# Specify the database library used to store metric data on disk. Each database
# may have configurable options to change the behaviour of how it writes to
# persistent storage.
#
# whisper - Fixed-size database, similar in design and purpose to RRD. This is
# the default storage backend for carbon and the most rigorously tested.
#
# ceres - Experimental alternative database that supports storing data in sparse
# files of arbitrary fixed-size resolutions.
DATABASE = whisper
# Enable daily log rotation. If disabled, a new file will be opened whenever the log file path no
# longer exists (i.e. it is removed or renamed)
ENABLE_LOGROTATION = True
# Specify the user to drop privileges to
# If this is blank carbon-cache runs as the user that invokes it
# This user must have write access to the local data directory
USER =
# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorting and serving cache queries get more expensive as the cache grows.
# Use the value "inf" (infinity) for an unlimited cache size.
# The value should be an integer number of metric datapoints.
MAX_CACHE_SIZE = inf
# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 500
# If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a
# stop/shutdown is initiated. This helps when MAX_UPDATES_PER_SECOND is
# relatively low and carbon has cached a lot of updates; it enables the carbon
# daemon to shutdown more quickly.
# MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000
# Softly limits the number of whisper files that get created each minute.
# Setting this value low (e.g. 50) is a good way to ensure that your carbon
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that any metrics received in excess of this
# value will be silently dropped, and the whisper file will not be created
# until such point as a subsequent metric is received and fits within the
# defined rate limit. Setting this value high (like "inf" for infinity) will
# cause carbon to create the files quickly but at the risk of increased I/O.
MAX_CREATES_PER_MINUTE = 50
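# As a rough worked example: with MAX_CREATES_PER_MINUTE = 50, a burst of
# 10,000 brand-new metric names will need about 10000 / 50 = 200 minutes of
# continued reporting before every corresponding whisper file has been created.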
# Set the minimum timestamp resolution supported by this instance. This allows
# internal optimisations by overwriting points with equal truncated timestamps
# in order to limit the number of updates to the database. It defaults to one
# second.
MIN_TIMESTAMP_RESOLUTION = 1
# Set the minimum lag in seconds for a point to be written to the database
# in order to optimize batching. This means that each point will wait at least
# the duration of this lag before being written. Setting this to 0 disables the feature.
# This currently only works when using the timesorted write strategy.
# MIN_TIMESTAMP_LAG = 0
# Set the interface and port for the line (plain text) listener. Setting the
# interface to 0.0.0.0 listens on all interfaces. Port can be set to 0 to
# disable this listener if it is not required.
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003
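# The line listener speaks the plaintext protocol, one datapoint per line in
# the form "<metric path> <value> <timestamp>\n". A minimal Python sketch for
# sending a single test datapoint (the metric name and address below are
# purely illustrative):
#
#   import socket, time
#   sock = socket.create_connection(("127.0.0.1", 2003))
#   sock.sendall(("example.random.diceroll 4 %d\n" % int(time.time())).encode())
#   sock.close()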
# Set this to True to enable the UDP listener. By default this is off
# because it is very common to run multiple carbon daemons and managing
# another (rarely used) port for every carbon instance is not fun.
ENABLE_UDP_LISTENER = False
UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003
# Set the interface and port for the pickle listener. Setting the interface to
# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this
# listener if it is not required.
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004
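# The pickle listener expects each message to be a 4-byte big-endian length
# header followed by a pickled list of (path, (timestamp, value)) tuples. A
# minimal Python sketch for sending one batch (names and address are
# illustrative):
#
#   import pickle, socket, struct, time
#   batch = [("example.cpu.load", (int(time.time()), 1.5))]
#   payload = pickle.dumps(batch, protocol=2)
#   sock = socket.create_connection(("127.0.0.1", 2004))
#   sock.sendall(struct.pack("!L", len(payload)) + payload)
#   sock.close()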
# Set the interface and port for the protobuf listener. Setting the interface to
# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this
# listener if it is not required.
# PROTOBUF_RECEIVER_INTERFACE = 0.0.0.0
# PROTOBUF_RECEIVER_PORT = 2005
# Limit the number of open connections the receiver can handle at any time.
# Default is no limit. Setting up a limit for sites handling high volume
# traffic may be recommended to avoid running out of TCP memory or having
# thousands of TCP connections reduce the throughput of the service.
#MAX_RECEIVER_CONNECTIONS = inf
# Per security concerns outlined in Bug #817247 the pickle receiver
# will use a more secure and slightly less efficient unpickler.
# Set this to True to revert to the old-fashioned insecure unpickler.
USE_INSECURE_UNPICKLER = False
CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002
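# The cache query listener is what graphite-web's carbonlink client uses to
# read datapoints that are still held in memory. If you change the port or run
# multiple instances, the webapp's CARBONLINK_HOSTS setting should list the
# matching host:port:instance entries, for example (illustrative values):
#
#   CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b"]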
# Set this to False to drop datapoints received after the cache
# reaches MAX_CACHE_SIZE. If this is True (the default) then sockets
# over which metrics are received will temporarily stop accepting
# data until the cache size falls below 95% of MAX_CACHE_SIZE.
USE_FLOW_CONTROL = True
# If enabled, this setting is used to time out a metric client connection if no
# metrics have been sent within the specified time in seconds
#METRIC_CLIENT_IDLE_TIMEOUT = None
# By default, carbon-cache will log every whisper update and cache hit.
# This can be excessive and degrade performance if the logs are stored on the
# same volume as the whisper data.
LOG_UPDATES = False
LOG_CREATES = False
LOG_CACHE_HITS = False
LOG_CACHE_QUEUE_SORTS = False
# The thread that writes metrics to disk can use one of the following strategies
# to determine the order in which metrics are removed from cache and flushed to
# disk. The default option preserves the same behavior as has been historically
# available in version 0.9.10.
#
# sorted - All metrics in the cache will be counted and an ordered list of
# them will be sorted according to the number of datapoints in the cache at the
# moment of the list's creation. Metrics will then be flushed from the cache to
# disk in that order.
#
# timesorted - All metrics in the list will be looked at and sorted according
# to the timestamp of their datapoints. The metrics that were least recently
# written will be written first. This is a hybrid strategy between max and
# sorted which is particularly well suited to sets of metrics with non-uniform
# resolutions.
#
# max - The writer thread will always pop and flush the metric from cache
# that has the most datapoints. This will give a strong flush preference to
# frequently updated metrics and will also reduce random file-io. Infrequently
# updated metrics may only ever be persisted to disk at daemon shutdown if
# there are a large number of metrics which receive very frequent updates OR if
# disk i/o is very slow.
#
# naive - Metrics will be flushed from the cache to disk in an unordered
# fashion. This strategy may be desirable in situations where the storage for
# whisper files is solid state, CPU resources are very limited or deference to
# the OS's i/o scheduler is expected to compensate for the random write
# pattern.
#
CACHE_WRITE_STRATEGY = sorted
# On some systems it is desirable for whisper to write synchronously.
# Set this option to True if you'd like to try this. Basically it will
# shift the onus of buffering writes from the kernel into carbon's cache.
WHISPER_AUTOFLUSH = False
# By default new Whisper files are created pre-allocated with the data region
# filled with zeros to prevent fragmentation and speed up contiguous reads and
# writes (which are common). Enabling this option will cause Whisper to create
# the file sparsely instead. Enabling this option may allow a large increase of
# MAX_CREATES_PER_MINUTE but may have longer term performance implications
# depending on the underlying storage configuration.
# WHISPER_SPARSE_CREATE = False
# Only beneficial on Linux filesystems that support the fallocate system call.
# It maintains the benefits of contiguous reads/writes, but with a potentially
# much faster creation speed, by allowing the kernel to handle the block
# allocation and zero-ing. Enabling this option may allow a large increase of
# MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported
# this option will gracefully fall back to standard POSIX file access methods.
WHISPER_FALLOCATE_CREATE = True
# Enabling this option will cause Whisper to lock each Whisper file it writes
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
# multiple carbon-cache daemons are writing to the same files.
# WHISPER_LOCK_WRITES = False
# On systems with a large number of metrics, page writeback from bursts of
# Whisper write(2) calls can sometimes cause disk thrashing due to memory
# shortage, resulting in abnormal disk reads. Enabling this option reduces
# useless page cache usage via posix_fadvise(2) with the POSIX_FADV_RANDOM flag.
# WHISPER_FADVISE_RANDOM = False
# By default all nodes stored in Ceres are cached in memory to improve the
# throughput of reads and writes to underlying slices. Turning this off will
# greatly reduce memory consumption for databases with millions of metrics, at
# the cost of a steep increase in disk i/o, approximately an extra two os.stat
# calls for every read and write. Reasons to do this are if the underlying
# storage can handle stat() with practically zero cost (SSD, NVMe, zRAM).
# Valid values are:
# all - all nodes are cached
# none - node caching is disabled
# CERES_NODE_CACHING_BEHAVIOR = all
# Ceres nodes can have many slices and caching the right ones can improve
# performance dramatically. Note that there are many trade-offs to tinkering
# with this, and unless you are a ceres developer you *really* should not
# mess with this. Valid values are:
# latest - only the most recent slice is cached
# all - all slices are cached
# none - slice caching is disabled
# CERES_SLICE_CACHING_BEHAVIOR = latest
# If a Ceres node accumulates too many slices, performance can suffer.
# This can be caused by intermittently reported data. To mitigate
# slice fragmentation there is a tolerance for how much space can be
# wasted within a slice file to avoid creating a new one. That tolerance
# level is determined by MAX_SLICE_GAP, which is the number of consecutive
# null datapoints allowed in a slice file.
# If you set this very low, you will waste less of the *tiny* bit of disk space
# that this feature wastes, and you will be prone to performance problems
# caused by slice fragmentation, which can be pretty severe.
# If you set this really high, you will waste a bit more disk space (each
# null datapoint wastes 8 bytes, but keep in mind your filesystem's block
# size). If you suffer slice fragmentation issues, you should increase this or
# run the ceres-maintenance defrag plugin more often. However you should not
# set it to be huge because then if a large but allowed gap occurs it has to
# get filled in, which means instead of a simple 8-byte write to a new file we
# could end up doing an (8 * MAX_SLICE_GAP)-byte write to the latest slice.
# CERES_MAX_SLICE_GAP = 80
# Enabling this option will cause Ceres to lock each Ceres file it writes to
# with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
# multiple carbon-cache daemons are writing to the same files.
# CERES_LOCK_WRITES = False
# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
# missing or empty, all metrics will pass through
# USE_WHITELIST = False
# By default, carbon itself will log statistics (such as metricsReceived, a
# count of metrics received) with the top level prefix of 'carbon' at an
# interval of 60 seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
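# With the defaults above, carbon-cache's self-reported statistics typically
# appear under names like carbon.agents.<hostname>-<instance>.metricsReceived
# (the exact prefix depends on the daemon type and how it was started).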
# Enable AMQP if you want to receive metrics using an amqp broker
# ENABLE_AMQP = False
# Verbose means a line will be logged for every metric received,
# which is useful for testing
# AMQP_VERBOSE = False
# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite
# AMQP_METRIC_NAME_IN_BODY = False
# The manhole interface allows you to SSH into the carbon daemon
# and get a python interpreter. BE CAREFUL WITH THIS! If you do
# something like time.sleep() in the interpreter, the whole process
# will sleep! This is *extremely* helpful in debugging, assuming
# you are familiar with the code. If you are not, please don't
# mess with this, you are asking for trouble :)
#
# ENABLE_MANHOLE = False
# MANHOLE_INTERFACE = 127.0.0.1
# MANHOLE_PORT = 7222
# MANHOLE_USER = admin
# MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE=
# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #
# URL of the graphite-web instance; this is used to add incoming series to the tag database
# GRAPHITE_URL = http://127.0.0.1:8000
# Tag support. When enabled, carbon will make HTTP calls to graphite-web to update the tag index
# ENABLE_TAGS = True
# Tag update interval. This specifies how frequently updates to existing series will trigger
# an update to the tag index; the default setting is once every 100 updates
# TAG_UPDATE_INTERVAL = 100
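# Tagged series use the ";tag=value" syntax within the metric path itself, so a
# tagged plaintext datapoint looks like this (tag names and values are
# illustrative):
#
#   disk.used;datacenter=dc1;server=web01 512 1234567890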
# To configure special settings for the carbon-cache instance 'b', uncomment this:
#[cache:b]
#LINE_RECEIVER_PORT = 2103
#PICKLE_RECEIVER_PORT = 2104
#CACHE_QUERY_PORT = 7102
# and any other settings you want to customize, defaults are inherited
# from the [cache] section.
# You can then specify the --instance=b option to manage this instance
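# For example, with the [cache:b] section above uncommented, the second
# instance would be started with something like:
#
#   carbon-cache.py --instance=b start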
#
# In order to turn off logging of successful connections for the line
# receiver, set this to False
# LOG_LISTENER_CONN_SUCCESS = True
[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2013
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2014
# Carbon-relay has several options for metric routing controlled by RELAY_METHOD
#
# Use relay-rules.conf to route metrics to destinations based on pattern rules
#RELAY_METHOD = rules
#
# Use consistent-hashing for even distribution of metrics between destinations
#RELAY_METHOD = consistent-hashing
#
# Use consistent-hashing but take into account an aggregation-rules.conf shared
# by downstream carbon-aggregator daemons. This will ensure that all metrics
# that map to a given aggregation rule are sent to the same carbon-aggregator
# instance.
# Enable this for carbon-relays that send to a group of carbon-aggregators
#RELAY_METHOD = aggregated-consistent-hashing
#
# You can also use fast-hashing and fast-aggregated-hashing, which are O(1)
# and will always route a given metric to the same destination, but do not try
# to minimize rebalancing when the list of destinations changes.
RELAY_METHOD = rules
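# With RELAY_METHOD = rules, routing is driven by relay-rules.conf. An entry
# typically looks something like the following (pattern and destinations are
# illustrative; one rule marked "default = true" acts as the catch-all):
#
#   [example]
#   pattern = ^mydata\.foo\..+
#   destinations = 127.0.0.1:2004
#
#   [default]
#   default = true
#   destinations = 127.0.0.1:2004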
# If you use consistent-hashing you can add redundancy by replicating every
# datapoint to more than one machine.
REPLICATION_FACTOR = 1
# For REPLICATION_FACTOR >=2, set DIVERSE_REPLICAS to True to guarantee replicas
# across distributed hosts. With this setting disabled, it's possible that replicas
# may be sent to different caches on the same host. This has been the default
# behavior since the introduction of the 'consistent-hashing' relay method.
# Note that enabling this on an existing pre-0.9.14 cluster will require rebalancing
# your metrics across the cluster nodes using a tool like Carbonate.
#DIVERSE_REPLICAS = True
# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The general form is IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
#
# If using RELAY_METHOD = rules, all destinations used in relay-rules.conf
# must be defined in this list
DESTINATIONS = 127.0.0.1:2004
# This defines the protocol to use to contact the destination. It can be
# set to one of "line", "pickle", "udp" and "protobuf". This list can be
# extended with CarbonClientFactory plugins and defaults to "pickle".
# DESTINATION_PROTOCOL = pickle
# When using consistent hashing it sometimes makes sense to make
# the ring dynamic when you don't want to lose points when a
# single destination is down. Replication is an answer to that
# but it can be quite expensive.
# DYNAMIC_ROUTER = False
# Controls the number of connection attempts before marking a
# destination as down. We usually do one connection attempt per
# second.
# DYNAMIC_ROUTER_MAX_RETRIES = 5
# This is the maximum number of datapoints that can be queued up
# for a single destination. Once this limit is hit, we will
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
# we will drop any subsequently received datapoints.
MAX_QUEUE_SIZE = 10000
# This defines the maximum "message size" between carbon daemons. If
# your queue is large, setting this to a lower number will cause the
# relay to forward smaller discrete chunks of stats, which may prevent
# overloading on the receiving side after a disconnect.
MAX_DATAPOINTS_PER_MESSAGE = 500
# Limit the number of open connections the receiver can handle at any time.
# Default is no limit. Setting up a limit for sites handling high volume
# traffic may be recommended to avoid running out of TCP memory or having
# thousands of TCP connections reduce the throughput of the service.
#MAX_RECEIVER_CONNECTIONS = inf
# Specify the user to drop privileges to
# If this is blank carbon-relay runs as the user that invokes it
# USER =
# This is the fraction of MAX_QUEUE_SIZE that the send queue must fall below
# before it will accept more messages. For a larger site, if the queue is very large it makes sense
# to tune this to allow for incoming stats. So if you have an average
# flow of 100k stats/minute, and a MAX_QUEUE_SIZE of 3,000,000, it makes sense
# to allow stats to start flowing when you've cleared the queue to 95% since
# you should have space to accommodate the next minute's worth of stats
# even before the relay incrementally clears more of the queue
QUEUE_LOW_WATERMARK_PCT = 0.8
# To allow for batch efficiency from the pickle protocol and to benefit from
# other batching advantages, all writes are deferred by putting them into a queue,
# and then the queue is flushed and sent a small fraction of a second later.
TIME_TO_DEFER_SENDING = 0.0001
# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below QUEUE_LOW_WATERMARK_PCT * MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True
# If enabled, this setting is used to time out a metric client connection if no
# metrics have been sent within the specified time in seconds
#METRIC_CLIENT_IDLE_TIMEOUT = None
# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
# missing or empty, all metrics will pass through
# USE_WHITELIST = False
# By default, carbon itself will log statistics (such as metricsReceived, a
# count of metrics received) with the top level prefix of 'carbon' at an
# interval of 60 seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
#
# In order to turn off logging of successful connections for the line
# receiver, set this to False
# LOG_LISTENER_CONN_SUCCESS = True
# If you're connecting from the relay to a destination that's over the
# internet or similarly iffy connection, a backlog can develop because
# of internet weather conditions, e.g. acks getting lost or similar issues.
# To deal with that, you can enable USE_RATIO_RESET which will let you
# re-set the connection to an individual destination. Defaults to being off.
USE_RATIO_RESET = False
# When there is a small number of stats flowing, it's not desirable to
# perform any actions based on percentages - it's just too "twitchy".
MIN_RESET_STAT_FLOW = 1000
# When the ratio of stats being sent in a reporting interval is far
# enough from 1.0, we will disconnect the socket and reconnect to
# clear out queued stats. The default ratio of 0.9 indicates that 10%
# of stats aren't being delivered within one CARBON_METRIC_INTERVAL
# (default of 60 seconds), which can lead to a queue backup. Under
# some circumstances re-setting the connection can fix this, so
# set this according to your tolerance, and look in the logs for
# "resetConnectionForQualityReasons" to observe whether this is kicking
# in when your sent queue is building up.
MIN_RESET_RATIO = 0.9
# The minimum time between resets. When a connection is re-set, we
# need to wait before another reset is performed.
# (2*CARBON_METRIC_INTERVAL) + 1 second is the minimum time needed
# before stats for the new connection will be available. Setting this
# below (2*CARBON_METRIC_INTERVAL) + 1 second will result in a lot of
# reset connections for no good reason.
MIN_RESET_INTERVAL = 121
[aggregator]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2023
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2024
# If set to True, metrics received will be forwarded to DESTINATIONS in addition
# to the output of the aggregation rules. If set to False, the carbon-aggregator
# will only ever send the output of aggregation.
FORWARD_ALL = True
# Filenames of the configuration files to use for this instance of aggregator.
# Filenames are relative to CONF_DIR.
#
# AGGREGATION_RULES = aggregation-rules.conf
# REWRITE_RULES = rewrite-rules.conf
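# Each line of aggregation-rules.conf maps an input pattern to an output metric
# in the form "output_template (frequency) = method input_pattern", for example
# (names are illustrative):
#
#   <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests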
# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the "None" instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
DESTINATIONS = 127.0.0.1:2004
# If you want to add redundancy to your data by replicating every
# datapoint to more than one machine, increase this.
REPLICATION_FACTOR = 1
# This is the maximum number of datapoints that can be queued up
# for a single destination. Once this limit is hit, we will
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
# we will drop any subsequently received datapoints.
MAX_QUEUE_SIZE = 10000
# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% of MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True
# If enabled, this setting is used to time out a metric client connection if no
# metrics have been sent within the specified time in seconds
#METRIC_CLIENT_IDLE_TIMEOUT = None
# This defines the maximum "message size" between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500
# This defines how many datapoints the aggregator remembers for
# each metric. Aggregation only happens for datapoints that fall in
# the past MAX_AGGREGATION_INTERVALS * intervalSize seconds.
MAX_AGGREGATION_INTERVALS = 5
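# For example, for a rule whose frequency is 60 seconds and with
# MAX_AGGREGATION_INTERVALS = 5, datapoints older than 5 * 60 = 300 seconds
# are no longer folded into the aggregate.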
# Limit the number of open connections the receiver can handle at any time.
# Default is no limit. Setting up a limit for sites handling high volume
# traffic may be recommended to avoid running out of TCP memory or having
# thousands of TCP connections reduce the throughput of the service.
#MAX_RECEIVER_CONNECTIONS = inf
# By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back
# aggregated data points once every rule.frequency seconds, on a per-rule basis.
# Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points
# every N seconds, independent of rule frequency. This is useful, for example,
# to be able to query partially aggregated metrics from carbon-cache without
# having to first wait rule.frequency seconds.
# WRITE_BACK_FREQUENCY = 0
# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
# missing or empty, all metrics will pass through
# USE_WHITELIST = False
# By default, carbon itself will log statistics (such as metricsReceived, a
# count of metrics received) with the top level prefix of 'carbon' at an
# interval of 60 seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
# CARBON_METRIC_PREFIX = carbon
# CARBON_METRIC_INTERVAL = 60
# In order to turn off logging of successful connections for the line
# receiver, set this to False
# LOG_LISTENER_CONN_SUCCESS = True
# In order to turn off logging of received metrics that have no corresponding
# aggregation rules, set this to False
# LOG_AGGREGATOR_MISSES = False
# Specify the user to drop privileges to
# If this is blank carbon-aggregator runs as the user that invokes it
# USER =
# Parts of the code, particularly the aggregator rules, need
# to cache metric names. To avoid leaking too much memory you
# can tweak the size of this cache. The default allows for 1M
# different metric names per rule (~200MiB).
# CACHE_METRIC_NAMES_MAX = 1000000
# You can optionally set a TTL for this cache.
# CACHE_METRIC_NAMES_TTL = 600