# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file; simply surround
# them with ${}. For strings the variable must be within quotes (i.e., "${STR_VAR}");
# for numbers and booleans they should be plain (i.e., ${INT_VAR}, ${BOOL_VAR}).
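#
# For example (hypothetical variable names, assumed to be exported in the
# environment before Telegraf starts):
#
#   [agent]
#     metric_batch_size = ${BATCH_SIZE}    # number: no quotes
#
#   [[outputs.influxdb]]
#     urls = ["${INFLUX_URL}"]             # string: must stay inside quotes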


# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"


# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "1s"
  ## Rounds collection interval to 'interval'
  ## i.e., if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## Maximum number of unwritten metrics per output.
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. The maximum flush interval will
  ## be flush_interval + flush_jitter.
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## i.e., a jitter of 5s and interval 10s means flushes will happen every 10-15s.
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s:
  ## i.e., when interval = "10s", precision will be "1s";
  ## when interval = "250ms", precision will be "1ms".
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Log at debug level.
  # debug = false
  ## Log only error level messages.
  # quiet = false

  ## Log file name; the empty string means to log to stderr.
  logfile = ""

  ## The logfile will be rotated after the time interval specified. When set
  ## to 0, no time-based rotation is performed.
  # logfile_rotation_interval = "0d"

  ## The logfile will be rotated when it becomes larger than the specified
  ## size. When set to 0, no size-based rotation is performed.
  # logfile_rotation_max_size = "0MB"

  ## Maximum number of rotated archives to keep; any older logs are deleted.
  ## If set to -1, no archives are removed.
  # logfile_rotation_max_archives = 5

  ## Override default hostname; if empty use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false


###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################


# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
  ## The full HTTP or UDP URL for your InfluxDB instance.
  ##
  ## Multiple URLs can be specified for a single cluster; only ONE of the
  ## urls will be written to in each interval.
  # urls = ["unix:///var/run/influxdb.sock"]
  # urls = ["udp://127.0.0.1:8089"]
  urls = ["http://54.37.20.152:8086"]
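  ## For example (hypothetical hostnames), two members of the same cluster;
  ## Telegraf writes to one of them per interval:
  # urls = ["http://influx-a:8086", "http://influx-b:8086"]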

  ## The target database for metrics; will be created as needed.
  ## For UDP endpoints, the database must be configured on the server side.
  database = "monitor"

  ## The value of this tag will be used to determine the database. If this
  ## tag is not set, the 'database' option is used as the default.
  # database_tag = ""
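  ## For example (hypothetical tag name): with database_tag = "dbname", a
  ## metric tagged dbname=appmetrics is routed to the "appmetrics" database
  ## instead of the default above.
  # database_tag = "dbname"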

  ## If true, no CREATE DATABASE queries will be sent. Set to true when using
  ## Telegraf with a user without permissions to create databases, or when the
  ## database already exists.
  # skip_database_creation = false

  ## Name of existing retention policy to write to. Empty string writes to
  ## the default retention policy. Only takes effect when using HTTP.
  # retention_policy = ""

  ## Write consistency (clusters only); can be: "any", "one", "quorum", "all".
  ## Only takes effect when using HTTP.
  # write_consistency = "any"

  ## Timeout for HTTP messages.
  # timeout = "5s"

  ## HTTP Basic Auth
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"

  ## HTTP User-Agent
  # user_agent = "telegraf"

  ## UDP payload size is the maximum packet size to send.
  # udp_payload = "512B"

  ## Optional TLS Config for use on HTTP connections.
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## HTTP Proxy override; if unset, the standard proxy environment
  ## variables are consulted to determine which proxy, if any, should be used.
  # http_proxy = "http://corporate.proxy:3128"

  ## Additional HTTP headers
  # http_headers = {"X-Special-Header" = "Special-Value"}

  ## HTTP Content-Encoding for write request body; can be set to "gzip" to
  ## compress the body or "identity" to apply no encoding.
  # content_encoding = "identity"

  ## When true, Telegraf will output unsigned integers as unsigned values,
  ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
  ## integer values. Enabling this option will result in field type errors if
  ## existing data has been written.
  # influx_uint_support = false


###############################################################################
#                           PROCESSOR PLUGINS                                 #
###############################################################################


# # Convert values to another metric value type
# [[processors.converter]]
#   ## Tags to convert
#   ##
#   ## The table key determines the target type, and the array selects the
#   ## tag keys to convert. The array may contain globs.
#   ##   <target-type> = [<tag-key>...]
#   [processors.converter.tags]
#     string = []
#     integer = []
#     unsigned = []
#     boolean = []
#     float = []
#
#   ## Fields to convert
#   ##
#   ## The table key determines the target type, and the array selects the
#   ## field keys to convert. The array may contain globs.
#   ##   <target-type> = [<field-key>...]
#   [processors.converter.fields]
#     tag = []
#     string = []
#     integer = []
#     unsigned = []
#     boolean = []
#     float = []
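#
#   ## A sketch (hypothetical keys), assuming a tag listed under a numeric type
#   ## is converted into a field of that type, and fields listed under "tag"
#   ## become tags:
#   # [processors.converter.tags]
#   #   integer = ["port"]
#   # [processors.converter.fields]
#   #   tag = ["scan_id"]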


# # Map enum values according to given table.
# [[processors.enum]]
#   [[processors.enum.mapping]]
#     ## Name of the field to map
#     field = "status"
#
#     ## Name of the tag to map
#     # tag = "status"
#
#     ## Destination tag or field to be used for the mapped value. By default the
#     ## source tag or field is used, overwriting the original value.
#     dest = "status_code"
#
#     ## Default value to be used for all values not contained in the mapping
#     ## table. When unset, the unmodified value for the field will be used if no
#     ## match is found.
#     # default = 0
#
#     ## Table of mappings
#     [processors.enum.mapping.value_mappings]
#       green = 1
#       amber = 2
#       red = 3


# # Apply metric modifications using override semantics.
# [[processors.override]]
#   ## All modifications on inputs and aggregators can be overridden:
#   # name_override = "new_name"
#   # name_prefix = "new_name_prefix"
#   # name_suffix = "new_name_suffix"
#
#   ## Tags to be added (all values must be strings)
#   # [processors.override.tags]
#   #   additional_tag = "tag_value"


# # Parse a value in specified fields/tags and add the result as a new metric
# [[processors.parser]]
#   ## The names of the fields whose values will be parsed.
#   parse_fields = []
#
#   ## If true, incoming metrics are not emitted.
#   drop_original = false
#
#   ## If set to override, emitted metrics will be merged by overriding the
#   ## original metric using the newly parsed metrics.
#   merge = "override"
#
#   ## The data format to consume.
#   ## Each data format has its own unique set of configuration options; read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
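#
#   ## A minimal sketch (hypothetical field name): parse a "message" field
#   ## containing JSON and merge the parsed keys back into the original metric:
#   # parse_fields = ["message"]
#   # merge = "override"
#   # data_format = "json"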


# # Print all metrics that pass through this filter.
# [[processors.printer]]


# # Transforms tag and field values with regex pattern
# [[processors.regex]]
#   ## Tag and field conversions are defined in separate sub-tables
#   # [[processors.regex.tags]]
#   #   ## Tag to change
#   #   key = "resp_code"
#   #   ## Regular expression to match on a tag value
#   #   pattern = "^(\\d)\\d\\d$"
#   #   ## Pattern for constructing a new value (${1} represents first subgroup)
#   #   replacement = "${1}xx"
#
#   # [[processors.regex.fields]]
#   #   key = "request"
#   #   ## All the power of Go regular expressions is available here.
#   #   ## For example, named subgroups:
#   #   pattern = "^/api(?P<method>/[\\w/]+)\\S*"
#   #   replacement = "${method}"
#   #   ## If result_key is present, a new field will be created
#   #   ## instead of changing the existing field
#   #   result_key = "method"
#
#   ## Multiple conversions may be applied to one field sequentially.
#   ## Let's extract one more value:
#   # [[processors.regex.fields]]
#   #   key = "request"
#   #   pattern = ".*category=(\\w+).*"
#   #   replacement = "${1}"
#   #   result_key = "search_category"


# # Rename measurements, tags, and fields that pass through this filter.
# [[processors.rename]]
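#   ## A sketch of this processor's replace sub-tables (assumed syntax:
#   ## measurement/tag/field selects what to rename; dest gives the new name):
#   # [[processors.rename.replace]]
#   #   measurement = "network_interface_throughput"
#   #   dest = "throughput"
#   # [[processors.rename.replace]]
#   #   tag = "hostname"
#   #   dest = "host"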


# # Perform string processing on tags, fields, and measurements
# [[processors.strings]]
#   ## Convert a tag value to uppercase
#   # [[processors.strings.uppercase]]
#   #   tag = "method"
#
#   ## Convert a field value to lowercase and store in a new field
#   # [[processors.strings.lowercase]]
#   #   field = "uri_stem"
#   #   dest = "uri_stem_normalised"
#
#   ## Trim leading and trailing whitespace using the default cutset
#   # [[processors.strings.trim]]
#   #   field = "message"
#
#   ## Trim leading characters in cutset
#   # [[processors.strings.trim_left]]
#   #   field = "message"
#   #   cutset = "\t"
#
#   ## Trim trailing characters in cutset
#   # [[processors.strings.trim_right]]
#   #   field = "message"
#   #   cutset = "\r\n"
#
#   ## Trim the given prefix from the field
#   # [[processors.strings.trim_prefix]]
#   #   field = "my_value"
#   #   prefix = "my_"
#
#   ## Trim the given suffix from the field
#   # [[processors.strings.trim_suffix]]
#   #   field = "read_count"
#   #   suffix = "_count"
#
#   ## Replace all non-overlapping instances of old with new
#   # [[processors.strings.replace]]
#   #   measurement = "*"
#   #   old = ":"
#   #   new = "_"


# # Keep only the top k metrics of each period passing through this filter.
# [[processors.topk]]
#   ## How many seconds between aggregations
#   # period = 10
#
#   ## How many top metrics to return
#   # k = 10
#
#   ## Over which tags the aggregation should be done. Globs can be specified, in
#   ## which case any tag matching the glob will be aggregated over. If set to an
#   ## empty list, no aggregation over tags is done.
#   # group_by = ['*']
#
#   ## Over which fields the top k are calculated
#   # fields = ["value"]
#
#   ## What aggregation to use. Options: sum, mean, min, max
#   # aggregation = "mean"
#
#   ## Instead of the top k largest metrics, return the bottom k lowest metrics
#   # bottomk = false
#
#   ## The plugin assigns each metric a GroupBy tag generated from its name and
#   ## tags. If this setting is not the empty string, the plugin will add a
#   ## tag (whose name will be the value of this setting) to each metric with
#   ## the value of the calculated GroupBy tag. Useful for debugging.
#   # add_groupby_tag = ""
#
#   ## These settings provide a way to know the position of each metric in
#   ## the top k. The 'add_rank_fields' setting specifies for which
#   ## fields the position is required. If the list is non-empty, then a field
#   ## will be added to every metric for each string present in this
#   ## setting. This field will contain the ranking of the group that
#   ## the metric belonged to when aggregated over that field.
#   ## The name of the field will be the name of the aggregation field,
#   ## suffixed with the string '_topk_rank'.
#   # add_rank_fields = []
#
#   ## These settings provide a way to know what values the plugin is generating
#   ## when aggregating metrics. The 'add_aggregate_fields' setting specifies
#   ## for which fields the final aggregation value is required. If the
#   ## list is non-empty, then a field will be added to every metric for
#   ## each field present in this setting. This field will contain
#   ## the computed aggregation for the group that the metric belonged to when
#   ## aggregated over that field.
#   ## The name of the field will be the name of the aggregation field,
#   ## suffixed with the string '_topk_aggregate'.
#   # add_aggregate_fields = []
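#
#   ## For example (hypothetical field name), combining both options above:
#   # add_rank_fields = ["value"]        # adds a "value_topk_rank" field
#   # add_aggregate_fields = ["value"]   # adds a "value_topk_aggregate" field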


###############################################################################
#                           AGGREGATOR PLUGINS                                #
###############################################################################


# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#
#   ## Configures which basic stats to push as fields
#   # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]


# # Report the final metric of a series
# [[aggregators.final]]
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#
#   ## How long a series can go without updates before it is considered final.
#   series_timeout = "5m"


# # Create aggregate histograms.
# [[aggregators.histogram]]
#   ## The period in which to flush the aggregator.
#   period = "30s"
#
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#
#   ## If true, the histogram will be reset on flush instead
#   ## of accumulating the results.
#   reset = false
#
#   ## Example config that aggregates all fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
#   #   ## The name of the metric.
#   #   measurement_name = "cpu"
#
#   ## Example config that aggregates only specific fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
#   #   ## The name of the metric.
#   #   measurement_name = "diskio"
#   #   ## The specific fields of the metric to aggregate.
#   #   fields = ["io_time", "read_time", "write_time"]


# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false


# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#   ## The fields for which the values will be counted
#   fields = []
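#
#   ## For example (hypothetical field name): counting an HTTP "status" field
#   ## emits per-period count fields such as status_200 and status_503:
#   # fields = ["status"]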


###############################################################################
#                             INPUT PLUGINS                                   #
###############################################################################


# Read metrics about cpu usage
[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## If true, collect raw CPU time metrics.
  collect_cpu_time = false
  ## If true, compute and report the sum of all non-idle CPU states.
  report_active = false


# Read metrics about memory usage
[[inputs.mem]]
  # no configuration

# Read metrics about disk IO by device
[[inputs.diskio]]