# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared here to be active.
# To deactivate a plugin, comment out its name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file; simply surround
# them with ${}. For strings the variable must be within quotes (e.g., "${STR_VAR}");
# for numbers and booleans it should be plain (e.g., ${INT_VAR}, ${BOOL_VAR}).
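#
# For example (an illustrative sketch; INFLUX_URL and BATCH_SIZE are hypothetical
# environment variables, not names defined by Telegraf):
#   urls = ["${INFLUX_URL}"]            # string value, so it stays inside quotes
#   metric_batch_size = ${BATCH_SIZE}   # numeric value, so it is left unquoted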


# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"


# Configuration for the telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "1s"
  ## Rounds collection interval to 'interval'
  ## e.g., if interval = "10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## Maximum number of unwritten metrics per output.
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. The maximum flush time will be
  ## flush_interval + flush_jitter.
  flush_interval = "1s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## e.g., a jitter of 5s and an interval of 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ## e.g., when interval = "10s", precision will be "1s";
  ##       when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Log at debug level.
  # debug = false
  ## Log only error level messages.
  # quiet = false

  ## Log file name; the empty string means to log to stderr.
  # logfile = ""

  ## The logfile will be rotated after the time interval specified. When set
  ## to 0, no time-based rotation is performed.
  # logfile_rotation_interval = "0d"

  ## The logfile will be rotated when it becomes larger than the specified
  ## size. When set to 0, no size-based rotation is performed.
  # logfile_rotation_max_size = "0MB"

  ## Maximum number of rotated archives to keep; any older logs are deleted.
  ## If set to -1, no archives are removed.
  # logfile_rotation_max_archives = 5

  ## Override the default hostname; if empty, use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = true


###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################


# Configuration for sending metrics to InfluxDB
# [[outputs.influxdb]]
  ## The full HTTP or UDP URL for your InfluxDB instance.
  ##
  ## Multiple URLs can be specified for a single cluster; only ONE of the
  ## URLs will be written to each interval.
  # urls = ["unix:///var/run/influxdb.sock"]
  # urls = ["udp://127.0.0.1:8089"]
  # urls = ["http://127.0.0.1:8086"]

  ## The target database for metrics; will be created as needed.
  ## For a UDP endpoint, the database needs to be configured on the server side.
  # database = "telegraf"

  ## The value of this tag will be used to determine the database. If this
  ## tag is not set, the 'database' option is used as the default.
  # database_tag = ""

  ## If true, no CREATE DATABASE queries will be sent. Set to true when using
  ## Telegraf with a user without permissions to create databases or when the
  ## database already exists.
  # skip_database_creation = false

  ## Name of existing retention policy to write to. Empty string writes to
  ## the default retention policy. Only takes effect when using HTTP.
  # retention_policy = ""

  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
  ## Only takes effect when using HTTP.
  # write_consistency = "any"

  ## Timeout for HTTP messages.
  # timeout = "5s"

  ## HTTP Basic Auth
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"

  ## HTTP User-Agent
  # user_agent = "telegraf"

  ## UDP payload size is the maximum packet size to send.
  # udp_payload = "512B"

  ## Optional TLS Config for use on HTTP connections.
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## HTTP Proxy override. If unset, the standard proxy environment
  ## variables are consulted to determine which proxy, if any, should be used.
  # http_proxy = "http://corporate.proxy:3128"

  ## Additional HTTP headers
  # http_headers = {"X-Special-Header" = "Special-Value"}

  ## HTTP Content-Encoding for write request body, can be set to "gzip" to
  ## compress the body or "identity" to apply no encoding.
  # content_encoding = "identity"

  ## When true, Telegraf will output unsigned integers as unsigned values,
  ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
  ## integer values. Enabling this option will result in field type errors if
  ## existing data has been written.
  # influx_uint_support = false



[[outputs.file]]
  files = ["stdout"]
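
  ## A few other options the file output accepts, shown commented out as an
  ## illustrative sketch (verify against the outputs.file documentation for
  ## your Telegraf version):
  # files = ["stdout", "/tmp/metrics.out"]   # write to stdout and to a file
  # data_format = "influx"                   # serialization format for written metrics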

###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################


# # Convert values to another metric value type
# [[processors.converter]]
# ## Tags to convert
# ##
# ## The table key determines the target type, and the array of key-values
# ## selects the keys to convert. The array may contain globs.
# ## <target-type> = [<tag-key>...]
# [processors.converter.tags]
# string = []
# integer = []
# unsigned = []
# boolean = []
# float = []
#
# ## Fields to convert
# ##
# ## The table key determines the target type, and the array of key-values
# ## selects the keys to convert. The array may contain globs.
# ## <target-type> = [<field-key>...]
# [processors.converter.fields]
# tag = []
# string = []
# integer = []
# unsigned = []
# boolean = []
# float = []


# # Map enum values according to the given table.
# [[processors.enum]]
# [[processors.enum.mapping]]
# ## Name of the field to map
# field = "status"
#
# ## Name of the tag to map
# # tag = "status"
#
# ## Destination tag or field to be used for the mapped value. By default the
# ## source tag or field is used, overwriting the original value.
# dest = "status_code"
#
# ## Default value to be used for all values not contained in the mapping
# ## table. When unset, the unmodified value for the field will be used if no
# ## match is found.
# # default = 0
#
# ## Table of mappings
# [processors.enum.mapping.value_mappings]
# green = 1
# amber = 2
# red = 3

# # Apply metric modifications using override semantics.
# [[processors.override]]
# ## All modifications on inputs and aggregators can be overridden:
# # name_override = "new_name"
# # name_prefix = "new_name_prefix"
# # name_suffix = "new_name_suffix"
#
# ## Tags to be added (all values must be strings)
# # [processors.override.tags]
# # additional_tag = "tag_value"


# # Parse a value in a specified field/tag(s) and add the result as a new metric
# [[processors.parser]]
# ## The names of the fields whose values will be parsed.
# parse_fields = []
#
# ## If true, incoming metrics are not emitted.
# drop_original = false
#
# ## If set to override, emitted metrics will be merged by overriding the
# ## original metric using the newly parsed metrics.
# merge = "override"
#
# ## The data format to be read from the files.
# ## Each data format has its own unique set of configuration options; read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"


# # Print all metrics that pass through this filter.
# [[processors.printer]]


# # Transforms tag and field values with regex pattern
# [[processors.regex]]
# ## Tag and field conversions are defined in separate sub-tables
# # [[processors.regex.tags]]
# # ## Tag to change
# # key = "resp_code"
# # ## Regular expression to match on a tag value
# # pattern = "^(\\d)\\d\\d$"
# # ## Pattern for constructing a new value (${1} represents first subgroup)
# # replacement = "${1}xx"
#
# # [[processors.regex.fields]]
# # key = "request"
# # ## All the power of Go regular expressions is available here
# # ## For example, named subgroups
# # pattern = "^/api(?P<method>/[\\w/]+)\\S*"
# # replacement = "${method}"
# # ## If result_key is present, a new field will be created
# # ## instead of changing the existing field
# # result_key = "method"
#
# ## Multiple conversions may be applied to one field sequentially
# ## Let's extract one more value
# # [[processors.regex.fields]]
# # key = "request"
# # pattern = ".*category=(\\w+).*"
# # replacement = "${1}"
# # result_key = "search_category"


# # Rename measurements, tags, and fields that pass through this filter.
# [[processors.rename]]


# # Perform string processing on tags, fields, and measurements
# [[processors.strings]]
# ## Convert a tag value to uppercase
# # [[processors.strings.uppercase]]
# # tag = "method"
#
# ## Convert a field value to lowercase and store in a new field
# # [[processors.strings.lowercase]]
# # field = "uri_stem"
# # dest = "uri_stem_normalised"
#
# ## Trim leading and trailing whitespace using the default cutset
# # [[processors.strings.trim]]
# # field = "message"
#
# ## Trim leading characters in cutset
# # [[processors.strings.trim_left]]
# # field = "message"
# # cutset = "\t"
#
# ## Trim trailing characters in cutset
# # [[processors.strings.trim_right]]
# # field = "message"
# # cutset = "\r\n"
#
# ## Trim the given prefix from the field
# # [[processors.strings.trim_prefix]]
# # field = "my_value"
# # prefix = "my_"
#
# ## Trim the given suffix from the field
# # [[processors.strings.trim_suffix]]
# # field = "read_count"
# # suffix = "_count"
#
# ## Replace all non-overlapping instances of old with new
# # [[processors.strings.replace]]
# # measurement = "*"
# # old = ":"
# # new = "_"


# # Keep only the top k series over a period of time.
# [[processors.topk]]
# ## How many seconds between aggregations
# # period = 10
#
# ## How many top metrics to return
# # k = 10
#
# ## Over which tags should the aggregation be done. Globs can be specified, in
# ## which case any tag matching the glob will be aggregated over. If set to an
# ## empty list, no aggregation over tags is done.
# # group_by = ['*']
#
# ## Over which fields the top k are calculated
# # fields = ["value"]
#
# ## What aggregation to use. Options: sum, mean, min, max
# # aggregation = "mean"
#
# ## Instead of the top k largest metrics, return the bottom k lowest metrics
# # bottomk = false
#
# ## The plugin assigns each metric a GroupBy tag generated from its name and
# ## tags. If this setting is different from "", the plugin will add a
# ## tag (whose name will be the value of this setting) to each metric with
# ## the value of the calculated GroupBy tag. Useful for debugging
# # add_groupby_tag = ""
#
# ## These settings provide a way to know the position of each metric in
# ## the top k. The 'add_rank_fields' setting lets you specify for which
# ## fields the position is required. If the list is non-empty, then a field
# ## will be added to each and every metric for each string present in this
# ## setting. This field will contain the ranking of the group that
# ## the metric belonged to when aggregated over that field.
# ## The name of the field will be set to the name of the aggregation field,
# ## suffixed with the string '_topk_rank'
# # add_rank_fields = []
#
# ## These settings provide a way to know what values the plugin is generating
# ## when aggregating metrics. The 'add_aggregate_fields' setting lets you
# ## specify for which fields the final aggregation value is required. If the
# ## list is non-empty, then a field will be added to each and every metric for
# ## each field present in this setting. This field will contain
# ## the computed aggregation for the group that the metric belonged to when
# ## aggregated over that field.
# ## The name of the field will be set to the name of the aggregation field,
# ## suffixed with the string '_topk_aggregate'
# # add_aggregate_fields = []


###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################


# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## Configures which basic stats to push as fields
# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]


# # Report the final metric of a series
# [[aggregators.final]]
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## The time a series must go without updates before it is considered final.
# series_timeout = "5m"


# # Create aggregate histograms.
# [[aggregators.histogram]]
# ## The period in which to flush the aggregator.
# period = "30s"
#
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
#
# ## If true, the histogram will be reset on flush instead
# ## of accumulating the results.
# reset = false
#
# ## Example config that aggregates all fields of the metric.
# # [[aggregators.histogram.config]]
# # ## The set of buckets.
# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# # ## The name of the metric.
# # measurement_name = "cpu"
#
# ## Example config that aggregates only specific fields of the metric.
# # [[aggregators.histogram.config]]
# # ## The set of buckets.
# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# # ## The name of the metric.
# # measurement_name = "diskio"
# # ## The specific fields of the metric.
# # fields = ["io_time", "read_time", "write_time"]


# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false


# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
# ## The fields for which the values will be counted
# fields = []


###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################


###############################################################################
#                            SERVICE INPUT PLUGINS                            #
###############################################################################


# Stream and parse log file(s).
[[inputs.logparser]]
  ## Log files to parse.
  ## These accept standard unix glob matching rules, but with the addition of
  ## ** as a "super asterisk", e.g.:
  ##   /var/log/**.log     -> recursively find all .log files in /var/log
  ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
  ##   /var/log/apache.log -> only tail the apache log file
  files = ["C:/BlueFors/logs/maxigauge**.log"]

  ## Read files that currently exist from the beginning. Files that are created
  ## while telegraf is running (and that match the "files" globs) will always
  ## be read from the beginning.
  from_beginning = true

  ## Method used to watch for file updates. Can be either "inotify" or "poll".
  # watch_method = "inotify"

  ## Parse logstash-style "grok" patterns:
  [inputs.logparser.grok]
    ## This is a list of patterns to check the given log file(s) for.
    ## Note that adding patterns here increases processing time. The most
    ## efficient configuration is to have one pattern per logparser.
    ## Other common built-in patterns are:
    ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
    ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
    patterns = ['%{DATE_MAXI:timestamp:ts-"02-01-06.15:04:05"},CH1,P1 ,1, %{SCI_NUMBER:P1:float},0,1,CH2,P2 ,1, %{SCI_NUMBER:P2:float},0,1,CH3,P3 ,1, %{SCI_NUMBER:P3:float},0,1,CH4,P4 ,1, %{SCI_NUMBER:P4:float},0,1,CH5,P5 ,1, %{SCI_NUMBER:P5:float},0,1,CH6,P6,1, %{SCI_NUMBER:P6:float}']

    ## Name of the output measurement.
    measurement = "pressures"

    ## Full path(s) to custom pattern files.
    custom_pattern_files = []

    ## Custom patterns can also be defined here. Put one pattern per line.
    custom_patterns = '''
    DATE_MAXI %{DATE_EU},%{TIME}
    SCI_NUMBER %{INT}.%{INT}E%{INT}
    '''
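
    ## For reference, the pattern above is intended to match maxigauge log lines
    ## of roughly this shape (an illustrative example, not taken from a real log):
    ##   27-06-19,12:28:00,CH1,P1 ,1, 1.02E-3,0,1,CH2,P2 ,1, 9.87E-1,0,1,...
    ## DATE_MAXI captures the leading "day-month-year,time" stamp and each
    ## SCI_NUMBER captures one pressure reading in scientific notation.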

    ## Timezone allows you to provide an override for timestamps that
    ## don't already include an offset
    ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
    ##
    ## Default: "", which renders UTC
    ## Options are as follows:
    ##   1. Local            -- interpret based on machine localtime
    ##   2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
    ##   3. UTC              -- or blank/unspecified, will return the timestamp in UTC
    timezone = "Europe/Paris"

    ## When set to "disable", the timestamp will not be incremented if there is a
    ## duplicate.
    # unique_timestamp = "auto"