# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active. To deactivate a plugin,
# comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a
# config file would generate.
#
# Environment variables can be used anywhere in this config file, simply
# prepend them with $. For strings the variable must be within quotes
# (ie, "$STR_VAR"); for numbers and booleans they should be plain (ie,
# $INT_VAR, $BOOL_VAR).

# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the
  ## config file
  # user = "$USER"
# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "30s"
  ## Rounds collection interval to 'interval' ie, if interval="10s" then
  ## always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics. This controls the size of writes that
  ## Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## For failed writes, telegraf will cache metric_buffer_limit metrics
  ## for each output, and will flush this buffer on a successful write.
  ## Oldest metrics are dropped first when this buffer fills. This
  ## buffer only fills when writes fail to output plugin(s).
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random
  ## amount. Each plugin will sleep for a random time within jitter
  ## before collecting. This can be used to avoid many plugins querying
  ## things like sysfs at the same time, which can have a measurable
  ## effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. You shouldn't set this
  ## below interval. Maximum flush_interval will be flush_interval +
  ## flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to
  ## avoid large write spikes for users running a large number of
  ## telegraf instances. ie, a jitter of 5s and interval 10s means
  ## flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being
  ## 1s.
  ## ie, when interval = "10s", precision will be "1s"
  ##     when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each
  ## individual service input to set the timestamp at the appropriate
  ## precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Logging configuration: Run telegraf with debug log messages.
  debug = false
  ## Run telegraf in quiet mode (error log messages only).
  quiet = false
  ## Specify the log file name. The empty string means to log to stderr.
  logfile = ""

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false
###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
  ## The full HTTP or UDP URL for your InfluxDB instance.
  ##
  ## Multiple urls can be specified as part of the same cluster, this
  ## means that only ONE of the urls will be written to each interval.
  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
  urls = ["http://192.168.1.111:8086"] # required
  ## The target database for metrics (telegraf will create it if it
  ## does not exist).
  database = "telegraf" # required

  ## Name of existing retention policy to write to. Empty string writes
  ## to the default retention policy.
  retention_policy = ""
  ## Write consistency (clusters only), can be: "any", "one", "quorum",
  ## "all"
  write_consistency = "any"

  ## Write timeout (for the InfluxDB client), formatted as a string. If
  ## not provided, will default to 5s. 0s means no timeout (not
  ## recommended).
  timeout = "5s"
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
  ## Set the user agent for HTTP POSTs (can be useful for log
  ## differentiation)
  # user_agent = "telegraf"
  ## Set UDP payload size, defaults to InfluxDB UDP Client default (512
  ## bytes)
  # udp_payload = 512

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## HTTP Proxy Config
  # http_proxy = "http://corporate.proxy:3128"

  ## Optional HTTP headers
  # http_headers = {"X-Special-Header" = "Special-Value"}

  ## Compress each HTTP request payload using GZIP.
  # content_encoding = "gzip"
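  ## Illustration only (hypothetical hostnames): with a two-node
  ## cluster, telegraf writes each batch to ONE of these urls:
  # urls = ["http://influx-a.example.com:8086", "http://influx-b.example.com:8086"]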
# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
# ## Amon Server Key
# server_key = "my-server-key" # required.
#
# ## Amon Instance URL
# amon_instance = "https://youramoninstance" # required
#
# ## Connection timeout.
# # timeout = "5s"
# # Configuration for the AMQP server to send metrics to
# [[outputs.amqp]]
# ## AMQP url
# url = "amqp://localhost:5672/influxdb"
# ## AMQP exchange
# exchange = "telegraf"
# ## Auth method. PLAIN and EXTERNAL are supported.
# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl
# ## plugin as described here: https://www.rabbitmq.com/plugins.html
# # auth_method = "PLAIN"
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
#
# ## InfluxDB retention policy
# # retention_policy = "default"
# ## InfluxDB database
# # database = "telegraf"
#
# ## Write timeout, formatted as a string. If not provided, will
# ## default to 5s. 0s means no timeout (not recommended).
# # timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output. Each data format has its own unique set of
# ## configuration options, read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
# ## Amazon REGION
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Namespace for the CloudWatch MetricDatums
# namespace = "InfluxData/Telegraf"
# # Configuration for CrateDB to send metrics to.
# [[outputs.cratedb]]
# # A github.com/jackc/pgx connection string. See
# # https://godoc.org/github.com/jackc/pgx#ParseDSN
# url = "postgres://user:password@localhost/schema?sslmode=disable"
# # Timeout for all CrateDB queries.
# timeout = "5s"
# # Name of the table to store metrics in.
# table = "metrics"
# # If true, and the metrics table does not exist, create it
# # automatically.
# table_create = true
# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
# ## Datadog API key
# apikey = "my-secret-key" # required.
#
# ## Connection timeout.
# # timeout = "5s"

# # Send metrics to nowhere at all
# [[outputs.discard]]
# # no configuration
# # Configuration for Elasticsearch to send metrics to.
# [[outputs.elasticsearch]]
# ## The full HTTP endpoint URL for your Elasticsearch instance
# ## Multiple urls can be specified as part of the same cluster, this
# ## means that only ONE of the urls will be written to each interval.
# urls = [ "http://node1.es.example.com:9200" ] # required.
# ## Elasticsearch client timeout, defaults to "5s" if not set.
# timeout = "5s"
# ## Set to true to ask Elasticsearch for a list of all cluster nodes,
# ## thus it is not necessary to list all nodes in the urls config
# ## option.
# enable_sniffer = false
# ## Set the interval to check if the Elasticsearch nodes are
# ## available. Setting to "0s" will disable the health check (not
# ## recommended in production)
# health_check_interval = "10s"
# ## HTTP basic authentication details (eg. when using Shield)
# # username = "telegraf"
# # password = "mypassword"
#
# ## Index Config
# ## The target index for metrics (Elasticsearch will create it if it
# ## does not exist). You can use the date specifiers below to create
# ## indexes per time frame. The metric timestamp will be used to
# ## decide the destination index name
# # %Y - year (2016)
# # %y - last two digits of year (00..99)
# # %m - month (01..12)
# # %d - day of month (e.g., 01)
# # %H - hour (00..23)
# index_name = "telegraf-%Y.%m.%d" # required.
#
# ## Template Config
# ## Set to true if you want telegraf to manage its index template. If
# ## enabled it will create a recommended index template for telegraf
# ## indexes
# manage_template = true
# ## The template name used for telegraf indexes
# template_name = "telegraf"
# ## Set to true if you want telegraf to overwrite an existing template
# overwrite_template = false
# # Send telegraf metrics to file(s)
# [[outputs.file]]
# ## Files to write to, "stdout" is a specially handled file.
# files = ["stdout", "/tmp/metrics.out"]
#
# ## Data format to output. Each data format has its own unique set of
# ## configuration options, read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Configuration for Graphite server to send metrics to
# [[outputs.graphite]]
# ## TCP endpoint for your graphite instance. If multiple endpoints
# ## are configured, output will be load balanced. Only one of the
# ## endpoints will be written to with each iteration.
# servers = ["localhost:2003"]
# ## Prefix metrics name
# prefix = ""
# ## Graphite output template see
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# template = "host.tags.measurement.field"
# ## timeout in seconds for the write connection to graphite
# timeout = 2
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
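# ## Illustration only (hypothetical point): with the template above, a
# ## point cpu,host=web01,dc=us-east-1 usage_idle=98.2 would be written
# ## roughly as:
# ##   web01.us-east-1.cpu.usage_idle 98.2 <unix_timestamp>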
# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
# ## UDP endpoint for your graylog instance.
# servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
# ## Project API Token (required)
# api_token = "API Token" # required
# ## Prefix the metrics with a given name
# prefix = ""
# ## Stats output template (Graphite formatting) see
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
# template = "host.tags.measurement.field"
# ## Timeout in seconds to connect
# timeout = "2s"
# ## Display Communication to Instrumental
# debug = false
# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
# ## URLs of kafka brokers
# brokers = ["localhost:9092"]
# ## Kafka topic for producer messages
# topic = "telegraf"
#
# ## Optional topic suffix configuration. If the section is omitted,
# ## no suffix is used. Following topic suffix methods are supported:
# ##   measurement - suffix equals to separator + measurement's name
# ##   tags        - suffix equals to separator + specified tags' values
# ##                 interleaved with separator
#
# ## Suffix equals to "_" + measurement name
# # [outputs.kafka.topic_suffix]
# #   method = "measurement"
# #   separator = "_"
#
# ## Suffix equals to "__" + measurement's "foo" tag value.
# ## If there is no such tag, suffix equals to an empty string
# # [outputs.kafka.topic_suffix]
# #   method = "tags"
# #   keys = ["foo"]
# #   separator = "__"
#
# ## Suffix equals to "_" + measurement's "foo" and "bar"
# ## tag values, separated by "_". If there are no such tags, their
# ## values are treated as empty strings.
# # [outputs.kafka.topic_suffix]
# #   method = "tags"
# #   keys = ["foo", "bar"]
# #   separator = "_"
#
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
#
# ## CompressionCodec represents the various compression codecs
# ## recognized by Kafka in messages.
# ##   0 : No compression
# ##   1 : Gzip compression
# ##   2 : Snappy compression
# compression_codec = 0
#
# ## RequiredAcks is used in Produce Requests to tell the broker how
# ## many replica acknowledgements it must see before responding
# ##   0 : the producer never waits for an acknowledgement from the
# ##       broker. This option provides the lowest latency but the
# ##       weakest durability guarantees (some data will be lost when a
# ##       server fails).
# ##   1 : the producer gets an acknowledgement after the leader
# ##       replica has received the data. This option provides better
# ##       durability as the client waits until the server acknowledges
# ##       the request as successful (only messages that were written
# ##       to the now-dead leader but not yet replicated will be lost).
# ##  -1 : the producer gets an acknowledgement after all in-sync
# ##       replicas have received the data. This option provides the
# ##       best durability, we guarantee that no messages will be lost
# ##       as long as at least one in sync replica remains.
# required_acks = -1
#
# ## The total number of times to retry sending a message
# max_retry = 3
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional SASL Config
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
# ## Data format to output. Each data format has its own unique set of
# ## configuration options, read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Configuration for the AWS Kinesis output.
# [[outputs.kinesis]]
# ## Amazon REGION of kinesis endpoint.
# region = "ap-southeast-2"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Kinesis StreamName must exist prior to starting telegraf.
# streamname = "StreamName"
# ## DEPRECATED: PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
# ## DEPRECATED: If set the partitionKey will be a random UUID on every
# ## put. This allows for scaling across multiple shards in a stream.
# ## This will cause issues with ordering.
# use_random_partitionkey = false
# ## The partition key can be calculated using one of several methods:
# ##
# ## Use a static value for all writes:
# # [outputs.kinesis.partition]
# #   method = "static"
# #   key = "howdy"
# #
# ## Use a random partition key on each write:
# # [outputs.kinesis.partition]
# #   method = "random"
# #
# ## Use the measurement name as the partition key:
# # [outputs.kinesis.partition]
# #   method = "measurement"
# #
# ## Use the value of a tag for all writes, if the tag is not set the
# ## empty string will be used:
# # [outputs.kinesis.partition]
# #   method = "tag"
# #   key = "host"
#
# ## Data format to output. Each data format has its own unique set of
# ## configuration options, read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
#
# ## debug will show upstream aws messages.
# debug = false
# # Configuration for Librato API to send metrics to.
# [[outputs.librato]]
# ## Librato API Docs
# ## http://dev.librato.com/v1/metrics-authentication
# ## Librato API user
# api_user = "telegraf@influxdb.com" # required.
# ## Librato API token
# api_token = "my-secret-token" # required.
# ## Debug
# # debug = false
# ## Connection timeout.
# # timeout = "5s"
# ## Output source Template (same as graphite buckets) see
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
# ## This template is used in librato's source (not metric's name)
# template = "host"
# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
# servers = ["localhost:1883"] # required.
#
# ## MQTT outputs send metrics to this topic format
# ## "<topic_prefix>/<hostname>/<pluginname>/"
# ## ex: prefix/web01.example.com/mem
# topic_prefix = "telegraf"
#
# ## username and password to connect to the MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## client ID, if not set a random ID is generated
# # client_id = ""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output. Each data format has its own unique set of
# ## configuration options, read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Send telegraf measurements to NATS
# [[outputs.nats]]
# ## URLs of NATS servers
# servers = ["nats://localhost:4222"]
# ## Optional credentials
# # username = ""
# # password = ""
# ## NATS subject for producer messages
# subject = "telegraf"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output. Each data format has its own unique set of
# ## configuration options, read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
# ## Location of nsqd instance listening on TCP
# server = "localhost:4150"
# ## NSQ topic for producer messages
# topic = "telegraf"
#
# ## Data format to output. Each data format has its own unique set of
# ## configuration options, read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Configuration for OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
# ## prefix for metrics keys
# prefix = "my.specific.prefix."
#
# ## DNS name of the OpenTSDB server
# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will
# ## use the telnet API. "http://opentsdb.example.com" will use the
# ## HTTP API.
# host = "opentsdb.example.com"
#
# ## Port of the OpenTSDB server
# port = 4242
#
# ## Number of data points to send to OpenTSDB in HTTP requests. Not
# ## used with telnet API.
# httpBatchSize = 50
#
# ## Debug true - Prints OpenTSDB communication
# debug = false
#
# ## Separator separates measurement name from field
# separator = "_"
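# ## Illustration only (hypothetical point): with the prefix and
# ## separator above, measurement "cpu" with field "usage_idle" is
# ## sent as the OpenTSDB metric "my.specific.prefix.cpu_usage_idle".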
# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
# ## Address to listen on
# # listen = ":9273"
#
# ## Interval to expire metrics and not deliver to prometheus, 0 == no
# ## expiration
# # expiration_interval = "60s"
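# ## With the default listen address above, Prometheus can scrape the
# ## exposed metrics from http://<telegraf-host>:9273/metrics.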
# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
# ## The full TCP or UDP URL of the Riemann server
# url = "tcp://localhost:5555"
#
# ## Riemann event TTL, floating-point time in seconds. Defines how
# ## long an event is considered valid for in Riemann
# # ttl = 30.0
#
# ## Separator to use between measurement and field name in Riemann
# ## service name. This does not have any effect if
# ## 'measurement_as_attribute' is set to 'true'
# separator = "/"
#
# ## Set measurement name as Riemann attribute 'measurement', instead
# ## of prepending it to the Riemann service name
# # measurement_as_attribute = false
#
# ## Send string metrics as Riemann event states. Unless enabled all
# ## string metrics will be ignored
# # string_as_state = false
#
# ## A list of tag keys whose values get sent as Riemann tags. If
# ## empty, all Telegraf tag values will be sent as tags
# # tag_keys = ["telegraf","custom_tag"]
#
# ## Additional Riemann tags to send.
# # tags = ["telegraf-output"]
#
# ## Description for Riemann event
# # description_text = "metrics collected from telegraf"
#
# ## Riemann client write timeout, defaults to "5s" if not set.
# # timeout = "5s"
# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann_legacy]]
# ## URL of server
# url = "localhost:5555"
# ## transport protocol to use either tcp or udp
# transport = "tcp"
# ## separator to use between input name and field name in Riemann
# ## service name
# separator = " "
# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
# ## URL to connect to
# # address = "tcp://127.0.0.1:8094"
# # address = "tcp://example.com:http"
# # address = "tcp4://127.0.0.1:8094"
# # address = "tcp6://127.0.0.1:8094"
# # address = "tcp6://[2001:db8::1]:8094"
# # address = "udp://127.0.0.1:8094"
# # address = "udp4://127.0.0.1:8094"
# # address = "udp6://127.0.0.1:8094"
# # address = "unix:///tmp/telegraf.sock"
# # address = "unixgram:///tmp/telegraf.sock"
#
# ## Period between keep alive probes. Only applies to TCP sockets. 0
# ## disables keep alive probes. Defaults to the OS configuration.
# # keep_alive_period = "5m"
#
# ## Data format to generate. Each data format has its own unique set
# ## of configuration options, read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################

# # Print all metrics that pass through this filter.
# [[processors.printer]]
###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################

# # Create aggregate histograms.
# [[aggregators.histogram]]
# ## The period in which to flush the aggregator.
# period = "30s"
#
# ## If true, the original metric will be dropped by the aggregator
# ## and will not get sent to the output plugins.
# drop_original = false
#
# ## Example config that aggregates all fields of the metric.
# # [[aggregators.histogram.config]]
# #   ## The set of buckets.
# #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# #   ## The name of the metric.
# #   measurement_name = "cpu"
#
# ## Example config that aggregates only specific fields of the
# ## metric.
# # [[aggregators.histogram.config]]
# #   ## The set of buckets.
# #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# #   ## The name of the metric.
# #   measurement_name = "diskio"
# #   ## The concrete fields of the metric
# #   fields = ["io_time", "read_time", "write_time"]

# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments: The period on which to flush &
# ## clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the aggregator
# ## and will not get sent to the output plugins.
# drop_original = false
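# ## Illustration only (hypothetical field): for a field usage_idle,
# ## the minmax aggregator emits usage_idle_min and usage_idle_max
# ## once per period.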
###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################
# Read metrics about cpu usage
[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## If true, collect raw CPU time metrics.
  collect_cpu_time = false
  ## If true, compute and report the sum of all non-idle CPU states.
  report_active = false
# Read metrics about disk usage by mount point
[[inputs.disk]]
  ## By default, telegraf gathers stats for all mountpoints. Setting
  ## mount_points will restrict the stats to the specified mountpoints.
  mount_points = ["/"]
  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs
  ## (usually present on /run, /var/run, /dev/shm or /dev).
  ignore_fs = ["tmpfs", "devtmpfs", "devfs"]
# Read metrics about disk IO by device
[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions. Setting devices will restrict the stats to the
  ## specified devices.
  # devices = ["sda", "sdb"]
  ## Uncomment the following line if you need disk serial numbers.
  # skip_serial_number = false
  #
  ## On systems which support it, device metadata can be added in the
  ## form of tags. Currently only Linux is supported via udev
  ## properties. You can view available properties for a device by
  ## running: 'udevadm info -q property -n /dev/sda'
  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
  #
  ## Using the same metadata source as device_tags, you can also
  ## customize the name of the device via templates. The
  ## 'name_templates' parameter is a list of templates to try and apply
  ## to the device. The template may contain variables in the form of
  ## '$PROPERTY' or '${PROPERTY}'. The first template which does not
  ## contain any variables not present for the device is used as the
  ## device name tag. The typical use case is for LVM volumes, to get
  ## the VG/LV name instead of the near-meaningless DM-0 name.
  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
  # no configuration

# Read metrics about memory usage
[[inputs.mem]]
  # no configuration

# Get the number of processes and group them by status
[[inputs.processes]]
  # no configuration

# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration

# Read metrics about system load & uptime
[[inputs.system]]
  # no configuration

# Read metrics about network interface usage
[[inputs.net]]
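  ## By default, stats are gathered for all network interfaces; a
  ## hypothetical restriction to specific interfaces would look like:
  # interfaces = ["eth0"]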
# # Read stats from aerospike server(s)
# [[inputs.aerospike]]
# ## Aerospike servers to connect to (with port). This plugin will
# ## query all namespaces the aerospike server has configured and get
# ## stats for them.
# servers = ["localhost:3000"]
# # Read Apache status information (mod_status)
# [[inputs.apache]]
# ## An array of URLs to gather from, must be directed at the machine
# ## readable version of the mod_status page including the auto query
# ## string. Default is "http://localhost/server-status?auto".
# urls = ["http://localhost/server-status?auto"]
#
# ## Credentials for basic HTTP authentication.
# # username = "myuser"
# # password = "mypassword"
#
# ## Maximum time to receive response.
# # response_timeout = "5s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
# ## Bcache sets path
# ## If not specified, then default is:
# bcachePath = "/sys/fs/bcache"
#
# ## By default, telegraf gathers stats for all bcache devices.
# ## Setting devices will restrict the stats to the specified bcache
# ## devices.
# bcacheDevs = ["bcache0"]
# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
# # This is the context root used to compose the jolokia url
# context = "/jolokia/read"
# ## List of cassandra servers exposing jolokia read service
# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
# ## List of metrics collected on above servers. Each metric consists
# ## of a jmx path. This will collect all heap memory usage metrics
# ## from the jvm and ReadLatency metrics for all keyspaces and
# ## tables. "type=Table" in the query works with Cassandra 3.0. Older
# ## versions might need to use "type=ColumnFamily"
# metrics = [
#   "/java.lang:type=Memory/HeapMemoryUsage",
#   "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
# ]
# # Collects performance metrics from the MON and OSD nodes in a Ceph
# # storage cluster.
# [[inputs.ceph]]
# ## This is the recommended interval to poll. Too frequent and you
# ## will lose data points due to timeouts during rebalancing and
# ## recovery
# interval = '1m'
#
# ## All configuration values are optional, defaults are shown below
#
# ## location of ceph binary
# ceph_binary = "/usr/bin/ceph"
#
# ## directory in which to look for socket files
# socket_dir = "/var/run/ceph"
#
# ## prefix of MON and OSD socket files, used to determine socket type
# mon_prefix = "ceph-mon"
# osd_prefix = "ceph-osd"
#
# ## suffix used to identify socket files
# socket_suffix = "asok"
#
# ## Ceph user to authenticate as
# ceph_user = "client.admin"
#
# ## Ceph configuration to use to locate the cluster
# ceph_config = "/etc/ceph/ceph.conf"
#
# ## Whether to gather statistics via the admin socket
# gather_admin_socket_stats = true
#
# ## Whether to gather statistics via ceph commands
# gather_cluster_stats = false
# # Read specific statistics per cgroup
# [[inputs.cgroup]]
# ## Directories in which to look for files, globs are supported.
# ## Consider restricting paths to the set of cgroups you really want
# ## to monitor if you have a large number of cgroups, to avoid any
# ## cardinality issues.
# # paths = [
# #   "/cgroup/memory",
# #   "/cgroup/memory/child1",
# #   "/cgroup/memory/child2/*",
# # ]
# ## cgroup stat fields, as file names, globs are supported. these
# ## file names are appended to each path from above.
# # files = ["memory.*usage*", "memory.limit_in_bytes"]
# # Get standard chrony metrics, requires chronyc executable.
# [[inputs.chrony]]
# ## If true, chronyc tries to perform a DNS lookup for the time
# ## server.
# # dns_lookup = false
# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
# ## Amazon Region
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# # The minimum period for Cloudwatch metrics is 1 minute (60s).
# # However not all metrics are made available to the 1 minute period.
# # Some are collected at 3 minute, 5 minute, or larger intervals. See
# # https://aws.amazon.com/cloudwatch/faqs/#monitoring. Note that if a
# # period is configured that is smaller than the minimum for a
# # particular metric, that metric will not be returned by the
# # Cloudwatch API and will not be collected by Telegraf.
# #
# ## Requested CloudWatch aggregation Period (required - must be a
# ## multiple of 60s)
# period = "5m"
#
# ## Collection Delay (required - must account for metrics
# ## availability via CloudWatch API)
# delay = "5m"
#
# ## Recommended: use metric 'interval' that is a multiple of 'period'
# ## to avoid gaps or overlap in pulled data
# interval = "5m"
#
# ## Configure the TTL for the internal cache of metrics. Defaults to
# ## 1 hr if not specified
# #cache_ttl = "10m"
#
# ## Metric Statistic Namespace (required)
# namespace = "AWS/ELB"
#
# ## Maximum requests per second. Note that the global default AWS
# ## rate limit is 400 reqs/sec, so if you define multiple namespaces,
# ## these should add up to a maximum of 400. Optional - default value
# ## is 200. See
# ## http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
# ratelimit = 200
#
# ## Metrics to Pull (optional)
# ## Defaults to all Metrics in Namespace if nothing is provided
# ## Refreshes Namespace available metrics every 1h
# #[[inputs.cloudwatch.metrics]]
# #  names = ["Latency", "RequestCount"]
# #
# #  ## Dimension filters for Metric (optional)
# #  [[inputs.cloudwatch.metrics.dimensions]]
# #    name = "LoadBalancerName"
# #    value = "p-example"
# # Collects conntrack stats from the configured directories and files.
# [[inputs.conntrack]]
# ## The following defaults would work with multiple versions of
# ## conntrack. Note the nf_ and ip_ filename prefixes are mutually
# ## exclusive across kernel versions, as are the directory
# ## locations.
#
# ## Superset of filenames to look for within the conntrack dirs.
# ## Missing files will be ignored.
# files = ["ip_conntrack_count","ip_conntrack_max",
#          "nf_conntrack_count","nf_conntrack_max"]
#
# ## Directories to search within for the conntrack files above.
# ## Missing directories will be ignored.
# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
# # Gather health check statuses from services registered in Consul
# [[inputs.consul]]
# ## Most of these values default to the ones configured on a Consul's
# ## agent level.
# ## Optional Consul server address (default: "localhost")
# # address = "localhost"
# ## Optional URI scheme for the Consul server (default: "http")
# # scheme = "http"
# ## Optional ACL token used in every request (default: "")
# # token = ""
# ## Optional username used for request HTTP Basic Authentication
# ## (default: "")
# # username = ""
# ## Optional password used for HTTP Basic Authentication (default:
# ## "")
# # password = ""
# ## Optional data centre to query the health checks from (default:
# ## "")
# # datacentre = ""
# # Read metrics from one or many couchbase clusters
# [[inputs.couchbase]]
# ## specify servers via a url matching:
# ##   [protocol://][:password]@address[:port]
# ## e.g.
# ##   http://couchbase-0.example.com/
# ##   http://admin:secret@couchbase-0.example.com:8091/
# ##
# ## If no servers are specified, then localhost is used as the host.
# ## If no protocol is specified, HTTP is used. If no port is
# ## specified, 8091 is used.
# servers = ["http://localhost:8091"]
# # Read CouchDB Stats from one or more servers
# [[inputs.couchdb]]
# ## Works with CouchDB stats endpoints out of the box.
# ## Multiple HOSTs from which to read CouchDB stats:
# hosts = ["http://localhost:8086/_stats"]
# # Read metrics from one or many disque servers
# [[inputs.disque]]
# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port and password. ie disque://localhost,
# ## disque://10.10.3.33:18832, 10.0.0.1:10000, etc. If no servers are
# ## specified, then localhost is used as the host.
# servers = ["localhost"]
# # Provide a native collection for dmsetup based statistics for
# # dm-cache
# [[inputs.dmcache]]
# ## Whether to report per-device stats or not
# per_device = true
# # Query given DNS server and give statistics
# [[inputs.dns_query]]
# ## servers to query
# servers = ["8.8.8.8"]
#
# ## Network is the network protocol name.
# # network = "udp"
#
# ## Domains or subdomains to query.
# # domains = ["."]
#
# ## Query record type. Possible values: A, AAAA, CNAME, MX, NS, PTR,
# ## TXT, SOA, SPF, SRV.
# # record_type = "A"
#
# ## Dns server port.
# # port = 53
#
# ## Query timeout in seconds.
# # timeout = 2
# # Read metrics about docker containers
[[inputs.docker]]
# ## Docker Endpoint
# ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
# ##   To use environment variables (ie, docker-machine), set
# ##   endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
#
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
# ## Containers to include and exclude. Globs accepted. Note that an
# ## empty array for both will include all containers
# container_name_include = []
# container_name_exclude = []
#
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
# ## Whether to report for each container per-device blkio (8:0,
# ## 8:1...) and network (eth0, eth1, ...) stats or not
# perdevice = true
# ## Whether to report for each container total blkio and network
# ## stats or not
# total = false
# ## Which environment variables should we use as a tag
# # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
# ## docker labels to include and exclude as tags. Globs accepted.
# ## Note that an empty array for both will include all labels as tags
# docker_label_include = []
# docker_label_exclude = []
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read statistics from one or many dovecot servers
# [[inputs.dovecot]]
# ## specify dovecot servers via an address:port list
# ##   e.g.
# ##     localhost:24242
# ##
# ## If no servers are specified, then localhost is used as the host.
# servers = ["localhost:24242"]
# ## Type is one of "user", "domain", "ip", or "global"
# type = "global"
# ## Wildcard matches like "*.com". An empty string "" is same as "*"
# ## If type = "ip" filters should be <IP/network>
# filters = [""]
# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
# ## specify a list of one or more Elasticsearch servers
# # you can add username and password to your url to use basic
# # authentication: servers = ["http://user:pass@localhost:9200"]
# servers = ["http://localhost:9200"]
#
# ## Timeout for HTTP requests to the elastic search server(s)
# http_timeout = "5s"
#
# ## When local is true (the default), the node will read only its own
# ## stats. Set local to false when you want to read the node stats
# ## from all nodes of the cluster.
# local = true
#
# ## Set cluster_health to true when you want to also obtain cluster
# ## health stats
# cluster_health = false
#
# ## Set cluster_stats to true when you want to also obtain cluster
# ## stats from the Master node.
# cluster_stats = false
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
# ## Commands array
# commands = [
#   "/tmp/test.sh",
#   "/usr/bin/mycollector --foo=bar",
#   "/tmp/collect_*.sh"
# ]
#
# ## Timeout for each command to complete.
# timeout = "5s"
#
# ## measurement name suffix (for separating different commands)
# name_suffix = "_mycollector"
#
# ## Data format to consume. Each data format has its own unique set
# ## of configuration options, read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
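# ## Illustration only (hypothetical script): with data_format =
# ## "influx", a command like /tmp/test.sh is expected to print influx
# ## line protocol on stdout, e.g.
# ##   mymeasurement,mytag=a value=42i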
# # Read metrics from fail2ban.
# [[inputs.fail2ban]]
# ## Use sudo to run fail2ban-client
# use_sudo = false

# # Read stats about given file(s)
# [[inputs.filestat]]
# ## Files to gather stats about. These accept standard unix glob
# ## matching rules, but with the addition of ** as a "super
# ## asterisk". ie:
# ##   "/var/log/**.log"     -> recursively find all .log files in /var/log
# ##   "/var/log/*/*.log"    -> find all .log files with a parent dir in /var/log
# ##   "/var/log/apache.log" -> just tail the apache log file
# ##
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/log/**.log"]
# ## If true, read the entire file and calculate an md5 checksum.
# md5 = false
# # Read metrics exposed by fluentd in_monitor plugin
# [[inputs.fluentd]]
# ## This plugin reads information exposed by fluentd (using
# ## /api/plugins.json endpoint).
# ##
# ## Endpoint:
# ## - only one URI is allowed
# ## - https is not supported
# endpoint = "http://localhost:24220/api/plugins.json"
#
# ## Define which plugins have to be excluded (based on "type" field -
# ## e.g. monitor_agent)
# exclude = [
#   "monitor_agent",
#   "dummy",
# ]
# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
# ## API endpoint, currently supported API:
# ##
# ## - multiple  (Ex http://<host>:12900/system/metrics/multiple)
# ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
# ##
# ## For namespace endpoint, the metrics array will be ignored for
# ## that call. Endpoint can contain namespace and multiple type
# ## calls.
# ##
# ## Please check http://[graylog-server-ip]:12900/api-browser for
# ## full list of endpoints
# servers = [
#   "http://[graylog-server-ip]:12900/system/metrics/multiple",
# ]
#
# ## Metrics list
# ## List of metrics can be found on Graylog webservice documentation,
# ## or by hitting the web service api at:
# ##   http://[graylog-host]:12900/system/metrics
# metrics = [
#   "jvm.cl.loaded",
#   "jvm.memory.pools.Metaspace.committed"
# ]
#
# ## Username and password
# username = ""
# password = ""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read metrics of haproxy, via socket or csv stats page
# [[inputs.haproxy]]
# ## An array of address to gather stats about. Specify an ip or
# ## hostname with optional port. ie localhost, 10.10.3.33:1936, etc.
# ## Make sure you specify the complete path to the stats endpoint
# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
#
# ## If no servers are specified, then default to
# ## 127.0.0.1:1936/haproxy?stats
# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
#
# ## You can also use local socket with standard wildcard globbing.
# ## Server address not starting with 'http' will be treated as a
# ## possible socket, so both examples below are valid.
# # servers = ["socket:/run/haproxy/admin.sock",
# #            "/run/haproxy/*.sock"]
#
# ## By default, some of the fields are renamed from what haproxy
# ## calls them. Setting this option to true results in the plugin
# ## keeping the original field names.
# # keep_field_names = true
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Monitor disks' temperatures using hddtemp
# [[inputs.hddtemp]]
# ## By default, telegraf gathers temps data from all disks detected
# ## by the hddtemp.
# ##
# ## Only collect temps from the selected disks.
# ##
# ## A * as the device name will return the temperature values of all
# ## disks.
# ##
# # address = "127.0.0.1:7634"
# # devices = ["sda", "*"]
# # HTTP/HTTPS request given an address, a method and a timeout
# [[inputs.http_response]]
# ## Server address (default http://localhost)
# # address = "http://localhost"
#
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
#
# ## HTTP Request Method
# # method = "GET"
#
# ## Whether to follow redirects from the server (defaults to false)
# # follow_redirects = false
#
# ## Optional HTTP Request Body
# # body = '''
# # {'fake':'data'}
# # '''
#
# ## Optional substring or regex match in body of the response
# # response_string_match = "\"service_status\": \"up\""
# # response_string_match = "ok"
# # response_string_match = "\".*_status\".?:.?\"up\""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# #   Host = "github.com"
# # Read flattened metrics from one or more JSON HTTP endpoints
# [[inputs.httpjson]]
# ## NOTE This plugin only reads numerical measurements, strings and
# ## booleans will be ignored.
#
# ## Name for the service being polled. Will be appended to the name
# ## of the measurement e.g. httpjson_webserver_stats
# ##
# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix
# ## instead.
# name = "webserver_stats"
#
# ## URL of each server in the service's cluster
# servers = [
#   "http://localhost:9999/stats/",
#   "http://localhost:9998/stats/",
# ]
# ## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
#
# ## HTTP method to use: GET or POST (case-sensitive)
# method = "GET"
#
# ## List of tag names to extract from top-level of JSON server
# ## response
# # tag_keys = [
# #   "my_tag_1",
# #   "my_tag_2"
# # ]
#
# ## HTTP parameters (all values must be strings). For "GET"
# ## requests, data will be included in the query. For "POST"
# ## requests, data will be included in the request body as
# ## "x-www-form-urlencoded".
# # [inputs.httpjson.parameters]
# #   event_type = "cpu_spike"
# #   threshold = "0.75"
#
# ## HTTP Headers (all values must be strings)
# # [inputs.httpjson.headers]
# #   X-Auth-Token = "my-xauth-token"
# #   apiVersion = "v1"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.influxdb]]
# ## Works with InfluxDB debug endpoints out of the box, but other
# ## services can use this format too. See the influxdb plugin's
# ## README for more details.
#
# ## Multiple URLs from which to read InfluxDB-formatted JSON
# ## Default is "http://localhost:8086/debug/vars".
# urls = [
#   "http://localhost:8086/debug/vars"
# ]
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## http request & header timeout
# timeout = "5s"
# # Collect statistics about itself
# [[inputs.internal]]
# ## If true, collect telegraf memory stats.
# # collect_memstats = true

# # This plugin gathers interrupts data from /proc/interrupts and
# # /proc/softirqs.
# [[inputs.interrupts]]
# ## To filter which IRQs to collect, make use of tagpass / tagdrop,
# ## i.e.
# # [inputs.interrupts.tagdrop]
# #   irq = [ "NET_RX", "TASKLET" ]
# # Read metrics from the bare metal servers via IPMI
# [[inputs.ipmi_sensor]]
# ## optionally specify the path to the ipmitool executable
# # path = "/usr/bin/ipmitool"
# #
# ## optionally specify one or more servers via a url matching
# ##   [username[:password]@][protocol[(address)]]
# ## e.g.
# ##   root:passwd@lan(127.0.0.1)
# ##
# ## if no servers are specified, local machine sensor stats will be
# ## queried
# ##
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
#
# ## Recommended: use metric 'interval' that is a multiple of 'timeout'
# ## to avoid gaps or overlap in pulled data
# interval = "30s"
#
# ## Timeout for the ipmitool command to complete
# timeout = "20s"
# # Gather packets and bytes throughput from iptables
# [[inputs.iptables]]
# ## iptables requires root access on most systems. Setting 'use_sudo'
# ## to true will make use of sudo to run iptables. Users must
# ## configure sudo to allow the telegraf user to run iptables with no
# ## password. iptables can be restricted to only the list command
# ## "iptables -nvL".
# use_sudo = false
# ## Setting 'use_lock' to true runs iptables with the "-w" option.
# ## Adjust your sudo settings appropriately if using this option
# ## ("iptables -wnvl")
# use_lock = false
# ## defines the table to monitor:
# table = "filter"
# ## defines the chains to monitor. NOTE: iptables rules without a
# ## comment will not be monitored. Read the plugin documentation for
# ## more information.
# chains = [ "INPUT" ]
# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# # DEPRECATED: the jolokia plugin has been deprecated in favor of the
# # jolokia2 plugin, see
# # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
#
# ## This is the context root used to compose the jolokia url
# ## NOTE that Jolokia requires a trailing slash at the end of the
# ## context root
# ## NOTE that your jolokia security policy must allow for POST
# ## requests.
# context = "/jolokia/"
#
# ## This specifies the mode used
# # mode = "proxy"
# #
# ## When in proxy mode this section is used to specify further proxy
# ## address configurations. Remember to change host address to fit
# ## your environment.
# # [inputs.jolokia.proxy]
# #   host = "127.0.0.1"
# #   port = "8080"
#
# ## Optional http timeouts
# ##
# ## response_header_timeout, if non-zero, specifies the amount of
# ## time to wait for a server's response headers after fully writing
# ## the request.
# # response_header_timeout = "3s"
# ##
# ## client_timeout specifies a time limit for requests made by this
# ## client. Includes connection time, any redirects, and reading the
# ## response body.
# # client_timeout = "4s"
#
# ## Attribute delimiter
# ##
# ## When multiple attributes are returned for a single
# ## [inputs.jolokia.metrics], the field name is a concatenation of
# ## the metric name, and the attribute name, separated by the given
# ## delimiter.
# # delimiter = "_"
#
# ## List of servers exposing jolokia read service
# [[inputs.jolokia.servers]]
#   name = "as-server-01"
#   host = "127.0.0.1"
#   port = "8080"
#   # username = "myuser"
#   # password = "mypassword"
#
# ## List of metrics collected on above servers. Each metric consists
# ## of a name, a jmx path and either a pass or drop slice attribute.
# ## This collects all heap memory usage metrics.
# [[inputs.jolokia.metrics]]
#   name = "heap_memory_usage"
#   mbean = "java.lang:type=Memory"
#   attribute = "HeapMemoryUsage"
#
# ## This collects thread count metrics.
# [[inputs.jolokia.metrics]]
#   name = "thread_count"
#   mbean = "java.lang:type=Threading"
#   attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
#
# ## This collects the number of class loaded/unloaded count metrics.
# [[inputs.jolokia.metrics]]
#   name = "class_count"
#   mbean = "java.lang:type=ClassLoading"
#   attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
# # Read JMX metrics from a Jolokia REST agent endpoint
# [[inputs.jolokia2_agent]]
# # default_tag_prefix      = ""
# # default_field_prefix    = ""
# # default_field_separator = "."
#
# # Add agents URLs to query
# urls = ["http://localhost:8080/jolokia"]
# # username = ""
# # password = ""
# # response_timeout = "5s"
#
# ## Optional SSL config
# # ssl_ca   = "/var/private/ca.pem"
# # ssl_cert = "/var/private/client.pem"
# # ssl_key  = "/var/private/client-key.pem"
# # insecure_skip_verify = false
#
# ## Add metrics to read
# [[inputs.jolokia2_agent.metric]]
#   name  = "java_runtime"
#   mbean = "java.lang:type=Runtime"
#   paths = ["Uptime"]
# # Read JMX metrics from a Jolokia REST proxy endpoint
# [[inputs.jolokia2_proxy]]
# # default_tag_prefix      = ""
# # default_field_prefix    = ""
# # default_field_separator = "."
#
# ## Proxy agent
# url = "http://localhost:8080/jolokia"
# # username = ""
# # password = ""
# # response_timeout = "5s"
#
# ## Optional SSL config
# # ssl_ca   = "/var/private/ca.pem"
# # ssl_cert = "/var/private/client.pem"
# # ssl_key  = "/var/private/client-key.pem"
# # insecure_skip_verify = false
#
# ## Add proxy targets to query
# # default_target_username = ""
# # default_target_password = ""
# [[inputs.jolokia2_proxy.target]]
#   url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
#   # username = ""
#   # password = ""
#
# ## Add metrics to read
# [[inputs.jolokia2_proxy.metric]]
#   name  = "java_runtime"
#   mbean = "java.lang:type=Runtime"
#   paths = ["Uptime"]
# # Read Kapacitor-formatted JSON metrics from one or more HTTP
# # endpoints
# [[inputs.kapacitor]]
# ## Multiple URLs from which to read Kapacitor-formatted JSON
# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
# urls = [
#   "http://localhost:9092/kapacitor/v1/debug/vars"
# ]
#
# ## Time limit for http requests
# timeout = "5s"
1352# # Get kernel statistics from /proc/vmstat
1353# [[inputs.kernel_vmstat]]
1354# # no configuration
1355# # Read metrics from the kubernetes kubelet api
1356# [[inputs.kubernetes]]
1357# ## URL for the kubelet
1358# url = "http://1.1.1.1:10255"
1359#
1360# ## Use bearer token for authorization
1361# # bearer_token = /path/to/bearer/token
1362#
1363# ## Set response_timeout (default 5 seconds)
1364# # response_timeout = "5s"
1365#
1366# ## Optional SSL Config
# # ssl_ca = "/path/to/cafile"
# # ssl_cert = "/path/to/certfile"
# # ssl_key = "/path/to/keyfile"
1369# ## Use SSL but skip chain & host verification
1370# # insecure_skip_verify = false
1371# # Read metrics from a LeoFS Server via SNMP
1372# [[inputs.leofs]]
1373# ## An array of URLs of the form:
1374# ## host [ ":" port]
1375# servers = ["127.0.0.1:4020"]
1376# # Provides Linux sysctl fs metrics
1377# [[inputs.linux_sysctl_fs]]
1378# # no configuration
1379# # Read metrics from local Lustre service on OST, MDS
1380# [[inputs.lustre2]]
# ## An array of /proc globs to search for Lustre stats. If not
# ## specified, the default will work on Lustre 2.5.x.
# ##
# # ost_procfiles = [
# #   "/proc/fs/lustre/obdfilter/*/stats",
# #   "/proc/fs/lustre/osd-ldiskfs/*/stats",
# #   "/proc/fs/lustre/obdfilter/*/job_stats",
# # ]
# # mds_procfiles = [
# #   "/proc/fs/lustre/mdt/*/md_stats",
# #   "/proc/fs/lustre/mdt/*/job_stats",
# # ]
1390# # Gathers metrics from the /3.0/reports MailChimp API
1391# [[inputs.mailchimp]]
# ## MailChimp API key, obtained from
# ## https://admin.mailchimp.com/account/api/
1394# api_key = "" # required
1395# ## Reports for campaigns sent more than days_old ago will not be
1396# ## collected. 0 means collect all.
1397# days_old = 0
# ## Campaign ID to get; if empty, gets all campaigns. This option
# ## overrides days_old.
1400# # campaign_id = ""
1401# # Read metrics from one or many memcached servers
1402# [[inputs.memcached]]
# ## An array of addresses to gather stats about. Specify an ip or
# ## hostname with optional port. ie localhost, 10.0.0.1:11211, etc.
1405# servers = ["localhost:11211"]
1406# # unix_sockets = ["/var/run/memcached.sock"]
1407# # Telegraf plugin for gathering metrics from N Mesos masters
1408# [[inputs.mesos]]
1409# ## Timeout, in ms.
1410# timeout = 100
1411# ## A list of Mesos masters.
1412# masters = ["localhost:5050"]
1413# ## Master metrics groups to be collected, by default, all enabled.
# master_collections = [
#   "resources", "master", "system", "agents", "frameworks", "tasks",
#   "messages", "evqueue", "registrar",
# ]
1417# ## A list of Mesos slaves, default is []
1418# # slaves = []
1419# ## Slave metrics groups to be collected, by default, all enabled.
# # slave_collections = [
# #   "resources", "agent", "system", "executors", "tasks",
# #   "messages",
# # ]
1423# # Collects scores from a minecraft server's scoreboard using the RCON
1424# # protocol
1425# [[inputs.minecraft]]
1426# ## server address for minecraft
1427# # server = "localhost"
1428# ## port for RCON
1429# # port = "25575"
# ## RCON password for the minecraft server
1431# # password = ""
1432# # Read metrics from one or many MongoDB servers
1433# [[inputs.mongodb]]
# ## An array of URLs of the form:
# ##   "mongodb://" [user ":" pass "@"] host [ ":" port]
# ## For example:
# ##   mongodb://user:auth_key@10.10.3.30:27017,
# ##   mongodb://10.10.3.33:18832,
# servers = ["mongodb://127.0.0.1:27017"]
# gather_perdb_stats = false
1439#
1440# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
1443# ## Use SSL but skip chain & host verification
1444# # insecure_skip_verify = false
1445# # Read metrics from one or many mysql servers
1446# [[inputs.mysql]]
1447# ## specify servers via a url matching:
1448# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
1449# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
1450# ## e.g.
1451# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
1452# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
1453# #
1454# ## If no servers are specified, then localhost is used as the host.
1455# servers = ["tcp(127.0.0.1:3306)/"]
# ## the limits for metrics from perf_events_statements
# perf_events_statements_digest_text_limit = 120
# perf_events_statements_limit = 250
# perf_events_statements_time_limit = 86400
1460# #
# ## if the list is empty, then metrics are gathered from all
# ## database tables
1463# table_schema_databases = []
1464# #
# ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases
# ## provided in the above list
1467# gather_table_schema = false
1468# #
1469# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
1470# gather_process_list = true
1471# #
1472# ## gather thread state counts from
1473# ## INFORMATION_SCHEMA.USER_STATISTICS
1474# gather_user_statistics = true
1475# #
1476# ## gather auto_increment columns and max values from information
1477# ## schema
1478# gather_info_schema_auto_inc = true
1479# #
1480# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
1481# gather_innodb_metrics = true
1482# #
1483# ## gather metrics from SHOW SLAVE STATUS command output
1484# gather_slave_status = true
1485# #
1486# ## gather metrics from SHOW BINARY LOGS command output
1487# gather_binary_logs = false
1488# #
1489# ## gather metrics from
1490# ## PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
1491# gather_table_io_waits = false
1492# #
1493# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
1494# gather_table_lock_waits = false
1495# #
1496# ## gather metrics from
1497# ## PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
1498# gather_index_io_waits = false
1499# #
1500# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
1501# gather_event_waits = false
1502# #
1503# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
1504# gather_file_events_stats = false
1505# #
1506# ## gather metrics from
1507# ## PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
1508# gather_perf_events_statements = false
1509# #
1510# ## Some queries we may want to run less often (such as SHOW GLOBAL
1511# ## VARIABLES)
1512# interval_slow = "30m"
1513#
1514# ## Optional SSL Config (will be used if tls=custom parameter
1515# ## specified in server uri)
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
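# ## e.g. a DSN that activates the certs above via tls=custom
# ## (hypothetical credentials):
# # servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=custom"]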
1518# # Read metrics about network interface usage
1519# [[inputs.net]]
# ## By default, telegraf gathers stats from any up interface
# ## (excluding loopback). Setting interfaces will tell it to gather
# ## stats from these explicit interfaces, regardless of status.
1523# ##
1524# # interfaces = ["eth0"]
1525# # TCP or UDP 'ping' given url and collect response time in seconds
1526# [[inputs.net_response]]
# ## Protocol, must be "tcp" or "udp". NOTE: because the "udp"
# ## protocol does not respond to requests, it requires a send/expect
# ## string pair (see below).
1530# protocol = "tcp"
1531# ## Server address (default localhost)
1532# address = "localhost:80"
1533# ## Set timeout
1534# timeout = "1s"
1535#
1536# ## Set read timeout (only used if expecting a response)
1537# read_timeout = "1s"
1538#
# ## The following options are required for UDP checks. For TCP, they
# ## are optional. The plugin will send the given string to the server
# ## and then expect to receive the given 'expect' string back.
# ## string sent to the server
1543# # send = "ssh"
1544# ## expected string in answer
1545# # expect = "ssh"
1546# # Read TCP metrics such as established, time wait and sockets counts.
1547# [[inputs.netstat]]
1548# # no configuration
1549# # Read Nginx's basic status information (ngx_http_stub_status_module)
1550# [[inputs.nginx]]
1551# # An array of Nginx stub_status URI to gather stats.
1552# urls = ["http://localhost/server_status"]
1553#
1554# # TLS/SSL configuration
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.cer"
# ssl_key = "/etc/telegraf/key.key"
# insecure_skip_verify = false
1557#
1558# # HTTP response timeout (default: 5s)
1559# response_timeout = "5s"
1560# # Read Nginx Plus' full status information (ngx_http_status_module)
1561# [[inputs.nginx_plus]]
1562# ## An array of ngx_http_status_module or status URI to gather stats.
1563# urls = ["http://localhost/status"]
1564#
1565# # HTTP response timeout (default: 5s)
1566# response_timeout = "5s"
1567# # Read NSQ topic and channel statistics.
1568# [[inputs.nsq]]
1569# ## An array of NSQD HTTP API endpoints
1570# endpoints = ["http://localhost:4151"]
1571# # Collect kernel snmp counters and network interface statistics
1572# [[inputs.nstat]]
# ## file paths for proc files. If empty, default paths will be used:
# ##   /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
# ## These can also be overridden with env variables, see README.
# proc_net_netstat = "/proc/net/netstat"
# proc_net_snmp = "/proc/net/snmp"
# proc_net_snmp6 = "/proc/net/snmp6"
1578# ## dump metrics with 0 values too
1579# dump_zeros = true
1580# # Get standard NTP query metrics, requires ntpq executable.
1581# [[inputs.ntpq]]
1582# ## If false, set the -n ntpq flag. Can reduce metric gather time.
1583# dns_lookup = true
1584# # OpenLDAP cn=Monitor plugin
1585# [[inputs.openldap]]
# host = "localhost"
# port = 389
1587#
# # ldaps, starttls, or no encryption. default is an empty string,
# # disabling all encryption. note that port will likely need to be
# # changed to 636 for ldaps. valid options: "" | "starttls" | "ldaps"
1591# ssl = ""
1592#
1593# # skip peer certificate verification. Default is false.
1594# insecure_skip_verify = false
1595#
1596# # Path to PEM-encoded Root certificate to use to verify server
1597# # certificate
1598# ssl_ca = "/etc/ssl/certs.pem"
1599#
1600# # dn/password to bind with. If bind_dn is empty, an anonymous bind
1601# # is performed.
# bind_dn = ""
# bind_password = ""
1603# # Read metrics of passenger using passenger-status
1604# [[inputs.passenger]]
1605# ## Path of passenger-status.
1606# ##
# ## The plugin gathers metrics by parsing the XML output of
# ## passenger-status. More information about the tool:
# ##   https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
# ##
# ## If no path is specified, then the plugin simply executes
# ## passenger-status, hoping it can be found in your PATH.
1613# command = "passenger-status -v --show=xml"
1614# # Read metrics of phpfpm, via HTTP status page or socket
1615# [[inputs.phpfpm]]
1616# ## An array of addresses to gather stats about. Specify an ip or
1617# ## hostname with optional port and path
1618# ##
1619# ## Plugin can be configured in three modes (either can be used):
1620# ## - http: the URL must start with http:// or https://, ie:
1621# ## "http://localhost/status"
1622# ## "http://192.168.130.1/status?full"
1623# ##
1624# ## - unixsocket: path to fpm socket, ie:
1625# ## "/var/run/php5-fpm.sock" or using a custom fpm status path:
1626# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
1627# ##
1628# ## - fcgi: the URL must start with fcgi:// or cgi://, and port
1629# ## must be present, ie:
1630# ## "fcgi://10.0.0.12:9000/status"
1631# ## "cgi://10.0.10.12:9001/status"
1632# ##
# ## Example of gathering from a local socket and a remote host:
# ##   urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
1635# urls = ["http://localhost/status"]
1636# # Ping given url(s) and return statistics
1637# [[inputs.ping]]
1638# ## NOTE: this plugin forks the ping command. You may need to set
1639# ## capabilities via setcap cap_net_raw+p /bin/ping
1640# #
1641# ## List of urls to ping
1642# urls = ["www.google.com"] # required
1643# ## number of pings to send per collection (ping -c <COUNT>)
1644# # count = 1
1645# ## interval, in s, at which to ping. 0 == default (ping -i
1646# ## <PING_INTERVAL>)
1647# # ping_interval = 1.0
1648# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
1649# # timeout = 1.0
1650# ## interface to send ping from (ping -I <INTERFACE>)
1651# # interface = ""
1652# # Read metrics from one or many postgresql servers
1653# [[inputs.postgresql]]
1654# ## specify address via a url matching:
1655# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
1656# ## ?sslmode=[disable|verify-ca|verify-full] or a simple
1657# ## string:
1658# ## host=localhost user=pqotest password=... sslmode=...
1659# ## dbname=app_production
1660# ##
1661# ## All connection parameters are optional.
1662# ##
1663# ## Without the dbname parameter, the driver will default to a
1664# ## database with the same name as the user. This dbname is just for
1665# ## instantiating a connection with the server and doesn't restrict
1666# ## the databases we are trying to grab metrics for.
1667# ##
1668# address = "host=localhost user=postgres sslmode=disable"
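# ## The address may also be given in URL form, e.g. (hypothetical
# ## credentials):
# # address = "postgres://pqgotest:password@localhost/app_production?sslmode=verify-full"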
1669#
1670# ## A list of databases to explicitly ignore. If not specified,
1671# ## metrics for all databases are gathered. Do NOT use with the
1672# ## 'databases' option.
1673# # ignored_databases = ["postgres", "template0", "template1"]
1674#
1675# ## A list of databases to pull metrics about. If not specified,
1676# ## metrics for all databases are gathered. Do NOT use with the
1677# ## 'ignored_databases' option.
1678# # databases = ["app_production", "testing"]
1679# # Read metrics from one or many postgresql servers
1680# [[inputs.postgresql_extensible]]
1681# ## specify address via a url matching:
1682# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
1683# ## ?sslmode=[disable|verify-ca|verify-full] or a simple
1684# ## string:
1685# ## host=localhost user=pqotest password=... sslmode=...
1686# ## dbname=app_production
1687# #
# ## All connection parameters are optional.
# ## Without the dbname parameter, the driver will default to a
# ## database with the same name as the user. This dbname is just for
# ## instantiating a connection with the server and doesn't restrict
# ## the databases we are trying to grab metrics for.
1693# #
1694# address = "host=localhost user=postgres sslmode=disable"
# ## A list of databases to pull metrics about. If not specified,
# ## metrics for all databases are gathered.
# # databases = ["app_production", "testing"]
# #
# ## A custom name for the database that will be used as the "server"
# ## tag in the measurement output. If not specified, a default one
# ## generated from the connection address is used.
# # outputaddress = "db01"
# #
# ## Define the toml config where the sql queries are stored. New
# ## queries can be added. If withdbname is set to true and there
# ## are no databases defined in the 'databases' field, the sql query
# ## is ended by an 'is not null' clause in order to make the query
# ## succeed.
# ## Example: the sqlquery "SELECT * FROM pg_stat_database where
# ## datname" becomes "SELECT * FROM pg_stat_database where datname IN
# ## ('postgres', 'pgbench')" because the databases variable was set
# ## to ['postgres', 'pgbench'] and withdbname was true. Note that if
# ## withdbname is set to false, you don't have to define the where
# ## clause (aka with the dbname).
# ## The tagvalue field is used to define custom tags (separated by
# ## commas). The optional "measurement" value can be used to override
# ## the default output measurement name ("postgresql").
1717# #
# ## Structure:
# ##   [[inputs.postgresql_extensible.query]]
# ##     sqlquery string
# ##     version string
# ##     withdbname boolean
# ##     tagvalue string (comma separated)
# ##     measurement string
# [[inputs.postgresql_extensible.query]]
#   sqlquery="SELECT * FROM pg_stat_database"
#   version=901
#   withdbname=false
#   tagvalue=""
#   measurement=""
# [[inputs.postgresql_extensible.query]]
#   sqlquery="SELECT * FROM pg_stat_bgwriter"
#   version=901
#   withdbname=false
#   tagvalue="postgresql.stats"
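# ## A sketch of a custom query (hypothetical SQL and measurement
# ## name) that tags rows by database and lands in its own measurement:
# # [[inputs.postgresql_extensible.query]]
# #   sqlquery="SELECT datname, numbackends FROM pg_stat_database"
# #   version=901
# #   withdbname=false
# #   tagvalue="datname"
# #   measurement="pg_sessions"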
1727# # Read metrics from one or many PowerDNS servers
1728# [[inputs.powerdns]]
1729# ## An array of sockets to gather stats about. Specify a path to unix
1730# ## socket.
1731# unix_sockets = ["/var/run/pdns.controlsocket"]
1732# # Monitor process cpu and memory usage
1733# [[inputs.procstat]]
# ## Must specify one of: pid_file, exe, or pattern
# ## PID file to monitor process
1736# pid_file = "/var/run/nginx.pid"
1737# ## executable name (ie, pgrep <exe>)
1738# # exe = "nginx"
1739# ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
1740# # pattern = "nginx"
1741# ## user as argument for pgrep (ie, pgrep -u <user>)
1742# # user = "nginx"
1743#
# ## override for process_name. This is optional; default is sourced
# ## from /proc/<pid>/status
1746# # process_name = "bar"
1747# ## Field name prefix
1748# prefix = ""
1749# ## comment this out if you want raw cpu_time stats
1750# fielddrop = ["cpu_time_*"]
1751# ## This is optional; moves pid into a tag instead of a field
1752# pid_tag = false
1753# # Read metrics from one or many prometheus clients
1754# [[inputs.prometheus]]
1755# ## An array of urls to scrape metrics from.
1756# urls = ["http://localhost:9100/metrics"]
1757#
1758# ## An array of Kubernetes services to scrape metrics from.
1759# # kubernetes_services =
1760# # ["http://my-service-dns.my-namespace:9100/metrics"]
1761#
1762# ## Use bearer token for authorization
# # bearer_token = "/path/to/bearer/token"
1764#
1765# ## Specify timeout duration for slower prometheus clients (default
1766# ## is 3s)
1767# # response_timeout = "3s"
1768#
1769# ## Optional SSL Config
# # ssl_ca = "/path/to/cafile"
# # ssl_cert = "/path/to/certfile"
# # ssl_key = "/path/to/keyfile"
1772# ## Use SSL but skip chain & host verification
1773# # insecure_skip_verify = false
# # Reads last_run_summary.yaml file and converts to measurements
1775# [[inputs.puppetagent]]
1776# ## Location of puppet last run summary file
1777# location = "/var/lib/puppet/state/last_run_summary.yaml"
1778# # Reads metrics from RabbitMQ servers via the Management Plugin
1779# [[inputs.rabbitmq]]
1780# ## Management Plugin url. (default: http://localhost:15672)
1781# # url = "http://localhost:15672"
1782# ## Tag added to rabbitmq_overview series; deprecated: use tags
1783# # name = "rmq-server-1"
1784# ## Credentials
# # username = "guest"
# # password = "guest"
1786#
1787# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
1790# ## Use SSL but skip chain & host verification
1791# # insecure_skip_verify = false
1792#
1793# ## Optional request timeouts
1794# ##
1795# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time
1796# ## to wait for a server's response headers after fully writing the
1797# ## request.
1798# # header_timeout = "3s"
1799# ##
1800# ## client_timeout specifies a time limit for requests made by this
1801# ## client. Includes connection time, any redirects, and reading the
1802# ## response body.
1803# # client_timeout = "4s"
1804#
1805# ## A list of nodes to gather as the rabbitmq_node measurement. If
1806# ## not specified, metrics for all nodes are gathered.
1807# # nodes = ["rabbit@node1", "rabbit@node2"]
1808#
1809# ## A list of queues to gather as the rabbitmq_queue measurement. If
1810# ## not specified, metrics for all queues are gathered.
1811# # queues = ["telegraf"]
1812# # Read raindrops stats (raindrops - real-time stats for preforking
1813# # Rack servers)
1814# [[inputs.raindrops]]
1815# ## An array of raindrops middleware URI to gather stats.
1816# urls = ["http://localhost:8080/_raindrops"]
1817# # Read metrics from one or many redis servers
1818# [[inputs.redis]]
# ## specify servers via a url matching:
# ##   [protocol://][:password]@address[:port]
# ## e.g.
# ##   tcp://localhost:6379
# ##   tcp://:password@192.168.99.100
# ##   unix:///var/run/redis.sock
1823# ##
1824# ## If no servers are specified, then localhost is used as the host.
1825# ## If no port is specified, 6379 is used
1826# servers = ["tcp://localhost:6379"]
1827# # Read metrics from one or many RethinkDB servers
1828# [[inputs.rethinkdb]]
# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port and password. ie,
1831# ## rethinkdb://user:auth_key@10.10.3.30:28105,
1832# ## rethinkdb://10.10.3.33:18832, 10.0.0.1:10000, etc.
1833# servers = ["127.0.0.1:28015"]
1834# ##
# ## If you use a version of rethinkdb > 2.3.0 with username/password
# ## authorization, the protocol has to be named "rethinkdb2" - it
# ## will use the 1_0 handshake.
1838# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
1839# ##
# ## If you use older versions of rethinkdb (<2.2) with auth_key,
# ## the protocol has to be named "rethinkdb".
1842# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
# # Read metrics from one or many Riak servers
1844# [[inputs.riak]]
1845# # Specify a list of one or more riak http servers
1846# servers = ["http://localhost:8098"]
1847# # Read API usage and limits for a Salesforce organisation
1848# [[inputs.salesforce]]
1849# ## specify your credentials
1850# ##
# username = "your_username"
# password = "your_password"
1852# ##
1853# ## (optional) security token
1854# # security_token = "your_security_token"
1855# ##
1856# ## (optional) environment type (sandbox or production) default is:
1857# ## production
1858# ##
1859# # environment = "production"
1860# ##
1861# ## (optional) API version (default: "39.0")
1862# ##
1863# # version = "39.0"
1864# # Monitor sensors, requires lm-sensors package
1865# [[inputs.sensors]]
1866# ## Remove numbers from field names. If true, a field name like
1867# ## 'temp1_input' will be changed to 'temp_input'.
1868# # remove_numbers = true
1869# # Retrieves SNMP values from remote agents
1870# [[inputs.snmp]]
1871# agents = [ "127.0.0.1:161" ]
1872# ## Timeout for each SNMP query.
1873# timeout = "5s"
1874# ## Number of retries to attempt within timeout.
1875# retries = 3
1876# ## SNMP version, values can be 1, 2, or 3
1877# version = 2
1878#
1879# ## SNMP community string.
1880# community = "public"
1881#
1882# ## The GETBULK max-repetitions parameter
1883# max_repetitions = 10
1884#
1885# ## SNMPv3 auth parameters
# #sec_name = "myuser"
# #auth_protocol = "md5"      # Values: "MD5", "SHA", ""
# #auth_password = "pass"
# #sec_level = "authNoPriv"   # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
# #context_name = ""
# #priv_protocol = ""         # Values: "DES", "AES", ""
# #priv_password = ""
1890#
1891# ## measurement name
1892# name = "system" [[inputs.snmp.field]]
1893# name = "hostname" oid = ".1.0.0.1.1" [[inputs.snmp.field]] name =
1894# "uptime" oid = ".1.0.0.1.2" [[inputs.snmp.field]]
1895# name = "load" oid = ".1.0.0.1.3" [[inputs.snmp.field]] oid =
1896# "HOST-RESOURCES-MIB::hrMemorySize"
1897#
1898# [[inputs.snmp.table]]
1899# ## measurement name
1900# name = "remote_servers" inherit_tags = [ "hostname" ]
1901# [[inputs.snmp.table.field]]
1902# name = "server" oid = ".1.0.0.0.1.0" is_tag = true
1903# [[inputs.snmp.table.field]]
1904# name = "connections" oid = ".1.0.0.0.1.1"
1905# [[inputs.snmp.table.field]]
1906# name = "latency" oid = ".1.0.0.0.1.2"
1907#
1908# [[inputs.snmp.table]]
1909# ## auto populate table's fields using the MIB
1910# oid = "HOST-RESOURCES-MIB::hrNetworkTable"
1911# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
1912# [[inputs.snmp_legacy]]
# ## Use 'oids.txt' file to translate oids to names. To generate
# ## 'oids.txt' you need to run:
# ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
# ## Or if you have another MIB folder with custom MIBs:
# ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
# snmptranslate_file = "/tmp/oids.txt"
# [[inputs.snmp.host]]
1920# address = "192.168.2.2:161"
1921# # SNMP community
1922# community = "public" # default public
# # SNMP version (1, 2 or 3); version 3 not supported yet
1924# version = 2 # default 2
1925# # SNMP response timeout
1926# timeout = 2.0 # default 2.0
1927# # SNMP request retries
1928# retries = 2 # default 2
1929# # Which get/bulk do you want to collect for this host
1930# collect = ["mybulk", "sysservices", "sysdescr"]
1931# # Simple list of OIDs to get, in addition to "collect"
1932# get_oids = []
1933#
1934# [[inputs.snmp.host]]
# address = "192.168.2.3:161"
# community = "public"
# version = 2
# timeout = 2.0
# retries = 2
# collect = ["mybulk"]
# get_oids = [
#   "ifNumber",
#   ".1.3.6.1.2.1.1.3.0",
# ]
1938#
1939# [[inputs.snmp.get]]
# name = "ifnumber"
# oid = "ifNumber"
1941#
1942# [[inputs.snmp.get]]
# name = "interface_speed"
# oid = "ifSpeed"
# instance = "0"
1944#
1945# [[inputs.snmp.get]]
# name = "sysuptime"
# oid = ".1.3.6.1.2.1.1.3.0"
# unit = "second"
1947#
1948# [[inputs.snmp.bulk]]
# name = "mybulk"
# max_repetition = 127
# oid = ".1.3.6.1.2.1.1"
1950#
1951# [[inputs.snmp.bulk]]
# name = "ifoutoctets"
# max_repetition = 127
# oid = "ifOutOctets"
1953#
1954# [[inputs.snmp.host]]
1955# address = "192.168.2.13:161"
1956# #address = "127.0.0.1:161"
# community = "public"
# version = 2
# timeout = 2.0
# retries = 2
# #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
# collect = ["sysuptime"]
# [[inputs.snmp.host.table]]
#   name = "iftable3"
#   include_instances = ["enp5s0", "eth1"]
1961#
# # SNMP TABLEs: table without mapping nor subtables
1963# [[inputs.snmp.table]]
# name = "iftable1"
# oid = ".1.3.6.1.2.1.31.1.1.1"
1965#
1966# # table without mapping but with subtables
1967# [[inputs.snmp.table]]
# name = "iftable2"
# oid = ".1.3.6.1.2.1.31.1.1.1"
# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
1970#
1971# # table with mapping but without subtables
1972# [[inputs.snmp.table]]
# name = "iftable3"
# oid = ".1.3.6.1.2.1.31.1.1.1"
# # if empty, get all instances
1975# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
1976# # if empty, get all subtables
1977#
1978# # table with both mapping and subtables
1979# [[inputs.snmp.table]]
# name = "iftable4"
# oid = ".1.3.6.1.2.1.31.1.1.1"
# # if empty, get all instances
# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# # if empty, get all subtables; sub_tables could be not "real
# # subtables"
# sub_tables = [".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
1986# # Read metrics from Solr Server
1987# [[inputs.solr]]
1988# ## specify a list of one or more Solr servers
1989# servers = ["http://localhost:8983"]
1990# ##
1991# ## specify a list of one or more Solr cores (default - all)
1992# # cores = ["main"]
1993# # Read metrics from Microsoft SQL Server
1994# [[inputs.sqlserver]]
1995# ## Specify instances to monitor with a list of connection strings.
1996# ## All connection parameters are optional. By default, the host is
1997# ## localhost, listening on default port, TCP 1433.
# ## For Windows, the user is the currently running AD user (SSO).
1999# ## See https://github.com/denisenkom/go-mssqldb for detailed
2000# ## connection parameters.
# # servers = [
# #   "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
# # ]
2004# # Sysstat metrics collector
2005# [[inputs.sysstat]]
2006# ## Path to the sadc command.
2007# #
2008# ## Common Defaults:
# ##   Debian/Ubuntu: /usr/lib/sysstat/sadc
# ##   Arch: /usr/lib/sa/sadc
# ##   RHEL/CentOS: /usr/lib64/sa/sadc
2011# sadc_path = "/usr/lib/sa/sadc" # required
2012# #
2013# #
2014# ## Path to the sadf command, if it is not in PATH
2015# # sadf_path = "/usr/bin/sadf"
2016# #
2017# #
# ## Activities is a list of activities that are passed as arguments
# ## to the sadc collector utility (e.g. DISK, SNMP, etc.). The more
# ## activities that are added, the more data is collected.
2021# # activities = ["DISK"]
2022# #
2023# #
2024# ## Group metrics to measurements.
2025# ##
2026# ## If group is false each metric will be prefixed with a description
2027# ## and represents itself a measurement.
2028# ##
2029# ## If Group is true, corresponding metrics are grouped to a single
2030# ## measurement.
2031# # group = true
2032# #
2033# #
# ## Options for the sadf command. The values on the left represent
# ## the sadf options and the values on the right their description
# ## (which are used for grouping and prefixing metrics).
2037# ##
2038# ## Run 'sar -h' or 'man sar' to find out the supported options for
2039# ## your sysstat version.
2040# [inputs.sysstat.options]
# -C = "cpu"
# -B = "paging"
# -b = "io"
# -d = "disk"               # requires DISK activity
# "-n ALL" = "network"
# "-P ALL" = "per_cpu"
# -q = "queue"
# -R = "mem"
# -r = "mem_util"
# -S = "swap_util"
# -u = "cpu_util"
# -v = "inode"
# -W = "swap"
# -w = "task"
# # -H = "hugepages"        # only available for newer linux distributions
# # "-I ALL" = "interrupts" # requires INT activity
2047# #
2048# #
2049# ## Device tags can be used to add additional tags for devices. For
2050# ## example the configuration below adds a tag vg with value rootvg
2051# ## for all metrics with sda devices.
2052# # [[inputs.sysstat.device_tags.sda]]
2053# # vg = "rootvg"
2054# # Gather metrics from the Tomcat server status page.
2055# [[inputs.tomcat]]
2056# ## URL of the Tomcat server status
2057# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
2058#
2059# ## HTTP Basic Auth Credentials
# # username = "tomcat"
# # password = "s3cret"
2061#
2062# ## Request timeout
2063# # timeout = "5s"
2064#
2065# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
2068# ## Use SSL but skip chain & host verification
2069# # insecure_skip_verify = false
2070# # Inserts sine and cosine waves for demonstration purposes
2071# [[inputs.trig]]
2072# ## Set the amplitude
2073# amplitude = 10.0
2074# # Read Twemproxy stats data
2075# [[inputs.twemproxy]]
2076# ## Twemproxy stats address and port (no scheme)
2077# addr = "localhost:22222"
2078# ## Monitor pool name
2079# pools = ["redis_pool", "mc_pool"]
2080# # A plugin to collect stats from Varnish HTTP Cache
2081# [[inputs.varnish]]
2082# ## If running as a restricted user you can prepend sudo for
2083# ## additional access:
2084# #use_sudo = false
2085#
2086# ## The default location of the varnishstat binary can be overridden
2087# ## with:
2088# binary = "/usr/bin/varnishstat"
2089#
# ## By default, telegraf gathers stats for 3 metric points. Setting
# ## stats will override the defaults shown below. Glob matching can
# ## be used, ie, stats = ["MAIN.*"]. stats may also be set to ["*"],
# ## which will collect all stats.
2094# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
2095# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats,
2096# # and pools
2097# [[inputs.zfs]]
# ## ZFS kstat path. Ignored on FreeBSD. If not specified, then
# ## default is:
2100# # kstatPath = "/proc/spl/kstat/zfs"
2101#
# ## By default, telegraf gathers all zfs stats. If not specified,
# ## then default is:
2104# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
2105#
2106# ## By default, don't gather zpool stats
2107# # poolMetrics = false
2108# # Reads 'mntr' stats from one or many zookeeper servers
2109# [[inputs.zookeeper]]
# ## An array of addresses to gather stats about. Specify an ip or
# ## hostname with port. ie localhost:2181, 10.0.0.1:2181, etc.
2112#
2113# ## If no servers are specified, then localhost is used as the host.
2114# ## If no port is specified, 2181 is used
2115# servers = [":2181"]
2116###############################################################################
2117# SERVICE INPUT PLUGINS #
2118###############################################################################
2119# # AMQP consumer plugin
2120# [[inputs.amqp_consumer]]
2121# ## AMQP url
2122# url = "amqp://localhost:5672/influxdb"
2123# ## AMQP exchange
2124# exchange = "telegraf"
2125# ## AMQP queue name
2126# queue = "telegraf"
2127# ## Binding Key
2128# binding_key = "#"
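# ## (With a topic exchange, the "#" binding key above matches every
# ## routing key; a narrower key such as "telegraf.*" would match a
# ## single level.)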
2129#
2130# ## Maximum number of messages server should give to the worker.
2131# prefetch_count = 50
2132#
# ## Auth method. PLAIN and EXTERNAL are supported. Using EXTERNAL
# ## requires enabling the rabbitmq_auth_mechanism_ssl plugin as
# ## described here: https://www.rabbitmq.com/plugins.html
2136# # auth_method = "PLAIN"
2137#
2138# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
2141# ## Use SSL but skip chain & host verification
2142# # insecure_skip_verify = false
2143#
2144# ## Data format to consume. Each data format has its own unique set
2145# ## of configuration options, read more about them here:
2146# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2147# data_format = "influx"
2148# # Influx HTTP write listener
2149# [[inputs.http_listener]]
2150# ## Address and port to host HTTP listener on
2151# service_address = ":8186"
2152#
2153# ## maximum duration before timing out read of the request
2154# read_timeout = "10s"
2155# ## maximum duration before timing out write of the response
2156# write_timeout = "10s"
2157#
2158# ## Maximum allowed http request body size in bytes. 0 means to use
2159# ## the default of 536,870,912 bytes (500 mebibytes)
2160# max_body_size = 0
2161#
2162# ## Maximum line size allowed to be sent in bytes. 0 means to use the
2163# ## default of 65536 bytes (64 kibibytes)
2164# max_line_size = 0
2165#
2166# ## Set one or more allowed client CA certificate file names to
2167# ## enable mutually authenticated TLS connections
2168# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
2169#
2170# ## Add service certificate and key
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
2173# # Read metrics from Kafka topic(s)
2174# [[inputs.kafka_consumer]]
2175# ## kafka servers
2176# brokers = ["localhost:9092"]
2177# ## topic(s) to consume
2178# topics = ["telegraf"]
2179#
2180# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
2183# ## Use SSL but skip chain & host verification
2184# # insecure_skip_verify = false
2185#
2186# ## Optional SASL Config
# # sasl_username = "kafka"
# # sasl_password = "secret"
2188#
2189# ## the name of the consumer group
2190# consumer_group = "telegraf_metrics_consumers"
2191# ## Offset (must be either "oldest" or "newest")
2192# offset = "oldest"
2193#
2194# ## Data format to consume. Each data format has its own unique set
2195# ## of configuration options, read more about them here:
2196# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2197# data_format = "influx"
2198#
2199# ## Maximum length of a message to consume, in bytes (default
2200# ## 0/unlimited); larger messages are dropped
2201# max_message_len = 65536
2202# # Read metrics from Kafka topic(s)
2203# [[inputs.kafka_consumer_legacy]]
2204# ## topic(s) to consume
2205# topics = ["telegraf"]
2206# ## an array of Zookeeper connection strings
2207# zookeeper_peers = ["localhost:2181"]
2208# ## Zookeeper Chroot
2209# zookeeper_chroot = ""
2210# ## the name of the consumer group
2211# consumer_group = "telegraf_metrics_consumers"
2212# ## Offset (must be either "oldest" or "newest")
2213# offset = "oldest"
2214#
2215# ## Data format to consume. Each data format has its own unique set
2216# ## of configuration options, read more about them here:
2217# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2218# data_format = "influx"
2219#
2220# ## Maximum length of a message to consume, in bytes (default
2221# ## 0/unlimited); larger messages are dropped
2222# max_message_len = 65536
2223# # Stream and parse log file(s).
2224# [[inputs.logparser]]
# ## Log files to parse. These accept standard unix glob matching
# ## rules, but with the addition of ** as a "super asterisk". ie:
# ##   /var/log/**.log     -> recursively find all .log files in /var/log
# ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
# ##   /var/log/apache.log -> only tail the apache log file
2230# files = ["/var/log/apache/access.log"]
2231#
2232# ## Read files that currently exist from the beginning. Files that
2233# ## are created while telegraf is running (and that match the "files"
2234# ## globs) will always be read from the beginning.
2235# from_beginning = false
2236#
2237# ## Method used to watch for file updates. Can be either "inotify"
2238# ## or "poll".
2239# # watch_method = "inotify"
2240#
2241# ## Parse logstash-style "grok" patterns:
2242# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
2243# [inputs.logparser.grok]
2244# ## This is a list of patterns to check the given log file(s) for.
2245# ## Note that adding patterns here increases processing time. The
2246# ## most efficient configuration is to have one pattern per
2247# ## logparser. Other common built-in patterns are:
2248# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
2249# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
2250# patterns = ["%{COMBINED_LOG_FORMAT}"]
2251#
# ## Name of the outputted measurement.
2253# measurement = "apache_access_log"
2254#
2255# ## Full path(s) to custom pattern files.
2256# custom_pattern_files = []
2257#
2258# ## Custom patterns can also be defined here. Put one pattern per
2259# ## line.
# custom_patterns = '''
# '''
#
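# ## e.g. a custom pattern (hypothetical name/regex; one pattern per
# ## line, NAME followed by the regex):
# # custom_patterns = '''
# #   POSTFIX_QUEUEID [0-9A-F]{10,11}
# # '''
#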
# ## Timezone allows you to provide an override for timestamps that
# ## don't already include an offset
# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
# ##
# ## Default: "" which renders UTC
# ## Options are as follows:
# ##   1. Local            -- interpret based on machine localtime
# ##   2. "Canada/Eastern" -- Unix TZ values like those found in
# ##      https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# ##   3. UTC              -- or blank/unspecified, will return timestamp in UTC
# # timezone = "Canada/Eastern"
2272# # Read metrics from MQTT topic(s)
2273# [[inputs.mqtt_consumer]]
2274# servers = ["localhost:1883"]
2275# ## MQTT QoS, must be 0, 1, or 2
2276# qos = 0
2277# ## Connection timeout for initial connection in seconds
2278# connection_timeout = "30s"
2279#
2280# ## Topics to subscribe to
# topics = [
#   "telegraf/host01/cpu",
#   "telegraf/+/mem",
#   "sensors/#",
# ]
2283#
2284# # if true, messages that can't be delivered while the subscriber is
2285# # offline will be delivered when it comes back (such as on service
2286# # restart). NOTE: if true, client_id MUST be set
2287# persistent_session = false
2288# # If empty, a random client ID will be generated.
2289# client_id = ""
2290#
# ## username and password to connect to the MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
2293#
2294# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
2297# ## Use SSL but skip chain & host verification
2298# # insecure_skip_verify = false
2299#
2300# ## Data format to consume. Each data format has its own unique set
2301# ## of configuration options, read more about them here:
2302# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2303# data_format = "influx"
2304# # Read metrics from NATS subject(s)
2305# [[inputs.nats_consumer]]
2306# ## urls of NATS servers
2307# # servers = ["nats://localhost:4222"]
2308# ## Use Transport Layer Security
2309# # secure = false
2310# ## subject(s) to consume
2311# # subjects = ["telegraf"]
2312# ## name a queue group
2313# # queue_group = "telegraf_consumers"
2314#
# ## Sets the limits for pending msgs and bytes for each subscription.
# ## These shouldn't need to be adjusted except in very high
# ## throughput scenarios.
# # pending_message_limit = 65536
# # pending_bytes_limit = 67108864
2319#
2320# ## Data format to consume. Each data format has its own unique set
2321# ## of configuration options, read more about them here:
2322# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2323# data_format = "influx"
2324# # Read NSQ topic for metrics.
2325# [[inputs.nsq_consumer]]
2326# ## Server option still works but is deprecated, we just prepend it
2327# ## to the nsqd array.
2328# # server = "localhost:4150"
# ## An array representing the NSQD TCP Endpoints
2330# nsqd = ["localhost:4150"]
2331# ## An array representing the NSQLookupd HTTP Endpoints
# nsqlookupd = ["localhost:4161"]
# topic = "telegraf"
# channel = "consumer"
# max_in_flight = 100
2334#
2335# ## Data format to consume. Each data format has its own unique set
2336# ## of configuration options, read more about them here:
2337# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2338# data_format = "influx"
2339# # Generic socket listener capable of handling multiple socket types.
2340# [[inputs.socket_listener]]
2341# ## URL to listen on
# # service_address = "tcp://:8094"
# # service_address = "tcp://127.0.0.1:http"
# # service_address = "tcp4://:8094"
# # service_address = "tcp6://:8094"
# # service_address = "tcp6://[2001:db8::1]:8094"
# # service_address = "udp://:8094"
# # service_address = "udp4://:8094"
# # service_address = "udp6://:8094"
# # service_address = "unix:///tmp/telegraf.sock"
# # service_address = "unixgram:///tmp/telegraf.sock"
2349#
2350# ## Maximum number of concurrent connections. Only applies to stream
2351# ## sockets (e.g. TCP). 0 (default) is unlimited.
2352# # max_connections = 1024
2353#
2354# ## Read timeout. Only applies to stream sockets (e.g. TCP). 0
2355# ## (default) is unlimited.
2356# # read_timeout = "30s"
2357#
2358# ## Maximum socket buffer size in bytes. For stream sockets, once the
2359# ## buffer fills up, the sender will start backing up. For datagram
2360# ## sockets, once the buffer fills up, metrics will start dropping.
2361# ## Defaults to the OS default.
2362# # read_buffer_size = 65535
2363#
2364# ## Period between keep alive probes. Only applies to TCP sockets. 0
2365# ## disables keep alive probes. Defaults to the OS configuration.
2366# # keep_alive_period = "5m"
2367#
2368# ## Data format to consume. Each data format has its own unique set
2369# ## of configuration options, read more about them here:
2370# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2371# # data_format = "influx"
2372# # Statsd UDP/TCP Server
2373# [[inputs.statsd]]
2374# ## Protocol, must be "tcp" or "udp" (default=udp)
2375# protocol = "udp"
2376#
2377# ## MaxTCPConnection - applicable when protocol is set to tcp
2378# ## (default=250)
2379# max_tcp_connections = 250
2380#
2381# ## Address and port to host UDP listener on
2382# service_address = ":8125"
2383#
# ## The following configuration options control when telegraf clears
# ## its cache of previous values. If set to false, then telegraf
# ## will only clear its cache when the daemon is restarted.
# ## Reset gauges every interval (default=true)
2388# delete_gauges = true
2389# ## Reset counters every interval (default=true)
2390# delete_counters = true
2391# ## Reset sets every interval (default=true)
2392# delete_sets = true
2393# ## Reset timings & histograms every interval (default=true)
2394# delete_timings = true
2395#
2396# ## Percentiles to calculate for timing & histogram stats
2397# percentiles = [90]
2398#
2399# ## separator to use between elements of a statsd metric
2400# metric_separator = "_"
2401#
2402# ## Parses tags in the datadog statsd format
2403# ## http://docs.datadoghq.com/guides/dogstatsd/
2404# parse_data_dog_tags = false
2405#
2406# ## Statsd data translation templates, more info can be read here:
2407# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
# # templates = [
# #   "cpu.* measurement*"
# # ]
2410#
2411# ## Number of UDP messages allowed to queue up, once filled, the
2412# ## statsd server will start dropping packets
2413# allowed_pending_messages = 10000
2414#
2415# ## Number of timing/histogram values to track per-measurement in the
2416# ## calculation of percentiles. Raising this limit increases the
2417# ## accuracy of percentiles but also increases the memory usage and
2418# ## cpu time.
2419# percentile_limit = 1000
2420# # Stream a log file, like the tail -f command
2421# [[inputs.tail]]
# ## files to tail. These accept standard unix glob matching rules,
# ## but with the addition of ** as a "super asterisk". ie:
# ##   "/var/log/**.log"     -> recursively find all .log files in /var/log
# ##   "/var/log/*/*.log"    -> find all .log files with a parent dir in /var/log
# ##   "/var/log/apache.log" -> just tail the apache log file
2428# ##
2429# ## See https://github.com/gobwas/glob for more examples
2430# ##
2431# files = ["/var/mymetrics.out"]
2432# ## Read file from beginning.
2433# from_beginning = false
2434# ## Whether file is a named pipe
2435# pipe = false
2436#
2437# ## Method used to watch for file updates. Can be either "inotify"
2438# ## or "poll".
2439# # watch_method = "inotify"
2440#
2441# ## Data format to consume. Each data format has its own unique set
2442# ## of configuration options, read more about them here:
2443# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2444# data_format = "influx"
2445# # Generic TCP listener
2446# [[inputs.tcp_listener]]
2447# # DEPRECATED: the TCP listener plugin has been deprecated in favor
2448# # of the socket_listener plugin see
2449# # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
2450# # Generic UDP listener
2451# [[inputs.udp_listener]]
# # DEPRECATED: the UDP listener plugin has been deprecated in favor
2453# # of the socket_listener plugin see
2454# # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
2455# # A Webhooks Event collector
2456# [[inputs.webhooks]]
2457# ## Address and port to host Webhook listener on
2458# service_address = ":1619"
2459#
2460# [inputs.webhooks.filestack]
2461# path = "/filestack"
2462#
2463# [inputs.webhooks.github]
2464# path = "/github"
2465# # secret = ""
2466#
2467# [inputs.webhooks.mandrill]
2468# path = "/mandrill"
2469#
2470# [inputs.webhooks.rollbar]
2471# path = "/rollbar"
2472#
2473# [inputs.webhooks.papertrail]
2474# path = "/papertrail"
2475# # This plugin implements the Zipkin http server to gather trace and
2476# # timing data needed to troubleshoot latency problems in microservice
2477# # architectures.
2478# [[inputs.zipkin]]
# # path = "/api/v1/spans" # URL path for span data
# # port = 9411            # Port on which Telegraf listens