# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
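#
# For example, to dry-run only the cpu input and print what it would emit
# (a sketch; confirm flag names with 'telegraf --help' on your version):
#   telegraf -config telegraf.conf -input-filter cpu -test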
#
# Environment variables can be used anywhere in this config file; simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR");
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
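#
# For example, with STR_VAR, INT_VAR and BOOL_VAR exported in telegraf's
# environment (a sketch reusing the placeholder names above):
#   [global_tags]
#     dc = "$STR_VAR"               # string: must be quoted
#   [agent]
#     metric_batch_size = $INT_VAR  # number: plain
#     debug = $BOOL_VAR             # boolean: plain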


# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"


# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
  ## output, and will flush this buffer on a successful write. Oldest metrics
  ## are dropped first when this buffer fills.
  ## This buffer only fills when writes fail to output plugin(s).
  metric_buffer_limit = 10000
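  ## Rough sizing sketch (numbers illustrative): if one interval gathers
  ## ~100 metrics, a 10000-metric buffer covers 10000/100 = 100 intervals,
  ## i.e. about 1000s (~16 min at interval = "10s") of failed writes before
  ## the oldest metrics start to drop.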

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. You shouldn't set this below
  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ## ie, when interval = "10s", precision will be "1s"
  ##     when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Logging configuration:
  ## Run telegraf with debug log messages.
  debug = false
  ## Run telegraf in quiet mode (error log messages only).
  quiet = false
  ## Specify the log file name. The empty string means to log to stderr.
  logfile = ""

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false


###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################

# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
  ## The full HTTP or UDP URL for your InfluxDB instance.
  ##
  ## Multiple URLs can be specified for a single cluster, only ONE of the
  ## urls will be written to each interval.
  # urls = ["unix:///var/run/influxdb.sock"]
  # urls = ["udp://127.0.0.1:8089"]
  # urls = ["http://127.0.0.1:8086"]
  urls = ["http://influxdb:8086"]
  ## The target database for metrics; will be created as needed.
  database = "telegraf"

  ## If true, no CREATE DATABASE queries will be sent. Set to true when using
  ## Telegraf with a user without permissions to create databases or when the
  ## database already exists.
  # skip_database_creation = false

  ## Name of existing retention policy to write to. Empty string writes to
  ## the default retention policy. Only takes effect when using HTTP.
  # retention_policy = ""

  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
  ## Only takes effect when using HTTP.
  # write_consistency = "any"

  ## Timeout for HTTP messages.
  # timeout = "5s"

  ## HTTP Basic Auth
  username = "telegraf_user"
  password = "telegraf_password"

  ## HTTP User-Agent
  # user_agent = "telegraf"

  ## UDP payload size is the maximum packet size to send.
  # udp_payload = 512

  ## Optional TLS Config for use on HTTP connections.
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## HTTP Proxy override. If unset, the standard proxy environment
  ## variables are consulted to determine which proxy, if any, should be used.
  # http_proxy = "http://corporate.proxy:3128"

  ## Additional HTTP headers
  # http_headers = {"X-Special-Header" = "Special-Value"}

  ## HTTP Content-Encoding for write request body, can be set to "gzip" to
  ## compress body or "identity" to apply no encoding.
  # content_encoding = "identity"

  ## When true, Telegraf will output unsigned integers as unsigned values,
  ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
  ## integer values. Enabling this option will result in field type errors if
  ## existing data has been written.
  # influx_uint_support = false


# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
#   ## Amon Server Key
#   server_key = "my-server-key" # required.
#
#   ## Amon Instance URL
#   amon_instance = "https://youramoninstance" # required
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Publishes metrics to an AMQP broker
# [[outputs.amqp]]
#   ## Broker to publish to.
#   ## deprecated in 1.7; use the brokers option
#   # url = "amqp://localhost:5672/influxdb"
#
#   ## Brokers to publish to. If multiple brokers are specified a random broker
#   ## will be selected anytime a connection is established. This can be
#   ## helpful for load balancing when not using a dedicated load balancer.
#   brokers = ["amqp://localhost:5672/influxdb"]
#
#   ## Maximum messages to send over a connection. Once this is reached, the
#   ## connection is closed and a new connection is made. This can be helpful for
#   ## load balancing when not using a dedicated load balancer.
#   # max_messages = 0
#
#   ## Exchange to declare and publish to.
#   exchange = "telegraf"
#
#   ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
#   # exchange_type = "topic"
#
#   ## If true, exchange will be passively declared.
#   # exchange_declare_passive = false
#
#   ## If true, exchange will be created as a durable exchange.
#   # exchange_durable = true
#
#   ## Additional exchange arguments.
#   # exchange_arguments = { }
#   # exchange_arguments = {"hash_property" = "timestamp"}
#
#   ## Authentication credentials for the PLAIN auth_method.
#   # username = ""
#   # password = ""
#
#   ## Auth method. PLAIN and EXTERNAL are supported
#   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
#   ## described here: https://www.rabbitmq.com/plugins.html
#   # auth_method = "PLAIN"
#
#   ## Metric tag to use as a routing key.
#   ## ie, if this tag exists, its value will be used as the routing key
#   # routing_tag = "host"
#
#   ## Static routing key. Used when no routing_tag is set or as a fallback
#   ## when the tag specified in routing_tag is not found.
#   # routing_key = ""
#   # routing_key = "telegraf"
#
#   ## Delivery Mode controls if a published message is persistent.
#   ## One of "transient" or "persistent".
#   # delivery_mode = "transient"
#
#   ## InfluxDB database added as a message header.
#   ## deprecated in 1.7; use the headers option
#   # database = "telegraf"
#
#   ## InfluxDB retention policy added as a message header
#   ## deprecated in 1.7; use the headers option
#   # retention_policy = "default"
#
#   ## Static headers added to each published message.
#   # headers = { }
#   # headers = {"database" = "telegraf", "retention_policy" = "default"}
#
#   ## Connection timeout. If not provided, will default to 5s. 0s means no
#   ## timeout (not recommended).
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## If true use batch serialization format instead of line based delimiting.
#   ## Only applies to data formats which are not line based such as JSON.
#   ## Recommended to set to true.
#   # use_batch_format = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"


# # Send metrics to Azure Application Insights
# [[outputs.application_insights]]
#   ## Instrumentation key of the Application Insights resource.
#   instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
#
#   ## Timeout for closing (default: 5s).
#   # timeout = "5s"
#
#   ## Enable additional diagnostic logging.
#   # enable_diagnostic_logging = false
#
#   ## Context Tag Sources add Application Insights context tags to a tag value.
#   ##
#   ## For list of allowed context tag keys see:
#   ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
#   # [outputs.application_insights.context_tag_sources]
#   #   "ai.cloud.role" = "kubernetes_container_name"
#   #   "ai.cloud.roleInstance" = "kubernetes_pod_name"


# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
#   ## Amazon REGION
#   region = "us-east-1"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Namespace for the CloudWatch MetricDatums
#   namespace = "InfluxData/Telegraf"


# # Configuration for CrateDB to send metrics to.
# [[outputs.cratedb]]
#   # A github.com/jackc/pgx connection string.
#   # See https://godoc.org/github.com/jackc/pgx#ParseDSN
#   url = "postgres://user:password@localhost/schema?sslmode=disable"
#   # Timeout for all CrateDB queries.
#   timeout = "5s"
#   # Name of the table to store metrics in.
#   table = "metrics"
#   # If true, and the metrics table does not exist, create it automatically.
#   table_create = true


# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
#   ## Datadog API key
#   apikey = "my-secret-key" # required.
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Send metrics to nowhere at all
# [[outputs.discard]]
#   # no configuration


# # Configuration for Elasticsearch to send metrics to.
# [[outputs.elasticsearch]]
#   ## The full HTTP endpoint URL for your Elasticsearch instance
#   ## Multiple urls can be specified as part of the same cluster,
#   ## this means that only ONE of the urls will be written to each interval.
#   urls = [ "http://node1.es.example.com:9200" ] # required.
#   ## Elasticsearch client timeout, defaults to "5s" if not set.
#   timeout = "5s"
#   ## Set to true to ask Elasticsearch for a list of all cluster nodes,
#   ## thus it is not necessary to list all nodes in the urls config option.
#   enable_sniffer = false
#   ## Set the interval to check if the Elasticsearch nodes are available
#   ## Setting to "0s" will disable the health check (not recommended in production)
#   health_check_interval = "10s"
#   ## HTTP basic authentication details (eg. when using Shield)
#   # username = "telegraf"
#   # password = "mypassword"
#
#   ## Index Config
#   ## The target index for metrics (Elasticsearch will create it if it does not exist).
#   ## You can use the date specifiers below to create indexes per time frame.
#   ## The metric timestamp will be used to decide the destination index name
#   # %Y - year (2016)
#   # %y - last two digits of year (00..99)
#   # %m - month (01..12)
#   # %d - day of month (e.g., 01)
#   # %H - hour (00..23)
#   # %V - week of the year (ISO week) (01..53)
#   ## Additionally, you can specify a tag name using the notation {{tag_name}}
#   ## which will be used as part of the index name. If the tag does not exist,
#   ## the default tag value will be used.
#   # index_name = "telegraf-{{host}}-%Y.%m.%d"
#   # default_tag_value = "none"
#   index_name = "telegraf-%Y.%m.%d" # required.
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Template Config
#   ## Set to true if you want telegraf to manage its index template.
#   ## If enabled it will create a recommended index template for telegraf indexes
#   manage_template = true
#   ## The template name used for telegraf indexes
#   template_name = "telegraf"
#   ## Set to true if you want telegraf to overwrite an existing template
#   overwrite_template = false


# # Send telegraf metrics to file(s)
# [[outputs.file]]
#   ## Files to write to, "stdout" is a specially handled file.
#   files = ["stdout", "/tmp/metrics.out"]
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for Graphite server to send metrics to
# [[outputs.graphite]]
#   ## TCP endpoint for your graphite instance.
#   ## If multiple endpoints are configured, output will be load balanced.
#   ## Only one of the endpoints will be written to with each iteration.
#   servers = ["localhost:2003"]
#   ## Prefix metrics name
#   prefix = ""
#   ## Graphite output template
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   template = "host.tags.measurement.field"
#
#   ## Enable Graphite tags support
#   # graphite_tag_support = false
#
#   ## timeout in seconds for the write connection to graphite
#   timeout = 2
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
#   ## UDP endpoint for your graylog instance.
#   servers = ["127.0.0.1:12201", "192.168.1.1:12201"]


# # A plugin that can transmit metrics over HTTP
# [[outputs.http]]
#   ## URL is the address to send metrics to
#   url = "http://127.0.0.1:8080/metric"
#
#   ## Timeout for HTTP message
#   # timeout = "5s"
#
#   ## HTTP method, one of: "POST" or "PUT"
#   # method = "POST"
#
#   ## HTTP Basic Auth credentials
#   # username = "username"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"
#
#   ## Additional HTTP headers
#   # [outputs.http.headers]
#   #   # Should be set manually to "application/json" for json data_format
#   #   Content-Type = "text/plain; charset=utf-8"


# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
#   ## Project API Token (required)
#   api_token = "API Token" # required
#   ## Prefix the metrics with a given name
#   prefix = ""
#   ## Stats output template (Graphite formatting)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   template = "host.tags.measurement.field"
#   ## Timeout in seconds to connect
#   timeout = "2s"
#   ## Display communication to Instrumental
#   debug = false


# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
#   ## URLs of kafka brokers
#   brokers = ["localhost:9092"]
#   ## Kafka topic for producer messages
#   topic = "telegraf"
#
#   ## Optional topic suffix configuration.
#   ## If the section is omitted, no suffix is used.
#   ## Following topic suffix methods are supported:
#   ##   measurement - suffix equals to separator + measurement's name
#   ##   tags        - suffix equals to separator + specified tags' values
#   ##                 interleaved with separator
#
#   ## Suffix equals to "_" + measurement name
#   # [outputs.kafka.topic_suffix]
#   #   method = "measurement"
#   #   separator = "_"
#
#   ## Suffix equals to "__" + measurement's "foo" tag value.
#   ## If there is no such tag, the suffix is an empty string
#   # [outputs.kafka.topic_suffix]
#   #   method = "tags"
#   #   keys = ["foo"]
#   #   separator = "__"
#
#   ## Suffix equals to "_" + measurement's "foo" and "bar"
#   ## tag values, separated by "_". If such tags do not exist,
#   ## their values are treated as empty strings.
#   # [outputs.kafka.topic_suffix]
#   #   method = "tags"
#   #   keys = ["foo", "bar"]
#   #   separator = "_"
#
#   ## Telegraf tag to use as a routing key
#   ## ie, if this tag exists, its value will be used as the routing key
#   routing_tag = "host"
#
#   ## CompressionCodec represents the various compression codecs recognized by
#   ## Kafka in messages.
#   ##   0 : No compression
#   ##   1 : Gzip compression
#   ##   2 : Snappy compression
#   # compression_codec = 0
#
#   ## RequiredAcks is used in Produce Requests to tell the broker how many
#   ## replica acknowledgements it must see before responding
#   ##   0 : the producer never waits for an acknowledgement from the broker.
#   ##       This option provides the lowest latency but the weakest durability
#   ##       guarantees (some data will be lost when a server fails).
#   ##   1 : the producer gets an acknowledgement after the leader replica has
#   ##       received the data. This option provides better durability as the
#   ##       client waits until the server acknowledges the request as successful
#   ##       (only messages that were written to the now-dead leader but not yet
#   ##       replicated will be lost).
#   ##   -1: the producer gets an acknowledgement after all in-sync replicas have
#   ##       received the data. This option provides the best durability, we
#   ##       guarantee that no messages will be lost as long as at least one in
#   ##       sync replica remains.
#   # required_acks = -1
#
#   ## The maximum number of times to retry sending a metric before failing
#   ## until the next flush.
#   # max_retry = 3
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Optional SASL Config
#   # sasl_username = "kafka"
#   # sasl_password = "secret"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"


# # Configuration for the AWS Kinesis output.
# [[outputs.kinesis]]
#   ## Amazon REGION of kinesis endpoint.
#   region = "ap-southeast-2"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Kinesis StreamName must exist prior to starting telegraf.
#   streamname = "StreamName"
#   ## DEPRECATED: PartitionKey as used for sharding data.
#   partitionkey = "PartitionKey"
#   ## DEPRECATED: If set the partitionKey will be a random UUID on every put.
#   ## This allows for scaling across multiple shards in a stream.
#   ## This will cause issues with ordering.
#   use_random_partitionkey = false
#   ## The partition key can be calculated using one of several methods:
#   ##
#   ## Use a static value for all writes:
#   # [outputs.kinesis.partition]
#   #   method = "static"
#   #   key = "howdy"
#   #
#   ## Use a random partition key on each write:
#   # [outputs.kinesis.partition]
#   #   method = "random"
#   #
#   ## Use the measurement name as the partition key:
#   # [outputs.kinesis.partition]
#   #   method = "measurement"
#   #
#   ## Use the value of a tag for all writes, if the tag is not set the empty
#   ## string will be used:
#   # [outputs.kinesis.partition]
#   #   method = "tag"
#   #   key = "host"
#
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"
#
#   ## debug will show upstream aws messages.
#   debug = false


# # Configuration for Librato API to send metrics to.
# [[outputs.librato]]
#   ## Librato API Docs
#   ## http://dev.librato.com/v1/metrics-authentication
#   ## Librato API user
#   api_user = "telegraf@influxdb.com" # required.
#   ## Librato API token
#   api_token = "my-secret-token" # required.
#   ## Debug
#   # debug = false
#   ## Connection timeout.
#   # timeout = "5s"
#   ## Output source Template (same as graphite buckets)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   ## This template is used in librato's source (not metric's name)
#   template = "host"
#


# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
#   servers = ["localhost:1883"] # required.
#
#   ## MQTT outputs send metrics to this topic format
#   ##   "<topic_prefix>/<hostname>/<pluginname>/"
#   ##   ex: prefix/web01.example.com/mem
#   topic_prefix = "telegraf"
#
#   ## QoS policy for messages
#   ##   0 = at most once
#   ##   1 = at least once
#   ##   2 = exactly once
#   # qos = 2
#
#   ## Username and password to connect to the MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## client ID, if not set a random ID is generated
#   # client_id = ""
#
#   ## Timeout for write operations. default: 5s
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## When true, metrics will be sent in one MQTT message per flush. Otherwise,
#   ## metrics are written one metric per MQTT message.
#   # batch = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NATS
# [[outputs.nats]]
#   ## URLs of NATS servers
#   servers = ["nats://localhost:4222"]
#   ## Optional credentials
#   # username = ""
#   # password = ""
#   ## NATS subject for producer messages
#   subject = "telegraf"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
#   ## Location of nsqd instance listening on TCP
#   server = "localhost:4150"
#   ## NSQ topic for producer messages
#   topic = "telegraf"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
#   ## prefix for metrics keys
#   prefix = "my.specific.prefix."
#
#   ## DNS name of the OpenTSDB server
#   ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
#   ## telnet API. "http://opentsdb.example.com" will use the HTTP API.
#   host = "opentsdb.example.com"
#
#   ## Port of the OpenTSDB server
#   port = 4242
#
#   ## Number of data points to send to OpenTSDB in HTTP requests.
#   ## Not used with telnet API.
#   httpBatchSize = 50
#
#   ## Debug true - Prints OpenTSDB communication
#   debug = false
#
#   ## Separator separates measurement name from field
#   separator = "_"


# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
#   ## Address to listen on
#   # listen = ":9273"
#
#   ## Use TLS
#   # tls_cert = "/etc/ssl/telegraf.crt"
#   # tls_key = "/etc/ssl/telegraf.key"
#
#   ## Use http basic authentication
#   # basic_username = "Foo"
#   # basic_password = "Bar"
#
#   ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
#   # expiration_interval = "60s"
#
#   ## Collectors to exclude; valid entries are "gocollector" and "process".
#   ## If unset, both collectors are enabled.
#   collectors_exclude = ["gocollector", "process"]
#
#   # Send string metrics as Prometheus labels.
#   # Unless set to false all string metrics will be sent as labels.
#   string_as_label = true


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
#   ## The full TCP or UDP URL of the Riemann server
#   url = "tcp://localhost:5555"
#
#   ## Riemann event TTL, floating-point time in seconds.
#   ## Defines how long that an event is considered valid for in Riemann
#   # ttl = 30.0
#
#   ## Separator to use between measurement and field name in Riemann service name
#   ## This does not have any effect if 'measurement_as_attribute' is set to 'true'
#   separator = "/"
#
#   ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
#   # measurement_as_attribute = false
#
#   ## Send string metrics as Riemann event states.
#   ## Unless enabled all string metrics will be ignored
#   # string_as_state = false
#
#   ## A list of tag keys whose values get sent as Riemann tags.
#   ## If empty, all Telegraf tag values will be sent as tags
#   # tag_keys = ["telegraf","custom_tag"]
#
#   ## Additional Riemann tags to send.
#   # tags = ["telegraf-output"]
#
#   ## Description for Riemann event
#   # description_text = "metrics collected from telegraf"
#
#   ## Riemann client write timeout, defaults to "5s" if not set.
#   # timeout = "5s"


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann_legacy]]
#   ## URL of server
#   url = "localhost:5555"
#   ## transport protocol to use either tcp or udp
#   transport = "tcp"
#   ## separator to use between input name and field name in Riemann service name
#   separator = " "


# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
#   ## URL to connect to
#   # address = "tcp://127.0.0.1:8094"
#   # address = "tcp://example.com:http"
#   # address = "tcp4://127.0.0.1:8094"
#   # address = "tcp6://127.0.0.1:8094"
#   # address = "tcp6://[2001:db8::1]:8094"
#   # address = "udp://127.0.0.1:8094"
#   # address = "udp4://127.0.0.1:8094"
#   # address = "udp6://127.0.0.1:8094"
#   # address = "unix:///tmp/telegraf.sock"
#   # address = "unixgram:///tmp/telegraf.sock"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Period between keep alive probes.
#   ## Only applies to TCP sockets.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   # keep_alive_period = "5m"
#
#   ## Data format to generate.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   # data_format = "influx"


# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
#   ## DNS name of the wavefront proxy server
#   host = "wavefront.example.com"
#
#   ## Port that the Wavefront proxy server listens on
#   port = 2878
#
#   ## prefix for metrics keys
#   # prefix = "my.specific.prefix."
#
#   ## whether to use "value" for name of simple fields
#   # simple_fields = false
#
#   ## character to use between metric and field name. defaults to . (dot)
#   # metric_separator = "."
#
#   ## Convert metric name paths to use the metric_separator character
#   ## When true (default) will convert all _ (underscore) characters in the final metric name
#   # convert_paths = true
#
#   ## Use Regex to sanitize metric and tag names from invalid characters
#   ## Regex is more thorough, but significantly slower
#   # use_regex = false
#
#   ## point tags to use as the source name for Wavefront (if none found, host will be used)
#   # source_override = ["hostname", "agent_host", "node_host"]
#
#   ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true
#   # convert_bool = true
#
#   ## Define a mapping, namespaced by metric prefix, from string values to numeric values
#   ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for
#   ## any metrics beginning with "elasticsearch"
#   # [[outputs.wavefront.string_to_number.elasticsearch]]
#   #   green = 1.0
#   #   yellow = 0.5
#   #   red = 0.0



###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################

# # Convert values to another metric value type
# [[processors.converter]]
#   ## Tags to convert
#   ##
#   ## The table key determines the target type, and the array of key-values
#   ## select the keys to convert. The array may contain globs.
#   ##   <target-type> = [<tag-key>...]
#   [processors.converter.tags]
#     string = []
#     integer = []
#     unsigned = []
#     boolean = []
#     float = []
#
#   ## Fields to convert
#   ##
#   ## The table key determines the target type, and the array of key-values
#   ## select the keys to convert. The array may contain globs.
#   ##   <target-type> = [<field-key>...]
#   ## (a worked sketch follows this block)
#   [processors.converter.fields]
#     tag = []
#     string = []
#     integer = []
#     unsigned = []
#     boolean = []
#     float = []
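#
#   ## A minimal sketch (field names hypothetical): coerce a "port" field
#   ## gathered as a string into an integer, and any "load*" fields into
#   ## floats:
#   # [processors.converter.fields]
#   #   integer = ["port"]
#   #   float = ["load*"]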


# # Apply metric modifications using override semantics.
# [[processors.override]]
#   ## All modifications on inputs and aggregators can be overridden:
#   # name_override = "new_name"
#   # name_prefix = "new_name_prefix"
#   # name_suffix = "new_name_suffix"
#
#   ## Tags to be added (all values must be strings)
#   # [processors.override.tags]
#   #   additional_tag = "tag_value"


# # Print all metrics that pass through this filter.
# [[processors.printer]]


# # Transforms tag and field values with regex pattern
# [[processors.regex]]
#   ## Tag and field conversions defined in separate sub-tables
#   # [[processors.regex.tags]]
#   #   ## Tag to change
#   #   key = "resp_code"
#   #   ## Regular expression to match on a tag value
#   #   pattern = "^(\\d)\\d\\d$"
#   #   ## Pattern for constructing a new value (${1} represents first subgroup)
#   #   replacement = "${1}xx"
#
#   # [[processors.regex.fields]]
#   #   key = "request"
#   #   ## All the power of the Go regular expressions available here
#   #   ## For example, named subgroups
#   #   pattern = "^/api(?P<method>/[\\w/]+)\\S*"
#   #   replacement = "${method}"
#   #   ## If result_key is present, a new field will be created
#   #   ## instead of changing existing field
#   #   result_key = "method"
#
#   ## Multiple conversions may be applied for one field sequentially
#   ## Let's extract one more value
#   # [[processors.regex.fields]]
#   #   key = "request"
#   #   pattern = ".*category=(\\w+).*"
#   #   replacement = "${1}"
#   #   result_key = "search_category"


# # Filter metrics, keeping only those ranked in the top k over each period.
# [[processors.topk]]
#   ## How many seconds between aggregations
#   # period = 10
#
#   ## How many top metrics to return
#   # k = 10
#
#   ## Over which tags should the aggregation be done. Globs can be specified, in
#   ## which case any tag matching the glob will be aggregated over. If set to an
#   ## empty list, no aggregation over tags is done
#   # group_by = ['*']
#
#   ## Over which fields the top k are calculated
#   # fields = ["value"]
#
#   ## What aggregation to use. Options: sum, mean, min, max
#   # aggregation = "mean"
#
#   ## Instead of the top k largest metrics, return the bottom k lowest metrics
#   # bottomk = false
#
#   ## The plugin assigns each metric a GroupBy tag generated from its name and
#   ## tags. If this setting is different than "" the plugin will add a
#   ## tag (whose name will be the value of this setting) to each metric with
#   ## the value of the calculated GroupBy tag. Useful for debugging
#   # add_groupby_tag = ""
#
#   ## These settings provide a way to know the position of each metric in
#   ## the top k. The 'add_rank_fields' setting lets you specify for which
#   ## fields the position is required. If the list is non-empty, then a field
#   ## will be added to each and every metric for each string present in this
#   ## setting. This field will contain the ranking of the group that
#   ## the metric belonged to when aggregated over that field.
#   ## The name of the field will be set to the name of the aggregation field,
#   ## suffixed with the string '_topk_rank'
#   # add_rank_fields = []
#
#   ## These settings provide a way to know what values the plugin is generating
#   ## when aggregating metrics. The 'add_aggregate_fields' setting lets you
#   ## specify for which fields the final aggregation value is required. If the
#   ## list is non-empty, then a field will be added to each and every metric
#   ## for each field present in this setting. This field will contain
#   ## the computed aggregation for the group that the metric belonged to when
#   ## aggregated over that field.
#   ## The name of the field will be set to the name of the aggregation field,
#   ## suffixed with the string '_topk_aggregate'
#   # add_aggregate_fields = []
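#
#   ## A worked sketch (values illustrative): keep the 3 groups with the
#   ## highest mean "usage_idle" each period, exposing rank and aggregate:
#   # [[processors.topk]]
#   #   period = 10
#   #   k = 3
#   #   group_by = ["host"]
#   #   fields = ["usage_idle"]
#   #   aggregation = "mean"
#   #   add_rank_fields = ["usage_idle"]       # adds "usage_idle_topk_rank"
#   #   add_aggregate_fields = ["usage_idle"]  # adds "usage_idle_topk_aggregate"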



###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################

# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false


# # Create aggregate histograms.
# [[aggregators.histogram]]
#   ## The period in which to flush the aggregator.
#   period = "30s"
#
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#
#   ## Example config that aggregates all fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
#   #   ## The name of metric.
#   #   measurement_name = "cpu"
#
#   ## Example config that aggregates only specific fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
#   #   ## The name of metric.
#   #   measurement_name = "diskio"
#   #   ## The concrete fields of metric
#   #   fields = ["io_time", "read_time", "write_time"]


# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false



###############################################################################
#                              INPUT PLUGINS                                  #
###############################################################################

# Read metrics about cpu usage
[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## If true, collect raw CPU time metrics.
  collect_cpu_time = false
  ## If true, compute and report the sum of all non-idle CPU states.
  report_active = false


# Read metrics about disk usage by mount point
[[inputs.disk]]
  ## By default stats will be gathered for all mount points.
  ## Set mount_points will restrict the stats to only the specified mount points.
  # mount_points = ["/"]

  ## Ignore mount points by filesystem type.
  ignore_fs = ["tmpfs", "devtmpfs", "devfs"]


# Read metrics about disk IO by device
[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  # devices = ["sda", "sdb", "vd*"]
  ## Uncomment the following line if you need disk serial numbers.
  # skip_serial_number = false
  #
  ## On systems which support it, device metadata can be added in the form of
  ## tags.
  ## Currently only Linux is supported via udev properties. You can view
  ## available properties for a device by running:
  ## 'udevadm info -q property -n /dev/sda'
  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
  #
  ## Using the same metadata source as device_tags, you can also customize the
  ## name of the device via templates.
  ## The 'name_templates' parameter is a list of templates to try and apply to
  ## the device. The template may contain variables in the form of '$PROPERTY' or
  ## '${PROPERTY}'. The first template which does not contain any variables not
  ## present for the device is used as the device name tag.
  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
  ## the near-meaningless DM-0 name.
  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]


# Get kernel statistics from /proc/stat
[[inputs.kernel]]
  # no configuration


# Read metrics about memory usage
[[inputs.mem]]
  # no configuration


# Get the number of processes and group them by status
[[inputs.processes]]
  # no configuration


# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration


# Read metrics about system load & uptime
[[inputs.system]]
  # no configuration


# # Read stats from aerospike server(s)
# [[inputs.aerospike]]
#   ## Aerospike servers to connect to (with port)
#   ## This plugin will query all namespaces the aerospike
#   ## server has configured and get stats for them.
#   servers = ["localhost:3000"]
#
#   # username = "telegraf"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # enable_tls = false
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## If false, skip chain & host verification
#   # insecure_skip_verify = true


# # Read Apache status information (mod_status)
# [[inputs.apache]]
#   ## An array of URLs to gather from, must be directed at the machine
#   ## readable version of the mod_status page including the auto query string.
#   ## Default is "http://localhost/server-status?auto".
#   urls = ["http://localhost/server-status?auto"]
#
#   ## Credentials for basic HTTP authentication.
#   # username = "myuser"
#   # password = "mypassword"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Gather metrics from Apache Aurora schedulers
# [[inputs.aurora]]
#   ## Schedulers are the base addresses of your Aurora Schedulers
#   schedulers = ["http://127.0.0.1:8081"]
#
#   ## Set of role types to collect metrics from.
#   ##
#   ## The scheduler roles are checked each interval by contacting the
#   ## scheduler nodes; zookeeper is not contacted.
#   # roles = ["leader", "follower"]
#
#   ## Timeout is the max time for total network operations.
#   # timeout = "5s"
#
#   ## Username and password are sent using HTTP Basic Auth.
#   # username = "username"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
#   ## Bcache sets path
#   ## If not specified, then default is:
#   bcachePath = "/sys/fs/bcache"
#
#   ## By default, telegraf gathers stats for all bcache devices
#   ## Setting devices will restrict the stats to the specified
#   ## bcache devices.
#   bcacheDevs = ["bcache0"]


# # Collect bond interface status, slaves statuses and failures count
# [[inputs.bond]]
#   ## Sets 'proc' directory path
#   ## If not specified, then default is /proc
#   # host_proc = "/proc"
#
#   ## By default, telegraf gathers stats for all bond interfaces
#   ## Setting interfaces will restrict the stats to the specified
#   ## bond interfaces.
#   # bond_interfaces = ["bond0"]


# # Collect Kafka topics and consumers status from Burrow HTTP API.
# [[inputs.burrow]]
#   ## Burrow API endpoints in format "schema://host:port".
#   ## Default is "http://localhost:8000".
#   servers = ["http://localhost:8000"]
#
#   ## Override Burrow API prefix.
#   ## Useful when Burrow is behind reverse-proxy.
#   # api_prefix = "/v3/kafka"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Limit per-server concurrent connections.
#   ## Useful in case of large number of topics or consumer groups.
#   # concurrent_connections = 20
#
#   ## Filter clusters, default is no filtering.
#   ## Values can be specified as glob patterns.
#   # clusters_include = []
#   # clusters_exclude = []
#
#   ## Filter consumer groups, default is no filtering.
#   ## Values can be specified as glob patterns.
#   # groups_include = []
#   # groups_exclude = []
#
#   ## Filter topics, default is no filtering.
#   ## Values can be specified as glob patterns.
#   # topics_include = []
#   # topics_exclude = []
#
#   ## Credentials for basic HTTP authentication.
#   # username = ""
#   # password = ""
#
#   ## Optional SSL config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   # insecure_skip_verify = false


# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
#   ## This is the recommended interval to poll. Too frequent and you will lose
#   ## data points due to timeouts during rebalancing and recovery
#   interval = '1m'
#
#   ## All configuration values are optional, defaults are shown below
#
#   ## location of ceph binary
#   ceph_binary = "/usr/bin/ceph"
#
#   ## directory in which to look for socket files
#   socket_dir = "/var/run/ceph"
#
#   ## prefix of MON and OSD socket files, used to determine socket type
#   mon_prefix = "ceph-mon"
#   osd_prefix = "ceph-osd"
#
#   ## suffix used to identify socket files
#   socket_suffix = "asok"
#
#   ## Ceph user to authenticate as
#   ceph_user = "client.admin"
#
#   ## Ceph configuration to use to locate the cluster
#   ceph_config = "/etc/ceph/ceph.conf"
#
#   ## Whether to gather statistics via the admin socket
#   gather_admin_socket_stats = true
#
#   ## Whether to gather statistics via ceph commands
#   gather_cluster_stats = false


# # Read specific statistics per cgroup
# [[inputs.cgroup]]
#   ## Directories in which to look for files, globs are supported.
#   ## Consider restricting paths to the set of cgroups you really
#   ## want to monitor if you have a large number of cgroups, to avoid
#   ## any cardinality issues.
#   # paths = [
#   #   "/cgroup/memory",
#   #   "/cgroup/memory/child1",
#   #   "/cgroup/memory/child2/*",
#   # ]
#   ## cgroup stat fields, as file names, globs are supported.
#   ## these file names are appended to each path from above.
#   # files = ["memory.*usage*", "memory.limit_in_bytes"]


# # Get standard chrony metrics, requires chronyc executable.
# [[inputs.chrony]]
#   ## If true, chronyc tries to perform a DNS lookup for the time server.
#   # dns_lookup = false


# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
#   ## Amazon Region
#   region = "us-east-1"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
#   # metrics are made available to the 1 minute period. Some are collected at
#   # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
#   # Note that if a period is configured that is smaller than the minimum for a
#   # particular metric, that metric will not be returned by the Cloudwatch API
#   # and will not be collected by Telegraf.
#   #
#   ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
#   period = "5m"
#
#   ## Collection Delay (required - must account for metrics availability via CloudWatch API)
#   delay = "5m"
#
#   ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
#   ## gaps or overlap in pulled data
#   interval = "5m"
#
#   ## Configure the TTL for the internal cache of metrics.
#   ## Defaults to 1 hr if not specified
#   # cache_ttl = "10m"
#
#   ## Metric Statistic Namespace (required)
#   namespace = "AWS/ELB"
#
#   ## Maximum requests per second. Note that the global default AWS rate limit is
#   ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
#   ## maximum of 400. Optional - default value is 200.
#   ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
#   ratelimit = 200
#
#   ## Metrics to Pull (optional)
#   ## Defaults to all Metrics in Namespace if nothing is provided
#   ## Refreshes Namespace available metrics every 1h
#   # [[inputs.cloudwatch.metrics]]
#   #   names = ["Latency", "RequestCount"]
#   #
#   #   ## Dimension filters for Metric (optional)
#   #   [[inputs.cloudwatch.metrics.dimensions]]
#   #     name = "LoadBalancerName"
#   #     value = "p-example"


# # Collects conntrack stats from the configured directories and files.
# [[inputs.conntrack]]
#   ## The following defaults would work with multiple versions of conntrack.
#   ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
#   ## kernel versions, as are the directory locations.
#
#   ## Superset of filenames to look for within the conntrack dirs.
#   ## Missing files will be ignored.
#   files = ["ip_conntrack_count","ip_conntrack_max",
#            "nf_conntrack_count","nf_conntrack_max"]
#
#   ## Directories to search within for the conntrack files above.
#   ## Missing directories will be ignored.
#   dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]


# # Gather health check statuses from services registered in Consul
# [[inputs.consul]]
#   ## Consul server address
#   # address = "localhost"
#
#   ## URI scheme for the Consul server, one of "http", "https"
#   # scheme = "http"
#
#   ## ACL token used in every request
#   # token = ""
#
#   ## HTTP Basic Authentication username and password.
#   # username = ""
#   # password = ""
#
#   ## Data centre to query the health checks from
#   # datacentre = ""
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = true
#
#   ## Consul checks' tag splitting
#   # When tags are formatted like "key:value" with ":" as a delimiter then
#   # they will be split and reported as proper key:value pairs in Telegraf
#   # tag_delimiter = ":"


# # Read metrics from one or many couchbase clusters
# [[inputs.couchbase]]
#   ## specify servers via a url matching:
#   ##   [protocol://][:password]@address[:port]
#   ## e.g.
#   ##   http://couchbase-0.example.com/
#   ##   http://admin:secret@couchbase-0.example.com:8091/
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no protocol is specified, HTTP is used.
#   ## If no port is specified, 8091 is used.
#   servers = ["http://localhost:8091"]


# # Read CouchDB Stats from one or more servers
# [[inputs.couchdb]]
#   ## Works with CouchDB stats endpoints out of the box
#   ## Multiple HOSTs from which to read CouchDB stats:
#   hosts = ["http://localhost:8086/_stats"]


# # Input plugin for DC/OS metrics
# [[inputs.dcos]]
#   ## The DC/OS cluster URL.
#   cluster_url = "https://dcos-ee-master-1"
#
#   ## The ID of the service account.
#   service_account_id = "telegraf"
#   ## The private key file for the service account.
#   service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
#
#   ## Path containing login token. If set, will read on every gather.
#   # token_file = "/home/dcos/.dcos/token"
#
#   ## In all filter options if both include and exclude are empty all items
#   ## will be collected. Arrays may contain glob patterns.
#   ##
#   ## Node IDs to collect metrics from. If a node is excluded, no metrics will
#   ## be collected for its containers or apps.
#   # node_include = []
#   # node_exclude = []
#   ## Container IDs to collect container metrics from.
#   # container_include = []
#   # container_exclude = []
#   ## Container IDs to collect app metrics from.
#   # app_include = []
#   # app_exclude = []
#
#   ## Maximum concurrent connections to the cluster.
#   # max_connections = 10
#   ## Maximum time to receive a response from cluster.
#   # response_timeout = "20s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## If false, skip chain & host verification
#   # insecure_skip_verify = true
#
#   ## Recommended filtering to reduce series cardinality.
#   # [inputs.dcos.tagdrop]
#   #   path = ["/var/lib/mesos/slave/slaves/*"]


# # Read metrics from one or many disque servers
# [[inputs.disque]]
#   ## An array of URI to gather stats about. Specify an ip or hostname
#   ## with optional port and password.
#   ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["localhost"]


# # Provide a native collection for dmsetup based statistics for dm-cache
# [[inputs.dmcache]]
#   ## Whether to report per-device stats or not
#   per_device = true


# # Query given DNS server and gives statistics
# [[inputs.dns_query]]
#   ## servers to query
#   servers = ["8.8.8.8"]
#
#   ## Network is the network protocol name.
#   # network = "udp"
#
#   ## Domains or subdomains to query.
#   # domains = ["."]
#
#   ## Query record type.
#   ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
1527# # record_type = "A"
1528#
1529# ## Dns server port.
1530# # port = 53
1531#
1532# ## Query timeout in seconds.
1533# # timeout = 2
1534
1535
1536# # Read metrics about docker containers
1537 [[inputs.docker]]
1538# ## Docker Endpoint
1539# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
1540# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
1541 endpoint = "unix:///var/run/docker.sock"
1542#
1543# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas)
1544# gather_services = false
1545#
1546# ## Only collect metrics for these containers, collect all if empty
1547# container_names = []
1548#
1549# ## Containers to include and exclude. Globs accepted.
1550# ## Note that an empty array for both will include all containers
1551# container_name_include = []
1552# container_name_exclude = []
1553#
1554# ## Container states to include and exclude. Globs accepted.
1555# ## When empty only containers in the "running" state will be captured.
1556# # container_state_include = []
1557# # container_state_exclude = []
1558#
1559# ## Timeout for docker list, info, and stats commands
1560# timeout = "5s"
1561#
1562# ## Whether to report for each container per-device blkio (8:0, 8:1...) and
1563# ## network (eth0, eth1, ...) stats or not
1564# perdevice = true
1565# ## Whether to report for each container total blkio and network stats or not
1566# total = false
1567# ## Which environment variables should we use as a tag
1568# # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
1569#
1570# ## docker labels to include and exclude as tags. Globs accepted.
1571# ## Note that an empty array for both will include all labels as tags
1572# docker_label_include = []
1573# docker_label_exclude = []
1574#
1575# ## Optional TLS Config
1576# # tls_ca = "/etc/telegraf/ca.pem"
1577# # tls_cert = "/etc/telegraf/cert.pem"
1578# # tls_key = "/etc/telegraf/key.pem"
1579# ## Use TLS but skip chain & host verification
1580# # insecure_skip_verify = false
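#
# ## As an illustrative sketch (names are hypothetical), the glob filters
# ## above could restrict collection to app containers and drop
# ## compose-generated labels:
# # container_name_include = ["app-*"]
# # docker_label_exclude = ["com.docker.compose.*"]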
1581
1582
1583# # Read statistics from one or many dovecot servers
1584# [[inputs.dovecot]]
1585# ## specify dovecot servers via an address:port list
1586# ## e.g.
1587# ## localhost:24242
1588# ##
1589# ## If no servers are specified, then localhost is used as the host.
1590# servers = ["localhost:24242"]
1591# ## Type is one of "user", "domain", "ip", or "global"
1592# type = "global"
1593# ## Wildcard matches like "*.com". An empty string "" is the same as "*"
1594# ## If type = "ip" filters should be <IP/network>
1595# filters = [""]
1596
1597
1598# # Read stats from one or more Elasticsearch servers or clusters
1599# [[inputs.elasticsearch]]
1600# ## specify a list of one or more Elasticsearch servers
1601# # you can add username and password to your url to use basic authentication:
1602# # servers = ["http://user:pass@localhost:9200"]
1603# servers = ["http://localhost:9200"]
1604#
1605# ## Timeout for HTTP requests to the Elasticsearch server(s)
1606# http_timeout = "5s"
1607#
1608# ## When local is true (the default), the node will read only its own stats.
1609# ## Set local to false when you want to read the node stats from all nodes
1610# ## of the cluster.
1611# local = true
1612#
1613# ## Set cluster_health to true when you want to also obtain cluster health stats
1614# cluster_health = false
1615#
1616# ## Adjust cluster_health_level when you want to also obtain detailed health stats
1617# ## The options are
1618# ## - indices (default)
1619# ## - cluster
1620# # cluster_health_level = "indices"
1621#
1622# ## Set cluster_stats to true when you want to also obtain cluster stats from the
1623# ## Master node.
1624# cluster_stats = false
1625#
1626# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
1627# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
1628# ## "breaker". Per default, all stats are gathered.
1629# # node_stats = ["jvm", "http"]
1630#
1631# ## Optional TLS Config
1632# # tls_ca = "/etc/telegraf/ca.pem"
1633# # tls_cert = "/etc/telegraf/cert.pem"
1634# # tls_key = "/etc/telegraf/key.pem"
1635# ## Use TLS but skip chain & host verification
1636# # insecure_skip_verify = false
1637
1638
1639# # Read metrics from one or more commands that can output to stdout
1640# [[inputs.exec]]
1641# ## Commands array
1642# commands = [
1643# "/tmp/test.sh",
1644# "/usr/bin/mycollector --foo=bar",
1645# "/tmp/collect_*.sh"
1646# ]
1647#
1648# ## Timeout for each command to complete.
1649# timeout = "5s"
1650#
1651# ## measurement name suffix (for separating different commands)
1652# name_suffix = "_mycollector"
1653#
1654# ## Data format to consume.
1655# ## Each data format has its own unique set of configuration options, read
1656# ## more about them here:
1657# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1658# data_format = "influx"
1659
1660
1661# # Read metrics from fail2ban.
1662# [[inputs.fail2ban]]
1663# ## Use sudo to run fail2ban-client
1664# use_sudo = false
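#
# ## If use_sudo is enabled, a sudoers entry along these lines is needed
# ## (illustrative; adjust the fail2ban-client path for your system):
# ##   telegraf ALL=(root) NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status *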
1665
1666
1667# # Read devices value(s) from a Fibaro controller
1668# [[inputs.fibaro]]
1669# ## Required Fibaro controller address/hostname.
1670# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
1671# url = "http://<controller>:80"
1672#
1673# ## Required credentials to access the API (http://<controller>/api/<component>)
1674# username = "<username>"
1675# password = "<password>"
1676#
1677# ## Amount of time allowed to complete the HTTP request
1678# # timeout = "5s"
1679
1680
1681# # Read stats about given file(s)
1682# [[inputs.filestat]]
1683# ## Files to gather stats about.
1684# ## These accept standard unix glob matching rules, but with the addition of
1685# ## ** as a "super asterisk". ie:
1686# ## "/var/log/**.log" -> recursively find all .log files in /var/log
1687# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
1688# ## "/var/log/apache.log" -> just tail the apache log file
1689# ##
1690# ## See https://github.com/gobwas/glob for more examples
1691# ##
1692# files = ["/var/log/**.log"]
1693# ## If true, read the entire file and calculate an md5 checksum.
1694# md5 = false
1695
1696
1697# # Read metrics exposed by fluentd in_monitor plugin
1698# [[inputs.fluentd]]
1699# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
1700# ##
1701# ## Endpoint:
1702# ## - only one URI is allowed
1703# ## - https is not supported
1704# endpoint = "http://localhost:24220/api/plugins.json"
1705#
1706# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
1707# exclude = [
1708# "monitor_agent",
1709# "dummy",
1710# ]
1711
1712
1713# # Read flattened metrics from one or more GrayLog HTTP endpoints
1714# [[inputs.graylog]]
1715# ## API endpoint, currently supported API:
1716# ##
1717# ## - multiple (Ex http://<host>:12900/system/metrics/multiple)
1718# ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
1719# ##
1720# ## For namespace endpoint, the metrics array will be ignored for that call.
1721# ## Endpoint can contain namespace and multiple type calls.
1722# ##
1723# ## Please check http://[graylog-server-ip]:12900/api-browser for full list
1724# ## of endpoints
1725# servers = [
1726# "http://[graylog-server-ip]:12900/system/metrics/multiple",
1727# ]
1728#
1729# ## Metrics list
1730# ## List of metrics can be found on Graylog webservice documentation.
1731# ## Or by hitting the web service api at:
1732# ## http://[graylog-host]:12900/system/metrics
1733# metrics = [
1734# "jvm.cl.loaded",
1735# "jvm.memory.pools.Metaspace.committed"
1736# ]
1737#
1738# ## Username and password
1739# username = ""
1740# password = ""
1741#
1742# ## Optional TLS Config
1743# # tls_ca = "/etc/telegraf/ca.pem"
1744# # tls_cert = "/etc/telegraf/cert.pem"
1745# # tls_key = "/etc/telegraf/key.pem"
1746# ## Use TLS but skip chain & host verification
1747# # insecure_skip_verify = false
1748
1749
1750# # Read metrics of haproxy, via socket or csv stats page
1751# [[inputs.haproxy]]
1752# ## An array of addresses to gather stats about. Specify an ip or hostname
1753# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
1754# ## Make sure you specify the complete path to the stats endpoint
1755# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
1756#
1757# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
1758# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
1759#
1760# ## You can also use local socket with standard wildcard globbing.
1761# ## Server address not starting with 'http' will be treated as a possible
1762# ## socket, so both examples below are valid.
1763# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
1764#
1765# ## By default, some of the fields are renamed from what haproxy calls them.
1766# ## Setting this option to true results in the plugin keeping the original
1767# ## field names.
1768# # keep_field_names = false
1769#
1770# ## Optional TLS Config
1771# # tls_ca = "/etc/telegraf/ca.pem"
1772# # tls_cert = "/etc/telegraf/cert.pem"
1773# # tls_key = "/etc/telegraf/key.pem"
1774# ## Use TLS but skip chain & host verification
1775# # insecure_skip_verify = false
1776
1777
1778# # Monitor disks' temperatures using hddtemp
1779# [[inputs.hddtemp]]
1780# ## By default, telegraf gathers temperature data from all disks detected
1781# ## by hddtemp.
1782# ##
1783# ## Only collect temps from the selected disks.
1784# ##
1785# ## A * as the device name will return the temperature values of all disks.
1786# ##
1787# # address = "127.0.0.1:7634"
1788# # devices = ["sda", "*"]
1789
1790
1791# # Read formatted metrics from one or more HTTP endpoints
1792# [[inputs.http]]
1793# ## One or more URLs from which to read formatted metrics
1794# urls = [
1795# "http://localhost/metrics"
1796# ]
1797#
1798# ## HTTP method
1799# # method = "GET"
1800#
1801# ## Optional HTTP headers
1802# # headers = {"X-Special-Header" = "Special-Value"}
1803#
1804# ## Optional HTTP Basic Auth Credentials
1805# # username = "username"
1806# # password = "pa$$word"
1807#
1808# ## Tag all metrics with the url
1809# # tag_url = true
1810#
1811# ## Optional TLS Config
1812# # tls_ca = "/etc/telegraf/ca.pem"
1813# # tls_cert = "/etc/telegraf/cert.pem"
1814# # tls_key = "/etc/telegraf/key.pem"
1815# ## Use TLS but skip chain & host verification
1816# # insecure_skip_verify = false
1817#
1818# ## Amount of time allowed to complete the HTTP request
1819# # timeout = "5s"
1820#
1821# ## Data format to consume.
1822# ## Each data format has its own unique set of configuration options, read
1823# ## more about them here:
1824# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1825# # data_format = "influx"
1826
1827
1828# # HTTP/HTTPS request given an address a method and a timeout
1829# [[inputs.http_response]]
1830# ## Server address (default http://localhost)
1831# # address = "http://localhost"
1832#
1833# ## Set http_proxy (telegraf uses the system-wide proxy settings if it is not set)
1834# # http_proxy = "http://localhost:8888"
1835#
1836# ## Set response_timeout (default 5 seconds)
1837# # response_timeout = "5s"
1838#
1839# ## HTTP Request Method
1840# # method = "GET"
1841#
1842# ## Whether to follow redirects from the server (defaults to false)
1843# # follow_redirects = false
1844#
1845# ## Optional HTTP Request Body
1846# # body = '''
1847# # {'fake':'data'}
1848# # '''
1849#
1850# ## Optional substring or regex match in body of the response
1851# # response_string_match = "\"service_status\": \"up\""
1852# # response_string_match = "ok"
1853# # response_string_match = "\".*_status\".?:.?\"up\""
1854#
1855# ## Optional TLS Config
1856# # tls_ca = "/etc/telegraf/ca.pem"
1857# # tls_cert = "/etc/telegraf/cert.pem"
1858# # tls_key = "/etc/telegraf/key.pem"
1859# ## Use TLS but skip chain & host verification
1860# # insecure_skip_verify = false
1861#
1862# ## HTTP Request Headers (all values must be strings)
1863# # [inputs.http_response.headers]
1864# # Host = "github.com"
1865
1866
1867# # Read flattened metrics from one or more JSON HTTP endpoints
1868# [[inputs.httpjson]]
1869# ## NOTE: This plugin only reads numerical measurements; strings and booleans
1870# ## will be ignored.
1871#
1872# ## Name for the service being polled. Will be appended to the name of the
1873# ## measurement e.g. httpjson_webserver_stats
1874# ##
1875# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
1876# name = "webserver_stats"
1877#
1878# ## URL of each server in the service's cluster
1879# servers = [
1880# "http://localhost:9999/stats/",
1881# "http://localhost:9998/stats/",
1882# ]
1883# ## Set response_timeout (default 5 seconds)
1884# response_timeout = "5s"
1885#
1886# ## HTTP method to use: GET or POST (case-sensitive)
1887# method = "GET"
1888#
1889# ## List of tag names to extract from top-level of JSON server response
1890# # tag_keys = [
1891# # "my_tag_1",
1892# # "my_tag_2"
1893# # ]
1894#
1895# ## Optional TLS Config
1896# # tls_ca = "/etc/telegraf/ca.pem"
1897# # tls_cert = "/etc/telegraf/cert.pem"
1898# # tls_key = "/etc/telegraf/key.pem"
1899# ## Use TLS but skip chain & host verification
1900# # insecure_skip_verify = false
1901#
1902# ## HTTP parameters (all values must be strings). For "GET" requests, data
1903# ## will be included in the query. For "POST" requests, data will be included
1904# ## in the request body as "x-www-form-urlencoded".
1905# # [inputs.httpjson.parameters]
1906# # event_type = "cpu_spike"
1907# # threshold = "0.75"
1908#
1909# ## HTTP Headers (all values must be strings)
1910# # [inputs.httpjson.headers]
1911# # X-Auth-Token = "my-xauth-token"
1912# # apiVersion = "v1"
1913
1914
1915# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
1916# [[inputs.influxdb]]
1917# ## Works with InfluxDB debug endpoints out of the box,
1918# ## but other services can use this format too.
1919# ## See the influxdb plugin's README for more details.
1920#
1921# ## Multiple URLs from which to read InfluxDB-formatted JSON
1922# ## Default is "http://localhost:8086/debug/vars".
1923# urls = [
1924# "http://localhost:8086/debug/vars"
1925# ]
1926#
1927# ## Optional TLS Config
1928# # tls_ca = "/etc/telegraf/ca.pem"
1929# # tls_cert = "/etc/telegraf/cert.pem"
1930# # tls_key = "/etc/telegraf/key.pem"
1931# ## Use TLS but skip chain & host verification
1932# # insecure_skip_verify = false
1933#
1934# ## http request & header timeout
1935# timeout = "5s"
1936
1937
1938# # Collect statistics about itself
1939# [[inputs.internal]]
1940# ## If true, collect telegraf memory stats.
1941# # collect_memstats = true
1942
1943
1944# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
1945# [[inputs.interrupts]]
1946# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
1947# # [inputs.interrupts.tagdrop]
1948# # irq = [ "NET_RX", "TASKLET" ]
1949
1950
1951# # Read metrics from the bare metal servers via IPMI
1952# [[inputs.ipmi_sensor]]
1953# ## optionally specify the path to the ipmitool executable
1954# # path = "/usr/bin/ipmitool"
1955# ##
1956# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
1957# # privilege = "ADMINISTRATOR"
1958# ##
1959# ## optionally specify one or more servers via a url matching
1960# ## [username[:password]@][protocol[(address)]]
1961# ## e.g.
1962# ## root:passwd@lan(127.0.0.1)
1963# ##
1964# ## if no servers are specified, local machine sensor stats will be queried
1965# ##
1966# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
1967#
1968# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
1969# ## gaps or overlap in pulled data
1970# interval = "30s"
1971#
1972# ## Timeout for the ipmitool command to complete
1973# timeout = "20s"
1974
1975
1976# # Gather packets and bytes counters from Linux ipsets
1977# [[inputs.ipset]]
1978# ## By default, we only show sets which have already matched at least 1 packet.
1979# ## set include_unmatched_sets = true to gather them all.
1980# include_unmatched_sets = false
1981# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
1982# use_sudo = false
1983# ## The default timeout of 1s for ipset execution can be overridden here:
1984# # timeout = "1s"
1985
1986
1987# # Gather packets and bytes throughput from iptables
1988# [[inputs.iptables]]
1989# ## iptables require root access on most systems.
1990# ## Setting 'use_sudo' to true will make use of sudo to run iptables.
1991# ## Users must configure sudo to allow the telegraf user to run iptables with no password.
1992# ## iptables can be restricted to only list command "iptables -nvL".
1993# use_sudo = false
1994# ## Setting 'use_lock' to true runs iptables with the "-w" option.
1995# ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl")
1996# use_lock = false
1997# ## defines the table to monitor:
1998# table = "filter"
1999# ## defines the chains to monitor.
2000# ## NOTE: iptables rules without a comment will not be monitored.
2001# ## Read the plugin documentation for more information.
2002# chains = [ "INPUT" ]
2003
2004
2005# # Read JMX metrics through Jolokia
2006# [[inputs.jolokia]]
2007# # DEPRECATED: the jolokia plugin has been deprecated in favor of the
2008# # jolokia2 plugin
2009# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
2010#
2011# ## This is the context root used to compose the jolokia url
2012# ## NOTE that Jolokia requires a trailing slash at the end of the context root
2013# ## NOTE that your jolokia security policy must allow for POST requests.
2014# context = "/jolokia/"
2015#
2016# ## This specifies the mode used
2017# # mode = "proxy"
2018# #
2019# ## When in proxy mode this section is used to specify further
2020# ## proxy address configurations.
2021# ## Remember to change host address to fit your environment.
2022# # [inputs.jolokia.proxy]
2023# # host = "127.0.0.1"
2024# # port = "8080"
2025#
2026# ## Optional http timeouts
2027# ##
2028# ## response_header_timeout, if non-zero, specifies the amount of time to wait
2029# ## for a server's response headers after fully writing the request.
2030# # response_header_timeout = "3s"
2031# ##
2032# ## client_timeout specifies a time limit for requests made by this client.
2033# ## Includes connection time, any redirects, and reading the response body.
2034# # client_timeout = "4s"
2035#
2036# ## Attribute delimiter
2037# ##
2038# ## When multiple attributes are returned for a single
2039# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
2040# ## name, and the attribute name, separated by the given delimiter.
2041# # delimiter = "_"
2042#
2043# ## List of servers exposing jolokia read service
2044# [[inputs.jolokia.servers]]
2045# name = "as-server-01"
2046# host = "127.0.0.1"
2047# port = "8080"
2048# # username = "myuser"
2049# # password = "mypassword"
2050#
2051# ## List of metrics collected on above servers
2052# ## Each metric consists of a name, a jmx path, and either
2053# ## a pass or drop slice attribute.
2054# ## This collects all heap memory usage metrics.
2055# [[inputs.jolokia.metrics]]
2056# name = "heap_memory_usage"
2057# mbean = "java.lang:type=Memory"
2058# attribute = "HeapMemoryUsage"
2059#
2060# ## This collects thread count metrics.
2061# [[inputs.jolokia.metrics]]
2062# name = "thread_count"
2063# mbean = "java.lang:type=Threading"
2064# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
2065#
2066# ## This collects class loaded/unloaded count metrics.
2067# [[inputs.jolokia.metrics]]
2068# name = "class_count"
2069# mbean = "java.lang:type=ClassLoading"
2070# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
2071
2072
2073# # Read JMX metrics from a Jolokia REST agent endpoint
2074# [[inputs.jolokia2_agent]]
2075# # default_tag_prefix = ""
2076# # default_field_prefix = ""
2077# # default_field_separator = "."
2078#
2079# # Add agents URLs to query
2080# urls = ["http://localhost:8080/jolokia"]
2081# # username = ""
2082# # password = ""
2083# # response_timeout = "5s"
2084#
2085# ## Optional TLS config
2086# # tls_ca = "/var/private/ca.pem"
2087# # tls_cert = "/var/private/client.pem"
2088# # tls_key = "/var/private/client-key.pem"
2089# # insecure_skip_verify = false
2090#
2091# ## Add metrics to read
2092# [[inputs.jolokia2_agent.metric]]
2093# name = "java_runtime"
2094# mbean = "java.lang:type=Runtime"
2095# paths = ["Uptime"]
2096
2097
2098# # Read JMX metrics from a Jolokia REST proxy endpoint
2099# [[inputs.jolokia2_proxy]]
2100# # default_tag_prefix = ""
2101# # default_field_prefix = ""
2102# # default_field_separator = "."
2103#
2104# ## Proxy agent
2105# url = "http://localhost:8080/jolokia"
2106# # username = ""
2107# # password = ""
2108# # response_timeout = "5s"
2109#
2110# ## Optional TLS config
2111# # tls_ca = "/var/private/ca.pem"
2112# # tls_cert = "/var/private/client.pem"
2113# # tls_key = "/var/private/client-key.pem"
2114# # insecure_skip_verify = false
2115#
2116# ## Add proxy targets to query
2117# # default_target_username = ""
2118# # default_target_password = ""
2119# [[inputs.jolokia2_proxy.target]]
2120# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
2121# # username = ""
2122# # password = ""
2123#
2124# ## Add metrics to read
2125# [[inputs.jolokia2_proxy.metric]]
2126# name = "java_runtime"
2127# mbean = "java.lang:type=Runtime"
2128# paths = ["Uptime"]
2129
2130
2131# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
2132# [[inputs.kapacitor]]
2133# ## Multiple URLs from which to read Kapacitor-formatted JSON
2134# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
2135# urls = [
2136# "http://localhost:9092/kapacitor/v1/debug/vars"
2137# ]
2138#
2139# ## Time limit for http requests
2140# timeout = "5s"
2141#
2142# ## Optional TLS Config
2143# # tls_ca = "/etc/telegraf/ca.pem"
2144# # tls_cert = "/etc/telegraf/cert.pem"
2145# # tls_key = "/etc/telegraf/key.pem"
2146# ## Use TLS but skip chain & host verification
2147# # insecure_skip_verify = false
2148
2149
2150# # Get kernel statistics from /proc/vmstat
2151# [[inputs.kernel_vmstat]]
2152# # no configuration
2153
2154
2155# # Read metrics from the kubernetes kubelet api
2156# [[inputs.kubernetes]]
2157# ## URL for the kubelet
2158# url = "http://1.1.1.1:10255"
2159#
2160# ## Use bearer token for authorization
2161# # bearer_token = "/path/to/bearer/token"
2162#
2163# ## Set response_timeout (default 5 seconds)
2164# # response_timeout = "5s"
2165#
2166# ## Optional TLS Config
2167# # tls_ca = "/path/to/cafile"
2168# # tls_cert = "/path/to/certfile"
2169# # tls_key = "/path/to/keyfile"
2170# ## Use TLS but skip chain & host verification
2171# # insecure_skip_verify = false
2172
2173
2174# # Read metrics from a LeoFS Server via SNMP
2175# [[inputs.leofs]]
2176# ## An array of URLs of the form:
2177# ## host [ ":" port]
2178# servers = ["127.0.0.1:4020"]
2179
2180
2181# # Provides Linux sysctl fs metrics
2182# [[inputs.linux_sysctl_fs]]
2183# # no configuration
2184
2185
2186# # Read metrics from local Lustre service on OST, MDS
2187# [[inputs.lustre2]]
2188# ## An array of /proc globs to search for Lustre stats
2189# ## If not specified, the default will work on Lustre 2.5.x
2190# ##
2191# # ost_procfiles = [
2192# # "/proc/fs/lustre/obdfilter/*/stats",
2193# # "/proc/fs/lustre/osd-ldiskfs/*/stats",
2194# # "/proc/fs/lustre/obdfilter/*/job_stats",
2195# # ]
2196# # mds_procfiles = [
2197# # "/proc/fs/lustre/mdt/*/md_stats",
2198# # "/proc/fs/lustre/mdt/*/job_stats",
2199# # ]
2200
2201
2202# # Gathers metrics from the /3.0/reports MailChimp API
2203# [[inputs.mailchimp]]
2204# ## MailChimp API key
2205# ## get from https://admin.mailchimp.com/account/api/
2206# api_key = "" # required
2207# ## Reports for campaigns sent more than days_old ago will not be collected.
2208# ## 0 means collect all.
2209# days_old = 0
2210# ## Campaign ID to get; if empty, all campaigns are gathered. This option overrides days_old.
2211# # campaign_id = ""
2212
2213
2214# # Read metrics from one or many mcrouter servers
2215# [[inputs.mcrouter]]
2216# ## An array of addresses to gather stats about. Specify an ip or hostname
2217# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
2218# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
2219#
2220# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
2221# # timeout = "5s"
2222
2223
2224# # Read metrics from one or many memcached servers
2225# [[inputs.memcached]]
2226# ## An array of addresses to gather stats about. Specify an ip or hostname
2227# ## with optional port. ie localhost, 10.0.0.1:11211, etc.
2228# servers = ["localhost:11211"]
2229# # unix_sockets = ["/var/run/memcached.sock"]
2230
2231
2232# # Telegraf plugin for gathering metrics from N Mesos masters
2233# [[inputs.mesos]]
2234# ## Timeout, in ms.
2235# timeout = 100
2236# ## A list of Mesos masters.
2237# masters = ["http://localhost:5050"]
2238# ## Master metrics groups to be collected, by default, all enabled.
2239# master_collections = [
2240# "resources",
2241# "master",
2242# "system",
2243# "agents",
2244# "frameworks",
2245# "tasks",
2246# "messages",
2247# "evqueue",
2248# "registrar",
2249# ]
2250# ## A list of Mesos slaves, default is []
2251# # slaves = []
2252# ## Slave metrics groups to be collected, by default, all enabled.
2253# # slave_collections = [
2254# # "resources",
2255# # "agent",
2256# # "system",
2257# # "executors",
2258# # "tasks",
2259# # "messages",
2260# # ]
2261#
2262# ## Optional TLS Config
2263# # tls_ca = "/etc/telegraf/ca.pem"
2264# # tls_cert = "/etc/telegraf/cert.pem"
2265# # tls_key = "/etc/telegraf/key.pem"
2266# ## Use TLS but skip chain & host verification
2267# # insecure_skip_verify = false
2268
2269
2270# # Collects scores from a minecraft server's scoreboard using the RCON protocol
2271# [[inputs.minecraft]]
2272# ## server address for minecraft
2273# # server = "localhost"
2274# ## port for RCON
2275# # port = "25575"
2276# ## RCON password for the minecraft server
2277# # password = ""
2278
2279
2280# # Read metrics from one or many MongoDB servers
2281# [[inputs.mongodb]]
2282# ## An array of URLs of the form:
2283# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
2284# ## For example:
2285# ## mongodb://user:auth_key@10.10.3.30:27017,
2286# ## mongodb://10.10.3.33:18832,
2287# servers = ["mongodb://127.0.0.1:27017"]
2288#
2289# ## When true, collect per database stats
2290# # gather_perdb_stats = false
2291#
2292# ## Optional TLS Config
2293# # tls_ca = "/etc/telegraf/ca.pem"
2294# # tls_cert = "/etc/telegraf/cert.pem"
2295# # tls_key = "/etc/telegraf/key.pem"
2296# ## Use TLS but skip chain & host verification
2297# # insecure_skip_verify = false
2298
2299
2300# # Read metrics from one or many mysql servers
2301# [[inputs.mysql]]
2302# ## specify servers via a url matching:
2303# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
2304# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
2305# ## e.g.
2306# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
2307# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
2308# #
2309# ## If no servers are specified, then localhost is used as the host.
2310# servers = ["tcp(127.0.0.1:3306)/"]
2311#
2312# ## Selects the metric output format.
2313# ##
2314# ## This option exists to maintain backwards compatibility, if you have
2315# ## existing metrics do not set or change this value until you are ready to
2316# ## migrate to the new format.
2317# ##
2318# ## If you do not have existing metrics from this plugin set to the latest
2319# ## version.
2320# ##
2321# ## Telegraf >=1.6: metric_version = 2
2322# ## <1.6: metric_version = 1 (or unset)
2323# metric_version = 2
2324#
2325# ## the limits for metrics form perf_events_statements
2326# perf_events_statements_digest_text_limit = 120
2327# perf_events_statements_limit = 250
2328# perf_events_statements_time_limit = 86400
2329# #
2330# ## if the list is empty, then metrics are gathered from all database tables
2331# table_schema_databases = []
2332# #
2333# ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases listed above
2334# gather_table_schema = false
2335# #
2336# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
2337# gather_process_list = true
2338# #
2339# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
2340# gather_user_statistics = true
2341# #
2342# ## gather auto_increment columns and max values from information schema
2343# gather_info_schema_auto_inc = true
2344# #
2345# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
2346# gather_innodb_metrics = true
2347# #
2348# ## gather metrics from SHOW SLAVE STATUS command output
2349# gather_slave_status = true
2350# #
2351# ## gather metrics from SHOW BINARY LOGS command output
2352# gather_binary_logs = false
2353# #
2354# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
2355# gather_table_io_waits = false
2356# #
2357# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
2358# gather_table_lock_waits = false
2359# #
2360# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
2361# gather_index_io_waits = false
2362# #
2363# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
2364# gather_event_waits = false
2365# #
2366# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
2367# gather_file_events_stats = false
2368# #
2369# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
2370# gather_perf_events_statements = false
2371# #
2372# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
2373# interval_slow = "30m"
2374#
2375# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
2376# # tls_ca = "/etc/telegraf/ca.pem"
2377# # tls_cert = "/etc/telegraf/cert.pem"
2378# # tls_key = "/etc/telegraf/key.pem"
2379# ## Use TLS but skip chain & host verification
2380# # insecure_skip_verify = false
2381
2382
2383# # Provides metrics about the state of a NATS server
2384# [[inputs.nats]]
2385# ## The address of the monitoring endpoint of the NATS server
2386# server = "http://localhost:8222"
2387#
2388# ## Maximum time to receive response
2389# # response_timeout = "5s"
2390
2391
2392# # Read metrics about network interface usage
2393# [[inputs.net]]
2394# ## By default, telegraf gathers stats from any up interface (excluding loopback)
2395# ## Setting interfaces will tell it to gather these explicit interfaces,
2396# ## regardless of status.
2397# ##
2398# # interfaces = ["eth0"]
2399# ##
2400# ## On linux systems telegraf also collects protocol stats.
2401# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
2402# ##
2403# # ignore_protocol_stats = false
2404# ##
2405
2406
2407# # Collect response time of a TCP or UDP connection
2408# [[inputs.net_response]]
2409# ## Protocol, must be "tcp" or "udp"
2410# ## NOTE: because the "udp" protocol does not respond to requests, it requires
2411# ## a send/expect string pair (see below).
2412# protocol = "tcp"
2413# ## Server address (default localhost)
2414# address = "localhost:80"
2415#
2416# ## Set timeout
2417# # timeout = "1s"
2418#
2419# ## Set read timeout (only used if expecting a response)
2420# # read_timeout = "1s"
2421#
2422# ## The following options are required for UDP checks. For TCP, they are
2423# ## optional. The plugin will send the given string to the server and then
2424# ## expect to receive the given 'expect' string back.
2425# ## string sent to the server
2426# # send = "ssh"
2427# ## expected string in answer
2428# # expect = "ssh"
2429#
2430# ## Uncomment to remove deprecated fields
2431# # fieldexclude = ["result_type", "string_found"]
2432
2433
2434# # Read TCP metrics such as established, time wait and sockets counts.
2435# [[inputs.netstat]]
2436# # no configuration
2437
2438
2439# # Read Nginx's basic status information (ngx_http_stub_status_module)
2440# [[inputs.nginx]]
2441# # An array of Nginx stub_status URIs to gather stats.
2442# urls = ["http://localhost/server_status"]
2443#
2444# ## Optional TLS Config
2445# tls_ca = "/etc/telegraf/ca.pem"
2446# tls_cert = "/etc/telegraf/cert.cer"
2447# tls_key = "/etc/telegraf/key.key"
2448# ## Use TLS but skip chain & host verification
2449# insecure_skip_verify = false
2450#
2451# # HTTP response timeout (default: 5s)
2452# response_timeout = "5s"
2453
2454
2455# # Read Nginx Plus' full status information (ngx_http_status_module)
2456# [[inputs.nginx_plus]]
2457# ## An array of ngx_http_status_module status URIs to gather stats from.
2458# urls = ["http://localhost/status"]
2459#
2460# # HTTP response timeout (default: 5s)
2461# response_timeout = "5s"
2462
2463
2464# # Read NSQ topic and channel statistics.
2465# [[inputs.nsq]]
2466# ## An array of NSQD HTTP API endpoints
2467# endpoints = ["http://localhost:4151"]
2468
2469
2470# # Collect kernel snmp counters and network interface statistics
2471# [[inputs.nstat]]
2472# ## file paths for proc files. If empty, the default paths will be used:
2473# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
2474# ## These can also be overridden with env variables, see README.
2475# proc_net_netstat = "/proc/net/netstat"
2476# proc_net_snmp = "/proc/net/snmp"
2477# proc_net_snmp6 = "/proc/net/snmp6"
2478# ## dump metrics with 0 values too
2479# dump_zeros = true
2480
2481
2482# # Get standard NTP query metrics, requires ntpq executable.
2483# [[inputs.ntpq]]
2484# ## If false, set the -n ntpq flag. Can reduce metric gather time.
2485# dns_lookup = true
2486
2487
2488# # Pulls statistics from nvidia GPUs attached to the host
2489# [[inputs.nvidia_smi]]
2490# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
2491# # bin_path = "/usr/bin/nvidia-smi"
2492#
2493# ## Optional: timeout for GPU polling
2494# # timeout = "5s"
2495
2496
2497# # OpenLDAP cn=Monitor plugin
2498# [[inputs.openldap]]
2499# host = "localhost"
2500# port = 389
2501#
2502# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
2503# # note that port will likely need to be changed to 636 for ldaps
2504# # valid options: "" | "starttls" | "ldaps"
2505# tls = ""
2506#
2507# # skip peer certificate verification. Default is false.
2508# insecure_skip_verify = false
2509#
2510# # Path to PEM-encoded Root certificate to use to verify server certificate
2511# tls_ca = "/etc/ssl/certs.pem"
2512#
2513# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
2514# bind_dn = ""
2515# bind_password = ""
2516#
2517# # Reverse metric names so they sort more naturally. Recommended.
2518# # This defaults to false if unset, but is set to true when generating a new config
2519# reverse_metric_names = true
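#
# # As an illustrative sketch (hostname is hypothetical), an encrypted
# # variant of the above would be:
# # host = "ldap.example.com"
# # port = 636
# # tls = "ldaps"
# # tls_ca = "/etc/ssl/certs.pem"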
2520
2521
2522# # A plugin to collect stats from OpenSMTPD - a free implementation of the server-side SMTP protocol
2523# [[inputs.opensmtpd]]
2524# ## If running as a restricted user you can prepend sudo for additional access:
2525# # use_sudo = false
2526#
2527# ## The default location of the smtpctl binary can be overridden with:
2528# binary = "/usr/sbin/smtpctl"
2529#
2530# ## The default timeout of 1000ms can be overridden with (in milliseconds):
2531# timeout = 1000
2532
2533
2534# # Read metrics of passenger using passenger-status
2535# [[inputs.passenger]]
2536# ## Path of passenger-status.
2537# ##
2538# ## The plugin gathers metrics by parsing the XML output of passenger-status
2539# ## More information about the tool:
2540# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
2541# ##
2542# ## If no path is specified, then the plugin simply executes passenger-status,
2543# ## which must be available in your PATH
2544# command = "passenger-status -v --show=xml"
2545
2546
2547# # Gather counters from PF
2548# [[inputs.pf]]
2549# ## PF require root access on most systems.
2550# ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
2551# ## Users must configure sudo to allow the telegraf user to run pfctl with no password.
2552# ## pfctl can be restricted to only list command "pfctl -s info".
2553# use_sudo = false
2554
2555
2556# # Read metrics of phpfpm, via HTTP status page or socket
2557# [[inputs.phpfpm]]
2558# ## An array of addresses to gather stats about. Specify an ip or hostname
2559# ## with optional port and path
2560# ##
2561# ## Plugin can be configured in three modes (either can be used):
2562# ## - http: the URL must start with http:// or https://, ie:
2563# ## "http://localhost/status"
2564# ## "http://192.168.130.1/status?full"
2565# ##
2566# ## - unixsocket: path to fpm socket, ie:
2567# ## "/var/run/php5-fpm.sock"
2568# ## or using a custom fpm status path:
2569# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
2570# ##
2571# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
2572# ## "fcgi://10.0.0.12:9000/status"
2573# ## "cgi://10.0.10.12:9001/status"
2574# ##
2575# ## Example of gathering from both a remote host and a local socket:
2576# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
2577# urls = ["http://localhost/status"]
2578
2579
2580# # Ping given url(s) and return statistics
2581# [[inputs.ping]]
2582# ## NOTE: this plugin forks the ping command. You may need to set capabilities
2583# ## via setcap cap_net_raw+p /bin/ping
2584# #
2585# ## List of urls to ping
2586# urls = ["www.google.com"] # required
2587# ## number of pings to send per collection (ping -c <COUNT>)
2588# # count = 1
2589# ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
2590# # ping_interval = 1.0
2591# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
2592# # timeout = 1.0
2593# ## total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
2594# # deadline = 10
2595# ## interface or source address to send ping from (ping -I <INTERFACE/SRC_ADDR>)
2596# ## on Darwin and FreeBSD only a source address is possible: (ping -S <SRC_ADDR>)
2597# # interface = ""
2598
2599
2600# # Measure postfix queue statistics
2601# [[inputs.postfix]]
2602# ## Postfix queue directory. If not provided, telegraf will try to use
2603# ## 'postconf -h queue_directory' to determine it.
2604# # queue_directory = "/var/spool/postfix"
2605
2606
2607# # Read metrics from one or many PowerDNS servers
2608# [[inputs.powerdns]]
2609# ## An array of sockets to gather stats about.
2610# ## Specify a path to unix socket.
2611# unix_sockets = ["/var/run/pdns.controlsocket"]
2612
2613
2614# # Monitor process cpu and memory usage
2615# [[inputs.procstat]]
2616# ## PID file to monitor process
2617# pid_file = "/var/run/nginx.pid"
2618# ## executable name (ie, pgrep <exe>)
2619# # exe = "nginx"
2620# ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
2621# # pattern = "nginx"
2622# ## user as argument for pgrep (ie, pgrep -u <user>)
2623# # user = "nginx"
2624# ## Systemd unit name
2625# # systemd_unit = "nginx.service"
2626# ## CGroup name or path
2627# # cgroup = "systemd/system.slice/nginx.service"
2628#
2629# ## override for process_name
2630# ## This is optional; default is sourced from /proc/<pid>/status
2631# # process_name = "bar"
2632#
2633# ## Field name prefix
2634# # prefix = ""
2635#
2636# ## Add PID as a tag instead of a field; useful to differentiate between
2637# ## processes whose tags are otherwise the same. Can create a large number
2638# ## of series, use judiciously.
2639# # pid_tag = false
2640#
2641# ## Method to use when finding process IDs. Can be one of 'pgrep', or
2642# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
2643# ## the native finder performs the search directly in a manner dependent on the
2644# ## platform. Default is 'pgrep'
2645# # pid_finder = "pgrep"
2646
2647
2648# # Read metrics from one or many prometheus clients
2649# [[inputs.prometheus]]
2650# ## An array of urls to scrape metrics from.
2651# urls = ["http://localhost:9100/metrics"]
2652#
2653# ## An array of Kubernetes services to scrape metrics from.
2654# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
2655#
2656# ## Use bearer token for authorization
2657# # bearer_token = /path/to/bearer/token
2658#
2659# ## Specify timeout duration for slower prometheus clients (default is 3s)
2660# # response_timeout = "3s"
2661#
2662# ## Optional TLS Config
2663# # tls_ca = /path/to/cafile
2664# # tls_cert = /path/to/certfile
2665# # tls_key = /path/to/keyfile
2666# ## Use TLS but skip chain & host verification
2667# # insecure_skip_verify = false
2668
2669
2670# # Reads the last_run_summary.yaml file and converts it to measurements
2671# [[inputs.puppetagent]]
2672# ## Location of puppet last run summary file
2673# location = "/var/lib/puppet/state/last_run_summary.yaml"
2674
2675
2676# # Reads metrics from RabbitMQ servers via the Management Plugin
2677# [[inputs.rabbitmq]]
2678# ## Management Plugin url. (default: http://localhost:15672)
2679# # url = "http://localhost:15672"
2680# ## Tag added to rabbitmq_overview series; deprecated: use tags
2681# # name = "rmq-server-1"
2682# ## Credentials
2683# # username = "guest"
2684# # password = "guest"
2685#
2686# ## Optional TLS Config
2687# # tls_ca = "/etc/telegraf/ca.pem"
2688# # tls_cert = "/etc/telegraf/cert.pem"
2689# # tls_key = "/etc/telegraf/key.pem"
2690# ## Use TLS but skip chain & host verification
2691# # insecure_skip_verify = false
2692#
2693# ## Optional request timeouts
2694# ##
2695# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
2696# ## for a server's response headers after fully writing the request.
2697# # header_timeout = "3s"
2698# ##
2699# ## client_timeout specifies a time limit for requests made by this client.
2700# ## Includes connection time, any redirects, and reading the response body.
2701# # client_timeout = "4s"
2702#
2703# ## A list of nodes to gather as the rabbitmq_node measurement. If not
2704# ## specified, metrics for all nodes are gathered.
2705# # nodes = ["rabbit@node1", "rabbit@node2"]
2706#
2707# ## A list of queues to gather as the rabbitmq_queue measurement. If not
2708# ## specified, metrics for all queues are gathered.
2709# # queues = ["telegraf"]
2710#
2711# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
2712# ## specified, metrics for all exchanges are gathered.
2713# # exchanges = ["telegraf"]
2714#
2715# ## Queues to include and exclude. Globs accepted.
2716# ## Note that an empty array for both will include all queues
2717# queue_name_include = []
2718# queue_name_exclude = []
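#
# ## As an illustrative sketch (queue names are hypothetical): gather
# ## only queues whose names begin with "telegraf", using the globs
# ## described above.
# # queue_name_include = ["telegraf*"]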
2719
2720
2721# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
2722# [[inputs.raindrops]]
2723# ## An array of raindrops middleware URIs to gather stats.
2724# urls = ["http://localhost:8080/_raindrops"]
2725
2726
2727# # Read metrics from one or many redis servers
2728# [[inputs.redis]]
2729# ## specify servers via a url matching:
2730# ## [protocol://][:password]@address[:port]
2731# ## e.g.
2732# ## tcp://localhost:6379
2733# ## tcp://:password@192.168.99.100
2734# ## unix:///var/run/redis.sock
2735# ##
2736# ## If no servers are specified, then localhost is used as the host.
2737# ## If no port is specified, 6379 is used
2738# servers = ["tcp://localhost:6379"]
2739
2740
2741# # Read metrics from one or many RethinkDB servers
2742# [[inputs.rethinkdb]]
2743# ## An array of URIs to gather stats about. Specify an ip or hostname
2744# ## with optional port and password. ie,
2745# ## rethinkdb://user:auth_key@10.10.3.30:28105,
2746# ## rethinkdb://10.10.3.33:18832,
2747# ## 10.0.0.1:10000, etc.
2748# servers = ["127.0.0.1:28015"]
2749# ##
2750# ## If you use rethinkdb > 2.3.0 with username/password authorization,
2751# ## the protocol has to be named "rethinkdb2" - it will use the 1_0 handshake.
2752# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
2753# ##
2754# ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
2755# ## has to be named "rethinkdb".
2756# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
2757
2758
2759# # Read metrics from one or many Riak servers
2760# [[inputs.riak]]
2761# # Specify a list of one or more riak http servers
2762# servers = ["http://localhost:8098"]
2763
2764
2765# # Read API usage and limits for a Salesforce organisation
2766# [[inputs.salesforce]]
2767# ## specify your credentials
2768# ##
2769# username = "your_username"
2770# password = "your_password"
2771# ##
2772# ## (optional) security token
2773# # security_token = "your_security_token"
2774# ##
2775# ## (optional) environment type (sandbox or production)
2776# ## default is: production
2777# ##
2778# # environment = "production"
2779# ##
2780# ## (optional) API version (default: "39.0")
2781# ##
2782# # version = "39.0"
2783
2784
2785# # Monitor sensors, requires lm-sensors package
2786# [[inputs.sensors]]
2787# ## Remove numbers from field names.
2788# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
2789# # remove_numbers = true
2790#
2791# ## Timeout is the maximum amount of time that the sensors command can run.
2792# # timeout = "5s"
2793
2794
2795# # Read metrics from storage devices supporting S.M.A.R.T.
2796# [[inputs.smart]]
2797# ## Optionally specify the path to the smartctl executable
2798# # path = "/usr/bin/smartctl"
2799# #
2800# ## On most platforms smartctl requires root access.
2801# ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
2802# ## Sudo must be configured to allow the telegraf user to run smartctl
2803# ## without a password.
2804# # use_sudo = false
2805# #
2806# ## Skip checking disks in this power mode. Defaults to
2807# ## "standby" to not wake up disks that have stoped rotating.
2808# ## See --nocheck in the man pages for smartctl.
2809# ## smartctl versions 5.41 and 5.42 have faulty detection of
2810# ## power mode and might require changing this value to
2811# ## "never" depending on your disks.
2812# # nocheck = "standby"
2813# #
2814# ## Gather detailed metrics for each SMART Attribute.
2815# ## Defaults to "false"
2816# ##
2817# # attributes = false
2818# #
2819# ## Optionally specify devices to exclude from reporting.
2820# # excludes = [ "/dev/pass6" ]
2821# #
2822# ## Optionally specify devices and device type. If unset,
2823# ## a scan (smartctl --scan) for S.M.A.R.T. devices will be
2824# ## done and all devices found will be included, except for
2825# ## those excluded in excludes.
2826# # devices = [ "/dev/ada0 -d atacam" ]
2827
2828
2829# # Retrieves SNMP values from remote agents
2830# [[inputs.snmp]]
2831# agents = [ "127.0.0.1:161" ]
2832# ## Timeout for each SNMP query.
2833# timeout = "5s"
2834# ## Number of retries to attempt within timeout.
2835# retries = 3
2836# ## SNMP version, values can be 1, 2, or 3
2837# version = 2
2838#
2839# ## SNMP community string.
2840# community = "public"
2841#
2842# ## The GETBULK max-repetitions parameter
2843# max_repetitions = 10
2844#
2845# ## SNMPv3 auth parameters
2846# #sec_name = "myuser"
2847# #auth_protocol = "md5" # Values: "MD5", "SHA", ""
2848# #auth_password = "pass"
2849# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
2850# #context_name = ""
2851# #priv_protocol = "" # Values: "DES", "AES", ""
2852# #priv_password = ""
2853#
2854# ## measurement name
2855# name = "system"
2856# [[inputs.snmp.field]]
2857# name = "hostname"
2858# oid = ".1.0.0.1.1"
2859# [[inputs.snmp.field]]
2860# name = "uptime"
2861# oid = ".1.0.0.1.2"
2862# [[inputs.snmp.field]]
2863# name = "load"
2864# oid = ".1.0.0.1.3"
2865# [[inputs.snmp.field]]
2866# oid = "HOST-RESOURCES-MIB::hrMemorySize"
2867#
2868# [[inputs.snmp.table]]
2869# ## measurement name
2870# name = "remote_servers"
2871# inherit_tags = [ "hostname" ]
2872# [[inputs.snmp.table.field]]
2873# name = "server"
2874# oid = ".1.0.0.0.1.0"
2875# is_tag = true
2876# [[inputs.snmp.table.field]]
2877# name = "connections"
2878# oid = ".1.0.0.0.1.1"
2879# [[inputs.snmp.table.field]]
2880# name = "latency"
2881# oid = ".1.0.0.0.1.2"
2882#
2883# [[inputs.snmp.table]]
2884# ## auto populate table's fields using the MIB
2885# oid = "HOST-RESOURCES-MIB::hrNetworkTable"
2886
2887
2888# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
2889# [[inputs.snmp_legacy]]
2890# ## Use 'oids.txt' file to translate oids to names
2891# ## To generate 'oids.txt' you need to run:
2892# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
2893# ## Or if you have another MIB folder with custom MIBs
2894# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
2895# snmptranslate_file = "/tmp/oids.txt"
2896# [[inputs.snmp.host]]
2897# address = "192.168.2.2:161"
2898# # SNMP community
2899# community = "public" # default public
2900# # SNMP version (1, 2 or 3)
2901# # Version 3 not supported yet
2902# version = 2 # default 2
2903# # SNMP response timeout
2904# timeout = 2.0 # default 2.0
2905# # SNMP request retries
2906# retries = 2 # default 2
2907# # Which get/bulk do you want to collect for this host
2908# collect = ["mybulk", "sysservices", "sysdescr"]
2909# # Simple list of OIDs to get, in addition to "collect"
2910# get_oids = []
2911#
2912# [[inputs.snmp.host]]
2913# address = "192.168.2.3:161"
2914# community = "public"
2915# version = 2
2916# timeout = 2.0
2917# retries = 2
2918# collect = ["mybulk"]
2919# get_oids = [
2920# "ifNumber",
2921# ".1.3.6.1.2.1.1.3.0",
2922# ]
2923#
2924# [[inputs.snmp.get]]
2925# name = "ifnumber"
2926# oid = "ifNumber"
2927#
2928# [[inputs.snmp.get]]
2929# name = "interface_speed"
2930# oid = "ifSpeed"
2931# instance = "0"
2932#
2933# [[inputs.snmp.get]]
2934# name = "sysuptime"
2935# oid = ".1.3.6.1.2.1.1.3.0"
2936# unit = "second"
2937#
2938# [[inputs.snmp.bulk]]
2939# name = "mybulk"
2940# max_repetition = 127
2941# oid = ".1.3.6.1.2.1.1"
2942#
2943# [[inputs.snmp.bulk]]
2944# name = "ifoutoctets"
2945# max_repetition = 127
2946# oid = "ifOutOctets"
2947#
2948# [[inputs.snmp.host]]
2949# address = "192.168.2.13:161"
2950# #address = "127.0.0.1:161"
2951# community = "public"
2952# version = 2
2953# timeout = 2.0
2954# retries = 2
2955# #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
2956# collect = ["sysuptime" ]
2957# [[inputs.snmp.host.table]]
2958# name = "iftable3"
2959# include_instances = ["enp5s0", "eth1"]
2960#
2961# # SNMP TABLEs
2962# # table without mapping neither subtables
2963# [[inputs.snmp.table]]
2964# name = "iftable1"
2965# oid = ".1.3.6.1.2.1.31.1.1.1"
2966#
2967# # table without mapping but with subtables
2968# [[inputs.snmp.table]]
2969# name = "iftable2"
2970# oid = ".1.3.6.1.2.1.31.1.1.1"
2971# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
2972#
2973# # table with mapping but without subtables
2974# [[inputs.snmp.table]]
2975# name = "iftable3"
2976# oid = ".1.3.6.1.2.1.31.1.1.1"
2977# # if empty, get all instances
2978# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
2979# # if empty, get all subtables
2980#
2981# # table with both mapping and subtables
2982# [[inputs.snmp.table]]
2983# name = "iftable4"
2984# oid = ".1.3.6.1.2.1.31.1.1.1"
2985# # if empty get all instances
2986# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
2987# # if empty get all subtables
2988# # sub_tables do not have to be "real" subtables
2989# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
2990
2991
2992# # Read stats from one or more Solr servers or cores
2993# [[inputs.solr]]
2994# ## specify a list of one or more Solr servers
2995# servers = ["http://localhost:8983"]
2996#
2997# ## specify a list of one or more Solr cores (default - all)
2998# # cores = ["main"]
2999
3000
3001# # Read metrics from Microsoft SQL Server
3002# [[inputs.sqlserver]]
3003# ## Specify instances to monitor with a list of connection strings.
3004# ## All connection parameters are optional.
3005# ## By default, the host is localhost, listening on default port, TCP 1433.
3006# ## On Windows, the user is the currently running AD user (SSO).
3007# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
3008# ## parameters.
3009# # servers = [
3010# # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
3011# # ]
3012#
3013# ## Optional parameter, setting this to 2 will use a new version
3014# ## of the collection queries that break compatibility with the original
3015# ## dashboards.
3016# query_version = 2
3017#
3018# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
3019# # azuredb = false
3020#
3021# ## If you would like to exclude some of the metrics queries, list them here
3022# ## Possible choices:
3023# ## - PerformanceCounters
3024# ## - WaitStatsCategorized
3025# ## - DatabaseIO
3026# ## - DatabaseProperties
3027# ## - CPUHistory
3028# ## - DatabaseSize
3029# ## - DatabaseStats
3030# ## - MemoryClerk
3031# ## - VolumeSpace
3032# ## - PerformanceMetrics
3033# # exclude_query = [ 'DatabaseIO' ]
3034
3035
3036# # Sysstat metrics collector
3037# [[inputs.sysstat]]
3038# ## Path to the sadc command.
3039# #
3040# ## Common Defaults:
3041# ## Debian/Ubuntu: /usr/lib/sysstat/sadc
3042# ## Arch: /usr/lib/sa/sadc
3043# ## RHEL/CentOS: /usr/lib64/sa/sadc
3044# sadc_path = "/usr/lib/sa/sadc" # required
3045# #
3046# #
3047# ## Path to the sadf command, if it is not in PATH
3048# # sadf_path = "/usr/bin/sadf"
3049# #
3050# #
3051# ## Activities is a list of activities that are passed as arguments to the
3052# ## sadc collector utility (e.g. DISK, SNMP, etc.)
3053# ## The more activities that are added, the more data is collected.
3054# # activities = ["DISK"]
3055# #
3056# #
3057# ## Group metrics to measurements.
3058# ##
3059# ## If group is false, each metric will be prefixed with a description
3060# ## and is itself a measurement.
3061# ##
3062# ## If Group is true, corresponding metrics are grouped to a single measurement.
3063# # group = true
3064# #
3065# #
3066# ## Options for the sadf command. The values on the left represent the sadf
3067# ## options and the values on the right are their descriptions (used for
3068# ## grouping and prefixing metrics).
3069# ##
3070# ## Run 'sar -h' or 'man sar' to find out the supported options for your
3071# ## sysstat version.
3072# [inputs.sysstat.options]
3073# -C = "cpu"
3074# -B = "paging"
3075# -b = "io"
3076# -d = "disk" # requires DISK activity
3077# "-n ALL" = "network"
3078# "-P ALL" = "per_cpu"
3079# -q = "queue"
3080# -R = "mem"
3081# -r = "mem_util"
3082# -S = "swap_util"
3083# -u = "cpu_util"
3084# -v = "inode"
3085# -W = "swap"
3086# -w = "task"
3087# # -H = "hugepages" # only available for newer linux distributions
3088# # "-I ALL" = "interrupts" # requires INT activity
3089# #
3090# #
3091# ## Device tags can be used to add additional tags for devices.
3092# ## For example the configuration below adds a tag vg with value rootvg for
3093# ## all metrics with sda devices.
3094# # [[inputs.sysstat.device_tags.sda]]
3095# # vg = "rootvg"
3096
3097
3098# # Reads metrics from a Teamspeak 3 Server via ServerQuery
3099# [[inputs.teamspeak]]
3100# ## Server address for Teamspeak 3 ServerQuery
3101# # server = "127.0.0.1:10011"
3102# ## Username for ServerQuery
3103# username = "serverqueryuser"
3104# ## Password for ServerQuery
3105# password = "secret"
3106# ## Array of virtual servers
3107# # virtual_servers = [1]
3108
3109
3110# # Gather metrics from the Tomcat server status page.
3111# [[inputs.tomcat]]
3112# ## URL of the Tomcat server status
3113# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
3114#
3115# ## HTTP Basic Auth Credentials
3116# # username = "tomcat"
3117# # password = "s3cret"
3118#
3119# ## Request timeout
3120# # timeout = "5s"
3121#
3122# ## Optional TLS Config
3123# # tls_ca = "/etc/telegraf/ca.pem"
3124# # tls_cert = "/etc/telegraf/cert.pem"
3125# # tls_key = "/etc/telegraf/key.pem"
3126# ## Use TLS but skip chain & host verification
3127# # insecure_skip_verify = false
3128
3129
3130# # Inserts sine and cosine waves for demonstration purposes
3131# [[inputs.trig]]
3132# ## Set the amplitude
3133# amplitude = 10.0
3134
3135
3136# # Read Twemproxy stats data
3137# [[inputs.twemproxy]]
3138# ## Twemproxy stats address and port (no scheme)
3139# addr = "localhost:22222"
3140# ## Monitor pool name
3141# pools = ["redis_pool", "mc_pool"]
3142
3143
3144# # A plugin to collect stats from the Unbound DNS resolver
3145# [[inputs.unbound]]
3146# ## Address of the server to connect to (read from the unbound conf by default), optionally with ':port'
3147# ## The IP will be looked up if a hostname is given
3148# server = "127.0.0.1:8953"
3149#
3150# ## If running as a restricted user you can prepend sudo for additional access:
3151# # use_sudo = false
3152#
3153# ## The default location of the unbound-control binary can be overridden with:
3154# # binary = "/usr/sbin/unbound-control"
3155#
3156# ## The default timeout of 1s can be overridden with:
3157# # timeout = "1s"
3158#
3159# ## When set to true, thread metrics are tagged with the thread id.
3160# ##
3161# ## The default is false for backwards compatibility, and will be change to
3162# ## true in a future version. It is recommended to set to true on new
3163# ## deployments.
3164# thread_as_tag = false
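#
# ## To sanity-check what telegraf will collect, the same counters can be
# ## dumped by hand (a sketch; sudo and the binary path depend on your setup):
# ## sudo unbound-control stats_noreset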


# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
# ## If running as a restricted user you can prepend sudo for additional access:
# # use_sudo = false
#
# ## The default location of the varnishstat binary can be overridden with:
# binary = "/usr/bin/varnishstat"
#
# ## By default, telegraf gathers stats for the 3 metrics listed below.
# ## Setting stats will override these defaults.
# ## Glob matching can be used, ie, stats = ["MAIN.*"]
# ## stats may also be set to ["*"], which will collect all stats
# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
#
# ## Optional name for the varnish instance (or working directory) to query
# ## Usually appended after -n in the varnish cli
# # instance_name = "instanceName"
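#
# ## To discover which stat names are valid for the 'stats' list on your
# ## system, varnishstat's one-shot mode can be used (a sketch):
# ## varnishstat -1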


# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
# [[inputs.zfs]]
# ## ZFS kstat path. Ignored on FreeBSD
# ## If not specified, then default is:
# # kstatPath = "/proc/spl/kstat/zfs"
#
# ## By default, telegraf gathers all zfs stats
# ## If not specified, then the default is:
# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
# ## For Linux, the default is:
# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
# ## By default, don't gather zpool stats
# # poolMetrics = false


# # Reads 'mntr' stats from one or many zookeeper servers
# [[inputs.zookeeper]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
#
# ## If no servers are specified, then localhost is used as the host.
# ## If no port is specified, 2181 is used
# servers = [":2181"]
#
# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
# # timeout = "5s"
#
# ## Optional TLS Config
# # enable_tls = true
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## If false, skip chain & host verification
# # insecure_skip_verify = true
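#
# ## The same 'mntr' output can be inspected by hand with the zookeeper
# ## four-letter-word command (a sketch, assuming nc is installed):
# ## echo mntr | nc localhost 2181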



###############################################################################
#                            SERVICE INPUT PLUGINS                            #
###############################################################################

# # AMQP consumer plugin
# [[inputs.amqp_consumer]]
# ## Broker to consume from.
# ## deprecated in 1.7; use the brokers option
# # url = "amqp://localhost:5672/influxdb"
#
# ## Brokers to consume from. If multiple brokers are specified a random broker
# ## will be selected anytime a connection is established. This can be
# ## helpful for load balancing when not using a dedicated load balancer.
# brokers = ["amqp://localhost:5672/influxdb"]
#
# ## Authentication credentials for the PLAIN auth_method.
# # username = ""
# # password = ""
#
# ## Exchange to declare and consume from.
# exchange = "telegraf"
#
# ## Exchange type; common types are "direct", "fanout", "topic", "headers", "x-consistent-hash".
# # exchange_type = "topic"
#
# ## If true, exchange will be passively declared.
# # exchange_passive = false
#
# ## Exchange durability can be either "transient" or "durable".
# # exchange_durability = "durable"
#
# ## Additional exchange arguments.
# # exchange_arguments = { }
# # exchange_arguments = {"hash_property" = "timestamp"}
#
# ## AMQP queue name
# queue = "telegraf"
#
# ## Binding Key
# binding_key = "#"
#
# ## Maximum number of messages server should give to the worker.
# # prefetch_count = 50
#
# ## Auth method. PLAIN and EXTERNAL are supported
# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
# ## described here: https://www.rabbitmq.com/plugins.html
# # auth_method = "PLAIN"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
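#
# ## For reference, a single metric in the influx (line protocol) format is
# ## measurement,tags fields [timestamp]; the line below is an illustrative
# ## example, not real data:
# ## cpu,host=server01 usage_idle=90.5 1536000000000000000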


# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
# ## jolokia2 plugin instead.
# ##
# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
#
# context = "/jolokia/read"
# ## List of cassandra servers exposing jolokia read service
# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
# ## List of metrics collected on above servers
# ## Each metric consists of a jmx path.
# ## This will collect all heap memory usage metrics from the jvm and
# ## ReadLatency metrics for all keyspaces and tables.
# ## "type=Table" in the query works with Cassandra 3.0. Older versions might
# ## need to use "type=ColumnFamily"
# metrics = [
# "/java.lang:type=Memory/HeapMemoryUsage",
# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
# ]


# # Influx HTTP write listener
# [[inputs.http_listener]]
# ## Address and port to host HTTP listener on
# service_address = ":8186"
#
# ## maximum duration before timing out read of the request
# read_timeout = "10s"
# ## maximum duration before timing out write of the response
# write_timeout = "10s"
#
# ## Maximum allowed http request body size in bytes.
# ## 0 means to use the default of 536,870,912 bytes (512 mebibytes)
# max_body_size = 0
#
# ## Maximum line size allowed to be sent in bytes.
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
# max_line_size = 0
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
# ## Add service certificate and key
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
#
# ## Optional username and password to accept for HTTP basic authentication.
# ## You probably want to make sure you have TLS configured above for this.
# # basic_username = "foobar"
# # basic_password = "barfoo"
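#
# ## The listener accepts line protocol on its /write path, so a quick test
# ## can be run with curl (a sketch; the metric below is made up, and the
# ## flags assume no TLS or basic auth is configured):
# ## curl -i -XPOST http://localhost:8186/write \
# ##   --data-binary 'test_metric,host=server01 value=0.64'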


# # Read JTI OpenConfig Telemetry from listed sensors
# [[inputs.jti_openconfig_telemetry]]
# ## List of device addresses to collect telemetry from
# servers = ["localhost:1883"]
#
# ## Authentication details. Username and password are required if the device
# ## expects authentication. Client ID must be unique when connecting from
# ## multiple instances of telegraf to the same device.
# username = "user"
# password = "pass"
# client_id = "telegraf"
#
# ## Frequency to get data
# sample_frequency = "1000ms"
#
# ## Sensors to subscribe for
# ## An identifier for each sensor can be provided in the path by separating it
# ## with a space; otherwise the sensor path is used as the identifier.
# ## When an identifier is used, a list of space-separated sensors can be
# ## provided. A single subscription will be created with all these sensors and
# ## data will be saved to a measurement with this identifier name.
# sensors = [
# "/interfaces/",
# "collection /components/ /lldp",
# ]
#
# ## A sensor-group-level reporting rate can also be specified by putting the
# ## reporting rate as a Duration at the beginning of the sensor paths /
# ## collection name. Entries without a reporting rate use the configured
# ## sample frequency.
# sensors = [
# "1000ms customReporting /interfaces /lldp",
# "2000ms collection /components",
# "/interfaces",
# ]
#
# ## x509 certificate to use with the TLS connection. If it is not provided, an
# ## insecure channel will be opened with the server.
# ssl_cert = "/etc/telegraf/cert.pem"
#
# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
# ## Failed streams/calls will not be retried if 0 is provided
# retry_delay = "1000ms"
#
# ## To treat all string values as tags, set this to true
# str_as_tags = false


# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
# ## kafka servers
# brokers = ["localhost:9092"]
# ## topic(s) to consume
# topics = ["telegraf"]
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional SASL Config
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
# ## larger messages are dropped
# max_message_len = 65536
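#
# ## Test data can be pushed into the topic with the console producer that
# ## ships with Kafka (a sketch; the script name and path vary by install):
# ## echo 'test_metric value=42' | kafka-console-producer.sh \
# ##   --broker-list localhost:9092 --topic telegraf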


# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer_legacy]]
# ## topic(s) to consume
# topics = ["telegraf"]
# ## an array of Zookeeper connection strings
# zookeeper_peers = ["localhost:2181"]
# ## Zookeeper Chroot
# zookeeper_chroot = ""
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
# ## larger messages are dropped
# max_message_len = 65536


# # Stream and parse log file(s).
# [[inputs.logparser]]
# ## Log files to parse.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## /var/log/**.log -> recursively find all .log files in /var/log
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
# ## /var/log/apache.log -> only tail the apache log file
# files = ["/var/log/apache/access.log"]
#
# ## Read files that currently exist from the beginning. Files that are created
# ## while telegraf is running (and that match the "files" globs) will always
# ## be read from the beginning.
# from_beginning = false
#
# ## Method used to watch for file updates. Can be either "inotify" or "poll".
# # watch_method = "inotify"
#
# ## Parse logstash-style "grok" patterns:
# [inputs.logparser.grok]
# ## This is a list of patterns to check the given log file(s) for.
# ## Note that adding patterns here increases processing time. The most
# ## efficient configuration is to have one pattern per logparser.
# ## Other common built-in patterns are:
# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
# patterns = ["%{COMBINED_LOG_FORMAT}"]
#
# ## Name of the output measurement.
# measurement = "apache_access_log"
#
# ## Full path(s) to custom pattern files.
# custom_pattern_files = []
#
# ## Custom patterns can also be defined here. Put one pattern per line.
# custom_patterns = '''
# '''
#
# ## Timezone allows you to provide an override for timestamps that
# ## don't already include an offset
# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
# ##
# ## Default: "" which renders UTC
# ## Options are as follows:
# ## 1. Local -- interpret based on machine localtime
# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
# timezone = "Canada/Eastern"
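#
# ## For illustration, a custom pattern is a name followed by a regular
# ## expression, one per line, and can then be referenced from 'patterns'.
# ## The APP_TS pattern below is an assumed example, not a built-in:
# # custom_patterns = '''
# # APP_TS %{MONTHDAY}/%{MONTHNUM}/%{YEAR} %{TIME}
# # '''
# # patterns = ["%{APP_TS:timestamp} %{GREEDYDATA:message}"]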


# # Read metrics from MQTT topic(s)
# [[inputs.mqtt_consumer]]
# ## MQTT broker URLs to be used. The format should be scheme://host:port,
# ## where scheme can be tcp, ssl, or ws.
# servers = ["tcp://localhost:1883"]
#
# ## MQTT QoS, must be 0, 1, or 2
# qos = 0
# ## Connection timeout for initial connection in seconds
# connection_timeout = "30s"
#
# ## Topics to subscribe to
# topics = [
# "telegraf/host01/cpu",
# "telegraf/+/mem",
# "sensors/#",
# ]
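#
# ## In the subscriptions above, '+' matches exactly one topic level and '#'
# ## matches any number of trailing levels, so "telegraf/+/mem" covers
# ## "telegraf/host01/mem" and "sensors/#" covers everything under sensors/.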
#
# ## If true, messages that can't be delivered while the subscriber is offline
# ## will be delivered when it comes back (such as on service restart).
# ## NOTE: if true, client_id MUST be set
# persistent_session = false
# ## If empty, a random client ID will be generated.
# client_id = ""
#
# ## Username and password to connect to the MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"


# # Read metrics from NATS subject(s)
# [[inputs.nats_consumer]]
# ## urls of NATS servers
# # servers = ["nats://localhost:4222"]
# ## Use Transport Layer Security
# # secure = false
# ## subject(s) to consume
# # subjects = ["telegraf"]
# ## name a queue group
# # queue_group = "telegraf_consumers"
#
# ## Sets the limits for pending msgs and bytes for each subscription
# ## These shouldn't need to be adjusted except in very high throughput scenarios
# # pending_message_limit = 65536
# # pending_bytes_limit = 67108864
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"


# # Read NSQ topic for metrics.
# [[inputs.nsq_consumer]]
# ## The 'server' option still works but is deprecated; it is simply prepended
# ## to the nsqd array.
# # server = "localhost:4150"
# ## An array representing the NSQD TCP endpoints
# nsqd = ["localhost:4150"]
# ## An array representing the NSQLookupd HTTP endpoints
# nsqlookupd = ["localhost:4161"]
# topic = "telegraf"
# channel = "consumer"
# max_in_flight = 100
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# ##
# ## All connection parameters are optional.
# ##
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# ##
# address = "host=localhost user=postgres sslmode=disable"
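# ## The same connection could be given in url form; the value below is an
# ## illustrative example, not a default:
# # address = "postgres://postgres@localhost/postgres?sslmode=disable"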
# ## A custom name for the database that will be used as the "server" tag in the
# ## measurement output. If not specified, a default one generated from
# ## the connection address is used.
# # outputaddress = "db01"
#
# ## connection configuration.
# ## maxlifetime - specify the maximum lifetime of a connection.
# ## default is forever (0s)
# max_lifetime = "0s"
#
# ## A list of databases to explicitly ignore. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'databases' option.
# # ignored_databases = ["postgres", "template0", "template1"]
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
# # databases = ["app_production", "testing"]


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql_extensible]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# #
# ## All connection parameters are optional.
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# #
# address = "host=localhost user=postgres sslmode=disable"
#
# ## connection configuration.
# ## maxlifetime - specify the maximum lifetime of a connection.
# ## default is forever (0s)
# max_lifetime = "0s"
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# ## databases = ["app_production", "testing"]
# #
# ## A custom name for the database that will be used as the "server" tag in the
# ## measurement output. If not specified, a default one generated from
# ## the connection address is used.
# # outputaddress = "db01"
# #
# ## Define the toml config where the sql queries are stored.
# ## New queries can be added here. If withdbname is set to true and no
# ## databases are defined in the 'databases' field, the sql query is ended by
# ## an 'is not null' clause in order to make the query succeed.
# ## Example:
# ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
# ## because the databases variable was set to ['postgres', 'pgbench'] and
# ## withdbname was true. Note that if withdbname is set to false, the where
# ## clause (i.e. with the dbname) should not be defined; the tagvalue
# ## field is used to define custom tags (separated by commas).
# ## The optional "measurement" value can be used to override the default
# ## output measurement name ("postgresql").
# #
# ## Structure:
# ## [[inputs.postgresql_extensible.query]]
# ## sqlquery string
# ## version string
# ## withdbname boolean
# ## tagvalue string (comma separated)
# ## measurement string
# [[inputs.postgresql_extensible.query]]
# sqlquery="SELECT * FROM pg_stat_database"
# version=901
# withdbname=false
# tagvalue=""
# measurement=""
# [[inputs.postgresql_extensible.query]]
# sqlquery="SELECT * FROM pg_stat_bgwriter"
# version=901
# withdbname=false
# tagvalue="postgresql.stats"
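# ## An additional query using the "measurement" override might look like the
# ## following sketch (query, version, and names are illustrative assumptions):
# # [[inputs.postgresql_extensible.query]]
# # sqlquery="SELECT datname, numbackends FROM pg_stat_database"
# # version=901
# # withdbname=false
# # tagvalue="datname"
# # measurement="pg_connections"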


# # Generic socket listener capable of handling multiple socket types.
# [[inputs.socket_listener]]
# ## URL to listen on
# # service_address = "tcp://:8094"
# # service_address = "tcp://127.0.0.1:http"
# # service_address = "tcp4://:8094"
# # service_address = "tcp6://:8094"
# # service_address = "tcp6://[2001:db8::1]:8094"
# # service_address = "udp://:8094"
# # service_address = "udp4://:8094"
# # service_address = "udp6://:8094"
# # service_address = "unix:///tmp/telegraf.sock"
# # service_address = "unixgram:///tmp/telegraf.sock"
#
# ## Maximum number of concurrent connections.
# ## Only applies to stream sockets (e.g. TCP).
# ## 0 (default) is unlimited.
# # max_connections = 1024
#
# ## Read timeout.
# ## Only applies to stream sockets (e.g. TCP).
# ## 0 (default) is unlimited.
# # read_timeout = "30s"
#
# ## Optional TLS configuration.
# ## Only applies to stream sockets (e.g. TCP).
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Enables client authentication if set.
# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
# ## Maximum socket buffer size in bytes.
# ## For stream sockets, once the buffer fills up, the sender will start backing up.
# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
# ## Defaults to the OS default.
# # read_buffer_size = 65535
#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# # keep_alive_period = "5m"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# # data_format = "influx"
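#
# ## With a udp://:8094 service_address and the influx data format, a quick
# ## test can be run with netcat (a sketch; the metric below is made up):
# ## echo 'test_metric,host=server01 value=0.64' | nc -u -w1 localhost 8094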


# # Statsd UDP/TCP Server
# [[inputs.statsd]]
# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
# protocol = "udp"
#
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
# max_tcp_connections = 250
#
# ## Enable TCP keep alive probes (default=false)
# tcp_keep_alive = false
#
# ## Specifies the keep-alive period for an active network connection.
# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
# ## Defaults to the OS configuration.
# # tcp_keep_alive_period = "2h"
#
# ## Address and port to host UDP listener on
# service_address = ":8125"
#
# ## The following configuration options control when telegraf clears its cache
# ## of previous values. If set to false, then telegraf will only clear its
# ## cache when the daemon is restarted.
# ## Reset gauges every interval (default=true)
# delete_gauges = true
# ## Reset counters every interval (default=true)
# delete_counters = true
# ## Reset sets every interval (default=true)
# delete_sets = true
# ## Reset timings & histograms every interval (default=true)
# delete_timings = true
#
# ## Percentiles to calculate for timing & histogram stats
# percentiles = [90]
#
# ## separator to use between elements of a statsd metric
# metric_separator = "_"
#
# ## Parses tags in the datadog statsd format
# ## http://docs.datadoghq.com/guides/dogstatsd/
# parse_data_dog_tags = false
#
# ## Statsd data translation templates, more info can be read here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
# # templates = [
# # "cpu.* measurement*"
# # ]
#
# ## Number of UDP messages allowed to queue up, once filled,
# ## the statsd server will start dropping packets
# allowed_pending_messages = 10000
#
# ## Number of timing/histogram values to track per-measurement in the
# ## calculation of percentiles. Raising this limit increases the accuracy
# ## of percentiles but also increases the memory usage and cpu time.
# percentile_limit = 1000
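#
# ## A test counter can be sent to the listener with netcat (a sketch; the
# ## metric name is made up; statsd lines have the form name:value|type):
# ## echo 'deploys.prod:1|c' | nc -u -w1 localhost 8125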


# # Accepts syslog messages per RFC5425
# [[inputs.syslog]]
# ## Specify an ip or hostname with port - e.g., tcp://localhost:6514, tcp://10.0.0.1:6514
# ## Protocol, address and port to host the syslog receiver.
# ## If no host is specified, then localhost is used.
# ## If no port is specified, 6514 is used (RFC5425#section-4.1).
# server = "tcp://:6514"
#
# ## TLS Config
# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
#
# ## Period between keep alive probes.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# ## Only applies to stream sockets (e.g. TCP).
# # keep_alive_period = "5m"
#
# ## Maximum number of concurrent connections (default = 0).
# ## 0 means unlimited.
# ## Only applies to stream sockets (e.g. TCP).
# # max_connections = 1024
#
# ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
# ## 0 means unlimited.
# # read_timeout = "5s"
#
# ## Whether to parse in best effort mode or not (default = false, i.e. best
# ## effort parsing is off).
# # best_effort = false
#
# ## Character to prepend to SD-PARAMs (default = "_").
# ## A syslog message can contain multiple parameters and multiple identifiers
# ## within the structured data section.
# ## E.g., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
# ## For each combination a field is created.
# ## Its name is created by concatenating identifier, sdparam_separator, and parameter name.
# # sdparam_separator = "_"


# # Stream a log file, like the tail -f command
# [[inputs.tail]]
# ## files to tail.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## "/var/log/**.log" -> recursively find all .log files in /var/log
# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
# ## "/var/log/apache.log" -> just tail the apache log file
# ##
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/mymetrics.out"]
# ## Read file from beginning.
# from_beginning = false
# ## Whether file is a named pipe
# pipe = false
#
# ## Method used to watch for file updates. Can be either "inotify" or "poll".
# # watch_method = "inotify"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"


# # Generic TCP listener
# [[inputs.tcp_listener]]
# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
# # socket_listener plugin
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # Generic UDP listener
# [[inputs.udp_listener]]
# # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
# # socket_listener plugin
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # A Webhooks Event collector
# [[inputs.webhooks]]
# ## Address and port to host Webhook listener on
# service_address = ":1619"
#
# [inputs.webhooks.filestack]
# path = "/filestack"
#
# [inputs.webhooks.github]
# path = "/github"
# # secret = ""
#
# [inputs.webhooks.mandrill]
# path = "/mandrill"
#
# [inputs.webhooks.rollbar]
# path = "/rollbar"
#
# [inputs.webhooks.papertrail]
# path = "/papertrail"
#
# [inputs.webhooks.particle]
# path = "/particle"


# # This plugin implements the Zipkin http server to gather trace and timing
# # data needed to troubleshoot latency problems in microservice architectures.
# [[inputs.zipkin]]
# # path = "/api/v1/spans" # URL path for span data
# # port = 9411 # Port on which Telegraf listens