# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
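#
# Illustrative example only (INFLUX_URL is a hypothetical variable you would
# export in telegraf's environment before starting it):
#   export INFLUX_URL=http://192.168.1.24:8086    # in the shell
#   urls = ["$INFLUX_URL"]                        # quoted, since it is a string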


# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"


# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
  ## output, and will flush this buffer on a successful write. Oldest metrics
  ## are dropped first when this buffer fills.
  ## This buffer only fills when writes fail to output plugin(s).
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. You shouldn't set this below
  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ##   ie, when interval = "10s", precision will be "1s"
  ##       when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Logging configuration:
  ## Run telegraf with debug log messages.
  debug = false
  ## Run telegraf in quiet mode (error log messages only).
  quiet = false
  ## Specify the log file name. The empty string means to log to stderr.
  logfile = ""

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false


###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
  ## The full HTTP or UDP URL for your InfluxDB instance.
  ##
  ## Multiple urls can be specified as part of the same cluster,
  ## this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
  urls = ["http://192.168.1.24:8086"] # required
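  ## Illustrative sketch only (hostnames below are hypothetical): listing
  ## several members of one cluster, from which a single url is picked for
  ## each write interval.
  # urls = ["http://influx-a.example.com:8086", "http://influx-b.example.com:8086"]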
  ## The target database for metrics (telegraf will create it if it does not exist).
  database = "telegraf" # required

  ## Name of existing retention policy to write to. Empty string writes to
  ## the default retention policy.
  retention_policy = ""
  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
  write_consistency = "any"

  ## Write timeout (for the InfluxDB client), formatted as a string.
  ## If not provided, will default to 5s. 0s means no timeout (not recommended).
  timeout = "5s"
  username = "telegraf"
  password = "A9ZUcoYimjsLyJ5cbL4O"
  ## Set the user agent for HTTP POSTs (can be useful for log differentiation)
  # user_agent = "telegraf"
  ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
  # udp_payload = 512

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## HTTP Proxy Config
  # http_proxy = "http://corporate.proxy:3128"

  ## Optional HTTP headers
  # http_headers = {"X-Special-Header" = "Special-Value"}

  ## Compress each HTTP request payload using GZIP.
  # content_encoding = "gzip"


# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
#   ## Amon Server Key
#   server_key = "my-server-key" # required.
#
#   ## Amon Instance URL
#   amon_instance = "https://youramoninstance" # required
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Configuration for the AMQP server to send metrics to
# [[outputs.amqp]]
#   ## AMQP url
#   url = "amqp://localhost:5672/influxdb"
#   ## AMQP exchange
#   exchange = "telegraf"
#   ## Auth method. PLAIN and EXTERNAL are supported
#   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
#   ## described here: https://www.rabbitmq.com/plugins.html
#   # auth_method = "PLAIN"
#   ## Telegraf tag to use as a routing key
#   ## ie, if this tag exists, its value will be used as the routing key
#   routing_tag = "host"
#   ## Delivery Mode controls if a published message is persistent
#   ## Valid options are "transient" and "persistent". default: "transient"
#   delivery_mode = "transient"
#
#   ## InfluxDB retention policy
#   # retention_policy = "default"
#   ## InfluxDB database
#   # database = "telegraf"
#
#   ## Write timeout, formatted as a string. If not provided, will default
#   ## to 5s. 0s means no timeout (not recommended).
#   # timeout = "5s"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
#   ## Amazon REGION
#   region = "us-east-1"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Namespace for the CloudWatch MetricDatums
#   namespace = "InfluxData/Telegraf"


# # Configuration for CrateDB to send metrics to.
# [[outputs.cratedb]]
#   # A github.com/jackc/pgx connection string.
#   # See https://godoc.org/github.com/jackc/pgx#ParseDSN
#   url = "postgres://user:password@localhost/schema?sslmode=disable"
#   # Timeout for all CrateDB queries.
#   timeout = "5s"
#   # Name of the table to store metrics in.
#   table = "metrics"
#   # If true, and the metrics table does not exist, create it automatically.
#   table_create = true


# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
#   ## Datadog API key
#   apikey = "my-secret-key" # required.
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Send metrics to nowhere at all
# [[outputs.discard]]
#   # no configuration


# # Configuration for Elasticsearch to send metrics to.
# [[outputs.elasticsearch]]
#   ## The full HTTP endpoint URL for your Elasticsearch instance
#   ## Multiple urls can be specified as part of the same cluster,
#   ## this means that only ONE of the urls will be written to each interval.
#   urls = [ "http://node1.es.example.com:9200" ] # required.
#   ## Elasticsearch client timeout, defaults to "5s" if not set.
#   timeout = "5s"
#   ## Set to true to ask Elasticsearch for a list of all cluster nodes,
#   ## making it unnecessary to list all nodes in the urls config option.
#   enable_sniffer = false
#   ## Set the interval to check if the Elasticsearch nodes are available
#   ## Setting to "0s" will disable the health check (not recommended in production)
#   health_check_interval = "10s"
#   ## HTTP basic authentication details (eg. when using Shield)
#   # username = "telegraf"
#   # password = "mypassword"
#
#   ## Index Config
#   ## The target index for metrics (Elasticsearch will create it if it does not exist).
#   ## You can use the date specifiers below to create indexes per time frame.
#   ## The metric timestamp will be used to decide the destination index name
#   # %Y - year (2016)
#   # %y - last two digits of year (00..99)
#   # %m - month (01..12)
#   # %d - day of month (e.g., 01)
#   # %H - hour (00..23)
#   # %V - week of the year (ISO week) (01..53)
#   ## Additionally, you can specify a tag name using the notation {{tag_name}}
#   ## which will be used as part of the index name. If the tag does not exist,
#   ## the default tag value will be used.
#   # index_name = "telegraf-{{host}}-%Y.%m.%d"
#   # default_tag_value = "none"
#   index_name = "telegraf-%Y.%m.%d" # required.
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Template Config
#   ## Set to true if you want telegraf to manage its index template.
#   ## If enabled it will create a recommended index template for telegraf indexes
#   manage_template = true
#   ## The template name used for telegraf indexes
#   template_name = "telegraf"
#   ## Set to true if you want telegraf to overwrite an existing template
#   overwrite_template = false


# # Send telegraf metrics to file(s)
# [[outputs.file]]
#   ## Files to write to, "stdout" is a specially handled file.
#   files = ["stdout", "/tmp/metrics.out"]
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for Graphite server to send metrics to
# [[outputs.graphite]]
#   ## TCP endpoint for your graphite instance.
#   ## If multiple endpoints are configured, output will be load balanced.
#   ## Only one of the endpoints will be written to with each iteration.
#   servers = ["localhost:2003"]
#   ## Prefix metrics name
#   prefix = ""
#   ## Graphite output template
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   template = "host.tags.measurement.field"
#   ## timeout in seconds for the write connection to graphite
#   timeout = 2
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
#   ## UDP endpoint for your graylog instance.
#   servers = ["127.0.0.1:12201", "192.168.1.1:12201"]


# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
#   ## Project API Token (required)
#   api_token = "API Token" # required
#   ## Prefix the metrics with a given name
#   prefix = ""
#   ## Stats output template (Graphite formatting)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   template = "host.tags.measurement.field"
#   ## Timeout in seconds to connect
#   timeout = "2s"
#   ## Display communication with Instrumental
#   debug = false


# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
#   ## URLs of kafka brokers
#   brokers = ["localhost:9092"]
#   ## Kafka topic for producer messages
#   topic = "telegraf"
#
#   ## Optional topic suffix configuration.
#   ## If the section is omitted, no suffix is used.
#   ## The following topic suffix methods are supported:
#   ##   measurement - suffix is separator + measurement name
#   ##   tags        - suffix is separator + specified tags' values
#   ##                 interleaved with separator
#
#   ## Suffix is "_" + measurement name
#   # [outputs.kafka.topic_suffix]
#   #   method = "measurement"
#   #   separator = "_"
#
#   ## Suffix is "__" + the measurement's "foo" tag value.
#   ## If there is no such tag, the suffix is an empty string
#   # [outputs.kafka.topic_suffix]
#   #   method = "tags"
#   #   keys = ["foo"]
#   #   separator = "__"
#
#   ## Suffix is "_" + the measurement's "foo" and "bar" tag values,
#   ## separated by "_". If those tags do not exist, their values are
#   ## treated as empty strings.
#   # [outputs.kafka.topic_suffix]
#   #   method = "tags"
#   #   keys = ["foo", "bar"]
#   #   separator = "_"
#
#   ## Telegraf tag to use as a routing key
#   ## ie, if this tag exists, its value will be used as the routing key
#   routing_tag = "host"
#
#   ## CompressionCodec represents the various compression codecs recognized by
#   ## Kafka in messages.
#   ##   0 : No compression
#   ##   1 : Gzip compression
#   ##   2 : Snappy compression
#   compression_codec = 0
#
#   ## RequiredAcks is used in Produce Requests to tell the broker how many
#   ## replica acknowledgements it must see before responding
#   ##   0 : the producer never waits for an acknowledgement from the broker.
#   ##       This option provides the lowest latency but the weakest durability
#   ##       guarantees (some data will be lost when a server fails).
#   ##   1 : the producer gets an acknowledgement after the leader replica has
#   ##       received the data. This option provides better durability as the
#   ##       client waits until the server acknowledges the request as successful
#   ##       (only messages that were written to the now-dead leader but not yet
#   ##       replicated will be lost).
#   ##   -1: the producer gets an acknowledgement after all in-sync replicas have
#   ##       received the data. This option provides the best durability; no
#   ##       messages will be lost as long as at least one in-sync replica remains.
#   required_acks = -1
#
#   ## The total number of times to retry sending a message
#   max_retry = 3
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Optional SASL Config
#   # sasl_username = "kafka"
#   # sasl_password = "secret"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for the AWS Kinesis output.
# [[outputs.kinesis]]
#   ## Amazon REGION of kinesis endpoint.
#   region = "ap-southeast-2"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Kinesis StreamName must exist prior to starting telegraf.
#   streamname = "StreamName"
#   ## DEPRECATED: PartitionKey is used for sharding data.
#   partitionkey = "PartitionKey"
#   ## DEPRECATED: If set, the partition key will be a random UUID on every put.
#   ## This allows for scaling across multiple shards in a stream.
#   ## This will cause issues with ordering.
#   use_random_partitionkey = false
#   ## The partition key can be calculated using one of several methods:
#   ##
#   ## Use a static value for all writes:
#   # [outputs.kinesis.partition]
#   #   method = "static"
#   #   key = "howdy"
#   #
#   ## Use a random partition key on each write:
#   # [outputs.kinesis.partition]
#   #   method = "random"
#   #
#   ## Use the measurement name as the partition key:
#   # [outputs.kinesis.partition]
#   #   method = "measurement"
#   #
#   ## Use the value of a tag for all writes; if the tag is not set, the
#   ## empty string will be used:
#   # [outputs.kinesis.partition]
#   #   method = "tag"
#   #   key = "host"
#
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"
#
#   ## debug will show upstream aws messages.
#   debug = false


# # Configuration for Librato API to send metrics to.
# [[outputs.librato]]
#   ## Librato API Docs
#   ## http://dev.librato.com/v1/metrics-authentication
#   ## Librato API user
#   api_user = "telegraf@influxdb.com" # required.
#   ## Librato API token
#   api_token = "my-secret-token" # required.
#   ## Debug
#   # debug = false
#   ## Connection timeout.
#   # timeout = "5s"
#   ## Output source Template (same as graphite buckets)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   ## This template is used for Librato's source (not the metric name)
#   template = "host"
#


# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
#   servers = ["localhost:1883"] # required.
#
#   ## MQTT outputs send metrics to this topic format
#   ##   "<topic_prefix>/<hostname>/<pluginname>/"
#   ##   ex: prefix/web01.example.com/mem
#   topic_prefix = "telegraf"
#
#   ## username and password used to connect to the MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## client ID; if not set, a random ID is generated
#   # client_id = ""
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NATS
# [[outputs.nats]]
#   ## URLs of NATS servers
#   servers = ["nats://localhost:4222"]
#   ## Optional credentials
#   # username = ""
#   # password = ""
#   ## NATS subject for producer messages
#   subject = "telegraf"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
#   ## Location of nsqd instance listening on TCP
#   server = "localhost:4150"
#   ## NSQ topic for producer messages
#   topic = "telegraf"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
#   ## prefix for metrics keys
#   prefix = "my.specific.prefix."
#
#   ## DNS name of the OpenTSDB server
#   ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
#   ## telnet API. "http://opentsdb.example.com" will use the HTTP API.
#   host = "opentsdb.example.com"
#
#   ## Port of the OpenTSDB server
#   port = 4242
#
#   ## Number of data points to send to OpenTSDB in HTTP requests.
#   ## Not used with telnet API.
#   httpBatchSize = 50
#
#   ## Debug true - Prints OpenTSDB communication
#   debug = false
#
#   ## Separator separates measurement name from field
#   separator = "_"


# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
#   ## Address to listen on
#   # listen = ":9273"
#
#   ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration
#   # expiration_interval = "60s"
#
#   ## Collectors to exclude; valid entries are "gocollector" and "process".
#   ## If unset, both collectors are enabled.
#   collectors_exclude = ["gocollector", "process"]
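#
#   ## Illustrative usage note (scrape path assumed from the Prometheus
#   ## convention this plugin follows): with listen = ":9273", point a
#   ## Prometheus scrape job at http://<telegraf-host>:9273/metrics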


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
#   ## The full TCP or UDP URL of the Riemann server
#   url = "tcp://localhost:5555"
#
#   ## Riemann event TTL, floating-point time in seconds.
#   ## Defines how long an event is considered valid in Riemann
#   # ttl = 30.0
#
#   ## Separator to use between measurement and field name in Riemann service name
#   ## This does not have any effect if 'measurement_as_attribute' is set to 'true'
#   separator = "/"
#
#   ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
#   # measurement_as_attribute = false
#
#   ## Send string metrics as Riemann event states.
#   ## Unless enabled all string metrics will be ignored
#   # string_as_state = false
#
#   ## A list of tag keys whose values get sent as Riemann tags.
#   ## If empty, all Telegraf tag values will be sent as tags
#   # tag_keys = ["telegraf","custom_tag"]
#
#   ## Additional Riemann tags to send.
#   # tags = ["telegraf-output"]
#
#   ## Description for Riemann event
#   # description_text = "metrics collected from telegraf"
#
#   ## Riemann client write timeout, defaults to "5s" if not set.
#   # timeout = "5s"


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann_legacy]]
#   ## URL of server
#   url = "localhost:5555"
#   ## transport protocol to use, either tcp or udp
#   transport = "tcp"
#   ## separator to use between input name and field name in Riemann service name
#   separator = " "


# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
#   ## URL to connect to
#   # address = "tcp://127.0.0.1:8094"
#   # address = "tcp://example.com:http"
#   # address = "tcp4://127.0.0.1:8094"
#   # address = "tcp6://127.0.0.1:8094"
#   # address = "tcp6://[2001:db8::1]:8094"
#   # address = "udp://127.0.0.1:8094"
#   # address = "udp4://127.0.0.1:8094"
#   # address = "udp6://127.0.0.1:8094"
#   # address = "unix:///tmp/telegraf.sock"
#   # address = "unixgram:///tmp/telegraf.sock"
#
#   ## Period between keep alive probes.
#   ## Only applies to TCP sockets.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   # keep_alive_period = "5m"
#
#   ## Data format to generate.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   # data_format = "influx"


# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
#   ## DNS name of the wavefront proxy server
#   host = "wavefront.example.com"
#
#   ## Port that the Wavefront proxy server listens on
#   port = 2878
#
#   ## prefix for metrics keys
#   #prefix = "my.specific.prefix."
#
#   ## whether to use "value" for name of simple fields
#   #simple_fields = false
#
#   ## character to use between metric and field name. defaults to . (dot)
#   #metric_separator = "."
#
#   ## Convert metric name paths to use the metric_separator character
#   ## When true (default), all _ (underscore) characters in the final metric name are converted
#   #convert_paths = true
#
#   ## Use Regex to sanitize metric and tag names from invalid characters
#   ## Regex is more thorough, but significantly slower
#   #use_regex = false
#
#   ## point tags to use as the source name for Wavefront (if none found, host will be used)
#   #source_override = ["hostname", "snmp_host", "node_host"]
#
#   ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true
#   #convert_bool = true
#
#   ## Define a mapping, namespaced by metric prefix, from string values to numeric values
#   ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for
#   ## any metrics beginning with "elasticsearch"
#   #[[outputs.wavefront.string_to_number.elasticsearch]]
#   #  green = 1.0
#   #  yellow = 0.5
#   #  red = 0.0



###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################

# # Print all metrics that pass through this filter.
# [[processors.printer]]



###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################

# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false

# # Create aggregate histograms.
# [[aggregators.histogram]]
#   ## The period in which to flush the aggregator.
#   period = "30s"
#
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#
#   ## Example config that aggregates all fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
#   #   ## The name of the metric.
#   #   measurement_name = "cpu"
#
#   ## Example config that aggregates only specific fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
#   #   ## The name of the metric.
#   #   measurement_name = "diskio"
#   #   ## The specific fields of the metric to aggregate.
#   #   fields = ["io_time", "read_time", "write_time"]


# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false



###############################################################################
#                             INPUT PLUGINS                                   #
###############################################################################

# Read metrics about cpu usage
[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## If true, collect raw CPU time metrics.
  collect_cpu_time = false
  ## If true, compute and report the sum of all non-idle CPU states.
  report_active = false

# Read metrics about disk usage by mount point
[[inputs.disk]]
  ## By default, telegraf gathers stats for all mountpoints.
  ## Setting mountpoints will restrict the stats to the specified mountpoints.
  # mount_points = ["/"]

  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
  ## present on /run, /var/run, /dev/shm or /dev).
  ignore_fs = ["tmpfs", "devtmpfs", "devfs"]


# Read metrics about disk IO by device
[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  # devices = ["sda", "sdb"]
  ## Uncomment the following line if you need disk serial numbers.
  # skip_serial_number = false
  #
  ## On systems which support it, device metadata can be added in the form of
  ## tags.
  ## Currently only Linux is supported via udev properties. You can view
  ## available properties for a device by running:
  ## 'udevadm info -q property -n /dev/sda'
  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
  #
  ## Using the same metadata source as device_tags, you can also customize the
  ## name of the device via templates.
  ## The 'name_templates' parameter is a list of templates to try and apply to
  ## the device. The template may contain variables in the form of '$PROPERTY' or
  ## '${PROPERTY}'. The first template which does not contain any variables not
  ## present for the device is used as the device name tag.
  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
  ## the near-meaningless DM-0 name.
  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]


# Get kernel statistics from /proc/stat
[[inputs.kernel]]
  # no configuration


# Read metrics about memory usage
[[inputs.mem]]
  # no configuration


# Get the number of processes and group them by status
[[inputs.processes]]
  # no configuration


# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration


# Read metrics about system load & uptime
[[inputs.system]]
  # no configuration


# # Read stats from aerospike server(s)
# [[inputs.aerospike]]
#   ## Aerospike servers to connect to (with port)
#   ## This plugin will query all namespaces the aerospike
#   ## server has configured and get stats for them.
#   servers = ["localhost:3000"]


# # Read Apache status information (mod_status)
# [[inputs.apache]]
#   ## An array of URLs to gather from, must be directed at the machine
#   ## readable version of the mod_status page including the auto query string.
#   ## Default is "http://localhost/server-status?auto".
#   urls = ["http://localhost/server-status?auto"]
#
#   ## Credentials for basic HTTP authentication.
#   # username = "myuser"
#   # password = "mypassword"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
#   ## Bcache sets path
#   ## If not specified, then the default is:
#   bcachePath = "/sys/fs/bcache"
#
#   ## By default, telegraf gathers stats for all bcache devices
#   ## Setting devices will restrict the stats to the specified
#   ## bcache devices.
#   bcacheDevs = ["bcache0"]


# # Collect bond interface status, slaves statuses and failures count
# [[inputs.bond]]
#   ## Sets 'proc' directory path
#   ## If not specified, then the default is /proc
#   # host_proc = "/proc"
#
#   ## By default, telegraf gathers stats for all bond interfaces
#   ## Setting interfaces will restrict the stats to the specified
#   ## bond interfaces.
#   # bond_interfaces = ["bond0"]


# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
#   # This is the context root used to compose the jolokia url
#   context = "/jolokia/read"
#   ## List of cassandra servers exposing jolokia read service
#   servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
#   ## List of metrics collected on above servers
#   ## Each metric consists of a jmx path.
#   ## This will collect all heap memory usage metrics from the jvm and
#   ## ReadLatency metrics for all keyspaces and tables.
#   ## "type=Table" in the query works with Cassandra 3.0. Older versions might
#   ## need to use "type=ColumnFamily"
#   metrics = [
#     "/java.lang:type=Memory/HeapMemoryUsage",
#     "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
#   ]


# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
#   ## This is the recommended interval to poll. Too frequent and you will lose
#   ## data points due to timeouts during rebalancing and recovery
#   interval = '1m'
#
#   ## All configuration values are optional, defaults are shown below
#
#   ## location of ceph binary
#   ceph_binary = "/usr/bin/ceph"
#
#   ## directory in which to look for socket files
#   socket_dir = "/var/run/ceph"
#
#   ## prefix of MON and OSD socket files, used to determine socket type
#   mon_prefix = "ceph-mon"
#   osd_prefix = "ceph-osd"
#
#   ## suffix used to identify socket files
#   socket_suffix = "asok"
#
#   ## Ceph user to authenticate as
#   ceph_user = "client.admin"
#
#   ## Ceph configuration to use to locate the cluster
#   ceph_config = "/etc/ceph/ceph.conf"
#
#   ## Whether to gather statistics via the admin socket
#   gather_admin_socket_stats = true
#
#   ## Whether to gather statistics via ceph commands
#   gather_cluster_stats = false


# # Read specific statistics per cgroup
# [[inputs.cgroup]]
#   ## Directories in which to look for files, globs are supported.
#   ## Consider restricting paths to the set of cgroups you really
#   ## want to monitor if you have a large number of cgroups, to avoid
#   ## any cardinality issues.
#   # paths = [
#   #   "/cgroup/memory",
#   #   "/cgroup/memory/child1",
#   #   "/cgroup/memory/child2/*",
#   # ]
#   ## cgroup stat fields, as file names, globs are supported.
#   ## these file names are appended to each path from above.
#   # files = ["memory.*usage*", "memory.limit_in_bytes"]


# # Get standard chrony metrics, requires chronyc executable.
# [[inputs.chrony]]
#   ## If true, chronyc tries to perform a DNS lookup for the time server.
#   # dns_lookup = false


# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
#   ## Amazon Region
#   region = "us-east-1"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   # The minimum period for Cloudwatch metrics is 1 minute (60s). However,
#   # not all metrics are made available at the 1 minute period. Some are
#   # collected at 3 minute, 5 minute, or larger intervals.
#   # See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
#   # Note that if a period is configured that is smaller than the minimum for a
#   # particular metric, that metric will not be returned by the Cloudwatch API
#   # and will not be collected by Telegraf.
#   #
#   ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
#   period = "5m"
#
#   ## Collection Delay (required - must account for metrics availability via CloudWatch API)
#   delay = "5m"
#
#   ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
#   ## gaps or overlap in pulled data
#   interval = "5m"
#
#   ## Configure the TTL for the internal cache of metrics.
#   ## Defaults to 1 hr if not specified
#   #cache_ttl = "10m"
#
#   ## Metric Statistic Namespace (required)
#   namespace = "AWS/ELB"
#
#   ## Maximum requests per second. Note that the global default AWS rate limit is
#   ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
#   ## maximum of 400. Optional - default value is 200.
#   ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
#   ratelimit = 200
#
#   ## Metrics to Pull (optional)
#   ## Defaults to all Metrics in Namespace if nothing is provided
#   ## Refreshes Namespace available metrics every 1h
#   #[[inputs.cloudwatch.metrics]]
#   #  names = ["Latency", "RequestCount"]
#   #
#   #  ## Dimension filters for Metric (optional)
#   #  [[inputs.cloudwatch.metrics.dimensions]]
#   #    name = "LoadBalancerName"
#   #    value = "p-example"


# # Collects conntrack stats from the configured directories and files.
# [[inputs.conntrack]]
#   ## The following defaults would work with multiple versions of conntrack.
#   ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
#   ## kernel versions, as are the directory locations.
#
#   ## Superset of filenames to look for within the conntrack dirs.
#   ## Missing files will be ignored.
#   files = ["ip_conntrack_count","ip_conntrack_max",
#            "nf_conntrack_count","nf_conntrack_max"]
#
#   ## Directories to search within for the conntrack files above.
#   ## Missing directories will be ignored.
#   dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]


# # Gather health check statuses from services registered in Consul
# [[inputs.consul]]
#   ## Most of these values default to the ones configured at the Consul agent level.
#   ## Optional Consul server address (default: "localhost")
#   # address = "localhost"
#   ## Optional URI scheme for the Consul server (default: "http")
#   # scheme = "http"
#   ## Optional ACL token used in every request (default: "")
#   # token = ""
#   ## Optional username used for request HTTP Basic Authentication (default: "")
#   # username = ""
#   ## Optional password used for HTTP Basic Authentication (default: "")
#   # password = ""
#   ## Optional data centre to query the health checks from (default: "")
#   # datacentre = ""


# # Read metrics from one or many couchbase clusters
# [[inputs.couchbase]]
#   ## specify servers via a url matching:
#   ##   [protocol://][:password]@address[:port]
#   ##   e.g.
#   ##     http://couchbase-0.example.com/
#   ##     http://admin:secret@couchbase-0.example.com:8091/
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no protocol is specified, HTTP is used.
#   ## If no port is specified, 8091 is used.
#   servers = ["http://localhost:8091"]


# # Read CouchDB Stats from one or more servers
# [[inputs.couchdb]]
#   ## Works with CouchDB stats endpoints out of the box
#   ## Multiple HOSTs from which to read CouchDB stats:
#   hosts = ["http://localhost:8086/_stats"]


# # Input plugin for DC/OS metrics
# [[inputs.dcos]]
#   ## The DC/OS cluster URL.
#   cluster_url = "https://dcos-ee-master-1"
#
#   ## The ID of the service account.
#   service_account_id = "telegraf"
#   ## The private key file for the service account.
#   service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
#
#   ## Path containing login token. If set, will read on every gather.
#   # token_file = "/home/dcos/.dcos/token"
#
#   ## In all filter options if both include and exclude are empty all items
#   ## will be collected. Arrays may contain glob patterns.
#   ##
#   ## Node IDs to collect metrics from. If a node is excluded, no metrics will
#   ## be collected for its containers or apps.
#   # node_include = []
#   # node_exclude = []
#   ## Container IDs to collect container metrics from.
#   # container_include = []
#   # container_exclude = []
#   ## Container IDs to collect app metrics from.
#   # app_include = []
#   # app_exclude = []
#
#   ## Maximum concurrent connections to the cluster.
#   # max_connections = 10
#   ## Maximum time to receive a response from cluster.
#   # response_timeout = "20s"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## If false, skip chain & host verification
#   # insecure_skip_verify = true
#
#   ## Recommended filtering to reduce series cardinality.
#   # [inputs.dcos.tagdrop]
#   #   path = ["/var/lib/mesos/slave/slaves/*"]


# # Read metrics from one or many disque servers
# [[inputs.disque]]
#   ## An array of URIs to gather stats about. Specify an ip or hostname
#   ## with optional port and password.
#   ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["localhost"]


# # Provide a native collection for dmsetup based statistics for dm-cache
# [[inputs.dmcache]]
#   ## Whether to report per-device stats or not
#   per_device = true


# # Query a given DNS server and gather statistics
# [[inputs.dns_query]]
#   ## servers to query
#   servers = ["8.8.8.8"]
#
#   ## Network is the network protocol name.
#   # network = "udp"
#
#   ## Domains or subdomains to query.
#   # domains = ["."]
#
#   ## Query record type.
#   ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
#   # record_type = "A"
#
#   ## Dns server port.
#   # port = 53
#
#   ## Query timeout in seconds.
#   # timeout = 2


# # Read metrics about docker containers
# [[inputs.docker]]
#   ## Docker Endpoint
#   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
#   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
#   endpoint = "unix:///var/run/docker.sock"
#
#   ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
#   gather_services = false
#
#   ## Only collect metrics for these containers, collect all if empty
#   container_names = []
#
#   ## Containers to include and exclude. Globs accepted.
#   ## Note that an empty array for both will include all containers
#   container_name_include = []
#   container_name_exclude = []
#
#   ## Timeout for docker list, info, and stats commands
#   timeout = "5s"
#
#   ## Whether to report for each container per-device blkio (8:0, 8:1...) and
#   ## network (eth0, eth1, ...) stats or not
#   perdevice = true
#   ## Whether to report for each container total blkio and network stats or not
#   total = false
#   ## Which environment variables should we use as a tag
#   # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
#   ## docker labels to include and exclude as tags. Globs accepted.
#   ## Note that an empty array for both will include all labels as tags
#   docker_label_include = []
#   docker_label_exclude = []
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # Read statistics from one or many dovecot servers
# [[inputs.dovecot]]
#   ## specify dovecot servers via an address:port list
#   ##   e.g.
#   ##     localhost:24242
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["localhost:24242"]
#   ## Type is one of "user", "domain", "ip", or "global"
#   type = "global"
#   ## Wildcard matches like "*.com". An empty string "" is same as "*"
#   ## If type = "ip" filters should be <IP/network>
#   filters = [""]


# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
#   ## specify a list of one or more Elasticsearch servers
#   # you can add username and password to your url to use basic authentication:
#   # servers = ["http://user:pass@localhost:9200"]
#   servers = ["http://localhost:9200"]
#
#   ## Timeout for HTTP requests to the Elasticsearch server(s)
#   http_timeout = "5s"
#
#   ## When local is true (the default), the node will read only its own stats.
#   ## Set local to false when you want to read the node stats from all nodes
#   ## of the cluster.
#   local = true
#
#   ## Set cluster_health to true when you want to also obtain cluster health stats
#   cluster_health = false
#
#   ## Adjust cluster_health_level when you want to also obtain detailed health stats
#   ## The options are
#   ##   - indices (default)
#   ##   - cluster
#   # cluster_health_level = "indices"
#
#   ## Set cluster_stats to true when you want to also obtain cluster stats from the
#   ## Master node.
#   cluster_stats = false
#
#   ## node_stats is a list of sub-stats that you want to have gathered. Valid options
#   ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
#   ## "breakers". By default, all stats are gathered.
#   # node_stats = ["jvm", "http"]
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
#   ## Commands array
#   commands = [
#     "/tmp/test.sh",
#     "/usr/bin/mycollector --foo=bar",
#     "/tmp/collect_*.sh"
#   ]
#
#   ## Timeout for each command to complete.
#   timeout = "5s"
#
#   ## measurement name suffix (for separating different commands)
#   name_suffix = "_mycollector"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"
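#
#   ## Illustrative sketch (hypothetical script contents): with
#   ## data_format = "influx", each command must write InfluxDB line protocol
#   ## to stdout, e.g. a /tmp/test.sh containing:
#   ##   #!/bin/sh
#   ##   echo "my_metric,host=web01 value=42i"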


# # Read metrics from fail2ban.
# [[inputs.fail2ban]]
#   ## Use sudo to run fail2ban-client
#   use_sudo = false


# # Read stats about given file(s)
# [[inputs.filestat]]
#   ## Files to gather stats about.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   "/var/log/**.log"     -> recursively find all .log files in /var/log
#   ##   "/var/log/*/*.log"    -> find all .log files with a parent dir in /var/log
#   ##   "/var/log/apache.log" -> just tail the apache log file
#   ##
#   ## See https://github.com/gobwas/glob for more examples
#   ##
#   files = ["/var/log/**.log"]
#   ## If true, read the entire file and calculate an md5 checksum.
#   md5 = false


# # Read metrics exposed by fluentd in_monitor plugin
# [[inputs.fluentd]]
#   ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
#   ##
#   ## Endpoint:
#   ## - only one URI is allowed
#   ## - https is not supported
#   endpoint = "http://localhost:24220/api/plugins.json"
#
#   ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
#   exclude = [
#     "monitor_agent",
#     "dummy",
#   ]


# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
#   ## API endpoint, currently supported API:
#   ##
#   ##   - multiple  (Ex http://<host>:12900/system/metrics/multiple)
#   ##   - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
#   ##
#   ## For namespace endpoint, the metrics array will be ignored for that call.
#   ## Endpoint can contain namespace and multiple type calls.
#   ##
#   ## Please check http://[graylog-server-ip]:12900/api-browser for full list
#   ## of endpoints
#   servers = [
#     "http://[graylog-server-ip]:12900/system/metrics/multiple",
#   ]
#
#   ## Metrics list
#   ## A list of metrics can be found in the Graylog web service documentation.
#   ## Or by hitting the web service API at:
#   ##   http://[graylog-host]:12900/system/metrics
#   metrics = [
#     "jvm.cl.loaded",
#     "jvm.memory.pools.Metaspace.committed"
#   ]
#
#   ## Username and password
#   username = ""
#   password = ""
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # Read metrics of haproxy, via socket or csv stats page
# [[inputs.haproxy]]
#   ## An array of addresses to gather stats about. Specify an ip or hostname
#   ## with optional port. ie localhost, 10.10.3.33:1936, etc.
#   ## Make sure you specify the complete path to the stats endpoint
#   ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
#
#   ## If no servers are specified, then the default is 127.0.0.1:1936/haproxy?stats
#   servers = ["http://myhaproxy.com:1936/haproxy?stats"]
#
#   ## You can also use local socket with standard wildcard globbing.
#   ## Server address not starting with 'http' will be treated as a possible
#   ## socket, so both examples below are valid.
#   # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
#
#   ## By default, some of the fields are renamed from what haproxy calls them.
#   ## Setting this option to true results in the plugin keeping the original
#   ## field names.
#   # keep_field_names = true
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # Monitor disks' temperatures using hddtemp
# [[inputs.hddtemp]]
#   ## By default, telegraf gathers temperature data from all disks detected
#   ## by hddtemp.
#   ##
#   ## Only collect temps from the selected disks.
#   ##
#   ## A * as the device name will return the temperature values of all disks.
#   ##
#   # address = "127.0.0.1:7634"
#   # devices = ["sda", "*"]


# # HTTP/HTTPS request given an address, a method and a timeout
# [[inputs.http_response]]
#   ## Server address (default http://localhost)
#   # address = "http://localhost"
#
#   ## Set response_timeout (default 5 seconds)
#   # response_timeout = "5s"
#
#   ## HTTP Request Method
#   # method = "GET"
#
#   ## Whether to follow redirects from the server (defaults to false)
#   # follow_redirects = false
#
#   ## Optional HTTP Request Body
#   # body = '''
#   # {'fake':'data'}
#   # '''
#
#   ## Optional substring or regex match in body of the response
#   # response_string_match = "\"service_status\": \"up\""
#   # response_string_match = "ok"
#   # response_string_match = "\".*_status\".?:.?\"up\""
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## HTTP Request Headers (all values must be strings)
#   # [inputs.http_response.headers]
#   #   Host = "github.com"


# # Read flattened metrics from one or more JSON HTTP endpoints
# [[inputs.httpjson]]
#   ## NOTE: This plugin only reads numerical measurements; strings and booleans
#   ## will be ignored.
#
#   ## Name for the service being polled. Will be appended to the name of the
#   ## measurement e.g. httpjson_webserver_stats
#   ##
#   ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
#   name = "webserver_stats"
#
#   ## URL of each server in the service's cluster
#   servers = [
#     "http://localhost:9999/stats/",
#     "http://localhost:9998/stats/",
#   ]
#   ## Set response_timeout (default 5 seconds)
#   response_timeout = "5s"
#
#   ## HTTP method to use: GET or POST (case-sensitive)
#   method = "GET"
#
#   ## List of tag names to extract from top-level of JSON server response
#   # tag_keys = [
#   #   "my_tag_1",
#   #   "my_tag_2"
#   # ]
#
#   ## HTTP parameters (all values must be strings). For "GET" requests, data
#   ## will be included in the query. For "POST" requests, data will be included
#   ## in the request body as "x-www-form-urlencoded".
#   # [inputs.httpjson.parameters]
#   #   event_type = "cpu_spike"
#   #   threshold = "0.75"
#
#   ## HTTP Headers (all values must be strings)
#   # [inputs.httpjson.headers]
#   #   X-Auth-Token = "my-xauth-token"
#   #   apiVersion = "v1"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
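#
#   ## Illustrative sketch of the flattening (server response is hypothetical):
#   ## a body of {"a": 0.5, "b": {"c": 6}} yields the fields a=0.5 and b_c=6
#   ## on the "httpjson_webserver_stats" measurement.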


# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.influxdb]]
#   ## Works with InfluxDB debug endpoints out of the box,
#   ## but other services can use this format too.
#   ## See the influxdb plugin's README for more details.
#
#   ## Multiple URLs from which to read InfluxDB-formatted JSON
#   ## Default is "http://localhost:8086/debug/vars".
#   urls = [
#     "http://localhost:8086/debug/vars"
#   ]
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## http request & header timeout
#   timeout = "5s"


# # Collect statistics about itself
# [[inputs.internal]]
#   ## If true, collect telegraf memory stats.
#   # collect_memstats = true


# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
# [[inputs.interrupts]]
#   ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
#   # [inputs.interrupts.tagdrop]
#   #   irq = [ "NET_RX", "TASKLET" ]


# # Read metrics from the bare metal servers via IPMI
# [[inputs.ipmi_sensor]]
#   ## optionally specify the path to the ipmitool executable
#   # path = "/usr/bin/ipmitool"
#
#   ## optionally specify one or more servers via a url matching
#   ##   [username[:password]@][protocol[(address)]]
#   ##   e.g.
#   ##     root:passwd@lan(127.0.0.1)
#   ##
#   ## if no servers are specified, local machine sensor stats will be queried
#   ##
#   # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
#
#   ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
#   ## gaps or overlap in pulled data
#   interval = "30s"
#
#   ## Timeout for the ipmitool command to complete
#   timeout = "20s"


1559# # Gather packets and bytes throughput from iptables
1560# [[inputs.iptables]]
# ## iptables requires root access on most systems.
# ## Setting 'use_sudo' to true will make use of sudo to run iptables.
# ## Users must configure sudo to allow the telegraf user to run iptables
# ## with no password; sudo can be restricted to just the list command
# ## "iptables -nvL" (see the example below).
1565# use_sudo = false
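# ## e.g. a sudoers entry granting just that (assuming a "telegraf" user and
# ## iptables at /sbin/iptables; adjust the path for your system):
# ##   telegraf ALL=(root) NOPASSWD: /sbin/iptables -nvL *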
1566# ## Setting 'use_lock' to true runs iptables with the "-w" option.
# ## Adjust your sudo settings appropriately if using this option ("iptables -wnvL")
1568# use_lock = false
1569# ## defines the table to monitor:
1570# table = "filter"
1571# ## defines the chains to monitor.
1572# ## NOTE: iptables rules without a comment will not be monitored.
1573# ## Read the plugin documentation for more information.
1574# chains = [ "INPUT" ]
1575
1576
1577# # Read JMX metrics through Jolokia
1578# [[inputs.jolokia]]
1579# # DEPRECATED: the jolokia plugin has been deprecated in favor of the
1580# # jolokia2 plugin
1581# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
1582#
1583# ## This is the context root used to compose the jolokia url
1584# ## NOTE that Jolokia requires a trailing slash at the end of the context root
1585# ## NOTE that your jolokia security policy must allow for POST requests.
1586# context = "/jolokia/"
1587#
1588# ## This specifies the mode used
1589# # mode = "proxy"
1590# #
1591# ## When in proxy mode this section is used to specify further
1592# ## proxy address configurations.
1593# ## Remember to change host address to fit your environment.
1594# # [inputs.jolokia.proxy]
1595# # host = "127.0.0.1"
1596# # port = "8080"
1597#
1598# ## Optional http timeouts
1599# ##
1600# ## response_header_timeout, if non-zero, specifies the amount of time to wait
1601# ## for a server's response headers after fully writing the request.
1602# # response_header_timeout = "3s"
1603# ##
1604# ## client_timeout specifies a time limit for requests made by this client.
1605# ## Includes connection time, any redirects, and reading the response body.
1606# # client_timeout = "4s"
1607#
1608# ## Attribute delimiter
1609# ##
1610# ## When multiple attributes are returned for a single
1611# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
1612# ## name, and the attribute name, separated by the given delimiter.
1613# # delimiter = "_"
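# ## e.g. with the "thread_count" metric defined below, the ThreadCount
# ## attribute would be emitted as a field named thread_count_ThreadCount.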
1614#
1615# ## List of servers exposing jolokia read service
1616# [[inputs.jolokia.servers]]
1617# name = "as-server-01"
1618# host = "127.0.0.1"
1619# port = "8080"
1620# # username = "myuser"
1621# # password = "mypassword"
1622#
1623# ## List of metrics collected on above servers
# ## Each metric consists of a name, a jmx path and either
1625# ## a pass or drop slice attribute.
# ## This collects all heap memory usage metrics.
1627# [[inputs.jolokia.metrics]]
1628# name = "heap_memory_usage"
1629# mbean = "java.lang:type=Memory"
1630# attribute = "HeapMemoryUsage"
1631#
# ## This collects thread count metrics.
1633# [[inputs.jolokia.metrics]]
1634# name = "thread_count"
1635# mbean = "java.lang:type=Threading"
1636# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
1637#
# ## This collects loaded/unloaded class count metrics.
1639# [[inputs.jolokia.metrics]]
1640# name = "class_count"
1641# mbean = "java.lang:type=ClassLoading"
1642# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
1643
1644
1645# # Read JMX metrics from a Jolokia REST agent endpoint
1646# [[inputs.jolokia2_agent]]
1647# # default_tag_prefix = ""
1648# # default_field_prefix = ""
1649# # default_field_separator = "."
1650#
1651# # Add agents URLs to query
1652# urls = ["http://localhost:8080/jolokia"]
1653# # username = ""
1654# # password = ""
1655# # response_timeout = "5s"
1656#
1657# ## Optional SSL config
1658# # ssl_ca = "/var/private/ca.pem"
1659# # ssl_cert = "/var/private/client.pem"
1660# # ssl_key = "/var/private/client-key.pem"
1661# # insecure_skip_verify = false
1662#
1663# ## Add metrics to read
1664# [[inputs.jolokia2_agent.metric]]
1665# name = "java_runtime"
1666# mbean = "java.lang:type=Runtime"
1667# paths = ["Uptime"]
1668
1669
1670# # Read JMX metrics from a Jolokia REST proxy endpoint
1671# [[inputs.jolokia2_proxy]]
1672# # default_tag_prefix = ""
1673# # default_field_prefix = ""
1674# # default_field_separator = "."
1675#
1676# ## Proxy agent
1677# url = "http://localhost:8080/jolokia"
1678# # username = ""
1679# # password = ""
1680# # response_timeout = "5s"
1681#
1682# ## Optional SSL config
1683# # ssl_ca = "/var/private/ca.pem"
1684# # ssl_cert = "/var/private/client.pem"
1685# # ssl_key = "/var/private/client-key.pem"
1686# # insecure_skip_verify = false
1687#
1688# ## Add proxy targets to query
1689# # default_target_username = ""
1690# # default_target_password = ""
# [[inputs.jolokia2_proxy.target]]
1692# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
1693# # username = ""
1694# # password = ""
1695#
1696# ## Add metrics to read
# [[inputs.jolokia2_proxy.metric]]
1698# name = "java_runtime"
1699# mbean = "java.lang:type=Runtime"
1700# paths = ["Uptime"]
1701
1702
1703# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
1704# [[inputs.kapacitor]]
1705# ## Multiple URLs from which to read Kapacitor-formatted JSON
1706# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
1707# urls = [
1708# "http://localhost:9092/kapacitor/v1/debug/vars"
1709# ]
1710#
1711# ## Time limit for http requests
1712# timeout = "5s"
1713
1714
1715# # Get kernel statistics from /proc/vmstat
1716# [[inputs.kernel_vmstat]]
1717# # no configuration
1718
1719
1720# # Read metrics from the kubernetes kubelet api
1721# [[inputs.kubernetes]]
1722# ## URL for the kubelet
1723# url = "http://1.1.1.1:10255"
1724#
1725# ## Use bearer token for authorization
# # bearer_token = "/path/to/bearer/token"
1727#
1728# ## Set response_timeout (default 5 seconds)
1729# # response_timeout = "5s"
1730#
1731# ## Optional SSL Config
# # ssl_ca = "/path/to/cafile"
# # ssl_cert = "/path/to/certfile"
# # ssl_key = "/path/to/keyfile"
1735# ## Use SSL but skip chain & host verification
1736# # insecure_skip_verify = false
1737
1738
1739# # Read metrics from a LeoFS Server via SNMP
1740# [[inputs.leofs]]
1741# ## An array of URLs of the form:
1742# ## host [ ":" port]
1743# servers = ["127.0.0.1:4020"]
1744
1745
1746# # Provides Linux sysctl fs metrics
1747# [[inputs.linux_sysctl_fs]]
1748# # no configuration
1749
1750
1751# # Read metrics from local Lustre service on OST, MDS
1752# [[inputs.lustre2]]
1753# ## An array of /proc globs to search for Lustre stats
1754# ## If not specified, the default will work on Lustre 2.5.x
1755# ##
1756# # ost_procfiles = [
1757# # "/proc/fs/lustre/obdfilter/*/stats",
1758# # "/proc/fs/lustre/osd-ldiskfs/*/stats",
1759# # "/proc/fs/lustre/obdfilter/*/job_stats",
1760# # ]
1761# # mds_procfiles = [
1762# # "/proc/fs/lustre/mdt/*/md_stats",
1763# # "/proc/fs/lustre/mdt/*/job_stats",
1764# # ]
1765
1766
1767# # Gathers metrics from the /3.0/reports MailChimp API
1768# [[inputs.mailchimp]]
1769# ## MailChimp API key
1770# ## get from https://admin.mailchimp.com/account/api/
1771# api_key = "" # required
1772# ## Reports for campaigns sent more than days_old ago will not be collected.
1773# ## 0 means collect all.
1774# days_old = 0
# ## Campaign ID to get; if empty, all campaigns are gathered. This option overrides days_old.
1776# # campaign_id = ""
1777
1778
1779# # Read metrics from one or many memcached servers
1780# [[inputs.memcached]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with optional port. ie localhost, 10.0.0.1:11211, etc.
1783# servers = ["localhost:11211"]
1784# # unix_sockets = ["/var/run/memcached.sock"]
1785
1786
1787# # Telegraf plugin for gathering metrics from N Mesos masters
1788# [[inputs.mesos]]
1789# ## Timeout, in ms.
1790# timeout = 100
1791# ## A list of Mesos masters.
1792# masters = ["localhost:5050"]
# ## Master metrics groups to be collected. By default, all are enabled.
1794# master_collections = [
1795# "resources",
1796# "master",
1797# "system",
1798# "agents",
1799# "frameworks",
1800# "tasks",
1801# "messages",
1802# "evqueue",
1803# "registrar",
1804# ]
1805# ## A list of Mesos slaves, default is []
1806# # slaves = []
# ## Slave metrics groups to be collected. By default, all are enabled.
1808# # slave_collections = [
1809# # "resources",
1810# # "agent",
1811# # "system",
1812# # "executors",
1813# # "tasks",
1814# # "messages",
1815# # ]
1816
1817
1818# # Collects scores from a minecraft server's scoreboard using the RCON protocol
1819# [[inputs.minecraft]]
1820# ## server address for minecraft
1821# # server = "localhost"
1822# ## port for RCON
1823# # port = "25575"
# ## RCON password for the minecraft server
1825# # password = ""
1826
1827
1828# # Read metrics from one or many MongoDB servers
1829# [[inputs.mongodb]]
1830# ## An array of URLs of the form:
1831# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
1832# ## For example:
1833# ## mongodb://user:auth_key@10.10.3.30:27017,
1834# ## mongodb://10.10.3.33:18832,
1835# servers = ["mongodb://127.0.0.1:27017"]
1836# gather_perdb_stats = false
1837#
1838# ## Optional SSL Config
1839# # ssl_ca = "/etc/telegraf/ca.pem"
1840# # ssl_cert = "/etc/telegraf/cert.pem"
1841# # ssl_key = "/etc/telegraf/key.pem"
1842# ## Use SSL but skip chain & host verification
1843# # insecure_skip_verify = false
1844
1845
1846# # Read metrics from one or many mysql servers
1847# [[inputs.mysql]]
1848# ## specify servers via a url matching:
1849# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
1850# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
1851# ## e.g.
1852# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
1853# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
1854# #
1855# ## If no servers are specified, then localhost is used as the host.
1856# servers = ["tcp(127.0.0.1:3306)/"]
# ## the limits for metrics from perf_events_statements
1858# perf_events_statements_digest_text_limit = 120
1859# perf_events_statements_limit = 250
1860# perf_events_statements_time_limit = 86400
1861# #
# ## if the list is empty, then metrics are gathered from all database tables
1863# table_schema_databases = []
1864# #
# ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases listed above
1866# gather_table_schema = false
1867# #
1868# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
1869# gather_process_list = true
1870# #
1871# ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS
1872# gather_user_statistics = true
1873# #
1874# ## gather auto_increment columns and max values from information schema
1875# gather_info_schema_auto_inc = true
1876# #
1877# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
1878# gather_innodb_metrics = true
1879# #
1880# ## gather metrics from SHOW SLAVE STATUS command output
1881# gather_slave_status = true
1882# #
1883# ## gather metrics from SHOW BINARY LOGS command output
1884# gather_binary_logs = false
1885# #
1886# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
1887# gather_table_io_waits = false
1888# #
1889# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
1890# gather_table_lock_waits = false
1891# #
1892# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
1893# gather_index_io_waits = false
1894# #
1895# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
1896# gather_event_waits = false
1897# #
1898# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
1899# gather_file_events_stats = false
1900# #
1901# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
1902# gather_perf_events_statements = false
1903# #
1904# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
1905# interval_slow = "30m"
1906#
1907# ## Optional SSL Config (will be used if tls=custom parameter specified in server uri)
1908# ssl_ca = "/etc/telegraf/ca.pem"
1909# ssl_cert = "/etc/telegraf/cert.pem"
1910# ssl_key = "/etc/telegraf/key.pem"
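# ## e.g. a server uri that selects this custom TLS config:
# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=custom"]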
1911
1912
1913# # Read metrics about network interface usage
1914# [[inputs.net]]
1915# ## By default, telegraf gathers stats from any up interface (excluding loopback)
1916# ## Setting interfaces will tell it to gather these explicit interfaces,
1917# ## regardless of status.
1918# ##
1919# # interfaces = ["eth0"]
1920
1921
1922# # TCP or UDP 'ping' given url and collect response time in seconds
1923# [[inputs.net_response]]
1924# ## Protocol, must be "tcp" or "udp"
1925# ## NOTE: because the "udp" protocol does not respond to requests, it requires
1926# ## a send/expect string pair (see below).
1927# protocol = "tcp"
1928# ## Server address (default localhost)
1929# address = "localhost:80"
1930# ## Set timeout
1931# timeout = "1s"
1932#
1933# ## Set read timeout (only used if expecting a response)
1934# read_timeout = "1s"
1935#
1936# ## The following options are required for UDP checks. For TCP, they are
1937# ## optional. The plugin will send the given string to the server and then
1938# ## expect to receive the given 'expect' string back.
1939# ## string sent to the server
1940# # send = "ssh"
1941# ## expected string in answer
1942# # expect = "ssh"
1943
1944
1945# # Read TCP metrics such as established, time wait and sockets counts.
1946# [[inputs.netstat]]
1947# # no configuration
1948
1949
1950# # Read Nginx's basic status information (ngx_http_stub_status_module)
1951# [[inputs.nginx]]
# # An array of Nginx stub_status URIs to gather stats from.
1953# urls = ["http://localhost/server_status"]
1954#
1955# # TLS/SSL configuration
1956# ssl_ca = "/etc/telegraf/ca.pem"
1957# ssl_cert = "/etc/telegraf/cert.cer"
1958# ssl_key = "/etc/telegraf/key.key"
1959# insecure_skip_verify = false
1960#
1961# # HTTP response timeout (default: 5s)
1962# response_timeout = "5s"
1963
1964
1965# # Read Nginx Plus' full status information (ngx_http_status_module)
1966# [[inputs.nginx_plus]]
# ## An array of ngx_http_status_module status URIs to gather stats from.
1968# urls = ["http://localhost/status"]
1969#
1970# # HTTP response timeout (default: 5s)
1971# response_timeout = "5s"
1972
1973
1974# # Read NSQ topic and channel statistics.
1975# [[inputs.nsq]]
1976# ## An array of NSQD HTTP API endpoints
1977# endpoints = ["http://localhost:4151"]
1978
1979
1980# # Collect kernel snmp counters and network interface statistics
1981# [[inputs.nstat]]
1982# ## file paths for proc files. If empty default paths will be used:
1983# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
1984# ## These can also be overridden with env variables, see README.
1985# proc_net_netstat = "/proc/net/netstat"
1986# proc_net_snmp = "/proc/net/snmp"
1987# proc_net_snmp6 = "/proc/net/snmp6"
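# ## e.g. the same paths can instead come from the environment (variable
# ## names as documented in the plugin README):
# ## PROC_NET_NETSTAT=/proc/net/netstat telegraf -config telegraf.conf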
1988# ## dump metrics with 0 values too
1989# dump_zeros = true
1990
1991
1992# # Get standard NTP query metrics, requires ntpq executable.
1993# [[inputs.ntpq]]
1994# ## If false, set the -n ntpq flag. Can reduce metric gather time.
1995# dns_lookup = true
1996
1997
1998# # OpenLDAP cn=Monitor plugin
1999# [[inputs.openldap]]
2000# host = "localhost"
2001# port = 389
2002#
2003# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
2004# # note that port will likely need to be changed to 636 for ldaps
2005# # valid options: "" | "starttls" | "ldaps"
2006# ssl = ""
2007#
2008# # skip peer certificate verification. Default is false.
2009# insecure_skip_verify = false
2010#
2011# # Path to PEM-encoded Root certificate to use to verify server certificate
2012# ssl_ca = "/etc/ssl/certs.pem"
2013#
2014# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
2015# bind_dn = ""
2016# bind_password = ""
2017
2018
# # A plugin to collect stats from OpenSMTPD - a free implementation of the server-side SMTP protocol
2020# [[inputs.opensmtpd]]
2021# ## If running as a restricted user you can prepend sudo for additional access:
2022# #use_sudo = false
2023#
2024# ## The default location of the smtpctl binary can be overridden with:
2025# binary = "/usr/sbin/smtpctl"
2026#
# ## The default timeout of 1000ms can be overridden with (in milliseconds):
2028# timeout = 1000
2029
2030
2031# # Read metrics of passenger using passenger-status
2032# [[inputs.passenger]]
2033# ## Path of passenger-status.
2034# ##
# ## The plugin gathers metrics by parsing the XML output of passenger-status
2036# ## More information about the tool:
2037# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
2038# ##
# ## If no path is specified, then the plugin simply executes passenger-status,
# ## which hopefully can be found in your PATH
2041# command = "passenger-status -v --show=xml"
2042
2043
2044# # Gather counters from PF
2045# [[inputs.pf]]
# ## PF requires root access on most systems.
# ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
# ## Users must configure sudo to allow the telegraf user to run pfctl
# ## with no password; sudo can be restricted to just "pfctl -s info"
# ## (see the example below).
2050# use_sudo = false
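# ## e.g. a sudoers entry (assuming a "telegraf" user and pfctl at
# ## /sbin/pfctl; adjust the path for your system):
# ##   telegraf ALL=(root) NOPASSWD: /sbin/pfctl -s info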
2051
2052
2053# # Read metrics of phpfpm, via HTTP status page or socket
2054# [[inputs.phpfpm]]
2055# ## An array of addresses to gather stats about. Specify an ip or hostname
2056# ## with optional port and path
2057# ##
# ## The plugin can be configured in three modes (any one can be used):
2059# ## - http: the URL must start with http:// or https://, ie:
2060# ## "http://localhost/status"
2061# ## "http://192.168.130.1/status?full"
2062# ##
2063# ## - unixsocket: path to fpm socket, ie:
2064# ## "/var/run/php5-fpm.sock"
2065# ## or using a custom fpm status path:
2066# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
2067# ##
2068# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
2069# ## "fcgi://10.0.0.12:9000/status"
2070# ## "cgi://10.0.10.12:9001/status"
2071# ##
# ## Example of gathering from both a remote host and a local socket:
2073# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
2074# urls = ["http://localhost/status"]
2075
2076
2077# # Ping given url(s) and return statistics
2078# [[inputs.ping]]
2079# ## NOTE: this plugin forks the ping command. You may need to set capabilities
2080# ## via setcap cap_net_raw+p /bin/ping
2081# #
2082# ## List of urls to ping
2083# urls = ["www.google.com"] # required
2084# ## number of pings to send per collection (ping -c <COUNT>)
2085# # count = 1
2086# ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
2087# # ping_interval = 1.0
2088# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
2089# # timeout = 1.0
2090# ## interface to send ping from (ping -I <INTERFACE>)
2091# # interface = ""
2092
2093
2094# # Measure postfix queue statistics
2095# [[inputs.postfix]]
2096# ## Postfix queue directory. If not provided, telegraf will try to use
2097# ## 'postconf -h queue_directory' to determine it.
2098# # queue_directory = "/var/spool/postfix"
2099
2100
2101# # Read metrics from one or many postgresql servers
2102# [[inputs.postgresql]]
2103# ## specify address via a url matching:
2104# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
2105# ## ?sslmode=[disable|verify-ca|verify-full]
2106# ## or a simple string:
2107# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
2108# ##
2109# ## All connection parameters are optional.
2110# ##
2111# ## Without the dbname parameter, the driver will default to a database
2112# ## with the same name as the user. This dbname is just for instantiating a
2113# ## connection with the server and doesn't restrict the databases we are trying
2114# ## to grab metrics for.
2115# ##
2116# address = "host=localhost user=postgres sslmode=disable"
2117#
2118# ## A list of databases to explicitly ignore. If not specified, metrics for all
2119# ## databases are gathered. Do NOT use with the 'databases' option.
2120# # ignored_databases = ["postgres", "template0", "template1"]
2121#
2122# ## A list of databases to pull metrics about. If not specified, metrics for all
2123# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
2124# # databases = ["app_production", "testing"]
2125
2126
2127# # Read metrics from one or many postgresql servers
2128# [[inputs.postgresql_extensible]]
2129# ## specify address via a url matching:
2130# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
2131# ## ?sslmode=[disable|verify-ca|verify-full]
2132# ## or a simple string:
2133# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
2134# #
# ## All connection parameters are optional.
2136# ## Without the dbname parameter, the driver will default to a database
2137# ## with the same name as the user. This dbname is just for instantiating a
2138# ## connection with the server and doesn't restrict the databases we are trying
2139# ## to grab metrics for.
2140# #
2141# address = "host=localhost user=postgres sslmode=disable"
2142# ## A list of databases to pull metrics about. If not specified, metrics for all
2143# ## databases are gathered.
2144# ## databases = ["app_production", "testing"]
2145# #
2146# # outputaddress = "db01"
2147# ## A custom name for the database that will be used as the "server" tag in the
2148# ## measurement output. If not specified, a default one generated from
2149# ## the connection address is used.
2150# #
# ## Define the toml config where the sql queries are stored.
# ## New queries can be added. If withdbname is set to true and no databases
# ## are defined in the 'databases' field, the sql query is suffixed with
# ## 'is not null' in order to make the query succeed.
# ## Example:
# ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
# ## because the databases variable was set to ['postgres', 'pgbench'] and
# ## withdbname was true. Note that if withdbname is set to false, you must
# ## not define the where clause (i.e. with the dbname). The tagvalue field
# ## is used to define custom tags (separated by commas).
2162# ## The optional "measurement" value can be used to override the default
2163# ## output measurement name ("postgresql").
2164# #
2165# ## Structure :
2166# ## [[inputs.postgresql_extensible.query]]
2167# ## sqlquery string
2168# ## version string
2169# ## withdbname boolean
2170# ## tagvalue string (comma separated)
2171# ## measurement string
2172# [[inputs.postgresql_extensible.query]]
2173# sqlquery="SELECT * FROM pg_stat_database"
2174# version=901
2175# withdbname=false
2176# tagvalue=""
2177# measurement=""
2178# [[inputs.postgresql_extensible.query]]
2179# sqlquery="SELECT * FROM pg_stat_bgwriter"
2180# version=901
2181# withdbname=false
2182# tagvalue="postgresql.stats"
2183
2184
2185# # Read metrics from one or many PowerDNS servers
2186# [[inputs.powerdns]]
2187# ## An array of sockets to gather stats about.
2188# ## Specify a path to unix socket.
2189# unix_sockets = ["/var/run/pdns.controlsocket"]
2190
2191
2192# # Monitor process cpu and memory usage
2193# [[inputs.procstat]]
2194# ## Must specify one of: pid_file, exe, or pattern
2195# ## PID file to monitor process
2196# pid_file = "/var/run/nginx.pid"
2197# ## executable name (ie, pgrep <exe>)
2198# # exe = "nginx"
2199# ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
2200# # pattern = "nginx"
2201# ## user as argument for pgrep (ie, pgrep -u <user>)
2202# # user = "nginx"
2203# ## Systemd unit name
2204# # systemd_unit = "nginx.service"
2205# ## CGroup name or path
2206# # cgroup = "systemd/system.slice/nginx.service"
2207#
2208# ## override for process_name
2209# ## This is optional; default is sourced from /proc/<pid>/status
2210# # process_name = "bar"
2211# ## Field name prefix
2212# prefix = ""
2213# ## comment this out if you want raw cpu_time stats
2214# fielddrop = ["cpu_time_*"]
2215# ## This is optional; moves pid into a tag instead of a field
2216# pid_tag = false
2217
2218
2219# # Read metrics from one or many prometheus clients
2220# [[inputs.prometheus]]
2221# ## An array of urls to scrape metrics from.
2222# urls = ["http://localhost:9100/metrics"]
2223#
2224# ## An array of Kubernetes services to scrape metrics from.
2225# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
2226#
2227# ## Use bearer token for authorization
# # bearer_token = "/path/to/bearer/token"
2229#
2230# ## Specify timeout duration for slower prometheus clients (default is 3s)
2231# # response_timeout = "3s"
2232#
2233# ## Optional SSL Config
# # ssl_ca = "/path/to/cafile"
# # ssl_cert = "/path/to/certfile"
# # ssl_key = "/path/to/keyfile"
2237# ## Use SSL but skip chain & host verification
2238# # insecure_skip_verify = false
2239
2240
# # Reads the last_run_summary.yaml file and converts it to measurements
2242# [[inputs.puppetagent]]
2243# ## Location of puppet last run summary file
2244# location = "/var/lib/puppet/state/last_run_summary.yaml"
2245
2246
2247# # Reads metrics from RabbitMQ servers via the Management Plugin
2248# [[inputs.rabbitmq]]
2249# ## Management Plugin url. (default: http://localhost:15672)
2250# # url = "http://localhost:15672"
2251# ## Tag added to rabbitmq_overview series; deprecated: use tags
2252# # name = "rmq-server-1"
2253# ## Credentials
2254# # username = "guest"
2255# # password = "guest"
2256#
2257# ## Optional SSL Config
2258# # ssl_ca = "/etc/telegraf/ca.pem"
2259# # ssl_cert = "/etc/telegraf/cert.pem"
2260# # ssl_key = "/etc/telegraf/key.pem"
2261# ## Use SSL but skip chain & host verification
2262# # insecure_skip_verify = false
2263#
2264# ## Optional request timeouts
2265# ##
2266# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
2267# ## for a server's response headers after fully writing the request.
2268# # header_timeout = "3s"
2269# ##
2270# ## client_timeout specifies a time limit for requests made by this client.
2271# ## Includes connection time, any redirects, and reading the response body.
2272# # client_timeout = "4s"
2273#
2274# ## A list of nodes to gather as the rabbitmq_node measurement. If not
2275# ## specified, metrics for all nodes are gathered.
2276# # nodes = ["rabbit@node1", "rabbit@node2"]
2277#
2278# ## A list of queues to gather as the rabbitmq_queue measurement. If not
2279# ## specified, metrics for all queues are gathered.
2280# # queues = ["telegraf"]
2281
2282
2283# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
2284# [[inputs.raindrops]]
# ## An array of raindrops middleware URIs to gather stats from.
2286# urls = ["http://localhost:8080/_raindrops"]
2287
2288
2289# # Read metrics from one or many redis servers
2290# [[inputs.redis]]
2291# ## specify servers via a url matching:
2292# ## [protocol://][:password]@address[:port]
2293# ## e.g.
2294# ## tcp://localhost:6379
2295# ## tcp://:password@192.168.99.100
2296# ## unix:///var/run/redis.sock
2297# ##
2298# ## If no servers are specified, then localhost is used as the host.
2299# ## If no port is specified, 6379 is used
2300# servers = ["tcp://localhost:6379"]
2301
2302
2303# # Read metrics from one or many RethinkDB servers
2304# [[inputs.rethinkdb]]
# ## An array of URIs to gather stats about. Specify an ip or hostname
# ## with optional port and password. ie,
2307# ## rethinkdb://user:auth_key@10.10.3.30:28105,
2308# ## rethinkdb://10.10.3.33:18832,
2309# ## 10.0.0.1:10000, etc.
2310# servers = ["127.0.0.1:28015"]
2311# ##
# ## If you use a rethinkdb server > 2.3.0 with username/password authorization,
# ## the protocol has to be named "rethinkdb2" - it will use the 1_0 handshake.
2314# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
2315# ##
# ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
# ## has to be named "rethinkdb".
2318# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
2319
2320
# # Read metrics from one or many Riak servers
2322# [[inputs.riak]]
2323# # Specify a list of one or more riak http servers
2324# servers = ["http://localhost:8098"]
2325
2326
2327# # Read API usage and limits for a Salesforce organisation
2328# [[inputs.salesforce]]
2329# ## specify your credentials
2330# ##
2331# username = "your_username"
2332# password = "your_password"
2333# ##
2334# ## (optional) security token
2335# # security_token = "your_security_token"
2336# ##
2337# ## (optional) environment type (sandbox or production)
2338# ## default is: production
2339# ##
2340# # environment = "production"
2341# ##
2342# ## (optional) API version (default: "39.0")
2343# ##
2344# # version = "39.0"
2345
2346
2347# # Monitor sensors, requires lm-sensors package
2348# [[inputs.sensors]]
2349# ## Remove numbers from field names.
2350# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
2351# # remove_numbers = true
2352
2353
2354# # Read metrics from storage devices supporting S.M.A.R.T.
2355# [[inputs.smart]]
2356# ## Optionally specify the path to the smartctl executable
2357# # path = "/usr/bin/smartctl"
2358# #
2359# ## On most platforms smartctl requires root access.
2360# ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
# ## Sudo must be configured to allow the telegraf user to run smartctl
# ## without a password (see the example below).
2363# # use_sudo = false
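# ## e.g. a sudoers entry (assuming a "telegraf" user and smartctl at
# ## /usr/bin/smartctl; adjust the path for your system):
# ##   telegraf ALL=(root) NOPASSWD: /usr/bin/smartctl *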
2364# #
2365# ## Skip checking disks in this power mode. Defaults to
2366# ## "standby" to not wake up disks that have stoped rotating.
2367# ## See --nocheck in the man pages for smartctl.
2368# ## smartctl version 5.41 and 5.42 have faulty detection of
2369# ## power mode and might require changing this value to
2370# ## "never" depending on your disks.
2371# # nocheck = "standby"
2372# #
2373# ## Gather detailed metrics for each SMART Attribute.
2374# ## Defaults to "false"
2375# ##
2376# # attributes = false
2377# #
2378# ## Optionally specify devices to exclude from reporting.
2379# # excludes = [ "/dev/pass6" ]
2380# #
# ## Optionally specify devices and device type. If unset,
# ## a scan (smartctl --scan) for S.M.A.R.T. devices will be
# ## done and all devices found will be included, except for
# ## those listed in excludes.
2385# # devices = [ "/dev/ada0 -d atacam" ]
2386
2387
2388# # Retrieves SNMP values from remote agents
2389# [[inputs.snmp]]
 agents = [ "192.168.1.3:161", "192.168.1.5:161" ]
2391# ## Timeout for each SNMP query.
2392 timeout = "5s"
2393# ## Number of retries to attempt within timeout.
2394 retries = 3
2395# ## SNMP version, values can be 1, 2, or 3
2396 version = 2
2397#
2398# ## SNMP community string.
2399 community = "MYSTRING"
2400#
2401# ## The GETBULK max-repetitions parameter
2402 max_repetitions = 10
2403#
2404# ## SNMPv3 auth parameters
2405# #sec_name = "myuser"
2406# #auth_protocol = "md5" # Values: "MD5", "SHA", ""
2407# #auth_password = "pass"
2408# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
2409# #context_name = ""
2410# #priv_protocol = "" # Values: "DES", "AES", ""
2411# #priv_password = ""
2412#
2413# ## measurement name
2414 name = "system"
2415 [[inputs.snmp.field]]
2416 name= "host"
2417 oid= "iso.3.6.1.2.1.1.5.0"
2418 [[inputs.snmp.field]]
2419 name = "manufacturer"
2420 oid = "iso.3.6.1.2.1.47.1.1.1.1.12.1"
2421 [[inputs.snmp.field]]
2422 name = "model"
2423 oid = "iso.3.6.1.2.1.47.1.1.1.1.13.1"
2424 [[inputs.snmp.field]]
2425 name = "esx-version"
2426 oid = "iso.3.6.1.2.1.1.1.0"
2427 [[inputs.snmp.field]]
2428 name = "service-tag"
2429 oid = "iso.3.6.1.2.1.47.1.1.1.1.11.1"
2430 [[inputs.snmp.field]]
2431 name = "esxi-uptime"
2432 oid = "iso.3.6.1.2.1.25.1.1.0"
2433 [[inputs.snmp.field]]
2434 name = "esxi-cpuload01"
2435 oid = ".1.3.6.1.2.1.25.3.3.1.2.1"
2436 [[inputs.snmp.field]]
2437 name = "esxi-cpuload02"
2438 oid = ".1.3.6.1.2.1.25.3.3.1.2.2"
2439 [[inputs.snmp.field]]
2440 name = "esxi-cpuload03"
2441 oid = ".1.3.6.1.2.1.25.3.3.1.2.3"
2442 [[inputs.snmp.field]]
2443 name = "esxi-cpuload04"
2444 oid = ".1.3.6.1.2.1.25.3.3.1.2.4"
2445 [[inputs.snmp.field]]
2446 name = "esxi-cpuload05"
2447 oid = ".1.3.6.1.2.1.25.3.3.1.2.5"
2448 [[inputs.snmp.field]]
2449 name = "esxi-cpuload06"
2450 oid = ".1.3.6.1.2.1.25.3.3.1.2.6"
2451 [[inputs.snmp.field]]
2452 name = "esxi-cpuload07"
2453 oid = ".1.3.6.1.2.1.25.3.3.1.2.7"
2454 [[inputs.snmp.field]]
2455 name = "esxi-cpuload08"
2456 oid = ".1.3.6.1.2.1.25.3.3.1.2.8"
2457 [[inputs.snmp.field]]
2458 name = "esxi-cpuload09
2459 oid = ".1.3.6.1.2.1.25.3.3.1.2.9"
2460 [[inputs.snmp.field]]
2461 name = "esxi-cpuload10"
2462 oid = ".1.3.6.1.2.1.25.3.3.1.2.10"
2463 [[inputs.snmp.field]]
2464 name = "esxi-cpuload11"
2465 oid = ".1.3.6.1.2.1.25.3.3.1.2.11"
2466 [[inputs.snmp.field]]
2467 name = "esxi-cpuload12"
2468 oid = ".1.3.6.1.2.1.25.3.3.1.2.12"
2469 [[inputs.snmp.field]]
2470 name = "esxi-cpuload13"
2471 oid = ".1.3.6.1.2.1.25.3.3.1.2.13"
2472 [[inputs.snmp.field]]
2473 name = "esxi-cpuload14"
2474 oid = ".1.3.6.1.2.1.25.3.3.1.2.14"
2475 [[inputs.snmp.field]]
2476 name = "esxi-cpuload15"
2477 oid = ".1.3.6.1.2.1.25.3.3.1.2.15"
2478 [[inputs.snmp.field]]
2479 name = "esxi-cpuload16"
2480 oid = ".1.3.6.1.2.1.25.3.3.1.2.16"
2481 [[inputs.snmp.field]]
2482 name = "esxi-cpuload17"
2483 oid = ".1.3.6.1.2.1.25.3.3.1.2.17"
2484 [[inputs.snmp.field]]
2485 name = "esxi-cpuload18"
2486 oid = ".1.3.6.1.2.1.25.3.3.1.2.18"
2487 [[inputs.snmp.field]]
2488 name = "esxi-cpuload19"
2489 oid = ".1.3.6.1.2.1.25.3.3.1.2.19"
2490 [[inputs.snmp.field]]
2491 name = "esxi-cpuload20"
2492 oid = ".1.3.6.1.2.1.25.3.3.1.2.20"
2493 [[inputs.snmp.field]]
2494 name = "esxi-cpuload21"
2495 oid = ".1.3.6.1.2.1.25.3.3.1.2.21"
2496 [[inputs.snmp.field]]
2497 name = "esxi-cpuload22"
2498 oid = ".1.3.6.1.2.1.25.3.3.1.2.22"
2499 [[inputs.snmp.field]]
2500 name = "esxi-cpuload23"
2501 oid = ".1.3.6.1.2.1.25.3.3.1.2.23"
2502 [[inputs.snmp.field]]
2503 name = "esxi-cpuload24"
2504 oid = ".1.3.6.1.2.1.25.3.3.1.2.24"
2505 [[inputs.snmp.field]]
2506 name = "hostname"
2507 oid = ".1.0.0.1.1"
2508 [[inputs.snmp.field]]
2509 name = "uptime"
2510 oid = ".1.0.0.1.2"
2511 [[inputs.snmp.field]]
2512 name = "load"
2513 oid = ".1.0.0.1.3"
2514 [[inputs.snmp.field]]
2515 oid = "HOST-RESOURCES-MIB::hrMemorySize"
2516#
2517# [[inputs.snmp.table]]
2518# ## measurement name
2519# name = "remote_servers"
2520# inherit_tags = [ "hostname" ]
2521# [[inputs.snmp.table.field]]
2522# name = "server"
2523# oid = ".1.0.0.0.1.0"
2524# is_tag = true
2525# [[inputs.snmp.table.field]]
2526# name = "connections"
2527# oid = ".1.0.0.0.1.1"
2528# [[inputs.snmp.table.field]]
2529# name = "latency"
2530# oid = ".1.0.0.0.1.2"
2531#
2532# [[inputs.snmp.table]]
2533# ## auto populate table's fields using the MIB
2534# oid = "HOST-RESOURCES-MIB::hrNetworkTable"
2535
2536
2537# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
2538# [[inputs.snmp_legacy]]
2539# ## Use 'oids.txt' file to translate oids to names
2540# ## To generate 'oids.txt' you need to run:
2541# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
# ## Or if you have another MIB folder with custom MIBs
2543# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
2544# snmptranslate_file = "/tmp/oids.txt"
# [[inputs.snmp_legacy.host]]
2546# address = "192.168.2.2:161"
2547# # SNMP community
2548# community = "public" # default public
2549# # SNMP version (1, 2 or 3)
2550# # Version 3 not supported yet
2551# version = 2 # default 2
2552# # SNMP response timeout
2553# timeout = 2.0 # default 2.0
2554# # SNMP request retries
2555# retries = 2 # default 2
2556# # Which get/bulk do you want to collect for this host
2557# collect = ["mybulk", "sysservices", "sysdescr"]
2558# # Simple list of OIDs to get, in addition to "collect"
2559# get_oids = []
2560#
# [[inputs.snmp_legacy.host]]
2562# address = "192.168.2.3:161"
2563# community = "public"
2564# version = 2
2565# timeout = 2.0
2566# retries = 2
2567# collect = ["mybulk"]
2568# get_oids = [
2569# "ifNumber",
2570# ".1.3.6.1.2.1.1.3.0",
2571# ]
2572#
# [[inputs.snmp_legacy.get]]
2574# name = "ifnumber"
2575# oid = "ifNumber"
2576#
# [[inputs.snmp_legacy.get]]
2578# name = "interface_speed"
2579# oid = "ifSpeed"
2580# instance = "0"
2581#
# [[inputs.snmp_legacy.get]]
2583# name = "sysuptime"
2584# oid = ".1.3.6.1.2.1.1.3.0"
2585# unit = "second"
2586#
# [[inputs.snmp_legacy.bulk]]
2588# name = "mybulk"
2589# max_repetition = 127
2590# oid = ".1.3.6.1.2.1.1"
2591#
# [[inputs.snmp_legacy.bulk]]
2593# name = "ifoutoctets"
2594# max_repetition = 127
2595# oid = "ifOutOctets"
2596#
# [[inputs.snmp_legacy.host]]
2598# address = "192.168.2.13:161"
2599# #address = "127.0.0.1:161"
2600# community = "public"
2601# version = 2
2602# timeout = 2.0
2603# retries = 2
2604# #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
2605# collect = ["sysuptime" ]
# [[inputs.snmp_legacy.host.table]]
2607# name = "iftable3"
2608# include_instances = ["enp5s0", "eth1"]
2609#
2610# # SNMP TABLEs
2611# # table without mapping neither subtables
# [[inputs.snmp_legacy.table]]
2613# name = "iftable1"
2614# oid = ".1.3.6.1.2.1.31.1.1.1"
2615#
2616# # table without mapping but with subtables
# [[inputs.snmp_legacy.table]]
2618# name = "iftable2"
2619# oid = ".1.3.6.1.2.1.31.1.1.1"
2620# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
2621#
2622# # table with mapping but without subtables
# [[inputs.snmp_legacy.table]]
2624# name = "iftable3"
2625# oid = ".1.3.6.1.2.1.31.1.1.1"
# # if empty, get all instances
2627# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
2628# # if empty, get all subtables
2629#
2630# # table with both mapping and subtables
# [[inputs.snmp_legacy.table]]
2632# name = "iftable4"
2633# oid = ".1.3.6.1.2.1.31.1.1.1"
2634# # if empty get all instances
2635# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
2636# # if empty get all subtables
# # sub_tables do not have to be "real" subtables
2638# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
2639
2640
2641# # Read stats from one or more Solr servers or cores
2642# [[inputs.solr]]
2643# ## specify a list of one or more Solr servers
2644# servers = ["http://localhost:8983"]
2645#
2646# ## specify a list of one or more Solr cores (default - all)
2647# # cores = ["main"]
2648
2649
2650# # Read metrics from Microsoft SQL Server
2651# [[inputs.sqlserver]]
2652# ## Specify instances to monitor with a list of connection strings.
2653# ## All connection parameters are optional.
2654# ## By default, the host is localhost, listening on default port, TCP 1433.
# ## On Windows, the user is the currently running AD user (SSO).
2656# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
2657# ## parameters.
2658# # servers = [
2659# # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
2660# # ]
2661
2662
2663# # Sysstat metrics collector
2664# [[inputs.sysstat]]
2665# ## Path to the sadc command.
2666# #
2667# ## Common Defaults:
2668# ## Debian/Ubuntu: /usr/lib/sysstat/sadc
2669# ## Arch: /usr/lib/sa/sadc
2670# ## RHEL/CentOS: /usr/lib64/sa/sadc
2671# sadc_path = "/usr/lib/sa/sadc" # required
2672# #
2673# #
2674# ## Path to the sadf command, if it is not in PATH
2675# # sadf_path = "/usr/bin/sadf"
2676# #
2677# #
# ## Activities is a list of activities that are passed as arguments to the
# ## sadc collector utility (e.g. DISK, SNMP, etc.)
2680# ## The more activities that are added, the more data is collected.
2681# # activities = ["DISK"]
2682# #
2683# #
2684# ## Group metrics to measurements.
2685# ##
# ## If group is false, each metric will be prefixed with a description
# ## and is itself a measurement.
# ##
# ## If group is true, corresponding metrics are grouped into a single measurement.
2690# # group = true
2691# #
2692# #
2693# ## Options for the sadf command. The values on the left represent the sadf
2694# ## options and the values on the right their description (which are used for
2695# ## grouping and prefixing metrics).
2696# ##
2697# ## Run 'sar -h' or 'man sar' to find out the supported options for your
2698# ## sysstat version.
2699# [inputs.sysstat.options]
2700# -C = "cpu"
2701# -B = "paging"
2702# -b = "io"
2703# -d = "disk" # requires DISK activity
2704# "-n ALL" = "network"
2705# "-P ALL" = "per_cpu"
2706# -q = "queue"
2707# -R = "mem"
2708# -r = "mem_util"
2709# -S = "swap_util"
2710# -u = "cpu_util"
2711# -v = "inode"
2712# -W = "swap"
2713# -w = "task"
2714# # -H = "hugepages" # only available for newer linux distributions
2715# # "-I ALL" = "interrupts" # requires INT activity
2716# #
2717# #
2718# ## Device tags can be used to add additional tags for devices.
2719# ## For example the configuration below adds a tag vg with value rootvg for
2720# ## all metrics with sda devices.
2721# # [[inputs.sysstat.device_tags.sda]]
2722# # vg = "rootvg"
2723
2724
2725# # Reads metrics from a Teamspeak 3 Server via ServerQuery
2726# [[inputs.teamspeak]]
2727# ## Server address for Teamspeak 3 ServerQuery
2728# # server = "127.0.0.1:10011"
2729# ## Username for ServerQuery
2730# username = "serverqueryuser"
2731# ## Password for ServerQuery
2732# password = "secret"
2733# ## Array of virtual servers
2734# # virtual_servers = [1]
2735
2736
2737# # Gather metrics from the Tomcat server status page.
2738# [[inputs.tomcat]]
2739# ## URL of the Tomcat server status
2740# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
2741#
2742# ## HTTP Basic Auth Credentials
2743# # username = "tomcat"
2744# # password = "s3cret"
2745#
2746# ## Request timeout
2747# # timeout = "5s"
2748#
2749# ## Optional SSL Config
2750# # ssl_ca = "/etc/telegraf/ca.pem"
2751# # ssl_cert = "/etc/telegraf/cert.pem"
2752# # ssl_key = "/etc/telegraf/key.pem"
2753# ## Use SSL but skip chain & host verification
2754# # insecure_skip_verify = false
2755
2756
2757# # Inserts sine and cosine waves for demonstration purposes
2758# [[inputs.trig]]
2759# ## Set the amplitude
2760# amplitude = 10.0
2761
2762
2763# # Read Twemproxy stats data
2764# [[inputs.twemproxy]]
2765# ## Twemproxy stats address and port (no scheme)
2766# addr = "localhost:22222"
2767# ## Monitor pool name
2768# pools = ["redis_pool", "mc_pool"]
2769
2770
2771# # A plugin to collect stats from Unbound - a validating, recursive, and caching DNS resolver
2772# [[inputs.unbound]]
2773# ## If running as a restricted user you can prepend sudo for additional access:
2774# #use_sudo = false
2775#
2776# ## The default location of the unbound-control binary can be overridden with:
2777# binary = "/usr/sbin/unbound-control"
2778#
# ## The default timeout of 1s can be overridden with:
2780# timeout = "1s"
2781#
2782# ## Use the builtin fielddrop/fieldpass telegraf filters in order to keep/remove specific fields
2783# fieldpass = ["total_*", "num_*","time_up", "mem_*"]
2784
2785
2786# # A plugin to collect stats from Varnish HTTP Cache
2787# [[inputs.varnish]]
2788# ## If running as a restricted user you can prepend sudo for additional access:
2789# #use_sudo = false
2790#
2791# ## The default location of the varnishstat binary can be overridden with:
2792# binary = "/usr/bin/varnishstat"
2793#
# ## By default, telegraf gathers stats for 3 metric points.
2795# ## Setting stats will override the defaults shown below.
2796# ## Glob matching can be used, ie, stats = ["MAIN.*"]
2797# ## stats may also be set to ["*"], which will collect all stats
2798# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
2799#
2800# ## Optional name for the varnish instance (or working directory) to query
# ## Usually appended after -n in the varnish cli
# # name = "instanceName"
2803
2804
2805# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
2806# [[inputs.zfs]]
2807# ## ZFS kstat path. Ignored on FreeBSD
2808# ## If not specified, then default is:
2809# # kstatPath = "/proc/spl/kstat/zfs"
2810#
# ## By default, telegraf gathers all zfs stats
2812# ## If not specified, then default is:
2813# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
2814#
2815# ## By default, don't gather zpool stats
2816# # poolMetrics = false
2817
2818
2819# # Reads 'mntr' stats from one or many zookeeper servers
2820# [[inputs.zookeeper]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
2822# ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
2823#
2824# ## If no servers are specified, then localhost is used as the host.
2825# ## If no port is specified, 2181 is used
2826# servers = [":2181"]
2827
2828
2829
2830###############################################################################
2831# SERVICE INPUT PLUGINS #
2832###############################################################################
2833
2834# # AMQP consumer plugin
2835# [[inputs.amqp_consumer]]
2836# ## AMQP url
2837# url = "amqp://localhost:5672/influxdb"
2838# ## AMQP exchange
2839# exchange = "telegraf"
2840# ## AMQP queue name
2841# queue = "telegraf"
2842# ## Binding Key
2843# binding_key = "#"
2844#
# ## Maximum number of messages the server should give to the worker.
2846# prefetch_count = 50
2847#
2848# ## Auth method. PLAIN and EXTERNAL are supported
2849# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
2850# ## described here: https://www.rabbitmq.com/plugins.html
2851# # auth_method = "PLAIN"
2852#
2853# ## Optional SSL Config
2854# # ssl_ca = "/etc/telegraf/ca.pem"
2855# # ssl_cert = "/etc/telegraf/cert.pem"
2856# # ssl_key = "/etc/telegraf/key.pem"
2857# ## Use SSL but skip chain & host verification
2858# # insecure_skip_verify = false
2859#
2860# ## Data format to consume.
2861# ## Each data format has its own unique set of configuration options, read
2862# ## more about them here:
2863# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2864# data_format = "influx"
2865
2866
2867# # Influx HTTP write listener
2868# [[inputs.http_listener]]
2869# ## Address and port to host HTTP listener on
2870# service_address = ":8186"
2871#
2872# ## maximum duration before timing out read of the request
2873# read_timeout = "10s"
2874# ## maximum duration before timing out write of the response
2875# write_timeout = "10s"
2876#
2877# ## Maximum allowed http request body size in bytes.
2878# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes)
2879# max_body_size = 0
2880#
2881# ## Maximum line size allowed to be sent in bytes.
2882# ## 0 means to use the default of 65536 bytes (64 kibibytes)
2883# max_line_size = 0
2884#
2885# ## Set one or more allowed client CA certificate file names to
2886# ## enable mutually authenticated TLS connections
2887# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
2888#
2889# ## Add service certificate and key
2890# tls_cert = "/etc/telegraf/cert.pem"
2891# tls_key = "/etc/telegraf/key.pem"
2892
2893
2894# # Read metrics from Kafka topic(s)
2895# [[inputs.kafka_consumer]]
2896# ## kafka servers
2897# brokers = ["localhost:9092"]
2898# ## topic(s) to consume
2899# topics = ["telegraf"]
2900#
2901# ## Optional SSL Config
2902# # ssl_ca = "/etc/telegraf/ca.pem"
2903# # ssl_cert = "/etc/telegraf/cert.pem"
2904# # ssl_key = "/etc/telegraf/key.pem"
2905# ## Use SSL but skip chain & host verification
2906# # insecure_skip_verify = false
2907#
2908# ## Optional SASL Config
2909# # sasl_username = "kafka"
2910# # sasl_password = "secret"
2911#
2912# ## the name of the consumer group
2913# consumer_group = "telegraf_metrics_consumers"
2914# ## Offset (must be either "oldest" or "newest")
2915# offset = "oldest"
2916#
2917# ## Data format to consume.
2918# ## Each data format has its own unique set of configuration options, read
2919# ## more about them here:
2920# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2921# data_format = "influx"
2922#
2923# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
2924# ## larger messages are dropped
2925# max_message_len = 65536
2926
2927
2928# # Read metrics from Kafka topic(s)
2929# [[inputs.kafka_consumer_legacy]]
2930# ## topic(s) to consume
2931# topics = ["telegraf"]
2932# ## an array of Zookeeper connection strings
2933# zookeeper_peers = ["localhost:2181"]
2934# ## Zookeeper Chroot
2935# zookeeper_chroot = ""
2936# ## the name of the consumer group
2937# consumer_group = "telegraf_metrics_consumers"
2938# ## Offset (must be either "oldest" or "newest")
2939# offset = "oldest"
2940#
2941# ## Data format to consume.
2942# ## Each data format has its own unique set of configuration options, read
2943# ## more about them here:
2944# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
2945# data_format = "influx"
2946#
2947# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
2948# ## larger messages are dropped
2949# max_message_len = 65536
2950
2951
2952# # Stream and parse log file(s).
2953# [[inputs.logparser]]
2954# ## Log files to parse.
2955# ## These accept standard unix glob matching rules, but with the addition of
2956# ## ** as a "super asterisk". ie:
2957# ## /var/log/**.log -> recursively find all .log files in /var/log
2958# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
2959# ## /var/log/apache.log -> only tail the apache log file
2960# files = ["/var/log/apache/access.log"]
2961#
2962# ## Read files that currently exist from the beginning. Files that are created
2963# ## while telegraf is running (and that match the "files" globs) will always
2964# ## be read from the beginning.
2965# from_beginning = false
2966#
2967# ## Method used to watch for file updates. Can be either "inotify" or "poll".
2968# # watch_method = "inotify"
2969#
2970# ## Parse logstash-style "grok" patterns:
2971# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
2972# [inputs.logparser.grok]
2973# ## This is a list of patterns to check the given log file(s) for.
2974# ## Note that adding patterns here increases processing time. The most
2975# ## efficient configuration is to have one pattern per logparser.
2976# ## Other common built-in patterns are:
2977# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
2978# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
2979# patterns = ["%{COMBINED_LOG_FORMAT}"]
2980#
# ## Name of the measurement to output.
2982# measurement = "apache_access_log"
2983#
2984# ## Full path(s) to custom pattern files.
2985# custom_pattern_files = []
2986#
2987# ## Custom patterns can also be defined here. Put one pattern per line.
# custom_patterns = '''
# '''
#
# ## Timezone allows you to provide an override for timestamps that
# ## don't already include an offset
# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
# ##
# ## Default: "" which renders UTC
# ## Options are as follows:
# ## 1. Local -- interpret based on machine localtime
# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
# timezone = "Canada/Eastern"
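# ## e.g. a hypothetical custom pattern definition that could be placed
# ## between the custom_patterns triple quotes above, one per line:
# ## POSTFIX_QUEUEID [0-9A-F]{10,11}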
3001
3002
3003# # Read metrics from MQTT topic(s)
3004# [[inputs.mqtt_consumer]]
3005# ## MQTT broker URLs to be used. The format should be scheme://host:port,
# ## where the scheme can be tcp, ssl, or ws.
3007# servers = ["tcp://localhost:1883"]
3008#
3009# ## MQTT QoS, must be 0, 1, or 2
3010# qos = 0
3011# ## Connection timeout for initial connection in seconds
3012# connection_timeout = "30s"
3013#
3014# ## Topics to subscribe to
3015# topics = [
3016# "telegraf/host01/cpu",
3017# "telegraf/+/mem",
3018# "sensors/#",
3019# ]
3020#
3021# # if true, messages that can't be delivered while the subscriber is offline
3022# # will be delivered when it comes back (such as on service restart).
3023# # NOTE: if true, client_id MUST be set
3024# persistent_session = false
3025# # If empty, a random client ID will be generated.
3026# client_id = ""
3027#
# ## username and password to connect to the MQTT server.
3029# # username = "telegraf"
3030# # password = "metricsmetricsmetricsmetrics"
3031#
3032# ## Optional SSL Config
3033# # ssl_ca = "/etc/telegraf/ca.pem"
3034# # ssl_cert = "/etc/telegraf/cert.pem"
3035# # ssl_key = "/etc/telegraf/key.pem"
3036# ## Use SSL but skip chain & host verification
3037# # insecure_skip_verify = false
3038#
3039# ## Data format to consume.
3040# ## Each data format has its own unique set of configuration options, read
3041# ## more about them here:
3042# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
3043# data_format = "influx"
3044
3045
3046# # Read metrics from NATS subject(s)
3047# [[inputs.nats_consumer]]
3048# ## urls of NATS servers
3049# # servers = ["nats://localhost:4222"]
3050# ## Use Transport Layer Security
3051# # secure = false
3052# ## subject(s) to consume
3053# # subjects = ["telegraf"]
3054# ## name a queue group
3055# # queue_group = "telegraf_consumers"
3056#
3057# ## Sets the limits for pending msgs and bytes for each subscription
3058# ## These shouldn't need to be adjusted except in very high throughput scenarios
3059# # pending_message_limit = 65536
3060# # pending_bytes_limit = 67108864
3061#
3062# ## Data format to consume.
3063# ## Each data format has its own unique set of configuration options, read
3064# ## more about them here:
3065# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
3066# data_format = "influx"
3067
3068
3069# # Read NSQ topic for metrics.
3070# [[inputs.nsq_consumer]]
# ## The server option still works but is deprecated; it is simply prepended to the nsqd array.
3072# # server = "localhost:4150"
# ## An array representing the NSQD TCP endpoints
3074# nsqd = ["localhost:4150"]
3075# ## An array representing the NSQLookupd HTTP Endpoints
3076# nsqlookupd = ["localhost:4161"]
3077# topic = "telegraf"
3078# channel = "consumer"
3079# max_in_flight = 100
3080#
3081# ## Data format to consume.
3082# ## Each data format has its own unique set of configuration options, read
3083# ## more about them here:
3084# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
3085# data_format = "influx"


# # Generic socket listener capable of handling multiple socket types.
# [[inputs.socket_listener]]
# ## URL to listen on
# # service_address = "tcp://:8094"
# # service_address = "tcp://127.0.0.1:http"
# # service_address = "tcp4://:8094"
# # service_address = "tcp6://:8094"
# # service_address = "tcp6://[2001:db8::1]:8094"
# # service_address = "udp://:8094"
# # service_address = "udp4://:8094"
# # service_address = "udp6://:8094"
# # service_address = "unix:///tmp/telegraf.sock"
# # service_address = "unixgram:///tmp/telegraf.sock"
#
# ## Maximum number of concurrent connections.
# ## Only applies to stream sockets (e.g. TCP).
# ## 0 (default) is unlimited.
# # max_connections = 1024
#
# ## Read timeout.
# ## Only applies to stream sockets (e.g. TCP).
# ## 0 (default) is unlimited.
# # read_timeout = "30s"
#
# ## Maximum socket buffer size in bytes.
# ## For stream sockets, once the buffer fills up, the sender will start backing up.
# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
# ## Defaults to the OS default.
# # read_buffer_size = 65535
#
# ## Period between keep alive probes.
# ## Only applies to TCP sockets.
# ## 0 disables keep alive probes.
# ## Defaults to the OS configuration.
# # keep_alive_period = "5m"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# # data_format = "influx"
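#
# ## Example (an illustrative sketch): a socket_listener accepting InfluxDB
# ## line protocol over UDP on port 8094. Remove the leading "# " to enable.
# [[inputs.socket_listener]]
# service_address = "udp://:8094"
# data_format = "influx"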


# # Statsd UDP/TCP Server
# [[inputs.statsd]]
# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
# protocol = "udp"
#
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
# max_tcp_connections = 250
#
# ## Address and port to host UDP listener on
# service_address = ":8125"
#
# ## The following configuration options control when telegraf clears its cache
# ## of previous values. If set to false, then telegraf will only clear its
# ## cache when the daemon is restarted.
# ## Reset gauges every interval (default=true)
# delete_gauges = true
# ## Reset counters every interval (default=true)
# delete_counters = true
# ## Reset sets every interval (default=true)
# delete_sets = true
# ## Reset timings & histograms every interval (default=true)
# delete_timings = true
#
# ## Percentiles to calculate for timing & histogram stats
# percentiles = [90]
#
# ## separator to use between elements of a statsd metric
# metric_separator = "_"
#
# ## Parses tags in the datadog statsd format
# ## http://docs.datadoghq.com/guides/dogstatsd/
# parse_data_dog_tags = false
#
# ## Statsd data translation templates, more info can be read here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
# # templates = [
# # "cpu.* measurement*"
# # ]
#
# ## Number of UDP messages allowed to queue up; once the queue fills,
# ## the statsd server will start dropping packets
# allowed_pending_messages = 10000
#
# ## Number of timing/histogram values to track per-measurement in the
# ## calculation of percentiles. Raising this limit increases the accuracy
# ## of percentiles but also increases the memory usage and cpu time.
# percentile_limit = 1000
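#
# ## For reference, clients send plain statsd datagrams of the form
# ## "name:value|type" to the address above; illustrative, made-up samples:
# ##   page.views:1|c           (counter)
# ##   api.request_time:320|ms  (timing)
# ##   queue.depth:42|g         (gauge)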


# # Stream a log file, like the tail -f command
# [[inputs.tail]]
# ## files to tail.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## "/var/log/**.log" -> recursively find all .log files in /var/log
# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
# ## "/var/log/apache.log" -> just tail the apache log file
# ##
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/mymetrics.out"]
# ## Read file from beginning.
# from_beginning = false
# ## Whether file is a named pipe
# pipe = false
#
# ## Method used to watch for file updates. Can be either "inotify" or "poll".
# # watch_method = "inotify"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
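#
# ## Example (an illustrative sketch): recursively tail every .log file under
# ## a hypothetical /var/log/myapp tree, parsing each line as line protocol.
# ## Remove the leading "# " to enable.
# [[inputs.tail]]
# files = ["/var/log/myapp/**.log"]
# from_beginning = false
# data_format = "influx"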


# # Generic TCP listener
# [[inputs.tcp_listener]]
# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
# # socket_listener plugin
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # Generic UDP listener
# [[inputs.udp_listener]]
# # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
# # socket_listener plugin
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # A Webhooks Event collector
# [[inputs.webhooks]]
# ## Address and port to host Webhook listener on
# service_address = ":1619"
#
# [inputs.webhooks.filestack]
# path = "/filestack"
#
# [inputs.webhooks.github]
# path = "/github"
# # secret = ""
#
# [inputs.webhooks.mandrill]
# path = "/mandrill"
#
# [inputs.webhooks.rollbar]
# path = "/rollbar"
#
# [inputs.webhooks.papertrail]
# path = "/papertrail"
#
# [inputs.webhooks.particle]
# path = "/particle"
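#
# ## Example (an illustrative sketch): a webhooks listener accepting only
# ## GitHub events, with a hypothetical shared secret. Remove the leading
# ## "# " to enable.
# [[inputs.webhooks]]
# service_address = ":1619"
#
# [inputs.webhooks.github]
# path = "/github"
# secret = "changeme"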


# # This plugin implements the Zipkin HTTP server to gather trace and timing
# # data needed to troubleshoot latency problems in microservice architectures.
# [[inputs.zipkin]]
# # path = "/api/v1/spans" # URL path for span data
# # port = 9411            # Port on which Telegraf listens
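#
# ## Example (an illustrative sketch): a zipkin listener on the default
# ## Zipkin port. Remove the leading "# " to enable.
# [[inputs.zipkin]]
# path = "/api/v1/spans"
# port = 9411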