# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file; simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR");
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
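#
# For example, assuming hypothetical variables TELEGRAF_HOST and
# TELEGRAF_BATCH have been exported in the agent's environment:
#   hostname = "$TELEGRAF_HOST"          # string value, must be quoted
#   metric_batch_size = $TELEGRAF_BATCH  # numeric value, left plain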


# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"


# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at most
  ## metric_batch_size metrics.
  ## This controls the size of writes that Telegraf sends to output plugins.
  metric_batch_size = 1000

  ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
  ## output, and will flush this buffer on a successful write. Oldest metrics
  ## are dropped first when this buffer fills.
  ## This buffer only fills when writes fail to output plugin(s).
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. Maximum flush_interval will be
  ## flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default or when set to "0s", precision will be set to the same
  ## timestamp order as the collection interval, with the maximum being 1s.
  ## ie, when interval = "10s", precision will be "1s"
  ##     when interval = "250ms", precision will be "1ms"
  ## Precision will NOT be used for service inputs. It is up to each individual
  ## service input to set the timestamp at the appropriate precision.
  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  precision = ""

  ## Logging configuration:
  ## Run telegraf with debug log messages.
  debug = false
  ## Run telegraf in quiet mode (error log messages only).
  quiet = false
  ## Specify the log file name. The empty string means to log to stderr.
  logfile = ""

  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false


###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################

# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
  ## The full HTTP or UDP URL for your InfluxDB instance.
  ##
  ## Multiple URLs can be specified for a single cluster; only ONE of the
  ## urls will be written to each interval.
  # urls = ["unix:///var/run/influxdb.sock"]
  # urls = ["udp://127.0.0.1:8089"]
  # urls = ["http://127.0.0.1:8086"]

  ## The target database for metrics; will be created as needed.
  ## For a UDP url endpoint, the database needs to be configured on the server side.
  # database = "telegraf"

  ## The value of this tag will be used to determine the database. If this
  ## tag is not set the 'database' option is used as the default.
  # database_tag = ""
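  ##   ex (hypothetical): with database_tag = "target_db", a metric carrying
  ##   the tag target_db=telegraf_prod is written to the "telegraf_prod" database.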

  ## If true, no CREATE DATABASE queries will be sent. Set to true when using
  ## Telegraf with a user without permissions to create databases or when the
  ## database already exists.
  # skip_database_creation = false

  ## Name of existing retention policy to write to. Empty string writes to
  ## the default retention policy. Only takes effect when using HTTP.
  # retention_policy = ""

  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
  ## Only takes effect when using HTTP.
  # write_consistency = "any"

  ## Timeout for HTTP messages.
  # timeout = "5s"

  ## HTTP Basic Auth
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"

  ## HTTP User-Agent
  # user_agent = "telegraf"

  ## UDP payload size is the maximum packet size to send.
  # udp_payload = "512B"

  ## Optional TLS Config for use on HTTP connections.
  # tls_ca = "/etc/telegraf/ca.pem"
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false

  ## HTTP Proxy override. If unset, the standard proxy environment
  ## variables are consulted to determine which proxy, if any, should be used.
  # http_proxy = "http://corporate.proxy:3128"

  ## Additional HTTP headers
  # http_headers = {"X-Special-Header" = "Special-Value"}

  ## HTTP Content-Encoding for write request body, can be set to "gzip" to
  ## compress body or "identity" to apply no encoding.
  # content_encoding = "identity"

  ## When true, Telegraf will output unsigned integers as unsigned values,
  ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
  ## integer values. Enabling this option will result in field type errors if
  ## existing data has been written.
  # influx_uint_support = false


# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
#   ## Amon Server Key
#   server_key = "my-server-key" # required.
#
#   ## Amon Instance URL
#   amon_instance = "https://youramoninstance" # required
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Publishes metrics to an AMQP broker
# [[outputs.amqp]]
#   ## Broker to publish to.
#   ##   deprecated in 1.7; use the brokers option
#   # url = "amqp://localhost:5672/influxdb"
#
#   ## Brokers to publish to. If multiple brokers are specified a random broker
#   ## will be selected anytime a connection is established. This can be
#   ## helpful for load balancing when not using a dedicated load balancer.
#   brokers = ["amqp://localhost:5672/influxdb"]
#
#   ## Maximum messages to send over a connection. Once this is reached, the
#   ## connection is closed and a new connection is made. This can be helpful for
#   ## load balancing when not using a dedicated load balancer.
#   # max_messages = 0
#
#   ## Exchange to declare and publish to.
#   exchange = "telegraf"
#
#   ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
#   # exchange_type = "topic"
#
#   ## If true, exchange will be passively declared.
#   # exchange_declare_passive = false
#
#   ## Exchange durability can be either "transient" or "durable".
#   # exchange_durability = "durable"
#
#   ## Additional exchange arguments.
#   # exchange_arguments = { }
#   # exchange_arguments = {"hash_property" = "timestamp"}
#
#   ## Authentication credentials for the PLAIN auth_method.
#   # username = ""
#   # password = ""
#
#   ## Auth method. PLAIN and EXTERNAL are supported
#   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
#   ## described here: https://www.rabbitmq.com/plugins.html
#   # auth_method = "PLAIN"
#
#   ## Metric tag to use as a routing key.
#   ## ie, if this tag exists, its value will be used as the routing key
#   # routing_tag = "host"
#
#   ## Static routing key. Used when no routing_tag is set or as a fallback
#   ## when the tag specified in routing tag is not found.
#   # routing_key = ""
#   # routing_key = "telegraf"
#
#   ## Delivery Mode controls if a published message is persistent.
#   ## One of "transient" or "persistent".
#   # delivery_mode = "transient"
#
#   ## InfluxDB database added as a message header.
#   ##   deprecated in 1.7; use the headers option
#   # database = "telegraf"
#
#   ## InfluxDB retention policy added as a message header
#   ##   deprecated in 1.7; use the headers option
#   # retention_policy = "default"
#
#   ## Static headers added to each published message.
#   # headers = { }
#   # headers = {"database" = "telegraf", "retention_policy" = "default"}
#
#   ## Connection timeout. If not provided, will default to 5s. 0s means no
#   ## timeout (not recommended).
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## If true use batch serialization format instead of line based delimiting.
#   ## Only applies to data formats which are not line based such as JSON.
#   ## Recommended to set to true.
#   # use_batch_format = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"


# # Send metrics to Azure Application Insights
# [[outputs.application_insights]]
#   ## Instrumentation key of the Application Insights resource.
#   instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
#
#   ## Timeout for closing (default: 5s).
#   # timeout = "5s"
#
#   ## Enable additional diagnostic logging.
#   # enable_diagnostic_logging = false
#
#   ## Context Tag Sources add Application Insights context tags to a tag value.
#   ##
#   ## For list of allowed context tag keys see:
#   ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
#   # [outputs.application_insights.context_tag_sources]
#   #   "ai.cloud.role" = "kubernetes_container_name"
#   #   "ai.cloud.roleInstance" = "kubernetes_pod_name"


# # Send aggregate metrics to Azure Monitor
# [[outputs.azure_monitor]]
#   ## Timeout for HTTP writes.
#   # timeout = "20s"
#
#   ## Set the namespace prefix, defaults to "Telegraf/<input-name>".
#   # namespace_prefix = "Telegraf/"
#
#   ## Azure Monitor doesn't have a string value type, so convert string
#   ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
#   ## a maximum of 10 dimensions so Telegraf will only send the first 10
#   ## alphanumeric dimensions.
#   # strings_as_dimensions = false
#
#   ## Both region and resource_id must be set or be available via the
#   ## Instance Metadata service on Azure Virtual Machines.
#   #
#   ## Azure Region to publish metrics against.
#   ##   ex: region = "southcentralus"
#   # region = ""
#   #
#   ## The Azure Resource ID against which metrics will be logged.
#   ##   ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
#   # resource_id = ""
#
#   ## Optionally, if in Azure US Government, China or other sovereign
#   ## cloud environment, set appropriate REST endpoint for receiving
#   ## metrics. (Note: region may be unused in this context)
#   # endpoint_url = "https://monitoring.core.usgovcloudapi.net"


# # Publish Telegraf metrics to a Google Cloud PubSub topic
# [[outputs.cloud_pubsub]]
#   ## Required. Name of Google Cloud Platform (GCP) Project that owns
#   ## the given PubSub topic.
#   project = "my-project"
#
#   ## Required. Name of PubSub topic to publish metrics to.
#   topic = "my-topic"
#
#   ## Required. Data format to output.
#   ## Each data format has its own unique set of configuration options.
#   ## Read more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"
#
#   ## Optional. Filepath for GCP credentials JSON file to authorize calls to
#   ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
#   ## Application Default Credentials, which is preferred.
#   # credentials_file = "path/to/my/creds.json"
#
#   ## Optional. If true, will send all metrics per write in one PubSub message.
#   # send_batched = true
#
#   ## The following publish_* parameters specifically configure batching
#   ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
#   ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
#
#   ## Optional. Send a request to PubSub (i.e. actually publish a batch)
#   ## when it has this many PubSub messages. If send_batched is true,
#   ## this is ignored and treated as if it were 1.
#   # publish_count_threshold = 1000
#
#   ## Optional. Send a request to PubSub (i.e. actually publish a batch)
#   ## when its batched messages reach this size in bytes. If send_batched is
#   ## true, this is ignored and treated as if it were 1.
#   # publish_byte_threshold = 1000000
#
#   ## Optional. Specifically configures requests made to the PubSub API.
#   # publish_num_go_routines = 2
#
#   ## Optional. Specifies a timeout for requests to the PubSub API.
#   # publish_timeout = "30s"
#
#   ## Optional. PubSub attributes to add to metrics.
#   # [[outputs.cloud_pubsub.attributes]]
#   #   my_attr = "tag_value"


# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
#   ## Amazon REGION
#   region = "us-east-1"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Endpoint to make request against, the correct endpoint is automatically
#   ## determined and this option should only be set if you wish to override the
#   ## default.
#   ##   ex: endpoint_url = "http://localhost:8000"
#   # endpoint_url = ""
#
#   ## Namespace for the CloudWatch MetricDatums
#   namespace = "InfluxData/Telegraf"
#
#   ## If you have a large amount of metrics, you should consider sending statistic
#   ## values instead of raw metrics, which can not only improve performance but
#   ## also save AWS API cost. If this flag is enabled, the plugin will parse the
#   ## required CloudWatch statistic fields (count, min, max, and sum) and send
#   ## them to CloudWatch. You can use the basicstats aggregator to calculate those
#   ## fields. If not all statistic fields are available, all fields will still be
#   ## sent as raw metrics.
#   # write_statistics = false


# # Configuration for CrateDB to send metrics to.
# [[outputs.cratedb]]
#   # A github.com/jackc/pgx connection string.
#   # See https://godoc.org/github.com/jackc/pgx#ParseDSN
#   url = "postgres://user:password@localhost/schema?sslmode=disable"
#   # Timeout for all CrateDB queries.
#   timeout = "5s"
#   # Name of the table to store metrics in.
#   table = "metrics"
#   # If true, and the metrics table does not exist, create it automatically.
#   table_create = true


# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
#   ## Datadog API key
#   apikey = "my-secret-key" # required.
#
#   # The base endpoint URL can optionally be specified but it defaults to:
#   #url = "https://app.datadoghq.com/api/v1/series"
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Send metrics to nowhere at all
# [[outputs.discard]]
#   # no configuration


# # Configuration for Elasticsearch to send metrics to.
# [[outputs.elasticsearch]]
#   ## The full HTTP endpoint URL for your Elasticsearch instance
#   ## Multiple urls can be specified as part of the same cluster;
#   ## this means that only ONE of the urls will be written to each interval.
#   urls = [ "http://node1.es.example.com:9200" ] # required.
#   ## Elasticsearch client timeout, defaults to "5s" if not set.
#   timeout = "5s"
#   ## Set to true to ask Elasticsearch for a list of all cluster nodes,
#   ## making it unnecessary to list all nodes in the urls config option.
#   enable_sniffer = false
#   ## Set the interval to check if the Elasticsearch nodes are available
#   ## Setting to "0s" will disable the health check (not recommended in production)
#   health_check_interval = "10s"
#   ## HTTP basic authentication details (eg. when using Shield)
#   # username = "telegraf"
#   # password = "mypassword"
#
#   ## Index Config
#   ## The target index for metrics (Elasticsearch will create it if it does not exist).
#   ## You can use the date specifiers below to create indexes per time frame.
#   ## The metric timestamp will be used to decide the destination index name
#   # %Y - year (2016)
#   # %y - last two digits of year (00..99)
#   # %m - month (01..12)
#   # %d - day of month (e.g., 01)
#   # %H - hour (00..23)
#   # %V - week of the year (ISO week) (01..53)
#   ## Additionally, you can specify a tag name using the notation {{tag_name}}
#   ## which will be used as part of the index name. If the tag does not exist,
#   ## the default tag value will be used.
#   # index_name = "telegraf-{{host}}-%Y.%m.%d"
#   # default_tag_value = "none"
#   index_name = "telegraf-%Y.%m.%d" # required.
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Template Config
#   ## Set to true if you want telegraf to manage its index template.
#   ## If enabled, it will create a recommended index template for telegraf indexes
#   manage_template = true
#   ## The template name used for telegraf indexes
#   template_name = "telegraf"
#   ## Set to true if you want telegraf to overwrite an existing template
#   overwrite_template = false


# # Send telegraf metrics to file(s)
# [[outputs.file]]
#   ## Files to write to, "stdout" is a specially handled file.
#   files = ["stdout", "/tmp/metrics.out"]
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for Graphite server to send metrics to
# [[outputs.graphite]]
#   ## TCP endpoint for your graphite instance.
#   ## If multiple endpoints are configured, output will be load balanced.
#   ## Only one of the endpoints will be written to with each iteration.
#   servers = ["localhost:2003"]
#   ## Prefix metrics name
#   prefix = ""
#   ## Graphite output template
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   template = "host.tags.measurement.field"
#
#   ## Enable Graphite tags support
#   # graphite_tag_support = false
#
#   ## timeout in seconds for the write connection to graphite
#   timeout = 2
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
#   ## UDP endpoint for your graylog instance.
#   servers = ["127.0.0.1:12201", "192.168.1.1:12201"]


# # A plugin that can transmit metrics over HTTP
# [[outputs.http]]
#   ## URL is the address to send metrics to
#   url = "http://127.0.0.1:8080/metric"
#
#   ## Timeout for HTTP message
#   # timeout = "5s"
#
#   ## HTTP method, one of: "POST" or "PUT"
#   # method = "POST"
#
#   ## HTTP Basic Auth credentials
#   # username = "username"
#   # password = "pa$$word"
#
#   ## OAuth2 Client Credentials Grant
#   # client_id = "clientid"
#   # client_secret = "secret"
#   # token_url = "https://identityprovider/oauth2/v1/token"
#   # scopes = ["urn:opc:idm:__myscopes__"]
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"
#
#   ## Additional HTTP headers
#   # [outputs.http.headers]
#   #   # Should be set manually to "application/json" for json data_format
#   #   Content-Type = "text/plain; charset=utf-8"
#
#   ## HTTP Content-Encoding for write request body, can be set to "gzip" to
#   ## compress body or "identity" to apply no encoding.
#   # content_encoding = "identity"


# # Configuration for sending metrics to InfluxDB
# [[outputs.influxdb_v2]]
#   ## The URLs of the InfluxDB cluster nodes.
#   ##
#   ## Multiple URLs can be specified for a single cluster; only ONE of the
#   ## urls will be written to each interval.
#   urls = ["http://127.0.0.1:9999"]
#
#   ## Token for authentication.
#   token = ""
#
#   ## Organization is the name of the organization you wish to write to; must exist.
#   organization = ""
#
#   ## Destination bucket to write into.
#   bucket = ""
#
#   ## The value of this tag will be used to determine the bucket. If this
#   ## tag is not set the 'bucket' option is used as the default.
#   # bucket_tag = ""
#
#   ## Timeout for HTTP messages.
#   # timeout = "5s"
#
#   ## Additional HTTP headers
#   # http_headers = {"X-Special-Header" = "Special-Value"}
#
#   ## HTTP Proxy override. If unset, the standard proxy environment
#   ## variables are consulted to determine which proxy, if any, should be used.
#   # http_proxy = "http://corporate.proxy:3128"
#
#   ## HTTP User-Agent
#   # user_agent = "telegraf"
#
#   ## Content-Encoding for write request body, can be set to "gzip" to
#   ## compress body or "identity" to apply no encoding.
#   # content_encoding = "gzip"
#
#   ## Enable or disable uint support for writing uints to InfluxDB 2.0.
#   # influx_uint_support = false
#
#   ## Optional TLS Config for use on HTTP connections.
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
#   ## Project API Token (required)
#   api_token = "API Token" # required
#   ## Prefix the metrics with a given name
#   prefix = ""
#   ## Stats output template (Graphite formatting)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   template = "host.tags.measurement.field"
#   ## Timeout in seconds to connect
#   timeout = "2s"
#   ## Display communication with Instrumental
#   debug = false


# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
#   ## URLs of kafka brokers
#   brokers = ["localhost:9092"]
#   ## Kafka topic for producer messages
#   topic = "telegraf"
#
#   ## Optional Client id
#   # client_id = "Telegraf"
#
#   ## Set the minimal supported Kafka version. Setting this enables the use of new
#   ## Kafka features and APIs. Of particular interest, lz4 compression
#   ## requires at least version 0.10.0.0.
#   ##   ex: version = "1.1.0"
#   # version = ""
#
#   ## Optional topic suffix configuration.
#   ## If the section is omitted, no suffix is used.
#   ## The following topic suffix methods are supported:
#   ##   measurement - suffix equals separator + measurement name
#   ##   tags        - suffix equals separator + specified tag values,
#   ##                 interleaved with the separator
#
#   ## Suffix equals "_" + measurement name
#   # [outputs.kafka.topic_suffix]
#   #   method = "measurement"
#   #   separator = "_"
#
#   ## Suffix equals "__" + the measurement's "foo" tag value.
#   ## If there is no such tag, the suffix equals an empty string
#   # [outputs.kafka.topic_suffix]
#   #   method = "tags"
#   #   keys = ["foo"]
#   #   separator = "__"
#
#   ## Suffix equals "_" + the measurement's "foo" and "bar"
#   ## tag values, separated by "_". If such a tag is missing,
#   ## its value is treated as an empty string.
#   # [outputs.kafka.topic_suffix]
#   #   method = "tags"
#   #   keys = ["foo", "bar"]
#   #   separator = "_"
#
#   ## Telegraf tag to use as a routing key
#   ## ie, if this tag exists, its value will be used as the routing key
#   routing_tag = "host"
#
#   ## Static routing key. Used when no routing_tag is set or as a fallback
#   ## when the tag specified in routing_tag is not found. If set to "random",
#   ## a random value will be generated for each message.
#   ##   ex: routing_key = "random"
#   ##       routing_key = "telegraf"
#   # routing_key = ""
#
#   ## CompressionCodec represents the various compression codecs recognized by
#   ## Kafka in messages.
#   ##   0 : No compression
#   ##   1 : Gzip compression
#   ##   2 : Snappy compression
#   ##   3 : LZ4 compression
#   # compression_codec = 0
#
#   ## RequiredAcks is used in Produce Requests to tell the broker how many
#   ## replica acknowledgements it must see before responding
#   ##    0 : the producer never waits for an acknowledgement from the broker.
#   ##        This option provides the lowest latency but the weakest durability
#   ##        guarantees (some data will be lost when a server fails).
#   ##    1 : the producer gets an acknowledgement after the leader replica has
#   ##        received the data. This option provides better durability as the
#   ##        client waits until the server acknowledges the request as successful
#   ##        (only messages that were written to the now-dead leader but not yet
#   ##        replicated will be lost).
#   ##   -1 : the producer gets an acknowledgement after all in-sync replicas have
#   ##        received the data. This option provides the best durability; no
#   ##        messages will be lost as long as at least one in-sync replica remains.
#   # required_acks = -1
#
#   ## The maximum number of times to retry sending a metric before failing
#   ## until the next flush.
#   # max_retry = 3
#
#   ## The maximum permitted size of a message. Should be set equal to or
#   ## smaller than the broker's 'message.max.bytes'.
#   # max_message_bytes = 1000000
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Optional SASL Config
#   # sasl_username = "kafka"
#   # sasl_password = "secret"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"


# # Configuration for the AWS Kinesis output.
# [[outputs.kinesis]]
#   ## Amazon REGION of kinesis endpoint.
#   region = "ap-southeast-2"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) Assumed credentials via STS if role_arn is specified
#   ## 2) explicit credentials from 'access_key' and 'secret_key'
#   ## 3) shared profile from 'profile'
#   ## 4) environment variables
#   ## 5) shared credentials file
#   ## 6) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#   #token = ""
#   #role_arn = ""
#   #profile = ""
#   #shared_credential_file = ""
#
#   ## Endpoint to make request against, the correct endpoint is automatically
#   ## determined and this option should only be set if you wish to override the
#   ## default.
#   ##   ex: endpoint_url = "http://localhost:8000"
#   # endpoint_url = ""
#
#   ## Kinesis StreamName must exist prior to starting telegraf.
#   streamname = "StreamName"
#   ## DEPRECATED: PartitionKey as used for sharding data.
#   partitionkey = "PartitionKey"
#   ## DEPRECATED: If set the partitionKey will be a random UUID on every put.
#   ## This allows for scaling across multiple shards in a stream.
#   ## This will cause issues with ordering.
#   use_random_partitionkey = false
#   ## The partition key can be calculated using one of several methods:
#   ##
#   ## Use a static value for all writes:
#   # [outputs.kinesis.partition]
#   #   method = "static"
#   #   key = "howdy"
#   #
#   ## Use a random partition key on each write:
#   # [outputs.kinesis.partition]
#   #   method = "random"
#   #
#   ## Use the measurement name as the partition key:
#   # [outputs.kinesis.partition]
#   #   method = "measurement"
#   #
#   ## Use the value of a tag for all writes; if the tag is not set, the
#   ## 'default' value is used. When no default is given, it falls back to "telegraf".
#   # [outputs.kinesis.partition]
#   #   method = "tag"
#   #   key = "host"
#   #   default = "mykey"
#
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"
#
#   ## debug will show upstream aws messages.
#   debug = false


# # Configuration for Librato API to send metrics to.
# [[outputs.librato]]
#   ## Librato API Docs
#   ## http://dev.librato.com/v1/metrics-authentication
#   ## Librato API user
#   api_user = "telegraf@influxdb.com" # required.
#   ## Librato API token
#   api_token = "my-secret-token" # required.
#   ## Debug
#   # debug = false
#   ## Connection timeout.
#   # timeout = "5s"
#   ## Output source Template (same as graphite buckets)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   ## This template is used in librato's source (not metric's name)
#   template = "host"
#


# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
#   servers = ["localhost:1883"] # required.
#
#   ## MQTT outputs send metrics to this topic format
#   ##   "<topic_prefix>/<hostname>/<pluginname>/"
#   ##   ex: prefix/web01.example.com/mem
#   topic_prefix = "telegraf"
#
#   ## QoS policy for messages
#   ##   0 = at most once
#   ##   1 = at least once
#   ##   2 = exactly once
#   # qos = 2
#
#   ## username and password to connect to the MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## client ID, if not set a random ID is generated
#   # client_id = ""
#
#   ## Timeout for write operations. default: 5s
#   # timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## When true, metrics will be sent in one MQTT message per flush. Otherwise,
#   ## metrics are written one metric per MQTT message.
#   # batch = false
#
#   ## When true, metrics will have the RETAIN flag set, making the broker cache
#   ## each entry until someone actually reads it
#   # retain = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NATS
# [[outputs.nats]]
#   ## URLs of NATS servers
#   servers = ["nats://localhost:4222"]
#   ## Optional credentials
#   # username = ""
#   # password = ""
#   ## NATS subject for producer messages
#   subject = "telegraf"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
#   ## Location of nsqd instance listening on TCP
#   server = "localhost:4150"
#   ## NSQ topic for producer messages
#   topic = "telegraf"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
#   ## prefix for metrics keys
#   prefix = "my.specific.prefix."
#
#   ## DNS name of the OpenTSDB server
#   ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
#   ## telnet API. "http://opentsdb.example.com" will use the HTTP API.
#   host = "opentsdb.example.com"
#
#   ## Port of the OpenTSDB server
#   port = 4242
#
#   ## Number of data points to send to OpenTSDB in HTTP requests.
#   ## Not used with telnet API.
#   http_batch_size = 50
#
#   ## URI Path for HTTP requests to OpenTSDB.
#   ## Used in cases where OpenTSDB is located behind a reverse proxy.
#   http_path = "/api/put"
#
#   ## Debug true - Prints OpenTSDB communication
#   debug = false
#
#   ## Separator separates measurement name from field
#   separator = "_"


# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
#   ## Address to listen on
#   listen = ":9273"
#
#   ## Use HTTP Basic Authentication.
#   # basic_username = "Foo"
#   # basic_password = "Bar"
#
#   ## If set, the IP Ranges which are allowed to access metrics.
#   ##   ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
#   # ip_range = []
#
#   ## Path to publish the metrics on.
#   # path = "/metrics"
#
#   ## Expiration interval for each metric. 0 == no expiration
#   # expiration_interval = "60s"
#
#   ## Collectors to exclude; valid entries are "gocollector" and "process".
#   ## If unset, both collectors are enabled.
#   # collectors_exclude = ["gocollector", "process"]
#
#   ## Send string metrics as Prometheus labels.
#   ## Unless set to false all string metrics will be sent as labels.
#   # string_as_label = true
#
#   ## If set, enable TLS with the given certificate.
#   # tls_cert = "/etc/ssl/telegraf.crt"
#   # tls_key = "/etc/ssl/telegraf.key"
#
#   ## Set one or more allowed client CA certificate file names to
#   ## enable mutually authenticated TLS connections
#   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Export metric collection time.
#   # export_timestamp = false


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
#   ## The full TCP or UDP URL of the Riemann server
#   url = "tcp://localhost:5555"
#
#   ## Riemann event TTL, floating-point time in seconds.
#   ## Defines how long an event is considered valid in Riemann
#   # ttl = 30.0
#
#   ## Separator to use between measurement and field name in Riemann service name
#   ## This does not have any effect if 'measurement_as_attribute' is set to 'true'
#   separator = "/"
#
#   ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
#   # measurement_as_attribute = false
#
#   ## Send string metrics as Riemann event states.
#   ## Unless enabled, all string metrics will be ignored
#   # string_as_state = false
#
#   ## A list of tag keys whose values get sent as Riemann tags.
#   ## If empty, all Telegraf tag values will be sent as tags
#   # tag_keys = ["telegraf","custom_tag"]
#
#   ## Additional Riemann tags to send.
#   # tags = ["telegraf-output"]
#
#   ## Description for Riemann event
#   # description_text = "metrics collected from telegraf"
#
#   ## Riemann client write timeout, defaults to "5s" if not set.
#   # timeout = "5s"


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann_legacy]]
#   ## URL of server
#   url = "localhost:5555"
#   ## transport protocol to use either tcp or udp
#   transport = "tcp"
#   ## separator to use between input name and field name in Riemann service name
#   separator = " "


# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
#   ## URL to connect to
#   # address = "tcp://127.0.0.1:8094"
#   # address = "tcp://example.com:http"
#   # address = "tcp4://127.0.0.1:8094"
#   # address = "tcp6://127.0.0.1:8094"
#   # address = "tcp6://[2001:db8::1]:8094"
#   # address = "udp://127.0.0.1:8094"
#   # address = "udp4://127.0.0.1:8094"
#   # address = "udp6://127.0.0.1:8094"
#   # address = "unix:///tmp/telegraf.sock"
#   # address = "unixgram:///tmp/telegraf.sock"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Period between keep alive probes.
#   ## Only applies to TCP sockets.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   # keep_alive_period = "5m"
#
#   ## Data format to generate.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"


# # Configuration for Google Cloud Stackdriver to send metrics to
# [[outputs.stackdriver]]
#   ## GCP Project
#   project = "erudite-bloom-151019"
#
#   ## The namespace for the metric descriptor
#   namespace = "telegraf"
#
#   ## Custom resource type
#   # resource_type = "generic_node"
#
#   ## Additional resource labels
#   # [outputs.stackdriver.resource_labels]
#   #   node_id = "$HOSTNAME"
#   #   namespace = "myapp"
#   #   location = "eu-north0"


# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
#   ## URL for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy
#   ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878
#   url = "https://metrics.wavefront.com"
#
#   ## Authentication Token for Wavefront. Only required if using Direct Ingestion
#   #token = "DUMMY_TOKEN"
#
#   ## DNS name of the wavefront proxy server. Do not use if url is specified
#   #host = "wavefront.example.com"
#
#   ## Port that the Wavefront proxy server listens on. Do not use if url is specified
#   #port = 2878
#
#   ## prefix for metrics keys
#   #prefix = "my.specific.prefix."
#
#   ## whether to use "value" for name of simple fields. default is false
#   #simple_fields = false
#
#   ## character to use between metric and field name. default is . (dot)
#   #metric_separator = "."
#
#   ## Convert metric name paths to use metricSeparator character
#   ## When true, will convert all _ (underscore) characters in the final metric name. default is true
#   #convert_paths = true
#
#   ## Use Regex to sanitize metric and tag names from invalid characters
#   ## Regex is more thorough, but significantly slower. default is false
#   #use_regex = false
#
#   ## point tags to use as the source name for Wavefront (if none found, host will be used)
#   #source_override = ["hostname", "address", "agent_host", "node_host"]
#
#   ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
#   #convert_bool = true
#
#   ## Define a mapping, namespaced by metric prefix, from string values to numeric values
#   ##   deprecated in 1.9; use the enum processor plugin
#   #[[outputs.wavefront.string_to_number.elasticsearch]]
#   #  green = 1.0
#   #  yellow = 0.5
#   #  red = 0.0



###############################################################################
#                            PROCESSOR PLUGINS                                #
###############################################################################

# # Convert values to another metric value type
# [[processors.converter]]
#   ## Tags to convert
#   ##
#   ## The table key determines the target type, and the array of key-values
#   ## select the keys to convert. The array may contain globs.
#   ##   <target-type> = [<tag-key>...]
#   [processors.converter.tags]
#     string = []
#     integer = []
#     unsigned = []
#     boolean = []
#     float = []
#
#   ## Fields to convert
#   ##
#   ## The table key determines the target type, and the array of key-values
#   ## select the keys to convert. The array may contain globs.
#   ##   <target-type> = [<field-key>...]
#   [processors.converter.fields]
#     tag = []
#     string = []
#     integer = []
#     unsigned = []
#     boolean = []
#     float = []
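#
#   ## ex (hypothetical): convert the "port" tag into an integer field and
#   ## the "running" field into a boolean:
#   # [processors.converter.tags]
#   #   integer = ["port"]
#   # [processors.converter.fields]
#   #   boolean = ["running"]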


# # Map enum values according to given table.
# [[processors.enum]]
#   [[processors.enum.mapping]]
#     ## Name of the field to map
#     field = "status"
#
#     ## Destination field to be used for the mapped value. By default the source
#     ## field is used, overwriting the original value.
#     # dest = "status_code"
#
#     ## Default value to be used for all values not contained in the mapping
#     ## table. When unset, the unmodified value for the field will be used if no
#     ## match is found.
#     # default = 0
#
#     ## Table of mappings
#     [processors.enum.mapping.value_mappings]
#       green = 1
#       yellow = 2
#       red = 3


# # Apply metric modifications using override semantics.
# [[processors.override]]
#   ## All modifications on inputs and aggregators can be overridden:
#   # name_override = "new_name"
#   # name_prefix = "new_name_prefix"
#   # name_suffix = "new_name_suffix"
#
#   ## Tags to be added (all values must be strings)
#   # [processors.override.tags]
#   #   additional_tag = "tag_value"


# # Parse a value in a specified field/tag(s) and add the result in a new metric
# [[processors.parser]]
#   ## The names of the fields whose values will be parsed.
#   parse_fields = []
#
#   ## If true, incoming metrics are not emitted.
#   drop_original = false
#
#   ## If set to override, emitted metrics will be merged by overriding the
#   ## original metric using the newly parsed metrics.
#   merge = "override"
#
#   ## The dataformat to be read from files
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Print all metrics that pass through this filter.
# [[processors.printer]]


# # Transforms tag and field values with regex pattern
# [[processors.regex]]
#   ## Tag and field conversions are defined in separate sub-tables
#   # [[processors.regex.tags]]
#   #   ## Tag to change
#   #   key = "resp_code"
#   #   ## Regular expression to match on a tag value
#   #   pattern = "^(\\d)\\d\\d$"
#   #   ## Pattern for constructing a new value (${1} represents first subgroup)
#   #   replacement = "${1}xx"
#
#   # [[processors.regex.fields]]
#   #   key = "request"
#   #   ## All the power of the Go regular expressions available here
#   #   ## For example, named subgroups
#   #   pattern = "^/api(?P<method>/[\\w/]+)\\S*"
#   #   replacement = "${method}"
#   #   ## If result_key is present, a new field will be created
#   #   ## instead of changing the existing field
#   #   result_key = "method"
#
#   ## Multiple conversions may be applied to one field sequentially
#   ## Let's extract one more value
#   # [[processors.regex.fields]]
#   #   key = "request"
#   #   pattern = ".*category=(\\w+).*"
#   #   replacement = "${1}"
#   #   result_key = "search_category"


# # Rename measurements, tags, and fields that pass through this filter.
# [[processors.rename]]


# # Perform string processing on tags, fields, and measurements
# [[processors.strings]]
#   ## Convert a tag value to uppercase
#   # [[processors.strings.uppercase]]
#   #   tag = "method"
#
#   ## Convert a field value to lowercase and store in a new field
#   # [[processors.strings.lowercase]]
#   #   field = "uri_stem"
#   #   dest = "uri_stem_normalised"
#
#   ## Trim leading and trailing whitespace using the default cutset
#   # [[processors.strings.trim]]
#   #   field = "message"
#
#   ## Trim leading characters in cutset
#   # [[processors.strings.trim_left]]
#   #   field = "message"
#   #   cutset = "\t"
#
#   ## Trim trailing characters in cutset
#   # [[processors.strings.trim_right]]
#   #   field = "message"
#   #   cutset = "\r\n"
#
#   ## Trim the given prefix from the field
#   # [[processors.strings.trim_prefix]]
#   #   field = "my_value"
#   #   prefix = "my_"
#
#   ## Trim the given suffix from the field
#   # [[processors.strings.trim_suffix]]
#   #   field = "read_count"
#   #   suffix = "_count"
#
#   ## Replace all non-overlapping instances of old with new
#   # [[processors.strings.replace]]
#   #   measurement = "*"
#   #   old = ":"
#   #   new = "_"


# # Keep only the metrics that fall in the top k over the configured period.
# [[processors.topk]]
#   ## How many seconds between aggregations
#   # period = 10
#
#   ## How many top metrics to return
#   # k = 10
#
#   ## Over which tags should the aggregation be done. Globs can be specified, in
#   ## which case any tag matching the glob will be aggregated over. If set to an
#   ## empty list, no aggregation over tags is done
#   # group_by = ['*']
#
#   ## Over which fields the top k are calculated
#   # fields = ["value"]
#
#   ## What aggregation to use. Options: sum, mean, min, max
#   # aggregation = "mean"
#
#   ## Instead of the top k largest metrics, return the bottom k lowest metrics
#   # bottomk = false
#
#   ## The plugin assigns each metric a GroupBy tag generated from its name and
#   ## tags. If this setting is different from "", the plugin will add a
#   ## tag (whose name will be the value of this setting) to each metric with
#   ## the value of the calculated GroupBy tag. Useful for debugging
#   # add_groupby_tag = ""
#
#   ## These settings provide a way to know the position of each metric in
#   ## the top k. The 'add_rank_fields' setting lets you specify for which
#   ## fields the position is required. If the list is non-empty, then a field
#   ## will be added to each and every metric for each string present in this
#   ## setting. This field will contain the ranking of the group that
#   ## the metric belonged to when aggregated over that field.
#   ## The name of the field will be set to the name of the aggregation field,
#   ## suffixed with the string '_topk_rank'
#   # add_rank_fields = []
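#   ##   ex (hypothetical): add_rank_fields = ["value"] adds a field named
#   ##   'value_topk_rank' holding each metric's rank within its group.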
#
#   ## These settings provide a way to know what values the plugin is generating
#   ## when aggregating metrics. The 'add_aggregate_fields' setting lets you
#   ## specify for which fields the final aggregation value is required. If the
#   ## list is non-empty, then a field will be added to each and every metric for
#   ## each field present in this setting. This field will contain
#   ## the computed aggregation for the group that the metric belonged to when
#   ## aggregated over that field.
#   ## The name of the field will be set to the name of the aggregation field,
#   ## suffixed with the string '_topk_aggregate'
#   # add_aggregate_fields = []



###############################################################################
#                            AGGREGATOR PLUGINS                               #
###############################################################################

# # Keep the aggregate basicstats of each metric passing through.
# [[aggregators.basicstats]]
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#
#   ## Configures which basic stats to push as fields
#   # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]


# # Create aggregate histograms.
# [[aggregators.histogram]]
#   ## The period in which to flush the aggregator.
#   period = "30s"
#
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#
#   ## Example config that aggregates all fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
#   #   ## The name of the metric.
#   #   measurement_name = "cpu"
#
#   ## Example config that aggregates only specific fields of the metric.
#   # [[aggregators.histogram.config]]
#   #   ## The set of buckets.
#   #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
#   #   ## The name of the metric.
#   #   measurement_name = "diskio"
#   #   ## The concrete fields of the metric.
#   #   fields = ["io_time", "read_time", "write_time"]


# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false


# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
#   ## General Aggregator Arguments:
#   ## The period on which to flush & clear the aggregator.
#   period = "30s"
#   ## If true, the original metric will be dropped by the
#   ## aggregator and will not get sent to the output plugins.
#   drop_original = false
#   ## The fields for which the values will be counted
#   fields = []
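#   ##   ex (hypothetical): fields = ["status"] emits one count per distinct
#   ##   value observed in the "status" field.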



###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################

# Read metrics about cpu usage
[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## If true, collect raw CPU time metrics.
  collect_cpu_time = false
  ## If true, compute and report the sum of all non-idle CPU states.
  report_active = false


# Read metrics about disk usage by mount point
[[inputs.disk]]
  ## By default stats will be gathered for all mount points.
  ## Setting mount_points will restrict the stats to only the specified mount points.
  # mount_points = ["/"]

  ## Ignore mount points by filesystem type.
  ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]


# Read metrics about disk IO by device
[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  # devices = ["sda", "sdb", "vd*"]
  ## Uncomment the following line if you need disk serial numbers.
  # skip_serial_number = false
  #
  ## On systems which support it, device metadata can be added in the form of
  ## tags.
  ## Currently only Linux is supported via udev properties. You can view
  ## available properties for a device by running:
  ## 'udevadm info -q property -n /dev/sda'
  ## Note: Most, but not all, udev properties can be accessed this way. Properties
  ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
  # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
  #
  ## Using the same metadata source as device_tags, you can also customize the
  ## name of the device via templates.
  ## The 'name_templates' parameter is a list of templates to try and apply to
  ## the device. The template may contain variables in the form of '$PROPERTY' or
  ## '${PROPERTY}'. The first template which does not contain any variables not
  ## present for the device is used as the device name tag.
  ## The typical use case is for LVM volumes, to get the VG/LV name instead of
  ## the near-meaningless DM-0 name.
  # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]


# Get kernel statistics from /proc/stat
[[inputs.kernel]]
  # no configuration


# Read metrics about memory usage
[[inputs.mem]]
  # no configuration


# Get the number of processes and group them by status
[[inputs.processes]]
  # no configuration


# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration


# Read metrics about system load & uptime
[[inputs.system]]
  # no configuration


# # Gather ActiveMQ metrics
# [[inputs.activemq]]
#   ## Required ActiveMQ Endpoint
#   # server = "192.168.50.10"
#
#   ## Required ActiveMQ port
#   # port = 8161
#
#   ## Credentials for basic HTTP authentication
#   # username = "admin"
#   # password = "admin"
#
#   ## Required ActiveMQ webadmin root path
#   # webadmin = "admin"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Read stats from aerospike server(s)
# [[inputs.aerospike]]
#   ## Aerospike servers to connect to (with port)
#   ## This plugin will query all namespaces the aerospike
#   ## server has configured and get stats for them.
#   servers = ["localhost:3000"]
#
#   # username = "telegraf"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # enable_tls = false
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## If false, skip chain & host verification
#   # insecure_skip_verify = true


# # Read Apache status information (mod_status)
# [[inputs.apache]]
#   ## An array of URLs to gather from, must be directed at the machine
#   ## readable version of the mod_status page including the auto query string.
#   ## Default is "http://localhost/server-status?auto".
#   urls = ["http://localhost/server-status?auto"]
#
#   ## Credentials for basic HTTP authentication.
#   # username = "myuser"
#   # password = "mypassword"
#
#   ## Maximum time to receive response.
#   # response_timeout = "5s"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Gather metrics from Apache Aurora schedulers
# [[inputs.aurora]]
#   ## Schedulers are the base addresses of your Aurora Schedulers
#   schedulers = ["http://127.0.0.1:8081"]
#
#   ## Set of role types to collect metrics from.
#   ##
#   ## The scheduler roles are checked each interval by contacting the
#   ## scheduler nodes; zookeeper is not contacted.
#   # roles = ["leader", "follower"]
#
#   ## Timeout is the max time for total network operations.
#   # timeout = "5s"
#
#   ## Username and password are sent using HTTP Basic Auth.
#   # username = "username"
#   # password = "pa$$word"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
1562
1563
# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
# ## Bcache sets path
# ## If not specified, the default is:
# bcachePath = "/sys/fs/bcache"
#
# ## By default, telegraf gathers stats for all bcache devices
# ## Setting devices will restrict the stats to the specified
# ## bcache devices.
# bcacheDevs = ["bcache0"]


# # Collects Beanstalkd server and tubes stats
# [[inputs.beanstalkd]]
# ## Server to collect data from
# server = "localhost:11300"
#
# ## List of tubes to gather stats about.
# ## If no tubes are specified, stats are gathered for every tube reported by the list-tubes command
# tubes = ["notifications"]


# # Collect bond interface status, slaves statuses and failures count
# [[inputs.bond]]
# ## Sets 'proc' directory path
# ## If not specified, the default is /proc
# # host_proc = "/proc"
#
# ## By default, telegraf gathers stats for all bond interfaces
# ## Setting interfaces will restrict the stats to the specified
# ## bond interfaces.
# # bond_interfaces = ["bond0"]


# # Collect Kafka topic and consumer status from the Burrow HTTP API.
# [[inputs.burrow]]
# ## Burrow API endpoints in format "schema://host:port".
# ## Default is "http://localhost:8000".
# servers = ["http://localhost:8000"]
#
# ## Override Burrow API prefix.
# ## Useful when Burrow is behind reverse-proxy.
# # api_prefix = "/v3/kafka"
#
# ## Maximum time to receive response.
# # response_timeout = "5s"
#
# ## Limit per-server concurrent connections.
# ## Useful in case of large number of topics or consumer groups.
# # concurrent_connections = 20
#
# ## Filter clusters, default is no filtering.
# ## Values can be specified as glob patterns.
# # clusters_include = []
# # clusters_exclude = []
#
# ## Filter consumer groups, default is no filtering.
# ## Values can be specified as glob patterns.
# # groups_include = []
# # groups_exclude = []
#
# ## Filter topics, default is no filtering.
# ## Values can be specified as glob patterns.
# # topics_include = []
# # topics_exclude = []
#
# ## Credentials for basic HTTP authentication.
# # username = ""
# # password = ""
#
# ## Optional SSL config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# # insecure_skip_verify = false


# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
# ## This is the recommended interval to poll. Polling more frequently risks
# ## losing data points due to timeouts during rebalancing and recovery.
# interval = '1m'
#
# ## All configuration values are optional, defaults are shown below
#
# ## location of ceph binary
# ceph_binary = "/usr/bin/ceph"
#
# ## directory in which to look for socket files
# socket_dir = "/var/run/ceph"
#
# ## prefix of MON and OSD socket files, used to determine socket type
# mon_prefix = "ceph-mon"
# osd_prefix = "ceph-osd"
#
# ## suffix used to identify socket files
# socket_suffix = "asok"
#
# ## Ceph user to authenticate as
# ceph_user = "client.admin"
#
# ## Ceph configuration to use to locate the cluster
# ceph_config = "/etc/ceph/ceph.conf"
#
# ## Whether to gather statistics via the admin socket
# gather_admin_socket_stats = true
#
# ## Whether to gather statistics via ceph commands
# gather_cluster_stats = false


# # Read specific statistics per cgroup
# [[inputs.cgroup]]
# ## Directories in which to look for files, globs are supported.
# ## Consider restricting paths to the set of cgroups you really
# ## want to monitor if you have a large number of cgroups, to avoid
# ## any cardinality issues.
# # paths = [
# # "/cgroup/memory",
# # "/cgroup/memory/child1",
# # "/cgroup/memory/child2/*",
# # ]
# ## cgroup stat fields, as file names, globs are supported.
# ## these file names are appended to each path from above.
# # files = ["memory.*usage*", "memory.limit_in_bytes"]
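# #
# ## A minimal (hypothetical) pairing: paths = ["/cgroup/memory/mygroup"] with
# ## files = ["memory.usage_in_bytes"] yields one cgroup measurement per
# ## interval whose field is that cgroup's current memory usage.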


# # Get standard chrony metrics, requires chronyc executable.
# [[inputs.chrony]]
# ## If true, chronyc tries to perform a DNS lookup for the time server.
# # dns_lookup = false


# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
# ## Amazon Region
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# # access_key = ""
# # secret_key = ""
# # token = ""
# # role_arn = ""
# # profile = ""
# # shared_credential_file = ""
#
# ## Endpoint to make request against, the correct endpoint is automatically
# ## determined and this option should only be set if you wish to override the
# ## default.
# ## ex: endpoint_url = "http://localhost:8000"
# # endpoint_url = ""
#
# ## The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
# ## metrics are made available to the 1 minute period. Some are collected at
# ## 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
# ## Note that if a period is configured that is smaller than the minimum for a
# ## particular metric, that metric will not be returned by the Cloudwatch API
# ## and will not be collected by Telegraf.
# #
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
# period = "5m"
#
# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
# delay = "5m"
#
# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
# ## gaps or overlap in pulled data
# interval = "5m"
#
# ## Configure the TTL for the internal cache of metrics.
# ## Defaults to 1 hr if not specified
# # cache_ttl = "10m"
#
# ## Metric Statistic Namespace (required)
# namespace = "AWS/ELB"
#
# ## Maximum requests per second. Note that the global default AWS rate limit is
# ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a
# ## maximum of 400. Optional - default value is 200.
# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
# ratelimit = 200
#
# ## Metrics to Pull (optional)
# ## Defaults to all Metrics in Namespace if nothing is provided
# ## Refreshes Namespace available metrics every 1h
# # [[inputs.cloudwatch.metrics]]
# # names = ["Latency", "RequestCount"]
# #
# # ## Dimension filters for Metric. These are optional however all dimensions
# # ## defined for the metric names must be specified in order to retrieve
# # ## the metric statistics.
# # [[inputs.cloudwatch.metrics.dimensions]]
# # name = "LoadBalancerName"
# # value = "p-example"


# # Collects conntrack stats from the configured directories and files.
# [[inputs.conntrack]]
# ## The following defaults would work with multiple versions of conntrack.
# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
# ## kernel versions, as are the directory locations.
#
# ## Superset of filenames to look for within the conntrack dirs.
# ## Missing files will be ignored.
# files = ["ip_conntrack_count","ip_conntrack_max",
# "nf_conntrack_count","nf_conntrack_max"]
#
# ## Directories to search within for the conntrack files above.
# ## Missing directories will be ignored.
# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]


# # Gather health check statuses from services registered in Consul
# [[inputs.consul]]
# ## Consul server address
# # address = "localhost"
#
# ## URI scheme for the Consul server, one of "http", "https"
# # scheme = "http"
#
# ## ACL token used in every request
# # token = ""
#
# ## HTTP Basic Authentication username and password.
# # username = ""
# # password = ""
#
# ## Data center to query the health checks from
# # datacenter = ""
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = true
#
# ## Consul checks' tag splitting
# ## When tags are formatted like "key:value" with ":" as the delimiter, they
# ## will be split and reported as a proper key:value tag in Telegraf
# # tag_delimiter = ":"
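# ## For example, a check tagged "env:prod" is then reported as the tag
# ## env="prod" rather than as one opaque string.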


# # Read metrics from one or many couchbase clusters
# [[inputs.couchbase]]
# ## specify servers via a url matching:
# ## [protocol://][:password]@address[:port]
# ## e.g.
# ## http://couchbase-0.example.com/
# ## http://admin:secret@couchbase-0.example.com:8091/
# ##
# ## If no servers are specified, then localhost is used as the host.
# ## If no protocol is specified, HTTP is used.
# ## If no port is specified, 8091 is used.
# servers = ["http://localhost:8091"]


# # Read CouchDB Stats from one or more servers
# [[inputs.couchdb]]
# ## Works with CouchDB stats endpoints out of the box
# ## Multiple Hosts from which to read CouchDB stats:
# hosts = ["http://localhost:5984/_stats"]
#
# ## Use HTTP Basic Authentication.
# # basic_username = "telegraf"
# # basic_password = "p@ssw0rd"


# # Input plugin for DC/OS metrics
# [[inputs.dcos]]
# ## The DC/OS cluster URL.
# cluster_url = "https://dcos-ee-master-1"
#
# ## The ID of the service account.
# service_account_id = "telegraf"
# ## The private key file for the service account.
# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
#
# ## Path containing login token. If set, will read on every gather.
# # token_file = "/home/dcos/.dcos/token"
#
# ## In all filter options if both include and exclude are empty all items
# ## will be collected. Arrays may contain glob patterns.
# ##
# ## Node IDs to collect metrics from. If a node is excluded, no metrics will
# ## be collected for its containers or apps.
# # node_include = []
# # node_exclude = []
# ## Container IDs to collect container metrics from.
# # container_include = []
# # container_exclude = []
# ## Container IDs to collect app metrics from.
# # app_include = []
# # app_exclude = []
#
# ## Maximum concurrent connections to the cluster.
# # max_connections = 10
# ## Maximum time to receive a response from cluster.
# # response_timeout = "20s"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## If true, skip chain & host verification
# # insecure_skip_verify = true
#
# ## Recommended filtering to reduce series cardinality.
# # [inputs.dcos.tagdrop]
# # path = ["/var/lib/mesos/slave/slaves/*"]


# # Read metrics from one or many disque servers
# [[inputs.disque]]
# ## An array of URIs to gather stats about. Specify an ip or hostname
# ## with optional port and password.
# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
# ## If no servers are specified, then localhost is used as the host.
# servers = ["localhost"]


# # Provides native collection of dmsetup-based statistics for dm-cache
# [[inputs.dmcache]]
# ## Whether to report per-device stats or not
# per_device = true


# # Query the given DNS server(s) and gather statistics
# [[inputs.dns_query]]
# ## servers to query
# servers = ["8.8.8.8"]
#
# ## Network is the network protocol name.
# # network = "udp"
#
# ## Domains or subdomains to query.
# # domains = ["."]
#
# ## Query record type.
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# # record_type = "A"
#
# ## DNS server port.
# # port = 53
#
# ## Query timeout in seconds.
# # timeout = 2


# # Read metrics about docker containers
# [[inputs.docker]]
# ## Docker Endpoint
# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
#
# ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
# gather_services = false
#
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
#
# ## Containers to include and exclude. Globs accepted.
# ## Note that an empty array for both will include all containers
# container_name_include = []
# container_name_exclude = []
#
# ## Container states to include and exclude. Globs accepted.
# ## When empty only containers in the "running" state will be captured.
# # container_state_include = []
# # container_state_exclude = []
#
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
# ## Whether to report for each container per-device blkio (8:0, 8:1...) and
# ## network (eth0, eth1, ...) stats or not
# perdevice = true
# ## Whether to report for each container total blkio and network stats or not
# total = false
# ## Which environment variables should we use as a tag
# # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
#
# ## docker labels to include and exclude as tags. Globs accepted.
# ## Note that an empty array for both will include all labels as tags
# docker_label_include = []
# docker_label_exclude = []
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Read statistics from one or many dovecot servers
# [[inputs.dovecot]]
# ## specify dovecot servers via an address:port list
# ## e.g.
# ## localhost:24242
# ##
# ## If no servers are specified, then localhost is used as the host.
# servers = ["localhost:24242"]
# ## Type is one of "user", "domain", "ip", or "global"
# type = "global"
# ## Wildcard matches like "*.com". An empty string "" is the same as "*"
# ## If type = "ip" filters should be <IP/network>
# filters = [""]


# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
# ## specify a list of one or more Elasticsearch servers
# # you can add username and password to your url to use basic authentication:
# # servers = ["http://user:pass@localhost:9200"]
# servers = ["http://localhost:9200"]
#
# ## Timeout for HTTP requests to the Elasticsearch server(s)
# http_timeout = "5s"
#
# ## When local is true (the default), the node will read only its own stats.
# ## Set local to false when you want to read the node stats from all nodes
# ## of the cluster.
# local = true
#
# ## Set cluster_health to true when you want to also obtain cluster health stats
# cluster_health = false
#
# ## Adjust cluster_health_level when you want to also obtain detailed health stats
# ## The options are
# ## - indices (default)
# ## - cluster
# # cluster_health_level = "indices"
#
# ## Set cluster_stats to true when you want to also obtain cluster stats.
# cluster_stats = false
#
# ## Only gather cluster_stats from the master node. This requires local = true.
# cluster_stats_only_from_master = true
#
# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
# ## "breaker". By default, all stats are gathered.
# # node_stats = ["jvm", "http"]
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
# ## Commands array
# commands = [
# "/tmp/test.sh",
# "/usr/bin/mycollector --foo=bar",
# "/tmp/collect_*.sh"
# ]
#
# ## Timeout for each command to complete.
# timeout = "5s"
#
# ## measurement name suffix (for separating different commands)
# name_suffix = "_mycollector"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
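#
# ## As an illustrative sketch (hypothetical measurement and values), a script
# ## listed in 'commands' could print influx line protocol such as:
# ## web_requests,host=web01 active=42i,queued=7i
# ## and each such line is parsed into one metric, with name_suffix appended
# ## to the measurement name.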


# # Read metrics from fail2ban.
# [[inputs.fail2ban]]
# ## Use sudo to run fail2ban-client
# use_sudo = false


# # Read devices value(s) from a Fibaro controller
# [[inputs.fibaro]]
# ## Required Fibaro controller address/hostname.
# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
# url = "http://<controller>:80"
#
# ## Required credentials to access the API (http://<controller>/api/<component>)
# username = "<username>"
# password = "<password>"
#
# ## Amount of time allowed to complete the HTTP request
# # timeout = "5s"


# # Reload and gather from file[s] on telegraf's interval.
# [[inputs.file]]
# ## Files to parse each interval.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## /var/log/**.log -> recursively find all .log files in /var/log
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
# ## /var/log/apache.log -> only read the apache log file
# files = ["/var/log/apache/access.log"]
#
# ## The data format to be read from files
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"


# # Count files in a directory
# [[inputs.filecount]]
# ## Directory to gather stats about.
# ## deprecated in 1.9; use the directories option
# # directory = "/var/cache/apt/archives"
#
# ## Directories to gather stats about.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## /var/log/** -> recursively find all directories in /var/log and count files in each directory
# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory
# ## /var/log -> count all files in /var/log and all of its subdirectories
# directories = ["/var/cache/apt/archives"]
#
# ## Only count files that match the name pattern. Defaults to "*".
# name = "*.deb"
#
# ## Count files in subdirectories. Defaults to true.
# recursive = false
#
# ## Only count regular files. Defaults to true.
# regular_only = true
#
# ## Only count files that are at least this size. If size is
# ## a negative number, only count files that are smaller than the
# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
# ## Without quotes and units, interpreted as size in bytes.
# size = "0B"
#
# ## Only count files that have not been touched for at least this
# ## duration. If mtime is negative, only count files that have been
# ## touched in this duration. Defaults to "0s".
# mtime = "0s"
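#
# ## Illustrative (hypothetical) negative values: size = "-1MiB" counts only
# ## files smaller than 1 MiB, and mtime = "-1h" counts only files touched
# ## within the last hour.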


# # Read stats about given file(s)
# [[inputs.filestat]]
# ## Files to gather stats about.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## "/var/log/**.log" -> recursively find all .log files in /var/log
# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
# ## "/var/log/apache.log" -> just tail the apache log file
# ##
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/log/**.log"]
# ## If true, read the entire file and calculate an md5 checksum.
# md5 = false


# # Read metrics exposed by fluentd in_monitor plugin
# [[inputs.fluentd]]
# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
# ##
# ## Endpoint:
# ## - only one URI is allowed
# ## - https is not supported
# endpoint = "http://localhost:24220/api/plugins.json"
#
# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
# exclude = [
# "monitor_agent",
# "dummy",
# ]


# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
# ## API endpoint, currently supported API:
# ##
# ## - multiple (Ex http://<host>:12900/system/metrics/multiple)
# ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
# ##
# ## For namespace endpoint, the metrics array will be ignored for that call.
# ## Endpoint can contain namespace and multiple type calls.
# ##
# ## Please check http://[graylog-server-ip]:12900/api-browser for full list
# ## of endpoints
# servers = [
# "http://[graylog-server-ip]:12900/system/metrics/multiple",
# ]
#
# ## Metrics list
# ## List of metrics can be found on Graylog webservice documentation,
# ## or by hitting the web service API at:
# ## http://[graylog-host]:12900/system/metrics
# metrics = [
# "jvm.cl.loaded",
# "jvm.memory.pools.Metaspace.committed"
# ]
#
# ## Username and password
# username = ""
# password = ""
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Read metrics of haproxy, via socket or csv stats page
# [[inputs.haproxy]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
# ## Make sure you specify the complete path to the stats endpoint
# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
#
# ## If no servers are specified, the default is 127.0.0.1:1936/haproxy?stats
# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
#
# ## Credentials for basic HTTP authentication
# # username = "admin"
# # password = "admin"
#
# ## You can also use local socket with standard wildcard globbing.
# ## Server address not starting with 'http' will be treated as a possible
# ## socket, so both examples below are valid.
# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
#
# ## By default, some of the fields are renamed from what haproxy calls them.
# ## Setting this option to true results in the plugin keeping the original
# ## field names.
# # keep_field_names = false
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Monitor disks' temperatures using hddtemp
# [[inputs.hddtemp]]
# ## By default, telegraf gathers temperature data from all disks detected
# ## by hddtemp.
# ##
# ## Only collect temps from the selected disks.
# ##
# ## A * as the device name will return the temperature values of all disks.
# ##
# # address = "127.0.0.1:7634"
# # devices = ["sda", "*"]


# # Read formatted metrics from one or more HTTP endpoints
# [[inputs.http]]
# ## One or more URLs from which to read formatted metrics
# urls = [
# "http://localhost/metrics"
# ]
#
# ## HTTP method
# # method = "GET"
#
# ## Optional HTTP headers
# # headers = {"X-Special-Header" = "Special-Value"}
#
# ## Optional HTTP Basic Auth Credentials
# # username = "username"
# # password = "pa$$word"
#
# ## HTTP entity-body to send with POST/PUT requests.
# # body = ""
#
# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "identity"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Amount of time allowed to complete the HTTP request
# # timeout = "5s"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# # data_format = "influx"


# # HTTP/HTTPS request given an address, a method and a timeout
# [[inputs.http_response]]
# ## Server address (default http://localhost)
# # address = "http://localhost"
#
# ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
# # http_proxy = "http://localhost:8888"
#
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
#
# ## HTTP Request Method
# # method = "GET"
#
# ## Whether to follow redirects from the server (defaults to false)
# # follow_redirects = false
#
# ## Optional HTTP Request Body
# # body = '''
# # {'fake':'data'}
# # '''
#
# ## Optional substring or regex match in body of the response
# # response_string_match = "\"service_status\": \"up\""
# # response_string_match = "ok"
# # response_string_match = "\".*_status\".?:.?\"up\""
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"


# # Read flattened metrics from one or more JSON HTTP endpoints
# [[inputs.httpjson]]
# ## NOTE: This plugin only reads numerical measurements; strings and booleans
# ## will be ignored.
#
# ## Name for the service being polled. Will be appended to the name of the
# ## measurement e.g. httpjson_webserver_stats
# ##
# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
# name = "webserver_stats"
#
# ## URL of each server in the service's cluster
# servers = [
# "http://localhost:9999/stats/",
# "http://localhost:9998/stats/",
# ]
# ## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
#
# ## HTTP method to use: GET or POST (case-sensitive)
# method = "GET"
#
# ## List of tag names to extract from top-level of JSON server response
# # tag_keys = [
# # "my_tag_1",
# # "my_tag_2"
# # ]
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## HTTP parameters (all values must be strings). For "GET" requests, data
# ## will be included in the query. For "POST" requests, data will be included
# ## in the request body as "x-www-form-urlencoded".
# # [inputs.httpjson.parameters]
# # event_type = "cpu_spike"
# # threshold = "0.75"
#
# ## HTTP Headers (all values must be strings)
# # [inputs.httpjson.headers]
# # X-Auth-Token = "my-xauth-token"
# # apiVersion = "v1"


# # Gather Icinga2 status
# [[inputs.icinga2]]
# ## Required Icinga2 server address (default: "https://localhost:5665")
# # server = "https://localhost:5665"
#
# ## Required Icinga2 object type ("services" or "hosts", default "services")
# # object_type = "services"
#
# ## Credentials for basic HTTP authentication
# # username = "admin"
# # password = "admin"
#
# ## Maximum time to receive response.
# # response_timeout = "5s"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = true


# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.influxdb]]
# ## Works with InfluxDB debug endpoints out of the box,
# ## but other services can use this format too.
# ## See the influxdb plugin's README for more details.
#
# ## Multiple URLs from which to read InfluxDB-formatted JSON
# ## Default is "http://localhost:8086/debug/vars".
# urls = [
# "http://localhost:8086/debug/vars"
# ]
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## http request & header timeout
# timeout = "5s"


# # Collect statistics about itself
# [[inputs.internal]]
# ## If true, collect telegraf memory stats.
# # collect_memstats = true


# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
# [[inputs.interrupts]]
# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
# ## stored as a field.
# ##
# ## The default is false for backwards compatibility, and will be changed to
# ## true in a future version. It is recommended to set to true on new
# ## deployments.
# # cpu_as_tag = false
#
# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
# # [inputs.interrupts.tagdrop]
# # irq = [ "NET_RX", "TASKLET" ]


# # Read metrics from the bare metal servers via IPMI
# [[inputs.ipmi_sensor]]
# ## optionally specify the path to the ipmitool executable
# # path = "/usr/bin/ipmitool"
# ##
# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
# # privilege = "ADMINISTRATOR"
# ##
# ## optionally specify one or more servers via a url matching
# ## [username[:password]@][protocol[(address)]]
# ## e.g.
# ## root:passwd@lan(127.0.0.1)
# ##
# ## if no servers are specified, local machine sensor stats will be queried
# ##
# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
#
# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
# ## gaps or overlap in pulled data
# interval = "30s"
#
# ## Timeout for the ipmitool command to complete
# timeout = "20s"
#
# ## Schema Version: (Optional, defaults to version 1)
# metric_version = 2


# # Gather packets and bytes counters from Linux ipsets
# [[inputs.ipset]]
# ## By default, we only show sets which have already matched at least 1 packet.
# ## set include_unmatched_sets = true to gather them all.
# include_unmatched_sets = false
# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
# use_sudo = false
# ## The default timeout of 1s for ipset execution can be overridden here:
# # timeout = "1s"


# # Gather packets and bytes throughput from iptables
# [[inputs.iptables]]
# ## iptables require root access on most systems.
# ## Setting 'use_sudo' to true will make use of sudo to run iptables.
# ## Users must configure sudo to allow the telegraf user to run iptables with no password.
# ## iptables can be restricted to only list command "iptables -nvL".
# use_sudo = false
# ## Setting 'use_lock' to true runs iptables with the "-w" option.
# ## Adjust your sudo settings appropriately if using this option ("iptables -wnvL")
# use_lock = false
# ## Define an alternate executable, such as "ip6tables". Default is "iptables".
# # binary = "ip6tables"
# ## defines the table to monitor:
# table = "filter"
# ## defines the chains to monitor.
# ## NOTE: iptables rules without a comment will not be monitored.
# ## Read the plugin documentation for more information.
# chains = [ "INPUT" ]
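#
# ## For example (hypothetical rule): a rule added with
# ## iptables -A INPUT -p tcp --dport 22 -m comment --comment "ssh_in"
# ## is monitored and identified by its comment, while an identical rule
# ## without a comment is skipped.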


# # Collect virtual and real server stats from Linux IPVS
# [[inputs.ipvs]]
# # no configuration


# # Read jobs and cluster metrics from Jenkins instances
# [[inputs.jenkins]]
# ## The Jenkins URL
# url = "http://my-jenkins-instance:8080"
# # username = "admin"
# # password = "admin"
#
# ## Set response_timeout
# response_timeout = "5s"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional Max Job Build Age filter
# ## Default 1 hour, ignore builds older than max_build_age
# # max_build_age = "1h"
#
# ## Optional Sub Job Depth filter
# ## Jenkins can have unlimited layers of sub jobs
# ## This config will limit the layers of pulling; the default value 0 means
# ## unlimited pulling until there are no more sub jobs
# # max_subjob_depth = 0
#
# ## Optional Sub Job Per Layer
# ## In workflow-multibranch-plugin, each branch will be created as a sub job.
# ## This config limits pulling to only the latest branches in each layer;
# ## empty will use the default value 10
# # max_subjob_per_layer = 10
#
# ## Jobs to exclude from gathering
# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
#
# ## Nodes to exclude from gathering
# # node_exclude = [ "node1", "node2" ]
#
# ## Worker pool for jenkins plugin only
# ## Leaving this field empty will use the default value 5
# # max_connections = 5


# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# ## DEPRECATED: the jolokia plugin has been deprecated in favor of the
# ## jolokia2 plugin
# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
#
# ## This is the context root used to compose the jolokia url
# ## NOTE that Jolokia requires a trailing slash at the end of the context root
# ## NOTE that your jolokia security policy must allow for POST requests.
# context = "/jolokia/"
#
# ## This specifies the mode used
# # mode = "proxy"
# #
# ## When in proxy mode this section is used to specify further
# ## proxy address configurations.
# ## Remember to change host address to fit your environment.
# # [inputs.jolokia.proxy]
# # host = "127.0.0.1"
# # port = "8080"
#
# ## Optional http timeouts
# ##
# ## response_header_timeout, if non-zero, specifies the amount of time to wait
# ## for a server's response headers after fully writing the request.
# # response_header_timeout = "3s"
# ##
# ## client_timeout specifies a time limit for requests made by this client.
# ## Includes connection time, any redirects, and reading the response body.
# # client_timeout = "4s"
#
# ## Attribute delimiter
# ##
# ## When multiple attributes are returned for a single
# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
# ## name, and the attribute name, separated by the given delimiter.
# # delimiter = "_"
#
# ## List of servers exposing jolokia read service
# [[inputs.jolokia.servers]]
# name = "as-server-01"
# host = "127.0.0.1"
# port = "8080"
# # username = "myuser"
# # password = "mypassword"
#
# ## List of metrics collected on above servers
# ## Each metric consists of a name, a jmx path, and either
# ## a pass or drop slice attribute.
# ## This collects all heap memory usage metrics.
# [[inputs.jolokia.metrics]]
# name = "heap_memory_usage"
# mbean = "java.lang:type=Memory"
# attribute = "HeapMemoryUsage"
#
# ## This collects thread count metrics.
# [[inputs.jolokia.metrics]]
# name = "thread_count"
# mbean = "java.lang:type=Threading"
# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
#
# ## This collects class loaded/unloaded count metrics.
# [[inputs.jolokia.metrics]]
# name = "class_count"
# mbean = "java.lang:type=ClassLoading"
# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"


# # Read JMX metrics from a Jolokia REST agent endpoint
# [[inputs.jolokia2_agent]]
# # default_tag_prefix = ""
# # default_field_prefix = ""
# # default_field_separator = "."
#
# ## Add agents URLs to query
# urls = ["http://localhost:8080/jolokia"]
# # username = ""
# # password = ""
# # response_timeout = "5s"
#
# ## Optional TLS config
# # tls_ca = "/var/private/ca.pem"
# # tls_cert = "/var/private/client.pem"
# # tls_key = "/var/private/client-key.pem"
# # insecure_skip_verify = false
#
# ## Add metrics to read
# [[inputs.jolokia2_agent.metric]]
# name = "java_runtime"
# mbean = "java.lang:type=Runtime"
# paths = ["Uptime"]
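#
# ## A second, hypothetical metric block showing multiple JMX paths:
# # [[inputs.jolokia2_agent.metric]]
# # name = "jvm_memory"
# # mbean = "java.lang:type=Memory"
# # paths = ["HeapMemoryUsage", "NonHeapMemoryUsage"]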


# # Read JMX metrics from a Jolokia REST proxy endpoint
# [[inputs.jolokia2_proxy]]
# # default_tag_prefix = ""
# # default_field_prefix = ""
# # default_field_separator = "."
#
# ## Proxy agent
# url = "http://localhost:8080/jolokia"
# # username = ""
# # password = ""
# # response_timeout = "5s"
#
# ## Optional TLS config
# # tls_ca = "/var/private/ca.pem"
# # tls_cert = "/var/private/client.pem"
# # tls_key = "/var/private/client-key.pem"
# # insecure_skip_verify = false
#
# ## Add proxy targets to query
# # default_target_username = ""
# # default_target_password = ""
# [[inputs.jolokia2_proxy.target]]
# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
# # username = ""
# # password = ""
#
# ## Add metrics to read
# [[inputs.jolokia2_proxy.metric]]
# name = "java_runtime"
# mbean = "java.lang:type=Runtime"
# paths = ["Uptime"]


# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.kapacitor]]
# ## Multiple URLs from which to read Kapacitor-formatted JSON
# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
# urls = [
# "http://localhost:9092/kapacitor/v1/debug/vars"
# ]
#
# ## Time limit for http requests
# timeout = "5s"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Get kernel statistics from /proc/vmstat
# [[inputs.kernel_vmstat]]
# # no configuration


# # Read status information from one or more Kibana servers
# [[inputs.kibana]]
# ## specify a list of one or more Kibana servers
# servers = ["http://localhost:5601"]
#
# ## Timeout for HTTP requests
# timeout = "5s"
#
# ## HTTP Basic Auth credentials
# # username = "username"
# # password = "pa$$word"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Read metrics from the Kubernetes api
# [[inputs.kube_inventory]]
# ## URL for the Kubernetes API
# url = "https://127.0.0.1"
#
# ## Namespace to use. Set to "" to use all namespaces.
# # namespace = "default"
#
# ## Use bearer token for authorization. ('bearer_token' takes priority)
# # bearer_token = "/path/to/bearer/token"
# ## OR
# # bearer_token_string = "abc_123"
#
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
#
# ## Optional Resources to exclude from gathering
# ## Leave blank to gather everything available.
# ## Values can be - "daemonsets", "deployments", "nodes", "persistentvolumes",
# ## "persistentvolumeclaims", "pods", "statefulsets"
# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
#
# ## Optional Resources to include when gathering
# ## Overrides resource_exclude if both set.
# # resource_include = [ "deployments", "nodes", "statefulsets" ]
#
# ## Optional TLS Config
# # tls_ca = "/path/to/cafile"
# # tls_cert = "/path/to/certfile"
# # tls_key = "/path/to/keyfile"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Read metrics from the kubernetes kubelet api
# [[inputs.kubernetes]]
# ## URL for the kubelet
# url = "http://127.0.0.1:10255"
#
# ## Use bearer token for authorization. ('bearer_token' takes priority)
# # bearer_token = "/path/to/bearer/token"
# ## OR
# # bearer_token_string = "abc_123"
#
# ## Set response_timeout (default 5 seconds)
# # response_timeout = "5s"
#
# ## Optional TLS Config
# # tls_ca = "/path/to/cafile"
# # tls_cert = "/path/to/certfile"
# # tls_key = "/path/to/keyfile"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
# ## An array of URLs of the form:
# ## host [ ":" port]
# servers = ["127.0.0.1:4020"]


# # Provides Linux sysctl fs metrics
# [[inputs.linux_sysctl_fs]]
# # no configuration


# # Read metrics from local Lustre service on OST, MDS
# [[inputs.lustre2]]
# ## An array of /proc globs to search for Lustre stats
# ## If not specified, the default will work on Lustre 2.5.x
# ##
# # ost_procfiles = [
# # "/proc/fs/lustre/obdfilter/*/stats",
# # "/proc/fs/lustre/osd-ldiskfs/*/stats",
# # "/proc/fs/lustre/obdfilter/*/job_stats",
# # ]
# # mds_procfiles = [
# # "/proc/fs/lustre/mdt/*/md_stats",
# # "/proc/fs/lustre/mdt/*/job_stats",
# # ]


# # Gathers metrics from the /3.0/reports MailChimp API
# [[inputs.mailchimp]]
# ## MailChimp API key
# ## get from https://admin.mailchimp.com/account/api/
# api_key = "" # required
# ## Reports for campaigns sent more than days_old ago will not be collected.
# ## 0 means collect all.
# days_old = 0
# ## Campaign ID to get; if empty, gets all campaigns. This option overrides days_old.
# # campaign_id = ""


# # Read metrics from one or many mcrouter servers
# [[inputs.mcrouter]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
#
# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
# # timeout = "5s"


# # Read metrics from one or many memcached servers
# [[inputs.memcached]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with optional port. ie localhost, 10.0.0.1:11211, etc.
# servers = ["localhost:11211"]
# # unix_sockets = ["/var/run/memcached.sock"]


# # Telegraf plugin for gathering metrics from N Mesos masters
# [[inputs.mesos]]
# ## Timeout, in ms.
# timeout = 100
# ## A list of Mesos masters.
# masters = ["http://localhost:5050"]
# ## Master metrics groups to be collected, by default, all enabled.
# master_collections = [
# "resources",
# "master",
# "system",
# "agents",
# "frameworks",
# "tasks",
# "messages",
# "evqueue",
# "registrar",
# ]
# ## A list of Mesos slaves, default is []
# # slaves = []
# ## Slave metrics groups to be collected, by default, all enabled.
# # slave_collections = [
# # "resources",
# # "agent",
# # "system",
# # "executors",
# # "tasks",
# # "messages",
# # ]
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Collects scores from a minecraft server's scoreboard using the RCON protocol
# [[inputs.minecraft]]
# ## server address for minecraft
# # server = "localhost"
# ## port for RCON
# # port = "25575"
# ## RCON password for the minecraft server
# # password = ""


# # Read metrics from one or many MongoDB servers
# [[inputs.mongodb]]
# ## An array of URLs of the form:
# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
# ## For example:
# ## mongodb://user:auth_key@10.10.3.30:27017,
# ## mongodb://10.10.3.33:18832,
# servers = ["mongodb://127.0.0.1:27017"]
#
# ## When true, collect per database stats
# # gather_perdb_stats = false
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Aggregates the contents of multiple files into a single point
# [[inputs.multifile]]
# ## Base directory where telegraf will look for files.
# ## Omit this option to use absolute paths.
# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
#
# ## If true, Telegraf discards all data when a single file can't be read.
# ## Otherwise, Telegraf omits the field generated from this file.
# # fail_early = true
#
# ## Files to parse each interval.
# [[inputs.multifile.file]]
# file = "in_pressure_input"
# dest = "pressure"
# conversion = "float"
# [[inputs.multifile.file]]
# file = "in_temp_input"
# dest = "temperature"
# conversion = "float(3)"
# [[inputs.multifile.file]]
# file = "in_humidityrelative_input"
# dest = "humidityrelative"
# conversion = "float(3)"
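#
# ## With the "float(3)" conversion above, the raw value's decimal point moves
# ## three places left: e.g. a (hypothetical) raw reading of 23450 is reported
# ## as 23.450.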


# # Read metrics from one or many mysql servers
# [[inputs.mysql]]
# ## specify servers via a url matching:
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
# ## e.g.
# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
# #
# ## If no servers are specified, then localhost is used as the host.
# servers = ["tcp(127.0.0.1:3306)/"]
#
# ## Selects the metric output format.
# ##
# ## This option exists to maintain backwards compatibility; if you have
# ## existing metrics, do not set or change this value until you are ready to
# ## migrate to the new format.
# ##
# ## If you do not have existing metrics from this plugin, set to the latest
# ## version.
# ##
# ## Telegraf >=1.6: metric_version = 2
# ## <1.6: metric_version = 1 (or unset)
# metric_version = 2
#
# ## the limits for metrics from perf_events_statements
# perf_events_statements_digest_text_limit = 120
# perf_events_statements_limit = 250
# perf_events_statements_time_limit = 86400
# #
# ## if the list is empty, then metrics are gathered from all database tables
# table_schema_databases = []
# #
# ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases in the list above
# gather_table_schema = false
# #
# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
# gather_process_list = true
# #
# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
# gather_user_statistics = true
# #
# ## gather auto_increment columns and max values from information schema
# gather_info_schema_auto_inc = true
# #
# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
# gather_innodb_metrics = true
# #
# ## gather metrics from SHOW SLAVE STATUS command output
# gather_slave_status = true
# #
# ## gather metrics from SHOW BINARY LOGS command output
# gather_binary_logs = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
# gather_table_io_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
# gather_table_lock_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
# gather_index_io_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
# gather_event_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
# gather_file_events_stats = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
# gather_perf_events_statements = false
# #
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
# interval_slow = "30m"
#
# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false


# # Provides metrics about the state of a NATS server
# [[inputs.nats]]
# ## The address of the monitoring endpoint of the NATS server
# server = "http://localhost:8222"
#
# ## Maximum time to receive response
# # response_timeout = "5s"


# # Neptune Apex data collector
# [[inputs.neptune_apex]]
# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
# ## Measurements will be logged under "apex".
#
# ## The base URL of the local Apex(es). If you specify more than one server, they will
# ## be differentiated by the "source" tag.
# servers = [
# "http://apex.local",
# ]
#
# ## The response_timeout specifies how long to wait for a reply from the Apex.
# # response_timeout = "5s"


# # Read metrics about network interface usage
# [[inputs.net]]
# ## By default, telegraf gathers stats from any up interface (excluding loopback)
# ## Setting interfaces will tell it to gather these explicit interfaces,
# ## regardless of status.
# ##
# # interfaces = ["eth0"]
# ##
# ## On linux systems telegraf also collects protocol stats.
# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
# ##
# # ignore_protocol_stats = false
# ##


# # Collect response time of a TCP or UDP connection
# [[inputs.net_response]]
# ## Protocol, must be "tcp" or "udp"
# ## NOTE: because the "udp" protocol does not respond to requests, it requires
# ## a send/expect string pair (see below).
# protocol = "tcp"
# ## Server address (default localhost)
# address = "localhost:80"
#
# ## Set timeout
# # timeout = "1s"
#
# ## Set read timeout (only used if expecting a response)
# # read_timeout = "1s"
#
# ## The following options are required for UDP checks. For TCP, they are
# ## optional. The plugin will send the given string to the server and then
# ## expect to receive the given 'expect' string back.
# ## string sent to the server
# # send = "ssh"
# ## expected string in answer
# # expect = "ssh"
#
# ## Uncomment to remove deprecated fields
# # fieldexclude = ["result_type", "string_found"]
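#
# ## A hypothetical TCP send/expect pair against a local Redis server, which
# ## answers an inline "PING" with "+PONG":
# # protocol = "tcp"
# # address = "localhost:6379"
# # send = "PING\r\n"
# # expect = "PONG"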


# # Read TCP metrics such as established, time wait and sockets counts.
# [[inputs.netstat]]
# # no configuration


# # Read Nginx's basic status information (ngx_http_stub_status_module)
# [[inputs.nginx]]
# ## An array of Nginx stub_status URIs to gather stats from.
# urls = ["http://localhost/server_status"]
#
# ## Optional TLS Config
# tls_ca = "/etc/telegraf/ca.pem"
# tls_cert = "/etc/telegraf/cert.cer"
# tls_key = "/etc/telegraf/key.key"
# ## Use TLS but skip chain & host verification
# insecure_skip_verify = false
#
# ## HTTP response timeout (default: 5s)
# response_timeout = "5s"


# # Read Nginx Plus' full status information (ngx_http_status_module)
# [[inputs.nginx_plus]]
# ## An array of ngx_http_status_module status URIs to gather stats from.
# urls = ["http://localhost/status"]
#
# ## HTTP response timeout (default: 5s)
# response_timeout = "5s"


# # Read Nginx Plus API status information (ngx_http_api_module)
# [[inputs.nginx_plus_api]]
# ## An array of API URIs to gather stats from.
# urls = ["http://localhost/api"]
#
# ## Nginx API version, default: 3
# # api_version = 3
#
# ## HTTP response timeout (default: 5s)
# response_timeout = "5s"


3103# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)
3104# [[inputs.nginx_upstream_check]]
# ## A URL where the Nginx Upstream check module is enabled
# ## It should be set to return a JSON-formatted response
3107# url = "http://127.0.0.1/status?format=json"
3108#
3109# ## HTTP method
3110# # method = "GET"
3111#
3112# ## Optional HTTP headers
3113# # headers = {"X-Special-Header" = "Special-Value"}
3114#
3115# ## Override HTTP "Host" header
3116# # host_header = "check.example.com"
3117#
3118# ## Timeout for HTTP requests
3119# timeout = "5s"
3120#
3121# ## Optional HTTP Basic Auth credentials
3122# # username = "username"
3123# # password = "pa$$word"
3124#
3125# ## Optional TLS Config
3126# # tls_ca = "/etc/telegraf/ca.pem"
3127# # tls_cert = "/etc/telegraf/cert.pem"
3128# # tls_key = "/etc/telegraf/key.pem"
3129# ## Use TLS but skip chain & host verification
3130# # insecure_skip_verify = false
3131
3132
3133# # Read Nginx virtual host traffic status module information (nginx-module-vts)
3134# [[inputs.nginx_vts]]
# ## An array of nginx-module-vts status URIs to gather stats from.
3136# urls = ["http://localhost/status"]
3137#
3138# ## HTTP response timeout (default: 5s)
3139# response_timeout = "5s"
3140
3141
3142# # Read NSQ topic and channel statistics.
3143# [[inputs.nsq]]
3144# ## An array of NSQD HTTP API endpoints
3145# endpoints = ["http://localhost:4151"]
3146#
3147# ## Optional TLS Config
3148# # tls_ca = "/etc/telegraf/ca.pem"
3149# # tls_cert = "/etc/telegraf/cert.pem"
3150# # tls_key = "/etc/telegraf/key.pem"
3151# ## Use TLS but skip chain & host verification
3152# # insecure_skip_verify = false
3153
3154
3155# # Collect kernel snmp counters and network interface statistics
3156# [[inputs.nstat]]
# ## file paths for proc files. If empty, default paths will be used:
3158# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
3159# ## These can also be overridden with env variables, see README.
3160# proc_net_netstat = "/proc/net/netstat"
3161# proc_net_snmp = "/proc/net/snmp"
3162# proc_net_snmp6 = "/proc/net/snmp6"
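# ## The same paths can also be supplied via the PROC_NET_NETSTAT,
# ## PROC_NET_SNMP and PROC_NET_SNMP6 environment variables (names as
# ## documented in the plugin README).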
3163# ## dump metrics with 0 values too
3164# dump_zeros = true
3165
3166
3167# # Get standard NTP query metrics, requires ntpq executable.
3168# [[inputs.ntpq]]
# ## If false, the -n flag is passed to ntpq (skipping DNS lookups), which can reduce metric gathering time.
3170# dns_lookup = true
3171
3172
3173# # Pulls statistics from nvidia GPUs attached to the host
3174# [[inputs.nvidia_smi]]
3175# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
3176# # bin_path = "/usr/bin/nvidia-smi"
3177#
3178# ## Optional: timeout for GPU polling
3179# # timeout = "5s"
3180
3181
3182# # OpenLDAP cn=Monitor plugin
3183# [[inputs.openldap]]
3184# host = "localhost"
3185# port = 389
3186#
# # ldaps, starttls, or no encryption. Default is an empty string, disabling all encryption.
# # Note that the port will likely need to be changed to 636 for ldaps
3189# # valid options: "" | "starttls" | "ldaps"
3190# tls = ""
3191#
3192# # skip peer certificate verification. Default is false.
3193# insecure_skip_verify = false
3194#
3195# # Path to PEM-encoded Root certificate to use to verify server certificate
3196# tls_ca = "/etc/ssl/certs.pem"
3197#
3198# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
3199# bind_dn = ""
3200# bind_password = ""
3201#
3202# # Reverse metric names so they sort more naturally. Recommended.
3203# # This defaults to false if unset, but is set to true when generating a new config
3204# reverse_metric_names = true
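#
# ## Example (hypothetical host, illustrative only): a typical ldaps setup
# ## pairs tls = "ldaps" with the conventional port 636.
# # host = "ldap.example.com"
# # port = 636
# # tls = "ldaps"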
3205
3206
# # A plugin to collect stats from OpenSMTPD - a free implementation of the server-side SMTP protocol
3208# [[inputs.opensmtpd]]
3209# ## If running as a restricted user you can prepend sudo for additional access:
3210# #use_sudo = false
3211#
3212# ## The default location of the smtpctl binary can be overridden with:
3213# binary = "/usr/sbin/smtpctl"
3214#
# ## The default timeout of 1000ms can be overridden with (in milliseconds):
3216# timeout = 1000
3217
3218
3219# # Read metrics of passenger using passenger-status
3220# [[inputs.passenger]]
3221# ## Path of passenger-status.
3222# ##
# ## The plugin gathers metrics by parsing the XML output of passenger-status
3224# ## More information about the tool:
3225# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
3226# ##
# ## If no path is specified, then the plugin simply executes passenger-status,
# ## which hopefully can be found in your PATH
3229# command = "passenger-status -v --show=xml"
3230
3231
3232# # Gather counters from PF
3233# [[inputs.pf]]
# ## PF requires root access on most systems.
# ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
# ## Users must configure sudo to allow the telegraf user to run pfctl with no password.
# ## pfctl can be restricted to only the list command "pfctl -s info".
3238# use_sudo = false
3239
3240
3241# # Read metrics of phpfpm, via HTTP status page or socket
3242# [[inputs.phpfpm]]
3243# ## An array of addresses to gather stats about. Specify an ip or hostname
3244# ## with optional port and path
3245# ##
# ## Plugin can be configured in three modes (any of them can be used):
3247# ## - http: the URL must start with http:// or https://, ie:
3248# ## "http://localhost/status"
3249# ## "http://192.168.130.1/status?full"
3250# ##
3251# ## - unixsocket: path to fpm socket, ie:
3252# ## "/var/run/php5-fpm.sock"
3253# ## or using a custom fpm status path:
3254# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
3255# ##
3256# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
3257# ## "fcgi://10.0.0.12:9000/status"
3258# ## "cgi://10.0.10.12:9001/status"
3259# ##
# ## Example of gathering from a local socket and a remote host
3261# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
3262# urls = ["http://localhost/status"]
3263
3264
3265# # Ping given url(s) and return statistics
3266# [[inputs.ping]]
3267# ## List of urls to ping
3268# urls = ["example.org"]
3269#
3270# ## Number of pings to send per collection (ping -c <COUNT>)
3271# # count = 1
3272#
3273# ## Interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
3274# ## Not available in Windows.
3275# # ping_interval = 1.0
3276#
3277# ## Per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
3278# # timeout = 1.0
3279#
3280# ## Total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
3281# # deadline = 10
3282#
3283# ## Interface or source address to send ping from (ping -I <INTERFACE/SRC_ADDR>)
# ## On Darwin and FreeBSD only a source address is possible: (ping -S <SRC_ADDR>)
3285# # interface = ""
3286#
3287# ## Specify the ping executable binary, default is "ping"
3288# # binary = "ping"
3289#
3290# ## Arguments for ping command
# ## When arguments is not empty, other options (ping_interval, timeout, etc.) will be ignored
3292# # arguments = ["-c", "3"]
3293
3294
3295# # Measure postfix queue statistics
3296# [[inputs.postfix]]
3297# ## Postfix queue directory. If not provided, telegraf will try to use
3298# ## 'postconf -h queue_directory' to determine it.
3299# # queue_directory = "/var/spool/postfix"
3300
3301
3302# # Read metrics from one or many PowerDNS servers
3303# [[inputs.powerdns]]
3304# ## An array of sockets to gather stats about.
3305# ## Specify a path to unix socket.
3306# unix_sockets = ["/var/run/pdns.controlsocket"]
3307
3308
3309# # Monitor process cpu and memory usage
3310# [[inputs.procstat]]
3311# ## PID file to monitor process
3312# pid_file = "/var/run/nginx.pid"
3313# ## executable name (ie, pgrep <exe>)
3314# # exe = "nginx"
3315# ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
3316# # pattern = "nginx"
3317# ## user as argument for pgrep (ie, pgrep -u <user>)
3318# # user = "nginx"
3319# ## Systemd unit name
3320# # systemd_unit = "nginx.service"
3321# ## CGroup name or path
3322# # cgroup = "systemd/system.slice/nginx.service"
3323#
3324# ## Windows service name
3325# # win_service = ""
3326#
3327# ## override for process_name
3328# ## This is optional; default is sourced from /proc/<pid>/status
3329# # process_name = "bar"
3330#
3331# ## Field name prefix
3332# # prefix = ""
3333#
3334# ## Add PID as a tag instead of a field; useful to differentiate between
3335# ## processes whose tags are otherwise the same. Can create a large number
3336# ## of series, use judiciously.
3337# # pid_tag = false
3338#
# ## Method to use when finding process IDs. Can be one of 'pgrep', or
# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while
# ## the native finder performs the search directly in a manner dependent on the
# ## platform. Default is 'pgrep'
3343# # pid_finder = "pgrep"
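#
# ## Example (illustrative): track nginx workers by pattern with the native
# ## finder (no pgrep executable needed), tagging each series with its PID.
# # pattern = "nginx"
# # pid_finder = "native"
# # pid_tag = true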
3344
3345
# # Reads the last_run_summary.yaml file and converts it to measurements
3347# [[inputs.puppetagent]]
3348# ## Location of puppet last run summary file
3349# location = "/var/lib/puppet/state/last_run_summary.yaml"
3350
3351
3352# # Reads metrics from RabbitMQ servers via the Management Plugin
3353# [[inputs.rabbitmq]]
3354# ## Management Plugin url. (default: http://localhost:15672)
3355# # url = "http://localhost:15672"
3356# ## Tag added to rabbitmq_overview series; deprecated: use tags
3357# # name = "rmq-server-1"
3358# ## Credentials
3359# # username = "guest"
3360# # password = "guest"
3361#
3362# ## Optional TLS Config
3363# # tls_ca = "/etc/telegraf/ca.pem"
3364# # tls_cert = "/etc/telegraf/cert.pem"
3365# # tls_key = "/etc/telegraf/key.pem"
3366# ## Use TLS but skip chain & host verification
3367# # insecure_skip_verify = false
3368#
3369# ## Optional request timeouts
3370# ##
3371# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
3372# ## for a server's response headers after fully writing the request.
3373# # header_timeout = "3s"
3374# ##
3375# ## client_timeout specifies a time limit for requests made by this client.
3376# ## Includes connection time, any redirects, and reading the response body.
3377# # client_timeout = "4s"
3378#
3379# ## A list of nodes to gather as the rabbitmq_node measurement. If not
3380# ## specified, metrics for all nodes are gathered.
3381# # nodes = ["rabbit@node1", "rabbit@node2"]
3382#
3383# ## A list of queues to gather as the rabbitmq_queue measurement. If not
3384# ## specified, metrics for all queues are gathered.
3385# # queues = ["telegraf"]
3386#
3387# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
3388# ## specified, metrics for all exchanges are gathered.
3389# # exchanges = ["telegraf"]
3390#
3391# ## Queues to include and exclude. Globs accepted.
3392# ## Note that an empty array for both will include all queues
3393# queue_name_include = []
3394# queue_name_exclude = []
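#
# ## Example (illustrative globs): gather only queues whose names start with
# ## "telegraf", and skip RabbitMQ's auto-generated reply queues.
# # queue_name_include = ["telegraf*"]
# # queue_name_exclude = ["amq.gen-*"]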
3395
3396
3397# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
3398# [[inputs.raindrops]]
# ## An array of raindrops middleware URIs to gather stats from.
3400# urls = ["http://localhost:8080/_raindrops"]
3401
3402
3403# # Read metrics from one or many redis servers
3404# [[inputs.redis]]
3405# ## specify servers via a url matching:
3406# ## [protocol://][:password]@address[:port]
3407# ## e.g.
3408# ## tcp://localhost:6379
3409# ## tcp://:password@192.168.99.100
3410# ## unix:///var/run/redis.sock
3411# ##
3412# ## If no servers are specified, then localhost is used as the host.
3413# ## If no port is specified, 6379 is used
3414# servers = ["tcp://localhost:6379"]
3415#
3416# ## specify server password
3417# # password = "s#cr@t%"
3418#
3419# ## Optional TLS Config
3420# # tls_ca = "/etc/telegraf/ca.pem"
3421# # tls_cert = "/etc/telegraf/cert.pem"
3422# # tls_key = "/etc/telegraf/key.pem"
3423# ## Use TLS but skip chain & host verification
3424# # insecure_skip_verify = true
3425
3426
3427# # Read metrics from one or many RethinkDB servers
3428# [[inputs.rethinkdb]]
# ## An array of URIs to gather stats about. Specify an ip or hostname
# ## with optional port and password. ie,
3431# ## rethinkdb://user:auth_key@10.10.3.30:28105,
3432# ## rethinkdb://10.10.3.33:18832,
3433# ## 10.0.0.1:10000, etc.
3434# servers = ["127.0.0.1:28015"]
3435# ##
# ## If you use an actual rethinkdb of > 2.3.0 with username/password authorization,
# ## the protocol has to be named "rethinkdb2" - it will use the 1_0 handshake protocol.
3438# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
3439# ##
# ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
# ## has to be named "rethinkdb".
3442# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
3443
3444
# # Read metrics from one or many Riak servers
3446# [[inputs.riak]]
3447# # Specify a list of one or more riak http servers
3448# servers = ["http://localhost:8098"]
3449
3450
3451# # Read API usage and limits for a Salesforce organisation
3452# [[inputs.salesforce]]
3453# ## specify your credentials
3454# ##
3455# username = "your_username"
3456# password = "your_password"
3457# ##
3458# ## (optional) security token
3459# # security_token = "your_security_token"
3460# ##
3461# ## (optional) environment type (sandbox or production)
3462# ## default is: production
3463# ##
3464# # environment = "production"
3465# ##
3466# ## (optional) API version (default: "39.0")
3467# ##
3468# # version = "39.0"
3469
3470
3471# # Monitor sensors, requires lm-sensors package
3472# [[inputs.sensors]]
3473# ## Remove numbers from field names.
3474# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
3475# # remove_numbers = true
3476#
3477# ## Timeout is the maximum amount of time that the sensors command can run.
3478# # timeout = "5s"
3479
3480
3481# # Read metrics from storage devices supporting S.M.A.R.T.
3482# [[inputs.smart]]
3483# ## Optionally specify the path to the smartctl executable
3484# # path = "/usr/bin/smartctl"
3485# #
3486# ## On most platforms smartctl requires root access.
3487# ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
# ## Sudo must be configured to allow the telegraf user to run smartctl
# ## without a password.
3490# # use_sudo = false
3491# #
3492# ## Skip checking disks in this power mode. Defaults to
3493# ## "standby" to not wake up disks that have stoped rotating.
3494# ## See --nocheck in the man pages for smartctl.
3495# ## smartctl version 5.41 and 5.42 have faulty detection of
3496# ## power mode and might require changing this value to
3497# ## "never" depending on your disks.
3498# # nocheck = "standby"
3499# #
3500# ## Gather detailed metrics for each SMART Attribute.
3501# ## Defaults to "false"
3502# ##
3503# # attributes = false
3504# #
3505# ## Optionally specify devices to exclude from reporting.
3506# # excludes = [ "/dev/pass6" ]
3507# #
# ## Optionally specify devices and device type, if unset
# ## a scan (smartctl --scan) for S.M.A.R.T. devices will be
# ## done and all devices found will be included, except those
# ## listed in excludes.
3512# # devices = [ "/dev/ada0 -d atacam" ]
3513
3514
3515# # Retrieves SNMP values from remote agents
3516# [[inputs.snmp]]
3517# agents = [ "127.0.0.1:161" ]
3518# ## Timeout for each SNMP query.
3519# timeout = "5s"
3520# ## Number of retries to attempt within timeout.
3521# retries = 3
3522# ## SNMP version, values can be 1, 2, or 3
3523# version = 2
3524#
3525# ## SNMP community string.
3526# community = "public"
3527#
3528# ## The GETBULK max-repetitions parameter
3529# max_repetitions = 10
3530#
3531# ## SNMPv3 auth parameters
3532# #sec_name = "myuser"
3533# #auth_protocol = "md5" # Values: "MD5", "SHA", ""
3534# #auth_password = "pass"
3535# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
3536# #context_name = ""
3537# #priv_protocol = "" # Values: "DES", "AES", ""
3538# #priv_password = ""
3539#
3540# ## measurement name
3541# name = "system"
3542# [[inputs.snmp.field]]
3543# name = "hostname"
3544# oid = ".1.0.0.1.1"
3545# [[inputs.snmp.field]]
3546# name = "uptime"
3547# oid = ".1.0.0.1.2"
3548# [[inputs.snmp.field]]
3549# name = "load"
3550# oid = ".1.0.0.1.3"
3551# [[inputs.snmp.field]]
3552# oid = "HOST-RESOURCES-MIB::hrMemorySize"
3553#
3554# [[inputs.snmp.table]]
3555# ## measurement name
3556# name = "remote_servers"
3557# inherit_tags = [ "hostname" ]
3558# [[inputs.snmp.table.field]]
3559# name = "server"
3560# oid = ".1.0.0.0.1.0"
3561# is_tag = true
3562# [[inputs.snmp.table.field]]
3563# name = "connections"
3564# oid = ".1.0.0.0.1.1"
3565# [[inputs.snmp.table.field]]
3566# name = "latency"
3567# oid = ".1.0.0.0.1.2"
3568#
3569# [[inputs.snmp.table]]
# ## auto-populate the table's fields using the MIB
3571# oid = "HOST-RESOURCES-MIB::hrNetworkTable"
3572
3573
3574# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
3575# [[inputs.snmp_legacy]]
3576# ## Use 'oids.txt' file to translate oids to names
3577# ## To generate 'oids.txt' you need to run:
3578# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
3579# ## Or if you have an other MIB folder with custom MIBs
3580# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
3581# snmptranslate_file = "/tmp/oids.txt"
3582# [[inputs.snmp.host]]
3583# address = "192.168.2.2:161"
3584# # SNMP community
3585# community = "public" # default public
3586# # SNMP version (1, 2 or 3)
3587# # Version 3 not supported yet
3588# version = 2 # default 2
3589# # SNMP response timeout
3590# timeout = 2.0 # default 2.0
3591# # SNMP request retries
3592# retries = 2 # default 2
3593# # Which get/bulk do you want to collect for this host
3594# collect = ["mybulk", "sysservices", "sysdescr"]
3595# # Simple list of OIDs to get, in addition to "collect"
3596# get_oids = []
3597#
3598# [[inputs.snmp.host]]
3599# address = "192.168.2.3:161"
3600# community = "public"
3601# version = 2
3602# timeout = 2.0
3603# retries = 2
3604# collect = ["mybulk"]
3605# get_oids = [
3606# "ifNumber",
3607# ".1.3.6.1.2.1.1.3.0",
3608# ]
3609#
3610# [[inputs.snmp.get]]
3611# name = "ifnumber"
3612# oid = "ifNumber"
3613#
3614# [[inputs.snmp.get]]
3615# name = "interface_speed"
3616# oid = "ifSpeed"
3617# instance = "0"
3618#
3619# [[inputs.snmp.get]]
3620# name = "sysuptime"
3621# oid = ".1.3.6.1.2.1.1.3.0"
3622# unit = "second"
3623#
3624# [[inputs.snmp.bulk]]
3625# name = "mybulk"
3626# max_repetition = 127
3627# oid = ".1.3.6.1.2.1.1"
3628#
3629# [[inputs.snmp.bulk]]
3630# name = "ifoutoctets"
3631# max_repetition = 127
3632# oid = "ifOutOctets"
3633#
3634# [[inputs.snmp.host]]
3635# address = "192.168.2.13:161"
3636# #address = "127.0.0.1:161"
3637# community = "public"
3638# version = 2
3639# timeout = 2.0
3640# retries = 2
3641# #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
3642# collect = ["sysuptime" ]
3643# [[inputs.snmp.host.table]]
3644# name = "iftable3"
3645# include_instances = ["enp5s0", "eth1"]
3646#
3647# # SNMP TABLEs
# # table with neither mapping nor subtables
3649# [[inputs.snmp.table]]
3650# name = "iftable1"
3651# oid = ".1.3.6.1.2.1.31.1.1.1"
3652#
3653# # table without mapping but with subtables
3654# [[inputs.snmp.table]]
3655# name = "iftable2"
3656# oid = ".1.3.6.1.2.1.31.1.1.1"
3657# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
3658#
3659# # table with mapping but without subtables
3660# [[inputs.snmp.table]]
3661# name = "iftable3"
3662# oid = ".1.3.6.1.2.1.31.1.1.1"
# # if empty, get all instances
3664# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
3665# # if empty, get all subtables
3666#
3667# # table with both mapping and subtables
3668# [[inputs.snmp.table]]
3669# name = "iftable4"
3670# oid = ".1.3.6.1.2.1.31.1.1.1"
# # if empty, get all instances
# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# # if empty, get all subtables
# # sub_tables entries need not be "real" subtables
3675# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
3676
3677
3678# # Read stats from one or more Solr servers or cores
3679# [[inputs.solr]]
3680# ## specify a list of one or more Solr servers
3681# servers = ["http://localhost:8983"]
3682#
3683# ## specify a list of one or more Solr cores (default - all)
3684# # cores = ["main"]
3685
3686
3687# # Read metrics from Microsoft SQL Server
3688# [[inputs.sqlserver]]
3689# ## Specify instances to monitor with a list of connection strings.
3690# ## All connection parameters are optional.
# ## By default, the host is localhost, listening on the default port, TCP 1433.
# ## For Windows, the user is the currently running AD user (SSO).
3693# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
3694# ## parameters.
3695# # servers = [
3696# # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
3697# # ]
3698#
3699# ## Optional parameter, setting this to 2 will use a new version
3700# ## of the collection queries that break compatibility with the original
3701# ## dashboards.
3702# query_version = 2
3703#
3704# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
3705# # azuredb = false
3706#
3707# ## If you would like to exclude some of the metrics queries, list them here
3708# ## Possible choices:
3709# ## - PerformanceCounters
3710# ## - WaitStatsCategorized
3711# ## - DatabaseIO
3712# ## - DatabaseProperties
3713# ## - CPUHistory
3714# ## - DatabaseSize
3715# ## - DatabaseStats
3716# ## - MemoryClerk
3717# ## - VolumeSpace
3718# ## - PerformanceMetrics
3719# # exclude_query = [ 'DatabaseIO' ]
3720
3721
3722# # Gather timeseries from Google Cloud Platform v3 monitoring API
3723# [[inputs.stackdriver]]
3724# ## GCP Project
3725# project = "erudite-bloom-151019"
3726#
3727# ## Include timeseries that start with the given metric type.
3728# metric_type_prefix_include = [
3729# "compute.googleapis.com/",
3730# ]
3731#
3732# ## Exclude timeseries that start with the given metric type.
3733# # metric_type_prefix_exclude = []
3734#
3735# ## Many metrics are updated once per minute; it is recommended to override
3736# ## the agent level interval with a value of 1m or greater.
3737# interval = "1m"
3738#
# ## Maximum number of API calls to make per second. The quota for accounts
# ## varies; it can be viewed on the API dashboard:
3741# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits
3742# # rate_limit = 14
3743#
3744# ## The delay and window options control the number of points selected on
3745# ## each gather. When set, metrics are gathered between:
3746# ## start: now() - delay - window
3747# ## end: now() - delay
3748# #
3749# ## Collection delay; if set too low metrics may not yet be available.
3750# # delay = "5m"
3751# #
3752# ## If unset, the window will start at 1m and be updated dynamically to span
3753# ## the time between calls (approximately the length of the plugin interval).
3754# # window = "1m"
3755#
3756# ## TTL for cached list of metric types. This is the maximum amount of time
3757# ## it may take to discover new metrics.
3758# # cache_ttl = "1h"
3759#
3760# ## If true, raw bucket counts are collected for distribution value types.
3761# ## For a more lightweight collection, you may wish to disable and use
3762# ## distribution_aggregation_aligners instead.
3763# # gather_raw_distribution_buckets = true
3764#
3765# ## Aggregate functions to be used for metrics whose value type is
# ## distribution. These aggregate values are recorded in addition to the raw
# ## bucket counts, if those are enabled.
3768# ##
3769# ## For a list of aligner strings see:
3770# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
3771# # distribution_aggregation_aligners = [
3772# # "ALIGN_PERCENTILE_99",
3773# # "ALIGN_PERCENTILE_95",
3774# # "ALIGN_PERCENTILE_50",
3775# # ]
3776#
3777# ## Filters can be added to reduce the number of time series matched. All
3778# ## functions are supported: starts_with, ends_with, has_substring, and
3779# ## one_of. Only the '=' operator is supported.
3780# ##
3781# ## The logical operators when combining filters are defined statically using
3782# ## the following values:
3783# ## filter ::= <resource_labels> {AND <metric_labels>}
3784# ## resource_labels ::= <resource_labels> {OR <resource_label>}
3785# ## metric_labels ::= <metric_labels> {OR <metric_label>}
3786# ##
3787# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
3788# #
3789# ## Resource labels refine the time series selection with the following expression:
3790# ## resource.labels.<key> = <value>
3791# # [[inputs.stackdriver.filter.resource_labels]]
3792# # key = "instance_name"
3793# # value = 'starts_with("localhost")'
3794# #
3795# ## Metric labels refine the time series selection with the following expression:
3796# ## metric.labels.<key> = <value>
3797# # [[inputs.stackdriver.filter.metric_labels]]
3798# # key = "device_name"
3799# # value = 'one_of("sda", "sdb")'
3800
3801
3802# # Sysstat metrics collector
3803# [[inputs.sysstat]]
3804# ## Path to the sadc command.
3805# #
3806# ## Common Defaults:
3807# ## Debian/Ubuntu: /usr/lib/sysstat/sadc
3808# ## Arch: /usr/lib/sa/sadc
3809# ## RHEL/CentOS: /usr/lib64/sa/sadc
3810# sadc_path = "/usr/lib/sa/sadc" # required
3811# #
3812# #
3813# ## Path to the sadf command, if it is not in PATH
3814# # sadf_path = "/usr/bin/sadf"
3815# #
3816# #
# ## Activities is a list of activities that are passed as arguments to the
# ## sadc collector utility (e.g. DISK, SNMP, etc.)
3819# ## The more activities that are added, the more data is collected.
3820# # activities = ["DISK"]
3821# #
3822# #
3823# ## Group metrics to measurements.
3824# ##
# ## If group is false, each metric will be prefixed with a description
# ## and represents a measurement by itself.
# ##
# ## If group is true, corresponding metrics are grouped into a single measurement.
3829# # group = true
3830# #
3831# #
3832# ## Options for the sadf command. The values on the left represent the sadf
3833# ## options and the values on the right their description (which are used for
3834# ## grouping and prefixing metrics).
3835# ##
3836# ## Run 'sar -h' or 'man sar' to find out the supported options for your
3837# ## sysstat version.
3838# [inputs.sysstat.options]
3839# -C = "cpu"
3840# -B = "paging"
3841# -b = "io"
3842# -d = "disk" # requires DISK activity
3843# "-n ALL" = "network"
3844# "-P ALL" = "per_cpu"
3845# -q = "queue"
3846# -R = "mem"
3847# -r = "mem_util"
3848# -S = "swap_util"
3849# -u = "cpu_util"
3850# -v = "inode"
3851# -W = "swap"
3852# -w = "task"
3853# # -H = "hugepages" # only available for newer linux distributions
3854# # "-I ALL" = "interrupts" # requires INT activity
3855# #
3856# #
3857# ## Device tags can be used to add additional tags for devices.
# ## For example, the configuration below adds a tag vg with value rootvg for
# ## all metrics with sda devices.
3860# # [[inputs.sysstat.device_tags.sda]]
3861# # vg = "rootvg"
3862
3863
3864# # Reads metrics from a Teamspeak 3 Server via ServerQuery
3865# [[inputs.teamspeak]]
3866# ## Server address for Teamspeak 3 ServerQuery
3867# # server = "127.0.0.1:10011"
3868# ## Username for ServerQuery
3869# username = "serverqueryuser"
3870# ## Password for ServerQuery
3871# password = "secret"
3872# ## Array of virtual servers
3873# # virtual_servers = [1]
3874
3875
3876# # Read metrics about temperature
3877# [[inputs.temp]]
3878# # no configuration
3879
3880
3881# # Read Tengine's basic status information (ngx_http_reqstat_module)
3882# [[inputs.tengine]]
# # An array of Tengine reqstat module URIs to gather stats.
3884# urls = ["http://127.0.0.1/us"]
3885#
3886# # HTTP response timeout (default: 5s)
3887# # response_timeout = "5s"
3888#
3889# ## Optional TLS Config
3890# # tls_ca = "/etc/telegraf/ca.pem"
3891# # tls_cert = "/etc/telegraf/cert.cer"
3892# # tls_key = "/etc/telegraf/key.key"
3893# ## Use TLS but skip chain & host verification
3894# # insecure_skip_verify = false
3895
3896
3897# # Gather metrics from the Tomcat server status page.
3898# [[inputs.tomcat]]
3899# ## URL of the Tomcat server status
3900# # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
3901#
3902# ## HTTP Basic Auth Credentials
3903# # username = "tomcat"
3904# # password = "s3cret"
3905#
3906# ## Request timeout
3907# # timeout = "5s"
3908#
3909# ## Optional TLS Config
3910# # tls_ca = "/etc/telegraf/ca.pem"
3911# # tls_cert = "/etc/telegraf/cert.pem"
3912# # tls_key = "/etc/telegraf/key.pem"
3913# ## Use TLS but skip chain & host verification
3914# # insecure_skip_verify = false
3915
3916
3917# # Inserts sine and cosine waves for demonstration purposes
3918# [[inputs.trig]]
3919# ## Set the amplitude
3920# amplitude = 10.0
3921
3922
3923# # Read Twemproxy stats data
3924# [[inputs.twemproxy]]
3925# ## Twemproxy stats address and port (no scheme)
3926# addr = "localhost:22222"
# ## Monitored pool names
3928# pools = ["redis_pool", "mc_pool"]
3929
3930
3931# # A plugin to collect stats from the Unbound DNS resolver
3932# [[inputs.unbound]]
# ## Address of the server to connect to (read from the unbound conf by default), optionally ':port'
# ## Will look up the IP if given a hostname
3935# server = "127.0.0.1:8953"
3936#
3937# ## If running as a restricted user you can prepend sudo for additional access:
3938# # use_sudo = false
3939#
3940# ## The default location of the unbound-control binary can be overridden with:
3941# # binary = "/usr/sbin/unbound-control"
3942#
# ## The default timeout of 1s can be overridden with:
3944# # timeout = "1s"
3945#
3946# ## When set to true, thread metrics are tagged with the thread id.
3947# ##
3948# ## The default is false for backwards compatibility, and will be changed to
3949# ## true in a future version. It is recommended to set to true on new
3950# ## deployments.
3951# thread_as_tag = false
3952
3953
3954# # A plugin to collect stats from Varnish HTTP Cache
3955# [[inputs.varnish]]
3956# ## If running as a restricted user you can prepend sudo for additional access:
3957# #use_sudo = false
3958#
3959# ## The default location of the varnishstat binary can be overridden with:
3960# binary = "/usr/bin/varnishstat"
3961#
# ## By default, telegraf gathers stats for 3 metric points.
3963# ## Setting stats will override the defaults shown below.
3964# ## Glob matching can be used, ie, stats = ["MAIN.*"]
3965# ## stats may also be set to ["*"], which will collect all stats
3966# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
3967#
# ## Optional name for the varnish instance (or working directory) to query
# ## Usually appended after -n in varnish cli
# # instance_name = "instanceName"
3971#
3972# ## Timeout for varnishstat command
3973# # timeout = "1s"
3974
3975
3976# # Monitor wifi signal strength and quality
3977# [[inputs.wireless]]
3978# ## Sets 'proc' directory path
3979# ## If not specified, then default is /proc
3980# # host_proc = "/proc"
3981
3982
# # Reads metrics from an SSL certificate
3984# [[inputs.x509_cert]]
3985# ## List certificate sources
3986# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"]
3987#
3988# ## Timeout for SSL connection
3989# # timeout = "5s"
3990#
3991# ## Optional TLS Config
3992# # tls_ca = "/etc/telegraf/ca.pem"
3993# # tls_cert = "/etc/telegraf/cert.pem"
3994# # tls_key = "/etc/telegraf/key.pem"
3995#
3996# ## Use TLS but skip chain & host verification
3997# # insecure_skip_verify = false
3998
3999
4000# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
4001# [[inputs.zfs]]
4002# ## ZFS kstat path. Ignored on FreeBSD
4003# ## If not specified, then default is:
4004# # kstatPath = "/proc/spl/kstat/zfs"
4005#
# ## By default, telegraf gathers all zfs stats
4007# ## If not specified, then default is:
4008# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
4009# ## For Linux, the default is:
4010# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
4011# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
4012# ## By default, don't gather zpool stats
4013# # poolMetrics = false
4014
4015
4016# # Reads 'mntr' stats from one or many zookeeper servers
4017# [[inputs.zookeeper]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with port. ie, localhost:2181, 10.0.0.1:2181, etc.
4020#
4021# ## If no servers are specified, then localhost is used as the host.
4022# ## If no port is specified, 2181 is used
4023# servers = [":2181"]
4024#
4025# ## Timeout for metric collections from all servers. Minimum timeout is "1s".
4026# # timeout = "5s"
4027#
4028# ## Optional TLS Config
4029# # enable_tls = true
4030# # tls_ca = "/etc/telegraf/ca.pem"
4031# # tls_cert = "/etc/telegraf/cert.pem"
4032# # tls_key = "/etc/telegraf/key.pem"
# ## If true, skip chain & host verification
4034# # insecure_skip_verify = true
4035
4036
4037
4038###############################################################################
4039# SERVICE INPUT PLUGINS #
4040###############################################################################
4041
4042# # AMQP consumer plugin
4043# [[inputs.amqp_consumer]]
4044# ## Broker to consume from.
4045# ## deprecated in 1.7; use the brokers option
4046# # url = "amqp://localhost:5672/influxdb"
4047#
4048# ## Brokers to consume from. If multiple brokers are specified a random broker
4049# ## will be selected anytime a connection is established. This can be
4050# ## helpful for load balancing when not using a dedicated load balancer.
4051# brokers = ["amqp://localhost:5672/influxdb"]
4052#
4053# ## Authentication credentials for the PLAIN auth_method.
4054# # username = ""
4055# # password = ""
4056#
4057# ## Exchange to declare and consume from.
4058# exchange = "telegraf"
4059#
4060# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
4061# # exchange_type = "topic"
4062#
4063# ## If true, exchange will be passively declared.
4064# # exchange_passive = false
4065#
4066# ## Exchange durability can be either "transient" or "durable".
4067# # exchange_durability = "durable"
4068#
4069# ## Additional exchange arguments.
4070# # exchange_arguments = { }
4071# # exchange_arguments = {"hash_propery" = "timestamp"}
4072#
4073# ## AMQP queue name.
4074# queue = "telegraf"
4075#
4076# ## AMQP queue durability can be "transient" or "durable".
4077# queue_durability = "durable"
4078#
4079# ## Binding Key.
4080# binding_key = "#"
4081#
4082# ## Maximum number of messages server should give to the worker.
4083# # prefetch_count = 50
4084#
4085# ## Maximum messages to read from the broker that have not been written by an
4086# ## output. For best throughput set based on the number of metrics within
4087# ## each message and the size of the output's metric_batch_size.
4088# ##
4089# ## For example, if each message from the queue contains 10 metrics and the
4090# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
4091# ## full batch is collected and the write is triggered immediately without
4092# ## waiting until the next flush_interval.
4093# # max_undelivered_messages = 1000
4094#
4095# ## Auth method. PLAIN and EXTERNAL are supported
4096# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
4097# ## described here: https://www.rabbitmq.com/plugins.html
4098# # auth_method = "PLAIN"
4099#
4100# ## Optional TLS Config
4101# # tls_ca = "/etc/telegraf/ca.pem"
4102# # tls_cert = "/etc/telegraf/cert.pem"
4103# # tls_key = "/etc/telegraf/key.pem"
4104# ## Use TLS but skip chain & host verification
4105# # insecure_skip_verify = false
4106#
4107# ## Data format to consume.
4108# ## Each data format has its own unique set of configuration options, read
4109# ## more about them here:
4110# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
4111# data_format = "influx"
4112
4113
4114# # Read Cassandra metrics through Jolokia
4115# [[inputs.cassandra]]
4116# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
4117# ## jolokia2 plugin instead.
4118# ##
4119# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
4120#
4121# context = "/jolokia/read"
4122# ## List of cassandra servers exposing jolokia read service
4123# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
4124# ## List of metrics collected on above servers
4125# ## Each metric consists of a jmx path.
4126# ## This will collect all heap memory usage metrics from the jvm and
4127# ## ReadLatency metrics for all keyspaces and tables.
4128# ## "type=Table" in the query works with Cassandra3.0. Older versions might
4129# ## need to use "type=ColumnFamily"
4130# metrics = [
4131# "/java.lang:type=Memory/HeapMemoryUsage",
4132# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
4133# ]
4134
4135
4136# # Read metrics from Google PubSub
4137# [[inputs.cloud_pubsub]]
4138# ## Required. Name of Google Cloud Platform (GCP) Project that owns
4139# ## the given PubSub subscription.
4140# project = "my-project"
4141#
4142# ## Required. Name of PubSub subscription to ingest metrics from.
4143# subscription = "my-subscription"
4144#
4145# ## Required. Data format to consume.
4146# ## Each data format has its own unique set of configuration options.
4147# ## Read more about them here:
4148# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
4149# data_format = "influx"
4150#
4151# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
4152# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
4153# ## Application Default Credentials, which is preferred.
4154# # credentials_file = "path/to/my/creds.json"
4155#
4156# ## Optional. Number of seconds to wait before attempting to restart the
4157# ## PubSub subscription receiver after an unexpected error.
4158# ## If the streaming pull for a PubSub Subscription fails (receiver),
4159# ## the agent attempts to restart receiving messages after this many seconds.
4160# # retry_delay_seconds = 5
4161#
4162# ## Optional. Maximum byte length of a message to consume.
4163# ## Larger messages are dropped with an error. If less than 0 or unspecified,
4164# ## treated as no limit.
4165# # max_message_len = 1000000
4166#
4167# ## Optional. Maximum messages to read from PubSub that have not been written
4168# ## to an output. Defaults to 1000.
4169# ## For best throughput set based on the number of metrics within
4170# ## each message and the size of the output's metric_batch_size.
4171# ##
4172# ## For example, if each message contains 10 metrics and the output
4173# ## metric_batch_size is 1000, setting this to 100 will ensure that a
4174# ## full batch is collected and the write is triggered immediately without
4175# ## waiting until the next flush_interval.
4176# # max_undelivered_messages = 1000
4177#
4178# ## The following are optional Subscription ReceiveSettings in PubSub.
4179# ## Read more about these values:
4180# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
4181#
4182# ## Optional. Maximum number of seconds for which a PubSub subscription
4183# ## should auto-extend the PubSub ACK deadline for each message. If less than
4184# ## 0, auto-extension is disabled.
4185# # max_extension = 0
4186#
4187# ## Optional. Maximum number of unprocessed messages in PubSub
4188# ## (unacknowledged but not yet expired in PubSub).
4189# ## A value of 0 is treated as the default PubSub value.
4190# ## Negative values will be treated as unlimited.
4191# # max_outstanding_messages = 0
4192#
4193# ## Optional. Maximum size in bytes of unprocessed messages in PubSub
4194# ## (unacknowledged but not yet expired in PubSub).
4195# ## A value of 0 is treated as the default PubSub value.
4196# ## Negative values will be treated as unlimited.
4197# # max_outstanding_bytes = 0
4198#
4199# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
4200# ## to pull messages from PubSub concurrently. This limit applies to each
4201# ## subscription separately and is treated as the PubSub default if less than
4202# ## 1. Note this setting does not limit the number of messages that can be
4203# ## processed concurrently (use "max_outstanding_messages" instead).
4204# # max_receiver_go_routines = 0
4205
4206
4207# # Google Cloud Pub/Sub Push HTTP listener
4208# [[inputs.cloud_pubsub_push]]
4209# ## Address and port to host HTTP listener on
4210# service_address = ":8080"
4211#
4212# ## Application secret to verify messages originate from Cloud Pub/Sub
4213# # token = ""
4214#
4215# ## Path to listen to.
4216# # path = "/"
4217#
4218# ## Maximum duration before timing out read of the request
4219# # read_timeout = "10s"
4220# ## Maximum duration before timing out write of the response. This should be set to a value
4221# ## large enough that you can send at least 'metric_batch_size' number of messages within the
4222# ## duration.
4223# # write_timeout = "10s"
4224#
4225# ## Maximum allowed http request body size in bytes.
# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
4227# # max_body_size = "500MB"
4228#
4229# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
4230# # add_meta = false
4231#
4232# ## Optional. Maximum messages to read from PubSub that have not been written
4233# ## to an output. Defaults to 1000.
4234# ## For best throughput set based on the number of metrics within
4235# ## each message and the size of the output's metric_batch_size.
4236# ##
4237# ## For example, if each message contains 10 metrics and the output
4238# ## metric_batch_size is 1000, setting this to 100 will ensure that a
4239# ## full batch is collected and the write is triggered immediately without
4240# ## waiting until the next flush_interval.
4241# # max_undelivered_messages = 1000
4242#
4243# ## Set one or more allowed client CA certificate file names to
4244# ## enable mutually authenticated TLS connections
4245# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
4246#
4247# ## Add service certificate and key
4248# # tls_cert = "/etc/telegraf/cert.pem"
4249# # tls_key = "/etc/telegraf/key.pem"
4250#
4251# ## Data format to consume.
4252# ## Each data format has its own unique set of configuration options, read
4253# ## more about them here:
4254# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
4255# data_format = "influx"
4256
4257
4258# # Influx HTTP write listener
4259# [[inputs.http_listener]]
4260# ## Address and port to host HTTP listener on
4261# service_address = ":8186"
4262#
4263# ## maximum duration before timing out read of the request
4264# read_timeout = "10s"
4265# ## maximum duration before timing out write of the response
4266# write_timeout = "10s"
4267#
4268# ## Maximum allowed http request body size in bytes.
4269# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
4270# max_body_size = "500MiB"
4271#
4272# ## Maximum line size allowed to be sent in bytes.
4273# ## 0 means to use the default of 65536 bytes (64 kibibytes)
4274# max_line_size = "64KiB"
4275#
4276# ## Set one or more allowed client CA certificate file names to
4277# ## enable mutually authenticated TLS connections
4278# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
4279#
4280# ## Add service certificate and key
4281# tls_cert = "/etc/telegraf/cert.pem"
4282# tls_key = "/etc/telegraf/key.pem"
4283#
4284# ## Optional username and password to accept for HTTP basic authentication.
4285# ## You probably want to make sure you have TLS configured above for this.
4286# # basic_username = "foobar"
4287# # basic_password = "barfoo"
4288
4289
4290# # Generic HTTP write listener
4291# [[inputs.http_listener_v2]]
4292# ## Address and port to host HTTP listener on
4293# service_address = ":8080"
4294#
4295# ## Path to listen to.
4296# # path = "/telegraf"
4297#
4298# ## HTTP methods to accept.
4299# # methods = ["POST", "PUT"]
4300#
4301# ## maximum duration before timing out read of the request
4302# # read_timeout = "10s"
4303# ## maximum duration before timing out write of the response
4304# # write_timeout = "10s"
4305#
4306# ## Maximum allowed http request body size in bytes.
# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
4308# # max_body_size = "500MB"
4309#
4310# ## Set one or more allowed client CA certificate file names to
4311# ## enable mutually authenticated TLS connections
4312# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
4313#
4314# ## Add service certificate and key
4315# # tls_cert = "/etc/telegraf/cert.pem"
4316# # tls_key = "/etc/telegraf/key.pem"
4317#
4318# ## Optional username and password to accept for HTTP basic authentication.
4319# ## You probably want to make sure you have TLS configured above for this.
4320# # basic_username = "foobar"
4321# # basic_password = "barfoo"
4322#
4323# ## Data format to consume.
4324# ## Each data format has its own unique set of configuration options, read
4325# ## more about them here:
4326# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
4327# data_format = "influx"
4328
4329
4330# # Influx HTTP write listener
4331# [[inputs.influxdb_listener]]
4332# ## Address and port to host HTTP listener on
4333# service_address = ":8186"
4334#
4335# ## maximum duration before timing out read of the request
4336# read_timeout = "10s"
4337# ## maximum duration before timing out write of the response
4338# write_timeout = "10s"
4339#
4340# ## Maximum allowed http request body size in bytes.
4341# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
4342# max_body_size = "500MiB"
4343#
4344# ## Maximum line size allowed to be sent in bytes.
4345# ## 0 means to use the default of 65536 bytes (64 kibibytes)
4346# max_line_size = "64KiB"
4347#
4348# ## Set one or more allowed client CA certificate file names to
4349# ## enable mutually authenticated TLS connections
4350# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
4351#
4352# ## Add service certificate and key
4353# tls_cert = "/etc/telegraf/cert.pem"
4354# tls_key = "/etc/telegraf/key.pem"
4355#
4356# ## Optional username and password to accept for HTTP basic authentication.
4357# ## You probably want to make sure you have TLS configured above for this.
4358# # basic_username = "foobar"
4359# # basic_password = "barfoo"
4360
4361
4362# # Read JTI OpenConfig Telemetry from listed sensors
4363# [[inputs.jti_openconfig_telemetry]]
4364# ## List of device addresses to collect telemetry from
4365# servers = ["localhost:1883"]
4366#
# ## Authentication details. Username and password are a must if the device expects
# ## authentication. Client ID must be unique when connecting from multiple instances
# ## of telegraf to the same device
4370# username = "user"
4371# password = "pass"
4372# client_id = "telegraf"
4373#
4374# ## Frequency to get data
4375# sample_frequency = "1000ms"
4376#
4377# ## Sensors to subscribe for
# ## An identifier for each sensor can be provided in the path by separating it with a space
# ## Otherwise the sensor path will be used as the identifier
# ## When an identifier is used, we can provide a list of space separated sensors.
4381# ## A single subscription will be created with all these sensors and data will
4382# ## be saved to measurement with this identifier name
4383# sensors = [
4384# "/interfaces/",
4385# "collection /components/ /lldp",
4386# ]
4387#
# ## We allow specifying a sensor-group-level reporting rate. To do this, specify the
# ## reporting rate as a Duration at the beginning of the sensor paths / collection
# ## name. For entries without a reporting rate, the configured sample frequency is used
4391# sensors = [
4392# "1000ms customReporting /interfaces /lldp",
4393# "2000ms collection /components",
4394# "/interfaces",
4395# ]
4396#
# ## x509 Certificate to use with the TLS connection. If it is not provided, an insecure
# ## channel will be opened with the server
4399# ssl_cert = "/etc/telegraf/cert.pem"
4400#
4401# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
4402# ## Failed streams/calls will not be retried if 0 is provided
4403# retry_delay = "1000ms"
4404#
4405# ## To treat all string values as tags, set this to true
4406# str_as_tags = false
4407
4408
4409# # Read metrics from Kafka topic(s)
4410# [[inputs.kafka_consumer]]
4411# ## kafka servers
4412# brokers = ["localhost:9092"]
4413# ## topic(s) to consume
4414# topics = ["telegraf"]
4415# ## Add topic as tag if topic_tag is not empty
4416# # topic_tag = ""
4417#
4418# ## Optional Client id
4419# # client_id = "Telegraf"
4420#
# ## Set the minimum supported Kafka version. Setting this enables the use of new
4422# ## Kafka features and APIs. Of particular interest, lz4 compression
4423# ## requires at least version 0.10.0.0.
4424# ## ex: version = "1.1.0"
4425# # version = ""
4426#
4427# ## Optional TLS Config
4428# # tls_ca = "/etc/telegraf/ca.pem"
4429# # tls_cert = "/etc/telegraf/cert.pem"
4430# # tls_key = "/etc/telegraf/key.pem"
4431# ## Use TLS but skip chain & host verification
4432# # insecure_skip_verify = false
4433#
4434# ## Optional SASL Config
4435# # sasl_username = "kafka"
4436# # sasl_password = "secret"
4437#
4438# ## the name of the consumer group
4439# consumer_group = "telegraf_metrics_consumers"
4440# ## Offset (must be either "oldest" or "newest")
4441# offset = "oldest"
4442# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
4443# ## larger messages are dropped
4444# max_message_len = 1000000
4445#
4446# ## Maximum messages to read from the broker that have not been written by an
4447# ## output. For best throughput set based on the number of metrics within
4448# ## each message and the size of the output's metric_batch_size.
4449# ##
4450# ## For example, if each message from the queue contains 10 metrics and the
4451# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
4452# ## full batch is collected and the write is triggered immediately without
4453# ## waiting until the next flush_interval.
4454# # max_undelivered_messages = 1000
4455#
4456# ## Data format to consume.
4457# ## Each data format has its own unique set of configuration options, read
4458# ## more about them here:
4459# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
4460# data_format = "influx"
4461
4462
4463# # Read metrics from Kafka topic(s)
4464# [[inputs.kafka_consumer_legacy]]
4465# ## topic(s) to consume
4466# topics = ["telegraf"]
4467# ## an array of Zookeeper connection strings
4468# zookeeper_peers = ["localhost:2181"]
4469# ## Zookeeper Chroot
4470# zookeeper_chroot = ""
4471# ## the name of the consumer group
4472# consumer_group = "telegraf_metrics_consumers"
4473# ## Offset (must be either "oldest" or "newest")
4474# offset = "oldest"
4475#
4476# ## Data format to consume.
4477# ## Each data format has its own unique set of configuration options, read
4478# ## more about them here:
4479# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
4480# data_format = "influx"
4481#
4482# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
4483# ## larger messages are dropped
4484# max_message_len = 65536
4485
4486
4487# # Configuration for the AWS Kinesis input.
4488# [[inputs.kinesis_consumer]]
4489# ## Amazon REGION of kinesis endpoint.
4490# region = "ap-southeast-2"
4491#
4492# ## Amazon Credentials
4493# ## Credentials are loaded in the following order
4494# ## 1) Assumed credentials via STS if role_arn is specified
4495# ## 2) explicit credentials from 'access_key' and 'secret_key'
4496# ## 3) shared profile from 'profile'
4497# ## 4) environment variables
4498# ## 5) shared credentials file
4499# ## 6) EC2 Instance Profile
4500# # access_key = ""
4501# # secret_key = ""
4502# # token = ""
4503# # role_arn = ""
4504# # profile = ""
4505# # shared_credential_file = ""
4506#
# ## Endpoint to make requests against; the correct endpoint is automatically
4508# ## determined and this option should only be set if you wish to override the
4509# ## default.
4510# ## ex: endpoint_url = "http://localhost:8000"
4511# # endpoint_url = ""
4512#
4513# ## Kinesis StreamName must exist prior to starting telegraf.
4514# streamname = "StreamName"
4515#
4516# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported)
4517# # shard_iterator_type = "TRIM_HORIZON"
4518#
4519# ## Maximum messages to read from the broker that have not been written by an
4520# ## output. For best throughput set based on the number of metrics within
4521# ## each message and the size of the output's metric_batch_size.
4522# ##
4523# ## For example, if each message from the queue contains 10 metrics and the
4524# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
4525# ## full batch is collected and the write is triggered immediately without
4526# ## waiting until the next flush_interval.
4527# # max_undelivered_messages = 1000
4528#
4529# ## Data format to consume.
4530# ## Each data format has its own unique set of configuration options, read
4531# ## more about them here:
4532# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
4533# data_format = "influx"
4534#
4535# ## Optional
4536# ## Configuration for a dynamodb checkpoint
4537# [inputs.kinesis_consumer.checkpoint_dynamodb]
4538# ## unique name for this consumer
4539# app_name = "default"
4540# table_name = "default"
4541
4542
4543# # Stream and parse log file(s).
4544# [[inputs.logparser]]
4545# ## Log files to parse.
4546# ## These accept standard unix glob matching rules, but with the addition of
4547# ## ** as a "super asterisk". ie:
4548# ## /var/log/**.log -> recursively find all .log files in /var/log
4549# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
4550# ## /var/log/apache.log -> only tail the apache log file
4551# files = ["/var/log/apache/access.log"]
4552#
4553# ## Read files that currently exist from the beginning. Files that are created
4554# ## while telegraf is running (and that match the "files" globs) will always
4555# ## be read from the beginning.
4556# from_beginning = false
4557#
4558# ## Method used to watch for file updates. Can be either "inotify" or "poll".
4559# # watch_method = "inotify"
4560#
4561# ## Parse logstash-style "grok" patterns:
4562# [inputs.logparser.grok]
4563# ## This is a list of patterns to check the given log file(s) for.
4564# ## Note that adding patterns here increases processing time. The most
4565# ## efficient configuration is to have one pattern per logparser.
4566# ## Other common built-in patterns are:
4567# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
4568# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
4569# patterns = ["%{COMBINED_LOG_FORMAT}"]
4570#
# ## Name of the output measurement.
4572# measurement = "apache_access_log"
4573#
4574# ## Full path(s) to custom pattern files.
4575# custom_pattern_files = []
4576#
4577# ## Custom patterns can also be defined here. Put one pattern per line.
4578# custom_patterns = '''
4579# '''
4580#
4581# ## Timezone allows you to provide an override for timestamps that
4582# ## don't already include an offset
4583# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
4584# ##
4585# ## Default: "" which renders UTC
4586# ## Options are as follows:
4587# ## 1. Local -- interpret based on machine localtime
4588# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
4589# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
4590# # timezone = "Canada/Eastern"
4591#
# ## When set to "disable", the timestamp will not be incremented if there is a
# ## duplicate.
4594# # unique_timestamp = "auto"
4595
4596
4597# # Read metrics from MQTT topic(s)
4598# [[inputs.mqtt_consumer]]
4599# ## MQTT broker URLs to be used. The format should be scheme://host:port,
# ## scheme can be tcp, ssl, or ws.
4601# servers = ["tcp://localhost:1883"]
4602#
4603# ## QoS policy for messages
4604# ## 0 = at most once
4605# ## 1 = at least once
4606# ## 2 = exactly once
4607# ##
4608# ## When using a QoS of 1 or 2, you should enable persistent_session to allow
4609# ## resuming unacknowledged messages.
4610# qos = 0
4611#
4612# ## Connection timeout for initial connection in seconds
4613# connection_timeout = "30s"
4614#
4615# ## Maximum messages to read from the broker that have not been written by an
4616# ## output. For best throughput set based on the number of metrics within
4617# ## each message and the size of the output's metric_batch_size.
4618# ##
4619# ## For example, if each message from the queue contains 10 metrics and the
4620# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
4621# ## full batch is collected and the write is triggered immediately without
4622# ## waiting until the next flush_interval.
4623# # max_undelivered_messages = 1000
4624#
4625# ## Topics to subscribe to
4626# topics = [
4627# "telegraf/host01/cpu",
4628# "telegraf/+/mem",
4629# "sensors/#",
4630# ]
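#   ## In the examples above, "+" is MQTT's single-level wildcard (matches
#   ## exactly one topic level) and "#" is the multi-level wildcard (matches
#   ## the remainder of the topic).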
#
#   ## If true, messages that can't be delivered while the subscriber is offline
#   ## will be delivered when it comes back (such as on service restart).
#   ## NOTE: if true, client_id MUST be set.
#   persistent_session = false
#   ## If empty, a random client ID will be generated.
#   client_id = ""
#
#   ## Username and password to connect to the MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## Optional TLS Config
#   # tls_ca = "/etc/telegraf/ca.pem"
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from NATS subject(s)
# [[inputs.nats_consumer]]
#   ## urls of NATS servers
#   servers = ["nats://localhost:4222"]
#   ## Use Transport Layer Security
#   secure = false
#   ## subject(s) to consume
#   subjects = ["telegraf"]
#   ## name a queue group
#   queue_group = "telegraf_consumers"
#
#   ## Sets the limits for pending msgs and bytes for each subscription
#   ## These shouldn't need to be adjusted except in very high throughput scenarios
#   # pending_message_limit = 65536
#   # pending_bytes_limit = 67108864
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output. For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read NSQ topic for metrics.
# [[inputs.nsq_consumer]]
#   ## The 'server' option still works but is deprecated; it is prepended to the nsqd array.
#   # server = "localhost:4150"
#   ## An array representing the NSQD TCP endpoints
#   nsqd = ["localhost:4150"]
#   ## An array representing the NSQLookupd HTTP Endpoints
#   nsqlookupd = ["localhost:4161"]
#   topic = "telegraf"
#   channel = "consumer"
#   max_in_flight = 100
#
#   ## Maximum messages to read from the broker that have not been written by an
#   ## output. For best throughput set based on the number of metrics within
#   ## each message and the size of the output's metric_batch_size.
#   ##
#   ## For example, if each message from the queue contains 10 metrics and the
#   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
#   ## full batch is collected and the write is triggered immediately without
#   ## waiting until the next flush_interval.
#   # max_undelivered_messages = 1000
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from one or many pgbouncer servers
# [[inputs.pgbouncer]]
#   ## specify address via a url matching:
#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
#   ##       ?sslmode=[disable|verify-ca|verify-full]
#   ## or a simple string:
#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#   ##
#   ## All connection parameters are optional.
#   ##
#   address = "host=localhost user=pgbouncer sslmode=disable"
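#   ## For illustration only (hypothetical values): PgBouncer's admin console
#   ## usually listens on its own port (6432 by default) with the special
#   ## "pgbouncer" database, so an explicit address might look like:
#   # address = "host=localhost port=6432 user=pgbouncer sslmode=disable dbname=pgbouncer"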


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
#   ## specify address via a url matching:
#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
#   ##       ?sslmode=[disable|verify-ca|verify-full]
#   ## or a simple string:
#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#   ##
#   ## All connection parameters are optional.
#   ##
#   ## Without the dbname parameter, the driver will default to a database
#   ## with the same name as the user. This dbname is just for instantiating a
#   ## connection with the server and doesn't restrict the databases we are trying
#   ## to grab metrics for.
#   ##
#   address = "host=localhost user=postgres sslmode=disable"
#   ## A custom name for the database that will be used as the "server" tag in the
#   ## measurement output. If not specified, a default one generated from
#   ## the connection address is used.
#   # outputaddress = "db01"
#
#   ## connection configuration.
#   ## maxlifetime - specify the maximum lifetime of a connection.
#   ## default is forever (0s)
#   max_lifetime = "0s"
#
#   ## A list of databases to explicitly ignore. If not specified, metrics for all
#   ## databases are gathered. Do NOT use with the 'databases' option.
#   # ignored_databases = ["postgres", "template0", "template1"]
#
#   ## A list of databases to pull metrics about. If not specified, metrics for all
#   ## databases are gathered. Do NOT use with the 'ignored_databases' option.
#   # databases = ["app_production", "testing"]


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql_extensible]]
#   ## specify address via a url matching:
#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
#   ##       ?sslmode=[disable|verify-ca|verify-full]
#   ## or a simple string:
#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#   #
#   ## All connection parameters are optional.
#   ## Without the dbname parameter, the driver will default to a database
#   ## with the same name as the user. This dbname is just for instantiating a
#   ## connection with the server and doesn't restrict the databases we are trying
#   ## to grab metrics for.
#   #
#   address = "host=localhost user=postgres sslmode=disable"
#
#   ## connection configuration.
#   ## maxlifetime - specify the maximum lifetime of a connection.
#   ## default is forever (0s)
#   max_lifetime = "0s"
#
#   ## A list of databases to pull metrics about. If not specified, metrics for all
#   ## databases are gathered.
#   ## databases = ["app_production", "testing"]
#   #
#   ## A custom name for the database that will be used as the "server" tag in the
#   ## measurement output. If not specified, a default one generated from
#   ## the connection address is used.
#   # outputaddress = "db01"
#   #
#   ## Define the toml config where the sql queries are stored.
#   ## New queries can be added here. If withdbname is set to true and no
#   ## databases are defined in the 'databases' field, the sql query is ended
#   ## by an 'is not null' clause so that it still succeeds.
#   ## Example:
#   ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
#   ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
#   ## because the databases variable was set to ['postgres', 'pgbench'] and
#   ## withdbname was true. Note that if withdbname is set to false, you must
#   ## not define the where clause (i.e. with the dbname). The tagvalue
#   ## field is used to define custom tags (separated by commas).
#   ## The optional "measurement" value can be used to override the default
#   ## output measurement name ("postgresql").
#   #
#   ## Structure:
#   ##   [[inputs.postgresql_extensible.query]]
#   ##     sqlquery string
#   ##     version string
#   ##     withdbname boolean
#   ##     tagvalue string (comma separated)
#   ##     measurement string
#   [[inputs.postgresql_extensible.query]]
#     sqlquery="SELECT * FROM pg_stat_database"
#     version=901
#     withdbname=false
#     tagvalue=""
#     measurement=""
#   [[inputs.postgresql_extensible.query]]
#     sqlquery="SELECT * FROM pg_stat_bgwriter"
#     version=901
#     withdbname=false
#     tagvalue="postgresql.stats"
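#   ## As an additional illustrative sketch (the query and measurement name are
#   ## hypothetical), a custom query can tag rows by a column value and
#   ## override the output measurement:
#   # [[inputs.postgresql_extensible.query]]
#   #   sqlquery="SELECT datname, numbackends FROM pg_stat_database"
#   #   version=901
#   #   withdbname=false
#   #   tagvalue="datname"
#   #   measurement="pg_connections"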


# # Read metrics from one or many prometheus clients
# [[inputs.prometheus]]
#   ## An array of urls to scrape metrics from.
#   urls = ["http://localhost:9100/metrics"]
#
#   ## An array of Kubernetes services to scrape metrics from.
#   # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
#
#   ## Kubernetes config file to create client from.
#   # kube_config = "/path/to/kubernetes.config"
#
#   ## Scrape Kubernetes pods for the following prometheus annotations:
#   ## - prometheus.io/scrape: Enable scraping for this pod
#   ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
#   ##     set this to 'https' & most likely set the tls config.
#   ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
#   ## - prometheus.io/port: If port is not 9102 use this annotation
#   # monitor_kubernetes_pods = true
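#   ## For illustration, a pod exposing metrics on port 8080 at /stats would
#   ## (hypothetically) carry annotations such as:
#   ##   prometheus.io/scrape: "true"
#   ##   prometheus.io/port:   "8080"
#   ##   prometheus.io/path:   "/stats"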
#
#   ## Use bearer token for authorization. ('bearer_token' takes priority)
#   # bearer_token = "/path/to/bearer/token"
#   ## OR
#   # bearer_token_string = "abc_123"
#
#   ## Specify timeout duration for slower prometheus clients (default is 3s)
#   # response_timeout = "3s"
#
#   ## Optional TLS Config
#   # tls_ca = "/path/to/cafile"
#   # tls_cert = "/path/to/certfile"
#   # tls_key = "/path/to/keyfile"
#   ## Use TLS but skip chain & host verification
#   # insecure_skip_verify = false


# # Generic socket listener capable of handling multiple socket types.
# [[inputs.socket_listener]]
#   ## URL to listen on
#   # service_address = "tcp://:8094"
#   # service_address = "tcp://127.0.0.1:http"
#   # service_address = "tcp4://:8094"
#   # service_address = "tcp6://:8094"
#   # service_address = "tcp6://[2001:db8::1]:8094"
#   # service_address = "udp://:8094"
#   # service_address = "udp4://:8094"
#   # service_address = "udp6://:8094"
#   # service_address = "unix:///tmp/telegraf.sock"
#   # service_address = "unixgram:///tmp/telegraf.sock"
#
#   ## Maximum number of concurrent connections.
#   ## Only applies to stream sockets (e.g. TCP).
#   ## 0 (default) is unlimited.
#   # max_connections = 1024
#
#   ## Read timeout.
#   ## Only applies to stream sockets (e.g. TCP).
#   ## 0 (default) is unlimited.
#   # read_timeout = "30s"
#
#   ## Optional TLS configuration.
#   ## Only applies to stream sockets (e.g. TCP).
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#   ## Enables client authentication if set.
#   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
#
#   ## Maximum socket buffer size (in bytes when no unit specified).
#   ## For stream sockets, once the buffer fills up, the sender will start backing up.
#   ## For datagram sockets, once the buffer fills up, metrics will start dropping.
#   ## Defaults to the OS default.
#   # read_buffer_size = "64KiB"
#
#   ## Period between keep alive probes.
#   ## Only applies to TCP sockets.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   # keep_alive_period = "5m"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   # data_format = "influx"
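#   ## For reference, a single metric in influx line protocol (the default
#   ## data_format) is one line of the form:
#   ##   measurement,tag1=value1 field1=42i,field2=0.5 1556813561000000000
#   ## which can be sent to the listener above for a quick test.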


# # Statsd UDP/TCP Server
# [[inputs.statsd]]
#   ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
#   protocol = "udp"
#
#   ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
#   max_tcp_connections = 250
#
#   ## Enable TCP keep alive probes (default=false)
#   tcp_keep_alive = false
#
#   ## Specifies the keep-alive period for an active network connection.
#   ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
#   ## Defaults to the OS configuration.
#   # tcp_keep_alive_period = "2h"
#
#   ## Address and port to host UDP listener on
#   service_address = ":8125"
#
#   ## The following configuration options control when telegraf clears its cache
#   ## of previous values. If set to false, then telegraf will only clear its
#   ## cache when the daemon is restarted.
#   ## Reset gauges every interval (default=true)
#   delete_gauges = true
#   ## Reset counters every interval (default=true)
#   delete_counters = true
#   ## Reset sets every interval (default=true)
#   delete_sets = true
#   ## Reset timings & histograms every interval (default=true)
#   delete_timings = true
#
#   ## Percentiles to calculate for timing & histogram stats
#   percentiles = [90]
#
#   ## separator to use between elements of a statsd metric
#   metric_separator = "_"
#
#   ## Parses tags in the datadog statsd format
#   ## http://docs.datadoghq.com/guides/dogstatsd/
#   parse_data_dog_tags = false
#
#   ## Statsd data translation templates, more info can be read here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
#   # templates = [
#   #     "cpu.* measurement*"
#   # ]
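#   ## For reference, the listener accepts standard statsd wire-format lines,
#   ## e.g. (illustrative metric names):
#   ##   users.online:42|g        (gauge)
#   ##   page.views:1|c           (counter)
#   ##   request.time:320|ms      (timing)
#   ##   unique.visitors:1234|s   (set)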
#
#   ## Number of UDP messages allowed to queue up, once filled,
#   ## the statsd server will start dropping packets
#   allowed_pending_messages = 10000
#
#   ## Number of timing/histogram values to track per-measurement in the
#   ## calculation of percentiles. Raising this limit increases the accuracy
#   ## of percentiles but also increases the memory usage and cpu time.
#   percentile_limit = 1000


# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
# [[inputs.syslog]]
#   ## Specify an IP or hostname with port, e.g. tcp://localhost:6514, tcp://10.0.0.1:6514
#   ## Protocol, address and port to host the syslog receiver.
#   ## If no host is specified, then localhost is used.
#   ## If no port is specified, 6514 is used (RFC5425#section-4.1).
#   server = "tcp://:6514"
#
#   ## TLS Config
#   # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
#   # tls_cert = "/etc/telegraf/cert.pem"
#   # tls_key = "/etc/telegraf/key.pem"
#
#   ## Period between keep alive probes.
#   ## 0 disables keep alive probes.
#   ## Defaults to the OS configuration.
#   ## Only applies to stream sockets (e.g. TCP).
#   # keep_alive_period = "5m"
#
#   ## Maximum number of concurrent connections (default = 0).
#   ## 0 means unlimited.
#   ## Only applies to stream sockets (e.g. TCP).
#   # max_connections = 1024
#
#   ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
#   ## 0 means unlimited.
#   # read_timeout = "5s"
#
#   ## The framing technique used to transport messages (default = "octet-counting").
#   ## Messages may arrive using either the octet-counting technique
#   ## (RFC5425#section-4.3.1, RFC6587#section-3.4.1) or the non-transparent
#   ## framing technique (RFC6587#section-3.4.2).
#   ## Must be one of "octet-counting", "non-transparent".
#   # framing = "octet-counting"
#
#   ## The trailer to be expected in case of non-transparent framing (default = "LF").
#   ## Must be one of "LF", or "NUL".
#   # trailer = "LF"
#
#   ## Whether to parse in best effort mode or not (default = false).
#   # best_effort = false
#
#   ## Character to prepend to SD-PARAMs (default = "_").
#   ## A syslog message can contain multiple parameters and multiple identifiers
#   ## within its structured data section.
#   ## E.g., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
#   ## For each combination a field is created.
#   ## Its name is created by concatenating identifier, sdparam_separator, and parameter name.
#   # sdparam_separator = "_"
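#   ## With the default separator, the example above would yield fields named
#   ## id1_name1, id1_name2, id2_name1, and id2_nameA.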


# # Stream a log file, like the tail -f command
# [[inputs.tail]]
#   ## files to tail.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   "/var/log/**.log"     -> recursively find all .log files in /var/log
#   ##   "/var/log/*/*.log"    -> find all .log files with a parent dir in /var/log
#   ##   "/var/log/apache.log" -> just tail the apache log file
#   ##
#   ## See https://github.com/gobwas/glob for more examples
#   ##
#   files = ["/var/mymetrics.out"]
#   ## Read file from beginning.
#   from_beginning = false
#   ## Whether file is a named pipe
#   pipe = false
#
#   ## Method used to watch for file updates. Can be either "inotify" or "poll".
#   # watch_method = "inotify"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Generic TCP listener
# [[inputs.tcp_listener]]
#   # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
#   # socket_listener plugin
#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # Generic UDP listener
# [[inputs.udp_listener]]
#   # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
#   # socket_listener plugin
#   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # Read metrics from VMware vCenter
# [[inputs.vsphere]]
#   ## List of vCenter URLs to be monitored. These three lines must be uncommented
#   ## and edited for the plugin to work.
#   vcenters = [ "https://vcenter.local/sdk" ]
#   username = "user@corp.local"
#   password = "secret"
#
#   ## VMs
#   ## Typical VM metrics (if omitted or empty, all metrics are collected)
#   vm_metric_include = [
#     "cpu.demand.average",
#     "cpu.idle.summation",
#     "cpu.latency.average",
#     "cpu.readiness.average",
#     "cpu.ready.summation",
#     "cpu.run.summation",
#     "cpu.usagemhz.average",
#     "cpu.used.summation",
#     "cpu.wait.summation",
#     "mem.active.average",
#     "mem.granted.average",
#     "mem.latency.average",
#     "mem.swapin.average",
#     "mem.swapinRate.average",
#     "mem.swapout.average",
#     "mem.swapoutRate.average",
#     "mem.usage.average",
#     "mem.vmmemctl.average",
#     "net.bytesRx.average",
#     "net.bytesTx.average",
#     "net.droppedRx.summation",
#     "net.droppedTx.summation",
#     "net.usage.average",
#     "power.power.average",
#     "virtualDisk.numberReadAveraged.average",
#     "virtualDisk.numberWriteAveraged.average",
#     "virtualDisk.read.average",
#     "virtualDisk.readOIO.latest",
#     "virtualDisk.throughput.usage.average",
#     "virtualDisk.totalReadLatency.average",
#     "virtualDisk.totalWriteLatency.average",
#     "virtualDisk.write.average",
#     "virtualDisk.writeOIO.latest",
#     "sys.uptime.latest",
#   ]
#   # vm_metric_exclude = [] ## Nothing is excluded by default
#   # vm_instances = true ## true by default
#
#   ## Hosts
#   ## Typical host metrics (if omitted or empty, all metrics are collected)
#   host_metric_include = [
#     "cpu.coreUtilization.average",
#     "cpu.costop.summation",
#     "cpu.demand.average",
#     "cpu.idle.summation",
#     "cpu.latency.average",
#     "cpu.readiness.average",
#     "cpu.ready.summation",
#     "cpu.swapwait.summation",
#     "cpu.usage.average",
#     "cpu.usagemhz.average",
#     "cpu.used.summation",
#     "cpu.utilization.average",
#     "cpu.wait.summation",
#     "disk.deviceReadLatency.average",
#     "disk.deviceWriteLatency.average",
#     "disk.kernelReadLatency.average",
#     "disk.kernelWriteLatency.average",
#     "disk.numberReadAveraged.average",
#     "disk.numberWriteAveraged.average",
#     "disk.read.average",
#     "disk.totalReadLatency.average",
#     "disk.totalWriteLatency.average",
#     "disk.write.average",
#     "mem.active.average",
#     "mem.latency.average",
#     "mem.state.latest",
#     "mem.swapin.average",
#     "mem.swapinRate.average",
#     "mem.swapout.average",
#     "mem.swapoutRate.average",
#     "mem.totalCapacity.average",
#     "mem.usage.average",
#     "mem.vmmemctl.average",
#     "net.bytesRx.average",
#     "net.bytesTx.average",
#     "net.droppedRx.summation",
#     "net.droppedTx.summation",
#     "net.errorsRx.summation",
#     "net.errorsTx.summation",
#     "net.usage.average",
#     "power.power.average",
#     "storageAdapter.numberReadAveraged.average",
#     "storageAdapter.numberWriteAveraged.average",
#     "storageAdapter.read.average",
#     "storageAdapter.write.average",
#     "sys.uptime.latest",
#   ]
#   # host_metric_exclude = [] ## Nothing excluded by default
#   # host_instances = true ## true by default
#
#   ## Clusters
#   # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
#   # cluster_metric_exclude = [] ## Nothing excluded by default
#   # cluster_instances = false ## false by default
#
#   ## Datastores
#   # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
#   # datastore_metric_exclude = [] ## Nothing excluded by default
#   # datastore_instances = false ## false by default for Datastores only
#
#   ## Datacenters
#   datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
#   datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
#   # datacenter_instances = false ## false by default for Datacenters only
#
#   ## Plugin Settings
#   ## separator character to use for measurement and field names (default: "_")
#   # separator = "_"
#
#   ## number of objects to retrieve per query for realtime resources (vms and hosts)
#   ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
#   # max_query_objects = 256
#
#   ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
#   ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
#   # max_query_metrics = 256
#
#   ## number of go routines to use for collection and discovery of objects and metrics
#   # collect_concurrency = 1
#   # discover_concurrency = 1
#
#   ## whether or not to force discovery of new objects on the initial gather call before collecting metrics
#   ## when true, in large environments this may cause errors because of the time elapsed while collecting metrics
#   ## when false (default), the first collection cycle may result in no or limited metrics while objects are discovered
#   # force_discover_on_init = false
#
#   ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
#   # object_discovery_interval = "300s"
#
#   ## timeout applies to any of the api requests made to vcenter
#   # timeout = "60s"
#
#   ## Optional SSL Config
#   # ssl_ca = "/path/to/cafile"
#   # ssl_cert = "/path/to/certfile"
#   # ssl_key = "/path/to/keyfile"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # A Webhooks Event collector
# [[inputs.webhooks]]
#   ## Address and port to host Webhook listener on
#   service_address = ":1619"
#
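#   ## Each section below registers a handler at its path on this listener;
#   ## e.g. with the defaults, GitHub webhook events would be POSTed to
#   ## http://<host>:1619/github.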
#   [inputs.webhooks.filestack]
#     path = "/filestack"
#
#   [inputs.webhooks.github]
#     path = "/github"
#     # secret = ""
#
#   [inputs.webhooks.mandrill]
#     path = "/mandrill"
#
#   [inputs.webhooks.rollbar]
#     path = "/rollbar"
#
#   [inputs.webhooks.papertrail]
#     path = "/papertrail"
#
#   [inputs.webhooks.particle]
#     path = "/particle"


# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
# [[inputs.zipkin]]
#   # path = "/api/v1/spans" # URL path for span data
#   # port = 9411            # Port on which Telegraf listens