# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file; simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR");
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR).
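#
# For example, assuming hypothetical environment variables DB_PASSWORD and
# DB_PORT are exported before telegraf starts:
#   password = "$DB_PASSWORD"   # string value: keep the quotes
#   port = $DB_PORT             # numeric value: no quotes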


# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"


# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = false

  ## Telegraf will send metrics to outputs in batches of at
  ## most metric_batch_size metrics.
  metric_batch_size = 1000
  ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
  ## output, and will flush this buffer on a successful write. Oldest metrics
  ## are dropped first when this buffer fills.
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. You shouldn't set this below
  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
  flush_interval = "30s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## By default, precision will be set to the same timestamp order as the
  ## collection interval, with the maximum being 1s.
  ## Precision will NOT be used for service inputs, such as logparser and statsd.
  ## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns".
  precision = ""
  ## Run telegraf in debug mode
  debug = false
  ## Run telegraf in quiet mode
  quiet = false
  ## Override default hostname, if empty use os.Hostname()
  ## hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false


###############################################################################
#                              OUTPUT PLUGINS                                 #
###############################################################################

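# No outputs are declared below, so this configuration gathers metrics but has
# nowhere to write them. A minimal example output is sketched here (commented
# out); it assumes a local InfluxDB on its default port 8086 and a database
# named "telegraf" -- adjust or replace it with your real output before use.
# [[outputs.influxdb]]
#   ## The full HTTP or UDP URL(s) of your InfluxDB instance(s).
#   urls = ["http://localhost:8086"]
#   ## The target database for metrics (created on startup if missing).
#   database = "telegraf"
#   ## HTTP Basic Auth, if required.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"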


###############################################################################
#                              INPUT PLUGINS                                  #
###############################################################################

# Statsd Server
#[[inputs.statsd]]
  ## Address and port to host UDP listener on
  #service_address = ":8125"
  ## Delete gauges every interval (default=false)
  #delete_gauges = false
  ## Delete counters every interval (default=false)
  #delete_counters = false
  ## Delete sets every interval (default=false)
  #delete_sets = false
  ## Delete timings & histograms every interval (default=true)
  #delete_timings = true
  ## Percentiles to calculate for timing & histogram stats
  #percentiles = [90]

  ## separator to use between elements of a statsd metric
  #metric_separator = "_"

  ## Parses tags in the datadog statsd format
  ## http://docs.datadoghq.com/guides/dogstatsd/
  #parse_data_dog_tags = false

  ## Statsd data translation templates, more info can be read here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
  # templates = [
  #   "cpu.* measurement*"
  # ]

  ## Number of UDP messages allowed to queue up, once filled,
  ## the statsd server will start dropping packets
  #allowed_pending_messages = 10000

  ## Number of timing/histogram values to track per-measurement in the
  ## calculation of percentiles. Raising this limit increases the accuracy
  ## of percentiles but also increases the memory usage and cpu time.
  #percentile_limit = 1000


# Read metrics about cpu usage
[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## Comment this line if you want the raw CPU time metrics
  fielddrop = ["time_*"]


# Read metrics about disk usage by mount point
[[inputs.disk]]
  ## By default, telegraf gathers stats for all mountpoints.
  ## Setting mountpoints will restrict the stats to the specified mountpoints.
  # mount_points = ["/"]

  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
  ## present on /run, /var/run, /dev/shm or /dev).
  ignore_fs = ["tmpfs", "devtmpfs", "squashfs"]


# Read metrics about disk IO by device
# [[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  # devices = ["sda", "sdb"]
  ## Uncomment the following line if you need disk serial numbers.
  # skip_serial_number = false


# Get kernel statistics from /proc/stat
[[inputs.kernel]]
  # no configuration


# Read metrics about memory usage
[[inputs.mem]]
  # no configuration


# Get the number of processes and group them by status
[[inputs.processes]]
  # no configuration


# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration


# Read metrics about system load & uptime
[[inputs.system]]
  fielddrop = ["uptime_format"]

174
175# # Read stats from aerospike server(s)
176# [[inputs.aerospike]]
177# ## Aerospike servers to connect to (with port)
178# ## This plugin will query all namespaces the aerospike
179# ## server has configured and get stats for them.
180# servers = ["localhost:3000"]
181
182
183# # Read Apache status information (mod_status)
184# [[inputs.apache]]
185# ## An array of Apache status URI to gather stats.
186# ## Default is "http://localhost/server-status?auto".
187# urls = ["http://localhost/server-status?auto"]
188
189
190# # Read metrics of bcache from stats_total and dirty_data
191# [[inputs.bcache]]
192# ## Bcache sets path
193# ## If not specified, then default is:
194# bcachePath = "/sys/fs/bcache"
195#
# ## By default, telegraf gathers stats for all bcache devices
197# ## Setting devices will restrict the stats to the specified
198# ## bcache devices.
199# bcacheDevs = ["bcache0"]
200
201
202# # Read Cassandra metrics through Jolokia
203# [[inputs.cassandra]]
204# # This is the context root used to compose the jolokia url
205# context = "/jolokia/read"
206# ## List of cassandra servers exposing jolokia read service
207# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
208# ## List of metrics collected on above servers
209# ## Each metric consists of a jmx path.
210# ## This will collect all heap memory usage metrics from the jvm and
211# ## ReadLatency metrics for all keyspaces and tables.
212# ## "type=Table" in the query works with Cassandra3.0. Older versions might
213# ## need to use "type=ColumnFamily"
214# metrics = [
215# "/java.lang:type=Memory/HeapMemoryUsage",
216# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
217# ]
218
219
220# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
221# [[inputs.ceph]]
222# ## All configuration values are optional, defaults are shown below
223#
224# ## location of ceph binary
225# ceph_binary = "/usr/bin/ceph"
226#
227# ## directory in which to look for socket files
228# socket_dir = "/var/run/ceph"
229#
230# ## prefix of MON and OSD socket files, used to determine socket type
231# mon_prefix = "ceph-mon"
232# osd_prefix = "ceph-osd"
233#
234# ## suffix used to identify socket files
235# socket_suffix = "asok"
236
237
238# # Read specific statistics per cgroup
239# [[inputs.cgroup]]
240# ## Directories in which to look for files, globs are supported.
241# # paths = [
242# # "/cgroup/memory",
243# # "/cgroup/memory/child1",
244# # "/cgroup/memory/child2/*",
245# # ]
246# ## cgroup stat fields, as file names, globs are supported.
247# ## these file names are appended to each path from above.
248# # files = ["memory.*usage*", "memory.limit_in_bytes"]
249
250
251# # Pull Metric Statistics from Amazon CloudWatch
252# [[inputs.cloudwatch]]
253# ## Amazon Region
254# region = 'us-east-1'
255#
256# ## Amazon Credentials
257# ## Credentials are loaded in the following order
258# ## 1) Assumed credentials via STS if role_arn is specified
259# ## 2) explicit credentials from 'access_key' and 'secret_key'
260# ## 3) shared profile from 'profile'
261# ## 4) environment variables
262# ## 5) shared credentials file
263# ## 6) EC2 Instance Profile
264# #access_key = ""
265# #secret_key = ""
266# #token = ""
267# #role_arn = ""
268# #profile = ""
269# #shared_credential_file = ""
270#
271# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
272# period = '1m'
273#
274# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
275# delay = '1m'
276#
# ## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
278# ## gaps or overlap in pulled data
279# interval = '1m'
280#
281# ## Configure the TTL for the internal cache of metrics.
282# ## Defaults to 1 hr if not specified
283# #cache_ttl = '10m'
284#
285# ## Metric Statistic Namespace (required)
286# namespace = 'AWS/ELB'
287#
288# ## Metrics to Pull (optional)
289# ## Defaults to all Metrics in Namespace if nothing is provided
290# ## Refreshes Namespace available metrics every 1h
291# #[[inputs.cloudwatch.metrics]]
292# # names = ['Latency', 'RequestCount']
293# #
294# # ## Dimension filters for Metric (optional)
295# # [[inputs.cloudwatch.metrics.dimensions]]
296# # name = 'LoadBalancerName'
297# # value = 'p-example'
298
299
300# # Gather health check statuses from services registered in Consul
301# [[inputs.consul]]
# ## Most of these values default to those configured at the Consul agent level.
303# ## Optional Consul server address (default: "localhost")
304# # address = "localhost"
305# ## Optional URI scheme for the Consul server (default: "http")
306# # scheme = "http"
307# ## Optional ACL token used in every request (default: "")
308# # token = ""
309# ## Optional username used for request HTTP Basic Authentication (default: "")
310# # username = ""
311# ## Optional password used for HTTP Basic Authentication (default: "")
312# # password = ""
313# ## Optional data centre to query the health checks from (default: "")
314# # datacentre = ""
315
316
317# # Read metrics from one or many couchbase clusters
318# [[inputs.couchbase]]
319# ## specify servers via a url matching:
320# ## [protocol://][:password]@address[:port]
321# ## e.g.
322# ## http://couchbase-0.example.com/
323# ## http://admin:secret@couchbase-0.example.com:8091/
324# ##
325# ## If no servers are specified, then localhost is used as the host.
# ## If no protocol is specified, HTTP is used.
327# ## If no port is specified, 8091 is used.
328# servers = ["http://localhost:8091"]
329
330
331# # Read CouchDB Stats from one or more servers
332# [[inputs.couchdb]]
333# ## Works with CouchDB stats endpoints out of the box
334# ## Multiple HOSTs from which to read CouchDB stats:
# hosts = ["http://localhost:5984/_stats"]
336
337
338# # Read metrics from one or many disque servers
339# [[inputs.disque]]
340# ## An array of URI to gather stats about. Specify an ip or hostname
341# ## with optional port and password.
342# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
343# ## If no servers are specified, then localhost is used as the host.
344# servers = ["localhost"]
345
346
347# # Query given DNS server and gives statistics
348# [[inputs.dns_query]]
349# ## servers to query
350# servers = ["8.8.8.8"] # required
351#
352# ## Domains or subdomains to query. "."(root) is default
353# domains = ["."] # optional
354#
355# ## Query record type. Default is "A"
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
357# record_type = "A" # optional
358#
359# ## Dns server port. 53 is default
360# port = 53 # optional
361#
362# ## Query timeout in seconds. Default is 2 seconds
363# timeout = 2 # optional
364
365
366# # Read metrics about docker containers
367# [[inputs.docker]]
368# ## Docker Endpoint
369# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
370# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
371# endpoint = "unix:///var/run/docker.sock"
372# ## Only collect metrics for these containers, collect all if empty
373# container_names = []
374# ## Timeout for docker list, info, and stats commands
375# timeout = "5s"
376#
377# ## Whether to report for each container per-device blkio (8:0, 8:1...) and
378# ## network (eth0, eth1, ...) stats or not
379# perdevice = true
380# ## Whether to report for each container total blkio and network stats or not
381# total = false
382#
383
384
385# # Read statistics from one or many dovecot servers
386# [[inputs.dovecot]]
387# ## specify dovecot servers via an address:port list
388# ## e.g.
389# ## localhost:24242
390# ##
391# ## If no servers are specified, then localhost is used as the host.
392# servers = ["localhost:24242"]
393# ## Type is one of "user", "domain", "ip", or "global"
394# type = "global"
395# ## Wildcard matches like "*.com". An empty string "" is same as "*"
396# ## If type = "ip" filters should be <IP/network>
397# filters = [""]
398
399
400# # Read stats from one or more Elasticsearch servers or clusters
401# [[inputs.elasticsearch]]
402# ## specify a list of one or more Elasticsearch servers
403# servers = ["http://localhost:9200"]
404#
405# ## set local to false when you want to read the indices stats from all nodes
406# ## within the cluster
407# local = true
408#
409# ## set cluster_health to true when you want to also obtain cluster level stats
410# cluster_health = false
411#
412# ## Optional SSL Config
413# # ssl_ca = "/etc/telegraf/ca.pem"
414# # ssl_cert = "/etc/telegraf/cert.pem"
415# # ssl_key = "/etc/telegraf/key.pem"
416# ## Use SSL but skip chain & host verification
417# # insecure_skip_verify = false
418
419
420# # Read metrics from one or more commands that can output to stdout
421# [[inputs.exec]]
422# ## Commands array
423# commands = [
424# "/tmp/test.sh",
425# "/usr/bin/mycollector --foo=bar",
426# "/tmp/collect_*.sh"
427# ]
428#
429# ## Timeout for each command to complete.
430# timeout = "5s"
431#
432# ## measurement name suffix (for separating different commands)
433# name_suffix = "_mycollector"
434#
435# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
437# ## more about them here:
438# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
439# data_format = "influx"
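#
# ## For reference, with data_format = "influx" each command is expected to
# ## emit InfluxDB line protocol on stdout, e.g. (hypothetical measurement):
# ##   my_metric,host=server01 value=0.64 1483228800000000000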
440
441
442# # Read stats about given file(s)
443# [[inputs.filestat]]
444# ## Files to gather stats about.
445# ## These accept standard unix glob matching rules, but with the addition of
446# ## ** as a "super asterisk". ie:
447# ## "/var/log/**.log" -> recursively find all .log files in /var/log
448# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
449# ## "/var/log/apache.log" -> just tail the apache log file
450# ##
451# ## See https://github.com/gobwas/glob for more examples
452# ##
453# files = ["/var/log/**.log"]
454# ## If true, read the entire file and calculate an md5 checksum.
455# md5 = false
456
457
458# # Read flattened metrics from one or more GrayLog HTTP endpoints
459# [[inputs.graylog]]
460# ## API endpoint, currently supported API:
461# ##
462# ## - multiple (Ex http://<host>:12900/system/metrics/multiple)
463# ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
464# ##
465# ## For namespace endpoint, the metrics array will be ignored for that call.
466# ## Endpoint can contain namespace and multiple type calls.
467# ##
468# ## Please check http://[graylog-server-ip]:12900/api-browser for full list
469# ## of endpoints
470# servers = [
471# "http://[graylog-server-ip]:12900/system/metrics/multiple",
472# ]
473#
474# ## Metrics list
475# ## List of metrics can be found on Graylog webservice documentation.
# ## Or by hitting the web service api at:
477# ## http://[graylog-host]:12900/system/metrics
478# metrics = [
479# "jvm.cl.loaded",
480# "jvm.memory.pools.Metaspace.committed"
481# ]
482#
483# ## Username and password
484# username = ""
485# password = ""
486#
487# ## Optional SSL Config
488# # ssl_ca = "/etc/telegraf/ca.pem"
489# # ssl_cert = "/etc/telegraf/cert.pem"
490# # ssl_key = "/etc/telegraf/key.pem"
491# ## Use SSL but skip chain & host verification
492# # insecure_skip_verify = false
493
494
495# # Read metrics of haproxy, via socket or csv stats page
496# [[inputs.haproxy]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
498# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
499# ## Make sure you specify the complete path to the stats endpoint
500# ## ie 10.10.3.33:1936/haproxy?stats
501# #
502# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
503# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
504# ## Or you can also use local socket
505# ## servers = ["socket:/run/haproxy/admin.sock"]
506
507
# # HTTP/HTTPS request given an address, a method and a timeout
509# [[inputs.http_response]]
510# ## Server address (default http://localhost)
511# address = "http://github.com"
512# ## Set response_timeout (default 5 seconds)
513# response_timeout = "5s"
514# ## HTTP Request Method
515# method = "GET"
516# ## Whether to follow redirects from the server (defaults to false)
517# follow_redirects = true
518# ## HTTP Request Headers (all values must be strings)
519# # [inputs.http_response.headers]
520# # Host = "github.com"
521# ## Optional HTTP Request Body
522# # body = '''
523# # {'fake':'data'}
524# # '''
525#
526# ## Optional SSL Config
527# # ssl_ca = "/etc/telegraf/ca.pem"
528# # ssl_cert = "/etc/telegraf/cert.pem"
529# # ssl_key = "/etc/telegraf/key.pem"
530# ## Use SSL but skip chain & host verification
531# # insecure_skip_verify = false
532
533
534# # Read flattened metrics from one or more JSON HTTP endpoints
535# [[inputs.httpjson]]
536# ## NOTE This plugin only reads numerical measurements, strings and booleans
537# ## will be ignored.
538#
539# ## a name for the service being polled
540# name = "webserver_stats"
541#
542# ## URL of each server in the service's cluster
543# servers = [
544# "http://localhost:9999/stats/",
545# "http://localhost:9998/stats/",
546# ]
547#
548# ## HTTP method to use: GET or POST (case-sensitive)
549# method = "GET"
550#
551# ## List of tag names to extract from top-level of JSON server response
552# # tag_keys = [
553# # "my_tag_1",
554# # "my_tag_2"
555# # ]
556#
557# ## HTTP parameters (all values must be strings)
558# [inputs.httpjson.parameters]
559# event_type = "cpu_spike"
560# threshold = "0.75"
561#
562# ## HTTP Header parameters (all values must be strings)
563# # [inputs.httpjson.headers]
564# # X-Auth-Token = "my-xauth-token"
565# # apiVersion = "v1"
566#
567# ## Optional SSL Config
568# # ssl_ca = "/etc/telegraf/ca.pem"
569# # ssl_cert = "/etc/telegraf/cert.pem"
570# # ssl_key = "/etc/telegraf/key.pem"
571# ## Use SSL but skip chain & host verification
572# # insecure_skip_verify = false
573
574
575# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
576# [[inputs.influxdb]]
577# ## Works with InfluxDB debug endpoints out of the box,
578# ## but other services can use this format too.
579# ## See the influxdb plugin's README for more details.
580#
581# ## Multiple URLs from which to read InfluxDB-formatted JSON
582# ## Default is "http://localhost:8086/debug/vars".
583# urls = [
584# "http://localhost:8086/debug/vars"
585# ]
586
587
588# # Read metrics from one or many bare metal servers
589# [[inputs.ipmi_sensor]]
590# ## specify servers via a url matching:
591# ## [username[:password]@][protocol[(address)]]
592# ## e.g.
593# ## root:passwd@lan(127.0.0.1)
594# ##
595# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
596
597
598# # Read JMX metrics through Jolokia
599# [[inputs.jolokia]]
600# ## This is the context root used to compose the jolokia url
601# context = "/jolokia"
602#
603# ## This specifies the mode used
604# # mode = "proxy"
605# #
606# ## When in proxy mode this section is used to specify further
607# ## proxy address configurations.
608# ## Remember to change host address to fit your environment.
609# # [inputs.jolokia.proxy]
610# # host = "127.0.0.1"
611# # port = "8080"
612#
613#
614# ## List of servers exposing jolokia read service
615# [[inputs.jolokia.servers]]
616# name = "as-server-01"
617# host = "127.0.0.1"
618# port = "8080"
619# # username = "myuser"
620# # password = "mypassword"
621#
622# ## List of metrics collected on above servers
# ## Each metric consists of a name, a jmx path and either
# ## a pass or drop slice attribute.
# ## This collects all heap memory usage metrics.
626# [[inputs.jolokia.metrics]]
627# name = "heap_memory_usage"
628# mbean = "java.lang:type=Memory"
629# attribute = "HeapMemoryUsage"
630#
# ## This collects thread count metrics.
632# [[inputs.jolokia.metrics]]
633# name = "thread_count"
634# mbean = "java.lang:type=Threading"
635# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
636#
# ## This collects class loaded/unloaded count metrics.
638# [[inputs.jolokia.metrics]]
639# name = "class_count"
640# mbean = "java.lang:type=ClassLoading"
641# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
642
643
644# # Read metrics from a LeoFS Server via SNMP
645# [[inputs.leofs]]
646# ## An array of URI to gather stats about LeoFS.
647# ## Specify an ip or hostname with port. ie 127.0.0.1:4020
648# servers = ["127.0.0.1:4021"]
649
650
651# # Read metrics from local Lustre service on OST, MDS
652# [[inputs.lustre2]]
653# ## An array of /proc globs to search for Lustre stats
654# ## If not specified, the default will work on Lustre 2.5.x
655# ##
656# # ost_procfiles = [
657# # "/proc/fs/lustre/obdfilter/*/stats",
658# # "/proc/fs/lustre/osd-ldiskfs/*/stats",
659# # "/proc/fs/lustre/obdfilter/*/job_stats",
660# # ]
661# # mds_procfiles = [
662# # "/proc/fs/lustre/mdt/*/md_stats",
663# # "/proc/fs/lustre/mdt/*/job_stats",
664# # ]
665
666
667# # Gathers metrics from the /3.0/reports MailChimp API
668# [[inputs.mailchimp]]
669# ## MailChimp API key
670# ## get from https://admin.mailchimp.com/account/api/
671# api_key = "" # required
672# ## Reports for campaigns sent more than days_old ago will not be collected.
673# ## 0 means collect all.
674# days_old = 0
675# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
676# # campaign_id = ""
677
678
679# # Read metrics from one or many memcached servers
680# [[inputs.memcached]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
682# ## with optional port. ie localhost, 10.0.0.1:11211, etc.
683# servers = ["localhost:11211"]
684# # unix_sockets = ["/var/run/memcached.sock"]
685
686
687# # Telegraf plugin for gathering metrics from N Mesos masters
688# [[inputs.mesos]]
689# ## Timeout, in ms.
690# timeout = 100
691# ## A list of Mesos masters.
692# masters = ["localhost:5050"]
693# ## Master metrics groups to be collected, by default, all enabled.
694# master_collections = [
695# "resources",
696# "master",
697# "system",
698# "agents",
699# "frameworks",
700# "tasks",
701# "messages",
702# "evqueue",
703# "registrar",
704# ]
705# ## A list of Mesos slaves, default is []
706# # slaves = []
707# ## Slave metrics groups to be collected, by default, all enabled.
708# # slave_collections = [
709# # "resources",
710# # "agent",
711# # "system",
712# # "executors",
713# # "tasks",
714# # "messages",
715# # ]
716# ## Include mesos tasks statistics, default is false
717# # slave_tasks = true
718
719
720# # Read metrics from one or many MongoDB servers
721# [[inputs.mongodb]]
722# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port and password. ie,
724# ## mongodb://user:auth_key@10.10.3.30:27017,
725# ## mongodb://10.10.3.33:18832,
726# ## 10.0.0.1:10000, etc.
727# servers = ["127.0.0.1:27017"]
728# gather_perdb_stats = false
729
730
731# # Read metrics from one or many mysql servers
732# [[inputs.mysql]]
733# ## specify servers via a url matching:
734# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
735# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
736# ## e.g.
737# ## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false
738# ## db_user@tcp(127.0.0.1:3306)/?tls=false
739# #
740# ## If no servers are specified, then localhost is used as the host.
741# servers = ["tcp(127.0.0.1:3306)/"]
# ## the limits for metrics from perf_events_statements
743# perf_events_statements_digest_text_limit = 120
744# perf_events_statements_limit = 250
745# perf_events_statements_time_limit = 86400
746# #
# ## if the list is empty, then metrics are gathered from all database tables
# table_schema_databases = []
# #
# ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases listed above
# gather_table_schema = false
752# #
753# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
754# gather_process_list = true
755# #
756# ## gather auto_increment columns and max values from information schema
757# gather_info_schema_auto_inc = true
758# #
759# ## gather metrics from SHOW SLAVE STATUS command output
760# gather_slave_status = true
761# #
762# ## gather metrics from SHOW BINARY LOGS command output
763# gather_binary_logs = false
764# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
766# gather_table_io_waits = false
767# #
768# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
769# gather_table_lock_waits = false
770# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
772# gather_index_io_waits = false
773# #
774# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
775# gather_event_waits = false
776# #
777# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
778# gather_file_events_stats = false
779# #
780# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
781# gather_perf_events_statements = false
782# #
783# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
784# interval_slow = "30m"
785
786
# Read metrics about network interface usage
[[inputs.net]]
  ## By default, telegraf gathers stats from any up interface (excluding loopback)
  ## Setting interfaces will tell it to gather these explicit interfaces,
  ## regardless of status.
  ##
  # interfaces = ["eth0"]
794
795
796# # TCP or UDP 'ping' given url and collect response time in seconds
797# [[inputs.net_response]]
798# ## Protocol, must be "tcp" or "udp"
799# protocol = "tcp"
800# ## Server address (default localhost)
801# address = "github.com:80"
802# ## Set timeout
803# timeout = "1s"
804#
805# ## Optional string sent to the server
806# # send = "ssh"
807# ## Optional expected string in answer
808# # expect = "ssh"
809# ## Set read timeout (only used if expecting a response)
810# read_timeout = "1s"
811
812
813# # Read TCP metrics such as established, time wait and sockets counts.
814# [[inputs.netstat]]
815# # no configuration
816
817
818# # Read Nginx's basic status information (ngx_http_stub_status_module)
819# [[inputs.nginx]]
820# ## An array of Nginx stub_status URI to gather stats.
821# urls = ["http://localhost/status"]
822
823
824# # Read NSQ topic and channel statistics.
825# [[inputs.nsq]]
826# ## An array of NSQD HTTP API endpoints
827# endpoints = ["http://localhost:4151"]
828
829
830# # Collect kernel snmp counters and network interface statistics
831# [[inputs.nstat]]
832# ## file paths for proc files. If empty default paths will be used:
833# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
834# ## These can also be overridden with env variables, see README.
835# proc_net_netstat = "/proc/net/netstat"
836# proc_net_snmp = "/proc/net/snmp"
837# proc_net_snmp6 = "/proc/net/snmp6"
838# ## dump metrics with 0 values too
839# dump_zeros = true
840
841
842# # Get standard NTP query metrics, requires ntpq executable.
843# [[inputs.ntpq]]
844# ## If false, set the -n ntpq flag. Can reduce metric gather time.
845# dns_lookup = true
846
847
848# # Read metrics of passenger using passenger-status
849# [[inputs.passenger]]
850# ## Path of passenger-status.
851# ##
# ## The plugin gathers metrics by parsing the XML output of passenger-status
853# ## More information about the tool:
854# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
855# ##
# ## If no path is specified, then the plugin simply executes passenger-status,
# ## which is expected to be found in your PATH
858# command = "passenger-status -v --show=xml"
859
860
861# # Read metrics of phpfpm, via HTTP status page or socket
862# [[inputs.phpfpm]]
863# ## An array of addresses to gather stats about. Specify an ip or hostname
864# ## with optional port and path
865# ##
# ## The plugin can be configured in three modes (any one can be used):
867# ## - http: the URL must start with http:// or https://, ie:
868# ## "http://localhost/status"
869# ## "http://192.168.130.1/status?full"
870# ##
871# ## - unixsocket: path to fpm socket, ie:
872# ## "/var/run/php5-fpm.sock"
873# ## or using a custom fpm status path:
874# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
875# ##
876# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
877# ## "fcgi://10.0.0.12:9000/status"
878# ## "cgi://10.0.10.12:9001/status"
879# ##
# ## Example of gathering from both a local socket and a remote host
881# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
882# urls = ["http://localhost/status"]
883
884
885# # Ping given url(s) and return statistics
886# [[inputs.ping]]
887# ## NOTE: this plugin forks the ping command. You may need to set capabilities
888# ## via setcap cap_net_raw+p /bin/ping
889# #
890# ## urls to ping
891# urls = ["www.google.com"] # required
892# ## number of pings to send per collection (ping -c <COUNT>)
893# count = 1 # required
894# ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
895# ping_interval = 0.0
896# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
897# timeout = 1.0
898# ## interface to send ping from (ping -I <INTERFACE>)
899# interface = ""
900
901
902# # Read metrics from one or many postgresql servers
903# [[inputs.postgresql]]
904# ## specify address via a url matching:
905# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
906# ## ?sslmode=[disable|verify-ca|verify-full]
907# ## or a simple string:
908# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
909# ##
910# ## All connection parameters are optional.
911# ##
912# ## Without the dbname parameter, the driver will default to a database
913# ## with the same name as the user. This dbname is just for instantiating a
914# ## connection with the server and doesn't restrict the databases we are trying
915# ## to grab metrics for.
916# ##
917# address = "host=localhost user=postgres sslmode=disable"
918#
919# ## A list of databases to pull metrics about. If not specified, metrics for all
920# ## databases are gathered.
921# # databases = ["app_production", "testing"]
922
923
924# # Read metrics from one or many postgresql servers
925# [[inputs.postgresql_extensible]]
926# ## specify address via a url matching:
927# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
928# ## ?sslmode=[disable|verify-ca|verify-full]
929# ## or a simple string:
930# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
931# #
932# ## All connection parameters are optional. #
933# ## Without the dbname parameter, the driver will default to a database
934# ## with the same name as the user. This dbname is just for instantiating a
935# ## connection with the server and doesn't restrict the databases we are trying
936# ## to grab metrics for.
937# #
938# address = "host=localhost user=postgres sslmode=disable"
939# ## A list of databases to pull metrics about. If not specified, metrics for all
940# ## databases are gathered.
941# ## databases = ["app_production", "testing"]
942# #
943# # outputaddress = "db01"
944# ## A custom name for the database that will be used as the "server" tag in the
945# ## measurement output. If not specified, a default one generated from
946# ## the connection address is used.
947# #
948# ## Define the toml config where the sql queries are stored
# ## New queries can be added. If withdbname is set to true and no databases
# ## are defined in the 'databases' field, the sql query is terminated with
# ## 'is not null' so that the query still succeeds.
# ## Example:
# ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
# ## because the databases variable was set to ['postgres', 'pgbench'] and
# ## withdbname was true. If withdbname is set to false, do not add the
# ## where clause (i.e. the dbname). The tagvalue field is used to define
# ## custom tags (separated by commas).
# ## The optional "measurement" value can be used to override the default
# ## output measurement name ("postgresql").
961# #
962# ## Structure :
963# ## [[inputs.postgresql_extensible.query]]
964# ## sqlquery string
965# ## version string
966# ## withdbname boolean
967# ## tagvalue string (comma separated)
968# ## measurement string
969# [[inputs.postgresql_extensible.query]]
970# sqlquery="SELECT * FROM pg_stat_database"
971# version=901
972# withdbname=false
973# tagvalue=""
974# measurement=""
975# [[inputs.postgresql_extensible.query]]
976# sqlquery="SELECT * FROM pg_stat_bgwriter"
977# version=901
978# withdbname=false
979# tagvalue="postgresql.stats"
980
981
982# # Read metrics from one or many PowerDNS servers
983# [[inputs.powerdns]]
984# ## An array of sockets to gather stats about.
985# ## Specify a path to unix socket.
986# unix_sockets = ["/var/run/pdns.controlsocket"]
987
988
989# # Monitor process cpu and memory usage
990# [[inputs.procstat]]
991# ## Must specify one of: pid_file, exe, or pattern
992# ## PID file to monitor process
993# pid_file = "/var/run/nginx.pid"
994# ## executable name (ie, pgrep <exe>)
995# # exe = "nginx"
996# ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
997# # pattern = "nginx"
998# ## user as argument for pgrep (ie, pgrep -u <user>)
999# # user = "nginx"
1000#
1001# ## override for process_name
1002# ## This is optional; default is sourced from /proc/<pid>/status
1003# # process_name = "bar"
1004# ## Field name prefix
1005# prefix = ""
1006# ## comment this out if you want raw cpu_time stats
1007# fielddrop = ["cpu_time_*"]
1008
1009
1010# # Read metrics from one or many prometheus clients
1011# [[inputs.prometheus]]
1012# ## An array of urls to scrape metrics from.
1013# urls = ["http://localhost:9100/metrics"]
1014#
1015# ## Use bearer token for authorization
1016# # bearer_token = /path/to/bearer/token
1017#
1018# ## Optional SSL Config
1019# # ssl_ca = /path/to/cafile
1020# # ssl_cert = /path/to/certfile
1021# # ssl_key = /path/to/keyfile
1022# ## Use SSL but skip chain & host verification
1023# # insecure_skip_verify = false
1024
1025
# # Reads the last_run_summary.yaml file and converts it to measurements
1027# [[inputs.puppetagent]]
1028# ## Location of puppet last run summary file
1029# location = "/var/lib/puppet/state/last_run_summary.yaml"
1030
1031
1032# # Read metrics from one or many RabbitMQ servers via the management API
1033# [[inputs.rabbitmq]]
1034# # url = "http://localhost:15672"
1035# # name = "rmq-server-1" # optional tag
1036# # username = "guest"
1037# # password = "guest"
1038#
1039# ## Optional SSL Config
1040# # ssl_ca = "/etc/telegraf/ca.pem"
1041# # ssl_cert = "/etc/telegraf/cert.pem"
1042# # ssl_key = "/etc/telegraf/key.pem"
1043# ## Use SSL but skip chain & host verification
1044# # insecure_skip_verify = false
1045#
1046# ## A list of nodes to pull metrics about. If not specified, metrics for
1047# ## all nodes are gathered.
1048# # nodes = ["rabbit@node1", "rabbit@node2"]
1049
1050
1051# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
1052# [[inputs.raindrops]]
1053# ## An array of raindrops middleware URI to gather stats.
1054# urls = ["http://localhost:8080/_raindrops"]
1055
1056
1057# # Read metrics from one or many redis servers
1058# [[inputs.redis]]
1059# ## specify servers via a url matching:
1060# ## [protocol://][:password]@address[:port]
1061# ## e.g.
1062# ## tcp://localhost:6379
1063# ## tcp://:password@192.168.99.100
1064# ## unix:///var/run/redis.sock
1065# ##
1066# ## If no servers are specified, then localhost is used as the host.
1067# ## If no port is specified, 6379 is used
1068# servers = ["tcp://localhost:6379"]
1069
1070
1071# # Read metrics from one or many RethinkDB servers
1072# [[inputs.rethinkdb]]
1073# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port and password. ie,
1075# ## rethinkdb://user:auth_key@10.10.3.30:28105,
1076# ## rethinkdb://10.10.3.33:18832,
1077# ## 10.0.0.1:10000, etc.
1078# servers = ["127.0.0.1:28015"]
1079
1080
# # Read metrics from one or many Riak servers
1082# [[inputs.riak]]
1083# # Specify a list of one or more riak http servers
1084# servers = ["http://localhost:8098"]
1085
1086
1087# # Reads oids value from one or many snmp agents
1088# [[inputs.snmp]]
1089# ## Use 'oids.txt' file to translate oids to names
1090# ## To generate 'oids.txt' you need to run:
1091# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
# ## Or if you have another MIB folder with custom MIBs
1093# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
1094# snmptranslate_file = "/tmp/oids.txt"
1095# [[inputs.snmp.host]]
1096# address = "192.168.2.2:161"
1097# # SNMP community
1098# community = "public" # default public
1099# # SNMP version (1, 2 or 3)
1100# # Version 3 not supported yet
1101# version = 2 # default 2
1102# # SNMP response timeout
1103# timeout = 2.0 # default 2.0
1104# # SNMP request retries
1105# retries = 2 # default 2
1106# # Which get/bulk do you want to collect for this host
1107# collect = ["mybulk", "sysservices", "sysdescr"]
1108# # Simple list of OIDs to get, in addition to "collect"
1109# get_oids = []
1110#
1111# [[inputs.snmp.host]]
1112# address = "192.168.2.3:161"
1113# community = "public"
1114# version = 2
1115# timeout = 2.0
1116# retries = 2
1117# collect = ["mybulk"]
1118# get_oids = [
1119# "ifNumber",
1120# ".1.3.6.1.2.1.1.3.0",
1121# ]
1122#
1123# [[inputs.snmp.get]]
1124# name = "ifnumber"
1125# oid = "ifNumber"
1126#
1127# [[inputs.snmp.get]]
1128# name = "interface_speed"
1129# oid = "ifSpeed"
1130# instance = "0"
1131#
1132# [[inputs.snmp.get]]
1133# name = "sysuptime"
1134# oid = ".1.3.6.1.2.1.1.3.0"
1135# unit = "second"
1136#
1137# [[inputs.snmp.bulk]]
1138# name = "mybulk"
1139# max_repetition = 127
1140# oid = ".1.3.6.1.2.1.1"
1141#
1142# [[inputs.snmp.bulk]]
1143# name = "ifoutoctets"
1144# max_repetition = 127
1145# oid = "ifOutOctets"
1146#
1147# [[inputs.snmp.host]]
1148# address = "192.168.2.13:161"
1149# #address = "127.0.0.1:161"
1150# community = "public"
1151# version = 2
1152# timeout = 2.0
1153# retries = 2
1154# #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
1155# collect = ["sysuptime" ]
1156# [[inputs.snmp.host.table]]
1157# name = "iftable3"
1158# include_instances = ["enp5s0", "eth1"]
1159#
1160# # SNMP TABLEs
1161# # table without mapping neither subtables
1162# [[inputs.snmp.table]]
1163# name = "iftable1"
1164# oid = ".1.3.6.1.2.1.31.1.1.1"
1165#
1166# # table without mapping but with subtables
1167# [[inputs.snmp.table]]
1168# name = "iftable2"
1169# oid = ".1.3.6.1.2.1.31.1.1.1"
1170# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
1171#
1172# # table with mapping but without subtables
1173# [[inputs.snmp.table]]
1174# name = "iftable3"
1175# oid = ".1.3.6.1.2.1.31.1.1.1"
1176# # if empty. get all instances
1177# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
1178# # if empty, get all subtables
1179#
1180# # table with both mapping and subtables
1181# [[inputs.snmp.table]]
1182# name = "iftable4"
1183# oid = ".1.3.6.1.2.1.31.1.1.1"
1184# # if empty get all instances
1185# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
1186# # if empty get all subtables
1187# # sub_tables could be not "real subtables"
1188# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
1189
1190
1191# # Read metrics from Microsoft SQL Server
1192# [[inputs.sqlserver]]
1193# ## Specify instances to monitor with a list of connection strings.
1194# ## All connection parameters are optional.
1195# ## By default, the host is localhost, listening on default port, TCP 1433.
# ## For Windows, the user is the currently running AD user (SSO).
1197# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
1198# ## parameters.
1199# # servers = [
1200# # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
1201# # ]
1202
1203
1204# # Inserts sine and cosine waves for demonstration purposes
1205# [[inputs.trig]]
1206# ## Set the amplitude
1207# amplitude = 10.0
1208
1209
1210# # Read Twemproxy stats data
1211# [[inputs.twemproxy]]
1212# ## Twemproxy stats address and port (no scheme)
1213# addr = "localhost:22222"
1214# ## Monitor pool name
1215# pools = ["redis_pool", "mc_pool"]
1216
1217
1218# # A plugin to collect stats from Varnish HTTP Cache
1219# [[inputs.varnish]]
1220# ## The default location of the varnishstat binary can be overridden with:
1221# binary = "/usr/bin/varnishstat"
1222#
# ## By default, telegraf gathers stats for 3 metric points.
1224# ## Setting stats will override the defaults shown below.
1225# ## Glob matching can be used, ie, stats = ["MAIN.*"]
1226# ## stats may also be set to ["*"], which will collect all stats
1227# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
1228
1229
1230# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
1231# [[inputs.zfs]]
1232# ## ZFS kstat path. Ignored on FreeBSD
1233# ## If not specified, then default is:
1234# # kstatPath = "/proc/spl/kstat/zfs"
1235#
# ## By default, telegraf gathers all zfs stats
1237# ## If not specified, then default is:
1238# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
1239#
1240# ## By default, don't gather zpool stats
1241# # poolMetrics = false
1242
1243
1244# # Reads 'mntr' stats from one or many zookeeper servers
1245# [[inputs.zookeeper]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
1247# ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
1248#
1249# ## If no servers are specified, then localhost is used as the host.
1250# ## If no port is specified, 2181 is used
1251# servers = [":2181"]
1252
1253
1254
1255###############################################################################
1256# SERVICE INPUT PLUGINS #
1257###############################################################################
1258
1259# # Read metrics from Kafka topic(s)
1260# [[inputs.kafka_consumer]]
1261# ## topic(s) to consume
1262# topics = ["telegraf"]
1263# ## an array of Zookeeper connection strings
1264# zookeeper_peers = ["localhost:2181"]
1265# ## Zookeeper Chroot
1266# zookeeper_chroot = ""
1267# ## the name of the consumer group
1268# consumer_group = "telegraf_metrics_consumers"
1269# ## Offset (must be either "oldest" or "newest")
1270# offset = "oldest"
1271#
1272# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
1274# ## more about them here:
1275# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1276# data_format = "influx"
1277
1278
1279# # Stream and parse log file(s).
1280# [[inputs.logparser]]
1281# ## Log files to parse.
1282# ## These accept standard unix glob matching rules, but with the addition of
1283# ## ** as a "super asterisk". ie:
1284# ## /var/log/**.log -> recursively find all .log files in /var/log
1285# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
1286# ## /var/log/apache.log -> only tail the apache log file
1287# files = ["/var/log/influxdb/influxdb.log"]
1288# ## Read file from beginning.
1289# from_beginning = false
1290#
1291# ## Parse logstash-style "grok" patterns:
1292# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
1293# [inputs.logparser.grok]
1294# ## This is a list of patterns to check the given log file(s) for.
1295# ## Note that adding patterns here increases processing time. The most
1296# ## efficient configuration is to have one pattern per logparser.
1297# ## Other common built-in patterns are:
1298# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
1299# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
1300# patterns = ["%{INFLUXDB_HTTPD_LOG}"]
# ## Name of the output measurement.
1302# measurement = "influxdb_log"
1303# ## Full path(s) to custom pattern files.
1304# custom_pattern_files = []
1305# ## Custom patterns can also be defined here. Put one pattern per line.
1306# custom_patterns = '''
1307# '''
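#
# ## e.g. a (hypothetical) custom pattern definition, one per line inside the
# ## block above, in the usual grok "NAME regex" form:
# ##   POSTFIX_QUEUEID [0-9A-F]{10,11}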
1308
1309
1310# # Read metrics from MQTT topic(s)
1311# [[inputs.mqtt_consumer]]
1312# servers = ["localhost:1883"]
1313# ## MQTT QoS, must be 0, 1, or 2
1314# qos = 0
1315#
1316# ## Topics to subscribe to
1317# topics = [
1318# "telegraf/host01/cpu",
1319# "telegraf/+/mem",
1320# "sensors/#",
1321# ]
1322#
1323# # if true, messages that can't be delivered while the subscriber is offline
1324# # will be delivered when it comes back (such as on service restart).
1325# # NOTE: if true, client_id MUST be set
1326# persistent_session = false
1327# # If empty, a random client ID will be generated.
1328# client_id = ""
1329#
# ## username and password to connect to the MQTT server.
1331# # username = "telegraf"
1332# # password = "metricsmetricsmetricsmetrics"
1333#
1334# ## Optional SSL Config
1335# # ssl_ca = "/etc/telegraf/ca.pem"
1336# # ssl_cert = "/etc/telegraf/cert.pem"
1337# # ssl_key = "/etc/telegraf/key.pem"
1338# ## Use SSL but skip chain & host verification
1339# # insecure_skip_verify = false
1340#
1341# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
1343# ## more about them here:
1344# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1345# data_format = "influx"
1346
1347
1348# # Read metrics from NATS subject(s)
1349# [[inputs.nats_consumer]]
1350# ## urls of NATS servers
1351# servers = ["nats://localhost:4222"]
1352# ## Use Transport Layer Security
1353# secure = false
1354# ## subject(s) to consume
1355# subjects = ["telegraf"]
1356# ## name a queue group
1357# queue_group = "telegraf_consumers"
1358#
1359# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
1361# ## more about them here:
1362# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1363# data_format = "influx"
1364
1365
1366# # Read NSQ topic for metrics.
1367# [[inputs.nsq_consumer]]
# ## A string representing the NSQD TCP endpoint
1369# server = "localhost:4150"
1370# topic = "telegraf"
1371# channel = "consumer"
1372# max_in_flight = 100
1373#
1374# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
1376# ## more about them here:
1377# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1378# data_format = "influx"
1379
1380
1381# # Statsd Server
1382# [[inputs.statsd]]
1383# ## Address and port to host UDP listener on
1384# service_address = ":8125"
1385# ## Delete gauges every interval (default=false)
1386# delete_gauges = false
1387# ## Delete counters every interval (default=false)
1388# delete_counters = false
1389# ## Delete sets every interval (default=false)
1390# delete_sets = false
1391# ## Delete timings & histograms every interval (default=true)
1392# delete_timings = true
1393# ## Percentiles to calculate for timing & histogram stats
1394# percentiles = [90]
1395#
1396# ## separator to use between elements of a statsd metric
1397# metric_separator = "_"
1398#
1399# ## Parses tags in the datadog statsd format
1400# ## http://docs.datadoghq.com/guides/dogstatsd/
1401# parse_data_dog_tags = false
1402#
1403# ## Statsd data translation templates, more info can be read here:
1404# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
1405# # templates = [
1406# # "cpu.* measurement*"
1407# # ]
1408#
1409# ## Number of UDP messages allowed to queue up, once filled,
1410# ## the statsd server will start dropping packets
1411# allowed_pending_messages = 10000
1412#
1413# ## Number of timing/histogram values to track per-measurement in the
1414# ## calculation of percentiles. Raising this limit increases the accuracy
1415# ## of percentiles but also increases the memory usage and cpu time.
1416# percentile_limit = 1000
1417
1418
1419# # Stream a log file, like the tail -f command
1420# [[inputs.tail]]
1421# ## files to tail.
1422# ## These accept standard unix glob matching rules, but with the addition of
1423# ## ** as a "super asterisk". ie:
1424# ## "/var/log/**.log" -> recursively find all .log files in /var/log
1425# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
1426# ## "/var/log/apache.log" -> just tail the apache log file
1427# ##
1428# ## See https://github.com/gobwas/glob for more examples
1429# ##
1430# files = ["/var/mymetrics.out"]
1431# ## Read file from beginning.
1432# from_beginning = false
1433#
1434# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
1436# ## more about them here:
1437# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1438# data_format = "influx"
1439
1440
1441# # Generic TCP listener
1442# [[inputs.tcp_listener]]
1443# ## Address and port to host TCP listener on
1444# service_address = ":8094"
1445#
1446# ## Number of TCP messages allowed to queue up. Once filled, the
1447# ## TCP listener will start dropping packets.
1448# allowed_pending_messages = 10000
1449#
1450# ## Maximum number of concurrent TCP connections to allow
1451# max_tcp_connections = 250
1452#
1453# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
1455# ## more about them here:
1456# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1457# data_format = "influx"
1458
1459
1460# # Generic UDP listener
1461# [[inputs.udp_listener]]
1462# ## Address and port to host UDP listener on
1463# service_address = ":8092"
1464#
1465# ## Number of UDP messages allowed to queue up. Once filled, the
1466# ## UDP listener will start dropping packets.
1467# allowed_pending_messages = 10000
1468#
1469# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
1471# ## more about them here:
1472# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
1473# data_format = "influx"
1474
1475
1476# # A Webhooks Event collector
1477# [[inputs.webhooks]]
1478# ## Address and port to host Webhook listener on
1479# service_address = ":1619"
1480#
1481# [inputs.webhooks.github]
1482# path = "/github"
1483#
1484# [inputs.webhooks.mandrill]
1485# path = "/mandrill"
1486#
1487# [inputs.webhooks.rollbar]
1488# path = "/rollbar"