---
###################### Filebeat Configuration Example #########################

# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

# ============================== Filebeat inputs ===============================

filebeat.inputs:

# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.

# filestream is an input for collecting log messages from files.
- type: filestream

  # Unique ID among all inputs, an ID is required.
  id: my-filestream-id

  # Change to true to enable this input configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/*.log
    - /var/log/*/*.log
    # NOTE(review): the former entry '- /var/log/*/' matched directories, not
    # files (trailing slash), so it was dropped; filestream paths take file
    # globs. Re-add a file pattern here if more coverage is needed.
    #- c:\programdata\elasticsearch\logs\*

  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #prospector.scanner.exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1

# =============================== Internal queue ===============================

# Memory queue tuning. This is a top-level setting; it was previously spliced
# into the middle of the filestream input above, cutting that block in two.
queue.mem:
  events: 8000
  flush.min_events: 1000
  flush.timeout: 1s
# ============================== Filebeat modules ==============================

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

# ======================= Elasticsearch template setting =======================

setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false

# ================================== General ===================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging

# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

# =================================== Kibana ===================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"

  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:

# =============================== Elastic Cloud ================================

# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

# ================================== Outputs ===================================

# Configure what output to use when sending the data collected by the beat.

# ---------------------------- Elasticsearch Output ----------------------------
output.elasticsearch:
  # Array of hosts to connect to. This points at a local ingest proxy endpoint
  # rather than an Elasticsearch node directly.
  hosts: ["localhost:8080/api/v1/ingest/elastic-bulk"]
  username: anything
  # SECURITY(review): plaintext credential committed in the config file — move
  # it to the Filebeat keystore (`filebeat keystore add`) or reference an
  # environment variable (`${ES_PASSWORD}`) instead. Quoted so YAML tooling
  # never re-types or reformats the value.
  password: "cd2e4448-711d-4bf7-9880-103973964ed6"
  compression_level: 5
  bulk_max_size: 200
  worker: 5

  # Protocol - either `http` (default) or `https`.
  #protocol: "https"

  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  #username: "elastic"
  #password: "changeme"

setup.ilm.enabled: false
#output.elasticsearch.allow_older_versions: true

# ------------------------------ Logstash Output -------------------------------
#output.logstash:
  # The Logstash hosts
  #hosts: ["localhost:5044"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

# ================================= Processors =================================
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~

# ================================== Logging ===================================
# NOTE(review): stray commented duplicates of `setup.ilm.enabled` and
# `output.elasticsearch.allow_older_versions` were removed from under this
# header; the active settings live in the Outputs section above.
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]

# ============================= X-Pack Monitoring ==============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#monitoring.enabled: false

# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:

# ============================== Instrumentation ===============================

# Instrumentation support for the filebeat.
#instrumentation:
  # Set to true to enable instrumentation of filebeat.
  #enabled: false

  # Environment in which filebeat is running on (eg: staging, production, etc.)
  #environment: ""

  # APM Server hosts to report instrumentation results to.
  #hosts:
  #  - http://localhost:8200

  # API Key for the APM Server(s).
  # If api_key is set then secret_token will be ignored.
  #api_key:

  # Secret token for the APM Server(s).
  #secret_token:


# ================================= Migration ==================================

# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true