[core]
# The home folder for airflow, default is ~/airflow
airflow_home = /airflow
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
# This path must be absolute
dags_folder = /airflow/dags

# The folder where airflow should store its log files
# This path must be absolute
base_log_folder = /airflow/logs

# Airflow can store logs remotely in AWS S3 or Google Cloud Storage. Users
# must supply an Airflow connection id that provides access to the storage
# location.
remote_log_conn_id = datalake_gcp_connection
encrypt_s3_logs = False
# Logging level
logging_level = INFO
# Logging class
# Specify the class that defines the logging configuration
# This class has to be on the python classpath
# logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
logging_config_class = log_config.LOGGING_CONFIG

# Log format
log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s
simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s

# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor, DaskExecutor
executor = SequentialExecutor
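# Note that SequentialExecutor runs one task instance at a time and is the
# only executor compatible with the sqlite metadata db below; the parallelism
# settings further down have no effect with it. To scale out, a server-based
# db and another executor would be needed, e.g.:
# executor = CeleryExecutor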

# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engines; see the
# SqlAlchemy website for more information.
sql_alchemy_conn = sqlite:////airflow/airflow.db
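# For anything beyond local testing, point this at a server-based database,
# for example (placeholder credentials):
# sql_alchemy_conn = postgresql+psycopg2://airflow:airflow@localhost:5432/airflow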

# The SqlAlchemy pool size is the maximum number of database connections
# in the pool.
sql_alchemy_pool_size = 5

# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite.
sql_alchemy_pool_recycle = 3600

# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 64

# The number of task instances allowed to run concurrently per DAG
dag_concurrency = 32

# Are DAGs paused by default at creation
dags_are_paused_at_creation = True

# When not using pools, tasks are run in the "default pool",
# whose size is guided by this config element
non_pooled_task_slot_count = 128

# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16

# Whether to load the example DAGs that ship with Airflow. They're useful
# for getting started, but you probably want to set this to False in a
# production environment
load_examples = False

# Where your Airflow plugins are stored
plugins_folder = /airflow/plugins

# Secret key to save connection passwords in the db
fernet_key = gX7DpsHZ9dBnMnzVPvD_WeaCM9FE8DXtncwEZBNT_j0=
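# Treat this value as a secret: anyone holding it can decrypt connection
# passwords stored in the db. A fresh key can be generated with the
# cryptography package, e.g.:
# python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"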

# Whether to disable pickling dags
donot_pickle = False

# How long before timing out a python file import while filling the DagBag
dagbag_import_timeout = 30

# The class to use for running task instances in a subprocess
task_runner = BashTaskRunner

# If set, tasks without a `run_as_user` argument will be run with this user
# Can be used to de-elevate a sudo user running Airflow when executing tasks
default_impersonation =

# What security module to use (for example kerberos):
security =

# Turn unit test mode on (overwrites many configuration options with test
# values at runtime)
unit_test_mode = False

# Name of handler to read task instance logs.
# Defaults to the file task handler; gcs.task is the GCS handler registered
# by the logging config class set above.
task_log_reader = gcs.task
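# The log_config module referenced by logging_config_class above is assumed
# to live on the PYTHONPATH (e.g. $AIRFLOW_HOME/config/log_config.py) and to
# define a LOGGING_CONFIG dict that registers the gcs.task handler. A minimal
# sketch, assuming a copy of Airflow's default logging config and a
# hypothetical bucket gs://my-bucket/logs:
#
#   import copy
#   from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
#
#   LOGGING_CONFIG = copy.deepcopy(DEFAULT_LOGGING_CONFIG)
#   LOGGING_CONFIG['handlers']['gcs.task'] = {
#       'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
#       'formatter': 'airflow.task',
#       'base_log_folder': '/airflow/logs',
#       'gcs_log_folder': 'gs://my-bucket/logs',
#   }
#   LOGGING_CONFIG['loggers']['airflow.task']['handlers'] = ['gcs.task']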

# Whether to enable pickling for xcom (note that this is insecure and allows for
# RCE exploits). This will be deprecated in Airflow 2.0 (it will be forced to False).
enable_xcom_pickling = True

# When a task is killed forcefully, this is the amount of time in seconds that
# it has to clean up after it is sent a SIGTERM, before it is sent a SIGKILL
killed_task_cleanup_time = 60

[cli]
# In what way should the cli access the API. The LocalClient will use the
# database directly, while the json_client will use the api running on the
# webserver
api_client = airflow.api.client.json_client
endpoint_url = https://localhost:443

[api]
# How to authenticate users of the API
auth_backend = airflow.api.auth.backend.default

[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via `default_args`
default_owner = Airflow
default_cpus = 1
default_ram = 512
default_disk = 512
default_gpus = 0


[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server. The https scheme
# matches the SSL cert/key configured below.
base_url = https://localhost:443

authenticate = False
auth_backend = airflow.contrib.auth.backends.password_auth
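# When authenticate is flipped to True with the password_auth backend, users
# must first exist in the metadata db. A minimal sketch for creating one from
# a Python shell (the 'admin' values are placeholders):
#
#   from airflow import models, settings
#   from airflow.contrib.auth.backends.password_auth import PasswordUser
#
#   user = PasswordUser(models.User())
#   user.username = 'admin'
#   user.email = 'admin@example.com'
#   user.password = 'admin'
#   session = settings.Session()
#   session.add(user)
#   session.commit()
#   session.close()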

# The ip specified when starting the web server
web_server_host = 0.0.0.0

# The port on which to run the web server
web_server_port = 443

# Paths to the SSL certificate and key for the web server. When both are
# provided SSL will be enabled. This does not change the web server port.
web_server_ssl_cert = /home/certs/rootCA.pem
web_server_ssl_key = /home/certs/rootCA.key
# Number of seconds the gunicorn webserver waits before timing out on a worker
web_server_worker_timeout = 120

# Number of workers to refresh at a time. When set to 0, worker refresh is
# disabled. When nonzero, airflow periodically refreshes webserver workers by
# bringing up new ones and killing old ones.
worker_refresh_batch_size = 1

# Number of seconds to wait before refreshing a batch of workers.
worker_refresh_interval = 30

# Secret key used to run your flask app
secret_key = temporary_key

# Number of workers to run the Gunicorn web server
workers = 4

# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent
worker_class = sync

# Log files for the gunicorn webserver. '-' means log to stderr.
access_logfile = -
error_logfile = -

# Expose the configuration file in the web server
expose_config = False

# Set to true to turn on authentication:
# http://pythonhosted.org/airflow/security.html#web-authentication
# authenticate = False

# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False

# Filtering mode. Choices include user (default) and ldapgroup.
# Ldap group filtering requires using the ldap backend
#
# Note that the ldap server needs the "memberOf" overlay to be set up
# in order to use the ldapgroup mode.
owner_mode = user

# Default DAG view. Valid values are:
# tree, graph, duration, gantt, landing_times
dag_default_view = graph

# Default DAG orientation. Valid values are:
# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
dag_orientation = LR

# Puts the webserver in demonstration mode; blurs the names of Operators for
# privacy.
demo_mode = False

# The amount of time (in secs) webserver will wait for initial handshake
# while fetching logs from other worker machine
log_fetch_timeout_sec = 5

# By default, the webserver shows paused DAGs. Flip this to hide paused
# DAGs by default
hide_paused_dags_by_default = True

# Consistent page size across all listing views in the UI
page_size = 100

[email]
email_backend = airflow.utils.email.send_email_smtp


[smtp]
# If you want airflow to send emails on retries or failures, and you want to
# use the airflow.utils.email.send_email_smtp function, you have to configure
# an smtp server here
smtp_host = localhost
smtp_starttls = True
smtp_ssl = False
# Uncomment and set the user/pass settings if you want to use SMTP AUTH
# smtp_user = airflow
# smtp_password = airflow
smtp_port = 25
smtp_mail_from = airflow@example.com


[celery]
# This section only applies if you are using the CeleryExecutor in
# the [core] section above

# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor

# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
celeryd_concurrency = 16

# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the workers local log files to the airflow main
# web server, which then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused and must be
# reachable from the main web server, which connects to the workers on it.
worker_log_server_port = 8793

# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow

# The backend Celery uses to store task state and results. Refer to the
# Celery documentation for supported backends and URL formats.
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
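# With a Redis broker, for example, these might look like (placeholder
# host and db numbers):
# broker_url = redis://localhost:6379/0
# celery_result_backend = redis://localhost:6379/1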

# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it: `airflow flower`. This defines the IP that Celery Flower runs on
flower_host = 0.0.0.0

# This defines the port that Celery Flower runs on
flower_port = 5555

# Default queue that tasks get assigned to and that workers listen on.
default_queue = default

# Import path for celery configuration options
celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG
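# To customize Celery, this can point at your own dict instead, e.g.
# (hypothetical module my_celery_config.py on the PYTHONPATH):
#
#   from airflow.config_templates.default_celery import DEFAULT_CELERY_CONFIG
#
#   CELERY_CONFIG = dict(DEFAULT_CELERY_CONFIG)
#   CELERY_CONFIG['worker_prefetch_multiplier'] = 1
#
# celery_config_options = my_celery_config.CELERY_CONFIG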

[dask]
# This section only applies if you are using the DaskExecutor in
# the [core] section above

# The IP address and port of the Dask cluster's scheduler.
cluster_address = 127.0.0.1:8786

[scheduler]
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5

# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5

# After how much time (in seconds) the scheduler should terminate.
# -1 indicates to run continuously (see also num_runs)
run_duration = -1

# After how much time new DAGs should be picked up from the filesystem
min_file_process_interval = 0

# How often (in seconds) to scan the DAGs directory for new files
dag_dir_list_interval = 300

# How often should stats be printed to the logs
print_stats_interval = 30

# Directory where the scheduler writes its child process logs
child_process_log_directory = /airflow/logs/scheduler

# Local task jobs periodically heartbeat to the DB. If the job has
# not heartbeat in this many seconds, the scheduler will mark the
# associated task instance as failed and will re-schedule the task.
scheduler_zombie_task_threshold = 300

# Turn off scheduler catchup by setting this to False.
# Default behavior is unchanged and Command Line Backfills still work,
# but the scheduler will not do scheduler catchup if this is False.
# It can also be set on a per-DAG basis in the DAG definition (catchup),
# as sketched below.
catchup_by_default = True
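# Per-DAG override, for illustration (hypothetical DAG):
#
#   from datetime import datetime
#   from airflow import DAG
#
#   dag = DAG('no_catchup_example',
#             start_date=datetime(2018, 1, 1),
#             schedule_interval='@daily',
#             catchup=False)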

# This changes the batch size of queries in the scheduling main loop.
# This depends on query length limits and how long you are willing to hold locks.
# 0 for no limit
max_tis_per_query = 0

# Statsd (https://github.com/etsy/statsd) integration settings
statsd_on = False
statsd_host = localhost
statsd_port = 8125
statsd_prefix = airflow

# The scheduler can run multiple threads in parallel to schedule dags.
# This defines how many threads will run.
max_threads = 2

authenticate = False

[ldap]
# set this to ldaps://<your.ldap.server>:<port>
uri =
user_filter = objectClass=*
user_name_attr = uid
group_member_attr = memberOf
superuser_filter =
data_profiler_filter =
bind_user = cn=Manager,dc=example,dc=com
bind_password = insecure
basedn = dc=example,dc=com
cacert = /etc/ca/ldap_ca.crt
search_scope = LEVEL

[mesos]
# Mesos master address which MesosExecutor will connect to.
master = localhost:5050

# The framework name which Airflow scheduler will register itself as on mesos
framework_name = Airflow

# Number of cpu cores required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_cpu = 1

# Memory in MB required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_memory = 256

# Enable framework checkpointing for mesos
# See http://mesos.apache.org/documentation/latest/slave-recovery/
checkpoint = False

# Failover timeout in milliseconds.
# When checkpointing is enabled and this option is set, Mesos waits until
# the configured timeout for the MesosExecutor framework to re-register
# after a failover. Mesos shuts down running tasks if the MesosExecutor
# framework fails to re-register within this timeframe.
# failover_timeout = 604800

# Enable framework authentication for mesos
# See http://mesos.apache.org/documentation/latest/configuration/
authenticate = False

# Mesos credentials, if authentication is enabled
# default_principal = admin
# default_secret = admin


[kerberos]
ccache = /tmp/airflow_krb5_ccache
# gets augmented with fqdn
principal = airflow
reinit_frequency = 3600
kinit_path = kinit
keytab = airflow.keytab


[github_enterprise]
api_rev = v3


[admin]
# UI to hide sensitive variable fields when set to True
hide_sensitive_variable_fields = True