[core]
# The home folder for airflow, default is ~/airflow
airflow_home = /home/soumik/airflow

# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
# This path must be absolute
dags_folder = /home/soumik/devzone/github/allsecur/airflow-webcrawler/dags

# The folder where airflow should store its log files
# This path must be absolute
base_log_folder = /home/soumik/airflow/logs

# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elasticsearch.
# Users must supply an Airflow connection id that provides access to the storage
# location. If remote_logging is set to true, see UPDATING.md for additional
# configuration requirements.
remote_logging = False
remote_log_conn_id =
remote_base_log_folder =
encrypt_s3_logs = False
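# A hedged illustration of what S3 remote logging could look like; the connection
# id and bucket path below are hypothetical placeholders, not values from this setup:
# remote_logging = True
# remote_log_conn_id = my_s3_conn
# remote_base_log_folder = s3://my-airflow-logs/airflow/logs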

# Logging level
logging_level = INFO
fab_logging_level = WARN

# Logging class
# Specify the class that defines the logging configuration
# This class has to be on the python classpath
# logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
logging_config_class =

# Log format
# we need to escape the curly braces by adding an additional curly brace
log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s
simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s

# Log filename format
# we need to escape the curly braces by adding an additional curly brace
log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
log_processor_filename_template = {{ filename }}.log

# Resolve the hostname by providing a path to a callable
hostname_callable = socket:getfqdn
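# A hedged alternative, assuming the short hostname is preferred over the FQDN;
# the callable uses the same "module:attribute" form as above:
# hostname_callable = socket:gethostname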

# Default timezone in case supplied date times are naive
# can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam)
default_timezone = utc

# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor, DaskExecutor
executor = SequentialExecutor
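# Hedged note: SequentialExecutor runs one task at a time and is the only
# executor that works with the sqlite connection configured below; switching to
# LocalExecutor or CeleryExecutor also requires pointing sql_alchemy_conn at a
# MySQL or PostgreSQL database.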

# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engines; more information
# on their website
sql_alchemy_conn = sqlite:////home/soumik/airflow/airflow.db
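# A hedged example of a PostgreSQL connection string; the user, password and
# database name below are hypothetical placeholders:
# sql_alchemy_conn = postgresql+psycopg2://airflow_user:airflow_pass@localhost:5432/airflow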

# If SqlAlchemy should pool database connections.
sql_alchemy_pool_enabled = True

# The SqlAlchemy pool size is the maximum number of database connections
# in the pool. 0 indicates no limit.
sql_alchemy_pool_size = 5

# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite. If the number of DB connections is ever exceeded,
# a lower config value will allow the system to recover faster.
sql_alchemy_pool_recycle = 1800

# How many seconds to retry re-establishing a DB connection after
# disconnects. Setting this to 0 disables retries.
sql_alchemy_reconnect_timeout = 300

# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32

# The number of task instances allowed to run concurrently per DAG by the scheduler
dag_concurrency = 16

# Are DAGs paused by default at creation
dags_are_paused_at_creation = True

# When not using pools, tasks are run in the "default pool",
# whose size is guided by this config element
non_pooled_task_slot_count = 128

# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16

# Whether to load the examples that ship with Airflow. It's good for
# getting started, but you probably want to set this to False in a production
# environment
load_examples = False

# Where your Airflow plugins are stored
plugins_folder = /home/soumik/airflow/plugins

# Secret key to save connection passwords in the db
fernet_key = YMZVRUbN3RW79mFr2EXfW3LYkvvAkWhKgnnGxEOYFFQ=
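# A hedged note on generating a new Fernet key, assuming the cryptography
# package (an Airflow dependency) is available on this machine:
# python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"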

# Whether to disable pickling dags
donot_pickle = False

# How long before timing out a python file import while filling the DagBag
dagbag_import_timeout = 30

# The class to use for running task instances in a subprocess
task_runner = BashTaskRunner

# If set, tasks without a `run_as_user` argument will be run with this user
# Can be used to de-elevate a sudo user running Airflow when executing tasks
default_impersonation =

# What security module to use (for example kerberos):
security =

# If set to False, this enables some insecure features like Charts and Ad Hoc Queries.
# In 2.0 it will default to True.
secure_mode = False

# Turn unit test mode on (overwrites many configuration options with test
# values at runtime)
unit_test_mode = False

# Name of handler to read task instance logs.
# Defaults to the task handler.
task_log_reader = task

# Whether to enable pickling for xcom (note that this is insecure and allows for
# RCE exploits). This will be deprecated in Airflow 2.0 (it will be forced to False).
enable_xcom_pickling = True

# When a task is killed forcefully, this is the amount of time in seconds that
# it has to clean up after it is sent a SIGTERM, before it is SIGKILLed
killed_task_cleanup_time = 60

# Whether to override params with dag_run.conf. If you pass some key-value pairs through `airflow backfill -c` or
# `airflow trigger_dag -c`, the key-value pairs will override the existing ones in params.
dag_run_conf_overrides_params = False
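# A hedged illustration of the behaviour described above; the DAG id and key are
# hypothetical placeholders. With this option set to True, running
#   airflow trigger_dag example_dag -c '{"target_env": "staging"}'
# would make params["target_env"] resolve to "staging" in templated fields.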

[cli]
# How the CLI should access the API. The LocalClient will use the
# database directly, while the json_client will use the API running on the
# webserver
api_client = airflow.api.client.local_client

# If you set web_server_url_prefix, do NOT forget to append it here, ex:
# endpoint_url = http://localhost:8080/myroot
# so the API will look like: http://localhost:8080/myroot/api/experimental/...
endpoint_url = http://localhost:8080

[api]
# How to authenticate users of the API
auth_backend = airflow.api.auth.backend.default

[lineage]
# What lineage backend to use
backend =

[atlas]
sasl_enabled = False
host =
port = 21000
username =
password =

[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via `default_args`
default_owner = Airflow
default_cpus = 1
default_ram = 512
default_disk = 512
default_gpus = 0

[hive]
# Default mapreduce queue for HiveOperator tasks
default_hive_mapred_queue =

[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080

# The ip specified when starting the web server
web_server_host = 0.0.0.0

# The port on which to run the web server
web_server_port = 8080

# Paths to the SSL certificate and key for the web server. When both are
# provided SSL will be enabled. This does not change the web server port.
web_server_ssl_cert =
web_server_ssl_key =

# Number of seconds the webserver waits before killing a gunicorn master that doesn't respond
web_server_master_timeout = 120

# Number of seconds the gunicorn webserver waits before timing out on a worker
web_server_worker_timeout = 120

# Number of workers to refresh at a time. When set to 0, worker refresh is
# disabled. When nonzero, airflow periodically refreshes webserver workers by
# bringing up new ones and killing old ones.
worker_refresh_batch_size = 1

# Number of seconds to wait before refreshing a batch of workers.
worker_refresh_interval = 30

# Secret key used to run your flask app
secret_key = temporary_key

# Number of workers to run the Gunicorn web server
workers = 4

# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent
worker_class = sync

# Log files for the gunicorn webserver. '-' means log to stderr.
access_logfile = -
error_logfile = -

# Expose the configuration file in the web server
expose_config = False

# Set to true to turn on authentication:
# https://airflow.incubator.apache.org/security.html#web-authentication
authenticate = True
auth_backend = airflow.contrib.auth.backends.password_auth
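# A hedged sketch (kept as a comment so this file stays valid) of creating a web
# login for the password_auth backend above, assuming the Airflow 1.x contrib
# API; the username, email and password are hypothetical placeholders:
#   from airflow import models, settings
#   from airflow.contrib.auth.backends.password_auth import PasswordUser
#   user = PasswordUser(models.User())
#   user.username = 'admin'
#   user.email = 'admin@example.com'
#   user.password = 'changeme'
#   session = settings.Session()
#   session.add(user)
#   session.commit()
#   session.close()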

# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False

# Filtering mode. Choices include user (default) and ldapgroup.
# Ldap group filtering requires using the ldap backend
#
# Note that the ldap server needs the "memberOf" overlay to be set up
# in order to use the ldapgroup mode.
owner_mode = user

# Default DAG view. Valid values are:
# tree, graph, duration, gantt, landing_times
dag_default_view = graph

# Default DAG orientation. Valid values are:
# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
dag_orientation = LR

# Puts the webserver in demonstration mode; blurs the names of Operators for
# privacy.
demo_mode = False

# The amount of time (in secs) the webserver will wait for the initial handshake
# while fetching logs from another worker machine
log_fetch_timeout_sec = 5

# By default, the webserver shows paused DAGs. Flip this to hide paused
# DAGs by default
hide_paused_dags_by_default = False

# Consistent page size across all listing views in the UI
page_size = 100

# Use FAB-based webserver with RBAC feature
rbac = False

# Define the color of the navigation bar
navbar_color = #007A87

# Default number of dag runs to show in the UI
default_dag_run_display_number = 25


[email]
email_backend = airflow.utils.email.send_email_smtp


[smtp]
# If you want airflow to send emails on retries or failure, and you want to use
# the airflow.utils.email.send_email_smtp function, you have to configure an
# SMTP server here
smtp_host = localhost
smtp_starttls = True
smtp_ssl = False
# Uncomment and set the user/pass settings if you want to use SMTP AUTH
# smtp_user = airflow
# smtp_password = airflow
smtp_port = 25
smtp_mail_from = airflow@example.com


[celery]
# This section only applies if you are using the CeleryExecutor in
# the [core] section above

# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor

# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
worker_concurrency = 16

# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the worker's local log files to the main airflow
# web server, which then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and reachable
# from the main web server so it can connect to the workers.
worker_log_server_port = 8793

# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#broker-settings
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
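# A hedged alternative using Redis as the broker, assuming a local Redis on its
# default port (the db index 0 is a placeholder):
# broker_url = redis://localhost:6379/0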

# The Celery result_backend. When a job finishes, it needs to update the
# metadata of the job. Therefore it will post a message on a message bus,
# or insert it into a database (depending on the backend)
# This status is used by the scheduler to update the state of the task
# The use of a database is highly recommended
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings
result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
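# A hedged PostgreSQL equivalent of the result backend above; the credentials
# and database name are hypothetical placeholders:
# result_backend = db+postgresql://airflow_user:airflow_pass@localhost:5432/airflow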

# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it, `airflow flower`. This defines the IP that Celery Flower runs on
flower_host = 0.0.0.0

# The root URL for Flower
# Ex: flower_url_prefix = /flower
flower_url_prefix =

# This defines the port that Celery Flower runs on
flower_port = 5555

# Default queue that tasks get assigned to and that workers listen on.
default_queue = default

# Import path for celery configuration options
celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG

# In case of using SSL
ssl_active = False
ssl_key =
ssl_cert =
ssl_cacert =

[celery_broker_transport_options]
# This section is for specifying options which can be passed to the
# underlying celery broker transport. See:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options

# The visibility timeout defines the number of seconds to wait for the worker
# to acknowledge the task before the message is redelivered to another worker.
# Make sure to increase the visibility timeout to match the time of the longest
# ETA you're planning to use.
#
# visibility_timeout is only supported for Redis and SQS celery brokers.
# See:
# http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options
#
# visibility_timeout = 21600

[dask]
# This section only applies if you are using the DaskExecutor in
# the [core] section above

# The IP address and port of the Dask cluster's scheduler.
cluster_address = 127.0.0.1:8786
# TLS/SSL settings to access a secured Dask scheduler.
tls_ca =
tls_cert =
tls_key =


[scheduler]
# Task instances listen for an external kill signal (when you clear tasks
# from the CLI or the UI); this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5

# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5

# After how much time (in seconds) the scheduler should terminate;
# -1 indicates to run continuously (see also num_runs)
run_duration = -1

# After how much time new DAGs should be picked up from the filesystem
min_file_process_interval = 0

# How many seconds to wait between file-parsing loops to prevent the logs from being spammed.
min_file_parsing_loop_time = 1

# How often (in seconds) to scan the DAGs directory for new files
dag_dir_list_interval = 300

# How often should stats be printed to the logs
print_stats_interval = 30

# Where the scheduler's DAG-processing (child process) logs are written
child_process_log_directory = /home/soumik/airflow/logs/scheduler

# Local task jobs periodically heartbeat to the DB. If the job has
# not heartbeated in this many seconds, the scheduler will mark the
# associated task instance as failed and will re-schedule the task.
scheduler_zombie_task_threshold = 300

# Turn off scheduler catchup by setting this to False.
# Command-line backfills still work, but the scheduler
# will not do catchup if this is False. It can also be
# set on a per-DAG basis in the DAG definition (catchup).
catchup_by_default = True

# This changes the batch size of queries in the scheduling main loop.
# If this is too high, SQL query performance may be impacted by one
# or more of the following:
# - reversion to full table scan
# - complexity of query predicate
# - excessive locking
#
# Additionally, you may hit the maximum allowable query length for your db.
#
# Set this to 0 for no limit (not advised)
max_tis_per_query = 512

# Statsd (https://github.com/etsy/statsd) integration settings
statsd_on = False
statsd_host = localhost
statsd_port = 8125
statsd_prefix = airflow

# The scheduler can run multiple threads in parallel to schedule dags.
# This defines how many threads will run.
max_threads = 2

authenticate = False

[ldap]
# set this to ldaps://<your.ldap.server>:<port>
uri =
user_filter = objectClass=*
user_name_attr = uid
group_member_attr = memberOf
superuser_filter =
data_profiler_filter =
bind_user = cn=Manager,dc=example,dc=com
bind_password = insecure
basedn = dc=example,dc=com
cacert = /etc/ca/ldap_ca.crt
search_scope = LEVEL

[mesos]
# Mesos master address which MesosExecutor will connect to.
master = localhost:5050

# The framework name which Airflow scheduler will register itself as on mesos
framework_name = Airflow

# Number of cpu cores required for running one task instance using the
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_cpu = 1

# Memory in MB required for running one task instance using the
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_memory = 256

# Enable framework checkpointing for mesos
# See http://mesos.apache.org/documentation/latest/slave-recovery/
checkpoint = False

# Failover timeout in milliseconds.
# When checkpointing is enabled and this option is set, Mesos waits until the
# configured timeout for the MesosExecutor framework to re-register after a
# failover. Mesos shuts down running tasks if the MesosExecutor framework
# fails to re-register within this timeframe.
# failover_timeout = 604800

# Enable framework authentication for mesos
# See http://mesos.apache.org/documentation/latest/configuration/
authenticate = False

# Mesos credentials, if authentication is enabled
# default_principal = admin
# default_secret = admin

# Optional Docker Image to run on slave before running the command
# This image should be accessible from the mesos slave, i.e. the mesos slave
# should be able to pull this docker image before executing the command.
# docker_image_slave = puckel/docker-airflow

[kerberos]
ccache = /tmp/airflow_krb5_ccache
# gets augmented with fqdn
principal = airflow
reinit_frequency = 3600
kinit_path = kinit
keytab = airflow.keytab


[github_enterprise]
api_rev = v3

[admin]
# Hide sensitive variable fields in the UI when set to True
hide_sensitive_variable_fields = True

[elasticsearch]
elasticsearch_host =
# we need to escape the curly braces by adding an additional curly brace
elasticsearch_log_id_template = {dag_id}-{task_id}-{execution_date}-{try_number}
elasticsearch_end_of_log_mark = end_of_log

[kubernetes]
# The repository and tag of the Kubernetes image for the worker to run
worker_container_repository =
worker_container_tag =

# If True (default), worker pods will be deleted upon termination
delete_worker_pods = True

# The Kubernetes namespace where airflow workers should be created. Defaults to `default`
namespace = default

# The name of the Kubernetes ConfigMap containing the Airflow configuration (this file)
airflow_configmap =

# For either git sync or volume mounted DAGs, the worker will look in this subpath for DAGs
dags_volume_subpath =

# For DAGs mounted via a volume claim (mutually exclusive with git-sync)
dags_volume_claim =

# For volume mounted logs, the worker will look in this subpath for logs
logs_volume_subpath =

# A shared volume claim for the logs
logs_volume_claim =

# Git credentials and repository for DAGs mounted via Git (mutually exclusive with volume claim)
git_repo =
git_branch =
git_user =
git_password =
git_subpath =
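# A hedged example of pointing the workers at a DAG repository via git sync; the
# repository URL, branch and subpath below are hypothetical placeholders:
# git_repo = https://github.com/example-org/airflow-dags.git
# git_branch = master
# git_subpath = dags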

# For cloning DAGs from git repositories into volumes: https://github.com/kubernetes/git-sync
git_sync_container_repository = gcr.io/google-containers/git-sync-amd64
git_sync_container_tag = v2.0.5
git_sync_init_container_name = git-sync-clone

# The name of the Kubernetes service account to be associated with airflow workers, if any.
# Service accounts are required for workers that require access to secrets or cluster resources.
# See the Kubernetes RBAC documentation for more:
# https://kubernetes.io/docs/admin/authorization/rbac/
worker_service_account_name =

# Any image pull secrets to be given to worker pods. If more than one secret is
# required, provide a comma separated list: secret_a,secret_b
image_pull_secrets =

# GCP Service Account Keys to be provided to tasks run on Kubernetes Executors
# Should be supplied in the format: key-name-1:key-path-1,key-name-2:key-path-2
gcp_service_account_keys =
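# A hedged example using the format described above; the key names and paths
# are hypothetical placeholders:
# gcp_service_account_keys = bq-key:/var/secrets/bq.json,gcs-key:/var/secrets/gcs.json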

# Use the service account kubernetes gives to pods to connect to the kubernetes cluster.
# It's intended for clients that expect to be running inside a pod running on kubernetes.
# It will raise an exception if called from a process not running in a kubernetes environment.
in_cluster = True

[kubernetes_secrets]
# The scheduler mounts the following secrets into your workers as they are launched by the
# scheduler. You may define as many secrets as needed and the kubernetes launcher will parse the
# defined secrets and mount them as secret environment variables in the launched workers.
# Secrets in this section are defined as follows
# <environment_variable_mount> = <kubernetes_secret_object>:<kubernetes_secret_key>
#
# For example if you wanted to mount a kubernetes secret key named `postgres_password` from the
# kubernetes secret object `airflow-secret` as the environment variable `POSTGRES_PASSWORD` into
# your workers, you would use the following format:
# POSTGRES_PASSWORD = airflow-secret:postgres_password
#
# Additionally, you may override worker airflow settings with the AIRFLOW__<SECTION>__<KEY>
# format, as supported by airflow normally.
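#
# A hedged example of the override form described above, assuming a hypothetical
# kubernetes secret object `airflow-secret` with a key named `sql_alchemy_conn`:
# AIRFLOW__CORE__SQL_ALCHEMY_CONN = airflow-secret:sql_alchemy_conn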