# A list of host/port pairs to use for establishing the initial connection to the Kafka cluster.
bootstrap.servers=pkc-4nym6.us-east-1.aws.confluent.cloud:9092

# Unique name for the cluster, used in forming the Connect cluster group. Note that this must not conflict with consumer group IDs.
group.id=connect-cluster

# The converters specify the format of data in Kafka and how to translate it into Connect data. Every Connect user will
# need to configure these based on the format they want their data in when loaded from or stored into Kafka.
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter

# Converter-specific settings can be passed in by prefixing the Converter's setting with the converter we want to apply
# it to.
key.converter.schemas.enable=false
value.converter.schemas.enable=false

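# If a Schema Registry is available, the JSON converter above could be swapped for Avro.
# A hedged sketch, commented out; the Schema Registry endpoint and credentials below are
# placeholders, not values from this cluster (key.converter.* would mirror these settings):
#value.converter=io.confluent.connect.avro.AvroConverter
#value.converter.schema.registry.url=https://<SR ENDPOINT>
#value.converter.basic.auth.credentials.source=USER_INFO
#value.converter.schema.registry.basic.auth.user.info=<SR API KEY>:<SR API SECRET>
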
# Topic to use for storing offsets. This topic should have many partitions and be replicated and compacted.
# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
# the topic before starting Kafka Connect if a specific topic configuration is needed.
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
offset.storage.topic=connect-offsets
offset.storage.replication.factor=1
#offset.storage.partitions=25

# Topic to use for storing connector and task configurations; note that this should be a single-partition, highly replicated,
# and compacted topic. Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
# the topic before starting Kafka Connect if a specific topic configuration is needed.
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
config.storage.topic=connect-configs
config.storage.replication.factor=1

# Topic to use for storing statuses. This topic can have multiple partitions and should be replicated and compacted.
# Kafka Connect will attempt to create the topic automatically when needed, but you can always manually create
# the topic before starting Kafka Connect if a specific topic configuration is needed.
# Most users will want to use the built-in default replication factor of 3 or in some cases even specify a larger value.
# Since this means there must be at least as many brokers as the maximum replication factor used, we'd like to be able
# to run this example on a single-broker cluster and so here we instead set the replication factor to 1.
status.storage.topic=connect-status
status.storage.replication.factor=1
#status.storage.partitions=5

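# The three internal topics above can also be created up front when a specific configuration is
# needed. A hedged sketch using the kafka-topics CLI (replication factor 3 assumed for a
# multi-broker cluster; client.properties is a placeholder file carrying the same SASL_SSL
# settings as the worker section below):
#   kafka-topics --bootstrap-server pkc-4nym6.us-east-1.aws.confluent.cloud:9092 --command-config client.properties \
#     --create --topic connect-offsets --partitions 25 --replication-factor 3 --config cleanup.policy=compact
#   kafka-topics --bootstrap-server pkc-4nym6.us-east-1.aws.confluent.cloud:9092 --command-config client.properties \
#     --create --topic connect-configs --partitions 1 --replication-factor 3 --config cleanup.policy=compact
#   kafka-topics --bootstrap-server pkc-4nym6.us-east-1.aws.confluent.cloud:9092 --command-config client.properties \
#     --create --topic connect-status --partitions 5 --replication-factor 3 --config cleanup.policy=compact
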
# Flush much faster than normal, which is useful for testing/debugging.
offset.flush.interval.ms=10000

# These are provided to inform the user about the presence of the REST host and port configs.
# Hostname & Port for the REST API to listen on. If this is set, it will bind to the interface used to listen to requests.
#rest.port=8083
rest.host.name=10.132.29.244

# The Hostname & Port that will be given out to other workers to connect to, i.e. URLs that are routable from other servers.
rest.advertised.host.name=10.132.29.244
#rest.advertised.port=

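# Once the worker is running, the REST API configured above can be used to manage connectors.
# A hedged example against the listener defined here:
#   curl http://10.132.29.244:8083/connectors
#   curl http://10.132.29.244:8083/connector-plugins
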
# Set to a list of filesystem paths separated by commas (,) to enable class loading isolation for plugins
# (connectors, converters, transformations). The list should consist of top-level directories that include
# any combination of:
# a) directories immediately containing jars with plugins and their dependencies
# b) uber-jars with plugins and their dependencies
# c) directories immediately containing the package directory structure of classes of plugins and their dependencies
# Examples:
# plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins,/opt/connectors
plugin.path=/usr/share/java,/usr/share/confluent-hub-components

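# Connectors can be installed into the second path above with the Confluent Hub client, e.g.
# (a hedged example; the datagen connector is only an illustration, not part of this setup):
#   confluent-hub install confluentinc/kafka-connect-datagen:latest
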
# Confluent Cloud connection for the Connect worker's own internal clients.
ssl.endpoint.identification.algorithm=https
sasl.mechanism=PLAIN
request.timeout.ms=20000
retry.backoff.ms=500
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
  username="<API KEY>" password="<API SECRET>";
security.protocol=SASL_SSL

# Confluent Cloud connection for the connectors' consumers (sink connectors).
consumer.ssl.endpoint.identification.algorithm=https
consumer.sasl.mechanism=PLAIN
consumer.request.timeout.ms=20000
consumer.retry.backoff.ms=500
consumer.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
  username="<API KEY>" password="<API SECRET>";
consumer.security.protocol=SASL_SSL

# Confluent Cloud connection for the connectors' producers (source connectors).
producer.ssl.endpoint.identification.algorithm=https
producer.sasl.mechanism=PLAIN
producer.request.timeout.ms=20000
producer.retry.backoff.ms=500
producer.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
  username="<API KEY>" password="<API SECRET>";
producer.security.protocol=SASL_SSL
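
# With this file in place, the worker is typically started in distributed mode, e.g.
# (a hedged example; the path to this file depends on the installation):
#   connect-distributed /etc/kafka/connect-distributed.properties     # Confluent Platform
#   bin/connect-distributed.sh config/connect-distributed.properties  # Apache Kafka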