# docker-compose.yml — forked from confluentinc/demo-scene
# (GitHub page chrome and line-number gutter from the web scrape removed.)
---
# Two-broker Kafka demo stack: ZooKeeper, two Confluent enterprise brokers,
# Kafka Connect (Twitter source connector), Confluent Control Center,
# Schema Registry, KSQL server + CLI, and a one-shot client container that
# waits for the cluster and pre-creates the demo topic.
version: "2.1"

services:
  # Begin zookeeper section
  zookeeper:
    image: confluentinc/cp-zookeeper:5.0.1
    restart: always
    hostname: zookeeper
    environment:
      ZOOKEEPER_SERVER_ID: "1"
      ZOOKEEPER_CLIENT_PORT: "2181"
      ZOOKEEPER_TICK_TIME: "2000"
      ZOOKEEPER_SERVERS: "zookeeper:2888:3888"
    ports:
      - "2181:2181"
  # End zookeeper section

  # Begin kafka1 section
  kafka1:
    image: confluentinc/cp-enterprise-kafka:5.0.1
    hostname: kafka1
    depends_on:
      - zookeeper
    # Persist broker data on the host filesystem.
    volumes:
      - $PWD/data/kafka1/data:/var/lib/kafka/data
    ports:
      - "9091:9091"    # internal listener (container network)
      - "29091:29091"  # host-mapped listener (localhost clients)
    environment:
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka1:9091,PLAINTEXT_HOST://localhost:29091"
      KAFKA_LOG4J_LOGGERS: "kafka.authorizer.logger=INFO"
      KAFKA_METRIC_REPORTERS: "io.confluent.metrics.reporter.ConfluentMetricsReporter"
      KAFKA_BROKER_ID: "1"
      KAFKA_BROKER_RACK: "r1"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "2"
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: "kafka1:9091"
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: "2"
      CONFLUENT_METRICS_REPORTER_MAX_REQUEST_SIZE: "10485760"
      # To avoid race condition with control-center
      CONFLUENT_METRICS_REPORTER_TOPIC_CREATE: "false"
      KAFKA_DELETE_TOPIC_ENABLE: "true"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_LOG4J_ROOT_LOGLEVEL: INFO
      KAFKA_JMX_PORT: "9991"
  # End kafka1 section

  # Begin kafka2 section
  kafka2:
    image: confluentinc/cp-enterprise-kafka:5.0.1
    hostname: kafka2
    depends_on:
      - zookeeper
    volumes:
      - $PWD/data/kafka2/data:/var/lib/kafka/data
    ports:
      - "9092:9092"    # internal listener (container network)
      - "29092:29092"  # host-mapped listener (localhost clients)
    environment:
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka2:9092,PLAINTEXT_HOST://localhost:29092"
      KAFKA_METRIC_REPORTERS: "io.confluent.metrics.reporter.ConfluentMetricsReporter"
      KAFKA_BROKER_ID: "2"
      KAFKA_BROKER_RACK: "r1"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "2"
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: "kafka2:9092"
      CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: "2"
      CONFLUENT_METRICS_REPORTER_MAX_REQUEST_SIZE: "10485760"
      CONFLUENT_METRICS_REPORTER_TOPIC_CREATE: "false"
      KAFKA_DELETE_TOPIC_ENABLE: "true"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_JMX_PORT: "9991"
  # End kafka2 section

  # Begin Kafka Connect / Twitter Connector section
  connect:
    image: jcustenborder/kafka-connect-twitter:0.2.32
    build:
      context: .
      dockerfile: Dockerfile-kafka-connect-twitter
    container_name: connect
    restart: always
    ports:
      - "8083:8083"
    depends_on:
      - zookeeper
      - kafka1
      # kafka2 added: the worker bootstraps against both brokers (see
      # CONNECT_BOOTSTRAP_SERVERS below), so both should be started first.
      - kafka2
      - schemaregistry
    volumes:
      - $PWD/monitoring-interceptors/monitoring-interceptors-5.0.1.jar:/usr/share/java/monitoring-interceptors/monitoring-interceptors-5.0.1.jar
      - $PWD/scripts/consumer.properties:/usr/share/consumer.properties
    environment:
      CONNECT_BOOTSTRAP_SERVERS: "kafka1:9091,kafka2:9092"
      CONNECT_REST_PORT: "8083"
      CONNECT_GROUP_ID: "connect"
      CONNECT_CONFIG_STORAGE_TOPIC: connect-config
      CONNECT_OFFSET_STORAGE_TOPIC: connect-offsets
      CONNECT_STATUS_STORAGE_TOPIC: connect-status
      CONNECT_REPLICATION_FACTOR: "2"
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "2"
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "2"
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "2"
      CONNECT_KEY_CONVERTER: "org.apache.kafka.connect.storage.StringConverter"
      CONNECT_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      CONNECT_REST_ADVERTISED_HOST_NAME: "connect"
      CONNECT_ZOOKEEPER_CONNECT: "zookeeper:2181"
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
      CONNECT_LOG4J_ROOT_LOGLEVEL: INFO
      CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR"
      CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-5.0.1.jar
  # End Kafka Connect section

  # Begin Control Center section
  control-center:
    image: confluentinc/cp-enterprise-control-center:5.0.1
    container_name: control-center
    restart: always
    depends_on:
      - zookeeper
      - kafka1
      - kafka2
      - connect
    ports:
      - "9021:9021"
    environment:
      CONTROL_CENTER_BOOTSTRAP_SERVERS: "kafka1:9091,kafka2:9092"
      CONTROL_CENTER_ZOOKEEPER_CONNECT: "zookeeper:2181"
      CONTROL_CENTER_REPLICATION_FACTOR: "2"
      CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_REPLICATION: "2"
      CONTROL_CENTER_INTERNAL_TOPICS_REPLICATION: "2"
      CONTROL_CENTER_COMMAND_TOPIC_REPLICATION: "2"
      CONTROL_CENTER_METRICS_TOPIC_REPLICATION: "2"
      CONTROL_CENTER_STREAMS_NUM_STREAM_THREADS: "2"
      CONTROL_CENTER_CONNECT_CLUSTER: "http://connect:8083"
      CONTROL_CENTER_KSQL_URL: "http://ksql-server:8088"
      CONTROL_CENTER_KSQL_ADVERTISED_URL: "http://ksql-server:8088"
      # Port fixed from 8085 -> 8082 to match SCHEMA_REGISTRY_LISTENERS below.
      CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schemaregistry:8082"
      CONTROL_CENTER_STREAMS_CONSUMER_REQUEST_TIMEOUT_MS: "960032"
      CONTROL_CENTER_ID: "MY_C3"
  # End Control Center section

  # Begin Schema Registry section
  schemaregistry:
    image: confluentinc/cp-schema-registry:5.0.1
    #restart: always
    depends_on:
      - zookeeper
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: "PLAINTEXT://kafka1:9091,PLAINTEXT://kafka2:9092"
      SCHEMA_REGISTRY_HOST_NAME: schemaregistry
      SCHEMA_REGISTRY_LISTENERS: "http://0.0.0.0:8082"
    ports:
      - "8082:8082"
  # End Schema Registry section

  # Begin kafka-client section
  kafka-client:
    image: confluentinc/cp-enterprise-kafka:5.0.1
    depends_on:
      - kafka1
      - kafka2
    hostname: kafka-client
    # We defined a dependency on "kafka", but `depends_on` will NOT wait for the
    # dependencies to be "ready" before starting the "kafka-client"
    # container; it waits only until the dependencies have started. Hence we
    # must control startup order more explicitly.
    # See https://docs.docker.com/compose/startup-order/
    command: >-
      bash -c 'echo Waiting for Kafka to be ready... &&
      cub kafka-ready -b kafka1:9091 1 60 &&
      sleep 5 &&
      kafka-topics --zookeeper zookeeper:2181 --topic twitter_json_01 --create --replication-factor 2 --partitions 2 &&
      exit'
    environment:
      # The following settings are listed here only to satisfy the image's requirements.
      # We override the image's `command` anyways, hence this container will not start a broker.
      KAFKA_BROKER_ID: ignored
      KAFKA_ZOOKEEPER_CONNECT: ignored
    ports:
      - "7073:7073"
  # End kafka-client section

  # Begin KSQL Server section
  ksql-server:
    image: confluentinc/cp-ksql-server:5.0.1
    hostname: ksql-server
    container_name: ksql-server
    restart: always
    depends_on:
      - kafka1
      - kafka2
      - connect
    ports:
      - "8088:8088"
    environment:
      KSQL_CONFIG_DIR: "/etc/ksql"
      KSQL_LOG4J_OPTS: "-Dlog4j.configuration=file:/etc/ksql/log4j-rolling.properties"
      KSQL_BOOTSTRAP_SERVERS: "kafka1:9091,kafka2:9092"
      KSQL_HOST_NAME: ksql-server
      KSQL_APPLICATION_ID: "cp-demo"
      KSQL_LISTENERS: "http://0.0.0.0:8088"
      KSQL_CACHE_MAX_BYTES_BUFFERING: "0"
      # Port fixed from 8085 -> 8082 to match the schemaregistry service.
      KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schemaregistry:8082"
      # Producer Confluent Monitoring Interceptors for Control Center streams monitoring
      KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      # Consumer Confluent Monitoring Interceptors for Control Center streams monitoring
      KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
  # End KSQL Server section

  # Begin KSQL CLI section
  ksql-cli:
    image: confluentinc/cp-ksql-cli:5.0.1
    container_name: ksql-cli
    depends_on:
      - kafka1
      - connect
      - ksql-server
    volumes:
      - $PWD/scripts/ksqlcommands:/tmp/ksqlcommands
    entrypoint: /bin/sh
    tty: true
  # End KSQL CLI section