-
Notifications
You must be signed in to change notification settings - Fork 5
/
Copy pathkafka-cluster.yaml
187 lines (187 loc) · 6.35 KB
/
kafka-cluster.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
---
# Strimzi Kafka custom resource: a 3-broker / 3-ZooKeeper "demo" cluster with
# JBOD persistent storage, SCRAM- and TLS-authenticated listeners, Prometheus
# JMX metrics, the Entity (Topic/User) Operator, Kafka Exporter and Cruise Control.
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
  name: demo
  labels:
    app: demo
spec:
  kafka:
    version: 2.7.0
    config:
      # Default replication factor for automatically created topics
      default.replication.factor: 1
      # The default number of log partitions per topic
      num.partitions: 1
      # Enable auto creation of topic on the server
      auto.create.topics.enable: false
      # When a producer sets acks to "all" (or "-1"), min.insync.replicas specifies the minimum number of replicas that
      # must acknowledge a write for the write to be considered successful.
      # When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical
      # scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and
      # produce with acks of "all". This will ensure that the producer raises an exception if a
      # majority of replicas do not receive a write.
      min.insync.replicas: 2
      # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state".
      # For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
      offsets.topic.replication.factor: 3
      transaction.state.log.replication.factor: 3
      transaction.state.log.min.isr: 2
      # The minimum age of a log file to be eligible for deletion due to age (in hours),
      # tertiary to the log.retention.ms property. Default value: 168
      log.retention.hours: 168
      # The default cleanup policy for segments beyond the retention window. A comma-separated list of valid policies.
      # Valid policies are: "delete" and "compact". Default value: "delete"
      log.cleanup.policy: delete
      # Enable the log cleaner process to run on the server. Should be enabled if using any topics with a
      # cleanup.policy=compact, including the internal offsets topic. If disabled those topics will not be compacted
      # and will continually grow in size.
      log.cleaner.enable: true
      # How long delete records are retained. Default value: 86400000 (24 hours)
      log.cleaner.delete.retention.ms: 86400000
      # Specify the message format version the broker will use to append messages to the logs.
      # The value should be a valid ApiVersion. Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check
      # ApiVersion for more details. By setting a particular message format version, the user
      # is certifying that all the existing messages on disk are smaller or equal than the specified version.
      # Setting this value incorrectly will cause consumers with older versions to break as they will
      # receive messages with a format that they don't understand.
      log.message.format.version: "2.7"
    jvmOptions:
      "-Xms": "2g"
      "-Xmx": "2g"
    authorization:
      type: simple
    listeners:
      # Internal plaintext listener with SCRAM-SHA-512 authentication
      - name: plain
        port: 9092
        tls: false
        type: internal
        authentication:
          type: scram-sha-512
      # Internal TLS listener with TLS client-certificate authentication
      - name: tls
        port: 9093
        tls: true
        type: internal
        authentication:
          type: tls
      # External listener exposed via an OpenShift Route
      - name: external
        port: 9094
        tls: true
        type: route
        authentication:
          type: tls
    livenessProbe:
      initialDelaySeconds: 30
      timeoutSeconds: 5
    readinessProbe:
      initialDelaySeconds: 30
      timeoutSeconds: 5
    metricsConfig:
      type: jmxPrometheusExporter
      valueFrom:
        configMapKeyRef:
          name: kafka-metrics
          key: kafka-metrics-config.yml
    replicas: 3
    storage:
      # type: ephemeral
      type: jbod
      volumes:
        - id: 0
          type: persistent-claim
          size: 25Gi
          deleteClaim: true
        - id: 1
          type: persistent-claim
          size: 25Gi
          deleteClaim: true
    template:
      pod:
        affinity:
          # Prefer scheduling broker pods on different nodes
          podAntiAffinity:
            preferredDuringSchedulingIgnoredDuringExecution:
              - podAffinityTerm:
                  labelSelector:
                    matchExpressions:
                      - key: strimzi.io/name
                        operator: In
                        values:
                          - demo-kafka
                  topologyKey: kubernetes.io/hostname
                weight: 1
        metadata:
          labels:
            custom-strimzi-label: demo-kafka-cluster
  zookeeper:
    jvmOptions:
      "-Xms": "512m"
      "-Xmx": "1g"
    livenessProbe:
      initialDelaySeconds: 30
      timeoutSeconds: 5
    readinessProbe:
      initialDelaySeconds: 60
      timeoutSeconds: 5
    metricsConfig:
      type: jmxPrometheusExporter
      valueFrom:
        configMapKeyRef:
          name: kafka-metrics
          key: zookeeper-metrics-config.yml
    replicas: 3
    storage:
      type: persistent-claim
      size: 25Gi
      deleteClaim: true
    template:
      pod:
        affinity:
          # Prefer scheduling ZooKeeper pods on different nodes
          podAntiAffinity:
            preferredDuringSchedulingIgnoredDuringExecution:
              - podAffinityTerm:
                  labelSelector:
                    matchExpressions:
                      - key: strimzi.io/name
                        operator: In
                        values:
                          - demo-zookeeper
                  topologyKey: kubernetes.io/hostname
                weight: 1
        metadata:
          labels:
            custom-strimzi-label: demo-zookeeper-cluster
  entityOperator:
    topicOperator:
      reconciliationIntervalSeconds: 60
    userOperator:
      reconciliationIntervalSeconds: 60
  clientsCa:
    generateCertificateAuthority: true
    renewalDays: 30
    validityDays: 1460
  clusterCa:
    generateCertificateAuthority: true
    renewalDays: 30
    validityDays: 1460
  kafkaExporter:
    topicRegex: ".*"
    groupRegex: ".*"
    readinessProbe:
      initialDelaySeconds: 15
      timeoutSeconds: 5
    livenessProbe:
      initialDelaySeconds: 15
      timeoutSeconds: 5
  cruiseControl:
    # brokerCapacity:
    #   inboundNetwork: 10000KB/s
    #   outboundNetwork: 10000KB/s
    # config:
    #   hard.goals: >
    #     com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkInboundCapacityGoal,
    #     com.linkedin.kafka.cruisecontrol.analyzer.goals.NetworkOutboundCapacityGoal
    metricsConfig:
      type: jmxPrometheusExporter
      valueFrom:
        configMapKeyRef:
          name: cruise-control-metrics
          key: metrics-config.yml