#
# Copyright 2022 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Namespace to deploy pulsar
# Use the Helm --namespace option to set the namespace
# Override for the name of the helm deployment
fullnameOverride: pulsar
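# A hedged example of setting the namespace at install time (the release name, chart
# reference, and namespace below are placeholders, not values from this chart):
#   helm install pulsar datastax/pulsar -f values.yaml --namespace pulsar --create-namespace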
# DNS name for loadbalancer
dnsName: pulsar.example.com
# The domain name for your kubernetes cluster. This domain is documented here: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#a-aaaa-records-1
# and is used to fully qualify service names when configuring Pulsar.
kubernetesClusterDomain: "cluster.local"
# The DNS Config to apply to all pod specs. It is documented here: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config.
# Note that because we use fully qualified service names for intra cluster networking with Pulsar components, we set
# the ndots value to 4 (the default is 5). This prevents 3 DNS lookups for the fully qualified service names that would
# be guaranteed to result in NXDOMAIN.
dnsConfig:
options:
- name: ndots
value: "4"
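# A hedged illustration (the service and namespace names are assumptions): a fully
# qualified name such as pulsar-broker.pulsar.svc.cluster.local contains 4 dots, so with
# ndots=4 it is resolved as an absolute name first; with the default ndots=5 it would
# first be tried against each search domain, producing the NXDOMAIN lookups noted above.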
# RBAC resource configuration
rbac:
# create ClusterRole/Role and ClusterRoleBinding/RoleBinding resources
create: true
# use ClusterRole and ClusterRoleBinding resources when set to true
# use namespaced Role and RoleBinding resources when set to false
clusterRoles: false
# Global node selector
# If set, this will apply to all components
# Individual components can override this with their own node selector.
# nodeSelector:
#   zone: pulsar
## If persistence is enabled, components that have state will
## be deployed with PersistentVolumeClaims, otherwise, for test
## purposes, they will be deployed with emptyDir
persistence: true
# Global priority class
#
# If enabled, a new priority class will be created and applied to
# all Pulsar core pods. Priority indicates the importance of a Pod relative to other Pods.
# If you are running Pulsar with other pods that depend on it (ex. microservices),
# using a higher priority will ensure it gets scheduled first
priorityClass:
enabled: false
value: 1000000
# Enable initContainers that wait for dependent components to
# be ready. This provides a graceful initial install; however, in some
# failure scenarios it can prevent containers from starting
# even though they could operate.
enableWaitContainers: true
default_storage:
existingStorageClassName: default
## The default reclaimPolicy for created storage classes
reclaimPolicy: Retain
# If default_storage is set, that storage class is the default for all
# persistent volumes created by the chart.
#
# You can override the default_storage storage class in the
# volumes section of each component configuration (example: zookeeper.volumes.data.storageClass)
#
# If you want to use an existing storage class as the default, then
# set existingStorageClassName, like this:
# default_storage:
#   existingStorageClassName: <name>
# To use the default storage class of the k8s cluster, use the name "default":
# default_storage:
#   existingStorageClassName: default
# If you want the chart to create storage classes, then don't set
# existingStorageClassName and instead provide configuration values
# for the storage class. The settings vary based on cloud
# provider. Below are examples for AWS, GCP, and Azure.
# For AWS
# default_storage:
# provisioner: kubernetes.io/aws-ebs
# type: gp2
# fsType: ext4
# extraParams:
# iopsPerGB: "10"
# For GCP
# default_storage:
# provisioner: kubernetes.io/gce-pd
# type: pd-ssd
# fsType: ext4
# extraParams:
# replication-type: none
# For Azure
# default_storage:
# provisioner: kubernetes.io/azure-disk
# fsType: ext4
# type: managed-premium
# extraParams:
# storageaccounttype: Premium_LRS
# kind: Managed
# cachingmode: ReadOnly
# TLS
# When you enable TLS and are using a proxy, you need to expose the
# TLS-enabled ports on the service. To allow TLS connections only, remove the plain-text ports.
# See the proxy and broker sections for details.
# This flag enables TLS for all client and admin facing components: broker, proxy, websocket proxy, standalone function
# worker. You must deploy the broker as a StatefulSet for hostname verification to work. In order to enable Pulsar
# components to network using TLS, see the tls.<component> section below; by default, intra cluster networking is
# plaintext.
enableTls: false
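# A hedged example (illustrative values, not defaults) of enabling client- and admin-facing
# TLS with per-component certificates created via cert-manager:
# enableTls: true
# tls:
#   proxy:
#     createCertificates: true
#   broker:
#     createCertificates: true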
# Deprecated in favor of tls.<component>.tlsSecretName to enable certificates per component.
tlsSecretName: pulsar-tls
# When enableTls is true, TLS is enabled on the client- or admin-facing components (broker, proxy, websocket
# proxy, standalone function worker) by default.
# For added security, you can also enable TLS between the internal components (zookeeper, bookkeeper,
# function worker [connect to broker])
tls:
# Enable TLS between ZooKeeper nodes (quorum TLS), between BookKeeper and ZooKeeper, and between
# broker and ZooKeeper.
# Note: The configured certificate must allow for both server and client use since it is used
# for mTLS. If using cert-manager, make sure your certificate includes:
#
#   X509v3 Extended Key Usage:
#     TLS Web Server Authentication, TLS Web Client Authentication
#
zookeeper:
enabled: false
createCertificates: false
# Used to enable hostname verification for all zk clients.
# NOTE: temporarily false to allow for easy transition. In next major version bump, this will default to true.
enableHostnameVerification: false
# If set, supersedes the root "tlsSecretName" config
tlsSecretName: ""
# Starting in Zookeeper 3.8.0, it's possible to pass the Java Keystore password by file name.
configureKeystoreWithPasswordFile: false
certSpec:
# Spec defined here: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey
# Can be used to override the default algorithm that Cert-Manager uses when signing keys.
privateKey: {}
# Enable TLS between broker and BookKeeper, function worker and bookkeeper, and autorecovery and bookkeeper
bookkeeper:
enabled: false
createCertificates: false
# If set, supersedes the root "tlsSecretName" config
tlsSecretName: ""
certSpec:
# Spec defined here: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey
# Can be used to override the default algorithm that Cert-Manager uses when signing keys.
privateKey: {}
## TLS is enabled for the function worker, broker, and proxy when enableTls is true. The below <component>.enableTlsWithBroker
## flags are used to determine whether the component's client should use TLS when connecting to the broker or the
## function worker. This is an inversion of the paradigm used for the zookeeper and bookkeeper configurations above,
## which is used to enable TLS for all components interacting with bookkeeper or zookeeper.
# Enable TLS between function worker and broker
# NOTE: the function worker's connection to the broker is only over TLS if brokerClientAuthenticationEnabled is true
# or if authenticationEnabled is true in the function's configuration.
function:
enableTlsWithBroker: false
# NOTE: temporarily false to allow for easy transition. In next major version bump, this will default to true.
enableHostnameVerification: false
createCertificates: false
# If set, supersedes the root "tlsSecretName" config
tlsSecretName: ""
certSpec:
# Spec defined here: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey
# Can be used to override the default algorithm that Cert-Manager uses when signing keys.
privateKey: {}
# Websocket will use the tls.proxy.tlsSecretName for TLS
websocket:
enableTlsWithBroker: false
proxy:
# Specify the tls protocols the proxy will use to negotiate during TLS handshake (a comma-separated list of protocol names).
tlsProtocols: "TLSv1.3,TLSv1.2"
# Applies to connections to standalone function worker, too.
enableTlsWithBroker: false
# Applies to upstream broker and function worker TLS connections.
# NOTE: temporarily false to allow for easy transition. In next major version bump, this will default to true.
enableHostnameVerification: false
createCertificates: false
# If set, supersedes the root "tlsSecretName" config
tlsSecretName: ""
certSpec:
# Spec defined here: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey
# Can be used to override the default algorithm that Cert-Manager uses when signing keys.
privateKey: {}
broker:
# Specify the tls protocols the broker will use to negotiate during TLS handshake (a comma-separated list of protocol names).
tlsProtocols: "TLSv1.3,TLSv1.2"
createCertificates: false
# If set, supersedes the root "tlsSecretName" config
tlsSecretName: ""
certSpec:
# Spec defined here: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey
# Can be used to override the default algorithm that Cert-Manager uses when signing keys.
privateKey: {}
# Certificate used for TLS client authentication with bookkeeper and zookeeper, and for verifying self-signed certs
autoRecovery:
createCertificates: false
# NOTE: temporarily false to allow for easy transition. In next major version bump, this will default to true.
enableHostnameVerification: false
# If set, supersedes the root "tlsSecretName" config
tlsSecretName: ""
certSpec:
# Spec defined here: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey
# Can be used to override the default algorithm that Cert-Manager uses when signing keys.
privateKey: {}
pulsarAdminConsole:
# Applies to all upstream targets
enableForProxyToBroker: false
createCertificates: false
# If set, supersedes the root "tlsSecretName" config
tlsSecretName: ""
certSpec:
# Spec defined here: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey
# Can be used to override the default algorithm that Cert-Manager uses when signing keys.
privateKey: {}
bastion:
enableHostnameVerification: true
# Enable these if you use a TLS certificate stored in /pulsar/certs/ca.crt
pulsarHeartbeat:
enableHostnameVerification: false
transactionCoordinatorInitialiser:
enableHostnameVerification: false
# Configuration for the self-signed root CA Certificate. (All Pulsar components are assumed to share the same root CA.)
ssCaCert:
# If set, supersedes the root "tlsSecretName" config. Note, this secret is used to configure TLS verification for
# components like the Bastion and the Pulsar HeartBeat.
tlsSecretName: ""
# Only applied when using Cert Manager's self-signed certs per component.
certSpec:
# Spec defined here: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificatePrivateKey
# Can be used to override the default algorithm that Cert-Manager uses when signing keys.
privateKey: {}
# Deprecated. Use tls.ssCaCert.tlsSecretName.
rootCaSecretName: ""
# secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY----- # pragma: allowlist secret
##
## If you're using cert-manager (see below), this is unneeded, as it will create the secret for you if it is not set
##
## Note: The key should not be in PKCS 8 format even though that is the format used by Pulsar
## The format will be converted by the chart to PKCS 8. This is to maintain compatibility with
## cert-manager
# key: |
# certificate: |
# caCertificate: |
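# A hedged example of creating the secret referenced by tlsSecretName out of band; the key
# names follow the common kubernetes.io/tls convention and the file names are placeholders,
# so check the chart templates for the exact keys expected:
#   kubectl create secret generic pulsar-tls \
#     --from-file=tls.crt=broker.cert.pem \
#     --from-file=tls.key=broker.key.pem \
#     --from-file=ca.crt=ca.cert.pem \
#     --namespace pulsar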
# If you are using an external source to populate the TLS certificate (ex. cert-manager),
# enter the path and name of the CA cert. This is required so that the components
# within the cluster (proxy, broker, etc.) can talk to each other
#
# If you are using self-signed certs, the CA will be contained within the tlsSecretName above,
# so use the following settings:
#
# tlsCaPath: /pulsar/certs
# tlsCaCert: ca.crt
#
# If your certificate is signed by a public CA (ex. Let's Encrypt), then you can use
# the standard CA store from the container OS using the following settings:
tlsCaPath: /etc/ssl/certs
tlsCaCert: ca-certificates.crt
superUserRoles: superuser,admin,websocket,proxy
proxyRoles: proxy
# Enable token-based authentication and authorization
enableTokenAuth: false
# Token public key file name
tokenPublicKeyFile: my-public.key
# Token private key file name
tokenPrivateKeyFile: my-private.key
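# A hedged example of generating the key pair with the Pulsar CLI and storing it in
# secrets; the secret names below are assumptions, so check the chart templates for the
# names it actually mounts:
#   bin/pulsar tokens create-key-pair \
#     --output-private-key my-private.key --output-public-key my-public.key
#   kubectl create secret generic token-private-key --from-file=my-private.key --namespace pulsar
#   kubectl create secret generic token-public-key --from-file=my-public.key --namespace pulsar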
# Turn on anti affinity rules so that replica pods are spread for
# high availability.
# In development environments (ex. Minikube) with a single node, this needs to be disabled
enableAntiAffinity: true
# Settings for anti-affinity. Host antiAffinity ensures that
# replica pods are scheduled on different hosts, which requires the
# number of hosts to be >= the number of replicas. By default, this is
# required, but you can set mode to "preferred" to make
# this a preferred scheduling setting for deployments only (broker, proxy)
#
# Zone antiAffinity distributes replica pods across availability
# zones. This is a "soft" requirement, so that in the event of
# a failure of a zone, pods will run in a different zone
antiAffinity:
host:
enabled: true
mode: "required"
zone:
enabled: false
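# A hedged example for a cluster with fewer hosts than replicas: keep host anti-affinity
# enabled but relax it to a soft preference (applies to deployments only):
# antiAffinity:
#   host:
#     enabled: true
#     mode: "preferred"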
# Install Helm tests (run with helm test <release>)
# Enable basic tests
enableTests: false
# Also enable extended tests
enableExtendedTests: false
# By default, Kubernetes will not restart pods when only their
# configmap is changed. This setting restarts pods when their
# configmap changes, by adding an annotation that holds the
# checksum of the configmap
restartOnConfigMapChange:
enabled: false
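# A hedged sketch of the technique this enables (the template path below is illustrative):
# the pod template carries an annotation derived from the configmap contents, so changing
# the configmap changes the annotation and triggers a rolling restart.
#   annotations:
#     checksum/config: {{ include (print $.Template.BasePath "/broker-configmap.yaml") . | sha256sum }}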
## Which extra components to deploy
extra:
# Broker as deployment
broker: true
# Broker as stateful set
brokerSts: false
# Pulsar proxy
proxy: true
# Websocket proxy
#
# This will enable a standalone WebSocket proxy that
# runs as part of the proxy pod.
#
# See the broker config section for enabling WebSocket
# service within the broker.
wsproxy: true
# Standalone function workers
#
# See the broker config section for information on enabling
# the function worker within the broker, and for guidance on
# whether to use one or the other.
#
# When enabling the standalone function worker, the proxy will be configured
# to forward function API calls.
#
# ZooKeeper with non-persistent storage
#
# These are extra ZooKeeper nodes that are not locked to an AZ or host
# by a PVC requirement, so they can be used to maintain quorum.
# They can "float" between AZs to preserve quorum in complete AZ failure scenarios
zookeepernp: false
# Standalone function worker
#
function: false
pulsarSQL: false
# DNS on proxy
usedns: false
dnsOnProxy: true
# Bookkeeper auto-recovery
autoRecovery: true
# Bastion pod for administrative commands
bastion: true
# Pulsar Beam for HTTP interface
# Pulsar Beam depends on the proxy pod, so you must enable
# that to use Beam. You need to expose the Pulsar Beam
# port on the proxy. See the proxy section for details.
pulsarBeam: false
# Burnell - various Pulsar proxies
burnell: false
# Burnell log collector for functions when using process runtime
burnellLogCollector: false
# Zoonavigator for debugging Zookeeper
zoonavigator: false
# Tardigrade for decentralized blob storage
# This runs the S3 gateway that connects to Tardigrade
tardigrade: false
# Pulsar Heartbeat
pulsarHeartbeat: false
# Pulsar Admin console
pulsarAdminConsole: false
## Which images to use
# When upgrading a Pulsar cluster, it is recommended to upgrade the
# components one at a time (zookeeper, bookkeeper, broker, etc.).
# This section allows for targeted upgrades of each component.
#
image:
broker:
# If not using tiered storage, you can use the smaller pulsar image for the broker
repository: datastax/lunastreaming-all
pullPolicy: IfNotPresent
tag: 2.10_1.5
brokerSts:
# If not using tiered storage, you can use the smaller pulsar image for the broker
repository: datastax/lunastreaming-all
pullPolicy: IfNotPresent
tag: 2.10_1.5
function:
repository: datastax/lunastreaming-all
pullPolicy: IfNotPresent
tag: 2.10_1.5
zookeeper:
repository: datastax/lunastreaming
pullPolicy: IfNotPresent
tag: 2.10_1.5
bookkeeper:
repository: datastax/lunastreaming
pullPolicy: IfNotPresent
tag: 2.10_1.5
proxy:
repository: datastax/lunastreaming
pullPolicy: IfNotPresent
tag: 2.10_1.5
bastion:
repository: datastax/lunastreaming
pullPolicy: IfNotPresent
tag: 2.10_1.5
pulsarBeam:
repository: kesque/pulsar-beam
pullPolicy: IfNotPresent
tag: 1.0.0
burnell:
repository: datastax/burnell
pullPolicy: Always
tag: 1.0.5
burnellLogCollector:
repository: datastax/burnell
pullPolicy: IfNotPresent
tag: logcollector_latest
pulsarSQL:
repository: datastax/lunastreaming-all
tag: 2.10_1.5
pullPolicy: IfNotPresent
tardigrade:
repository: storjlabs/gateway
pullPolicy: IfNotPresent
tag: 981f92a-v1.20.0-go1.17.5
pulsarHeartbeat:
repository: datastax/pulsar-heartbeat
pullPolicy: IfNotPresent
tag: 1.0.17
pulsarAdminConsole:
repository: datastax/pulsar-admin-console
pullPolicy: IfNotPresent
tag: 2.1.5
## Tiered Storage
##
storageOffload:
driver: ""
## General
## =======
# bucket: <bucket>
# region: <region>
# maxBlockSizeInBytes: "64000000"
# readBufferSizeInBytes: "1000000"
## The following are default values for the cluster. They can be
## overridden per namespace (see the pulsar-admin example below).
# managedLedgerOffloadDeletionLagMs: "14400000"
# managedLedgerOffloadAutoTriggerSizeThresholdBytes: "-1" # disabled
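# A hedged example (tenant/namespace are placeholders) of overriding the offload threshold
# for a single namespace with pulsar-admin:
#   bin/pulsar-admin namespaces set-offload-threshold --size 10G my-tenant/my-namespace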
# For AWS S3
# ======
# You must create an IAM account with access to the bucket and
# generate keys for that account.
#
# driver: aws-s3
# accessKey: <access-key>
# accessSecret: <secret-key> # pragma: allowlist secret
# For S3 Compatible
# =================
# Need to create access and secret key for S3 compatible service
#
# driver: aws-s3
# accessKey: <access-key>
# accessSecret: <secret-key> # pragma: allowlist secret
# serviceEndpoint: host:port
# For Tardigrade
# =================
# Need to enable extra.tardigrade for the S3 gateway.
# See tardigrade section below to configure the S3 gateway
#
# driver: aws-s3
# For Azure Blob
# =================
# Need to create an Azure storage account and a blob container (bucket)
# To retrieve key, see https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#code-try-1
#
# driver: azureblob
# storageAccount: <account name>
# storageAccountKey: <account key>
## For Google Cloud Storage
## ====================
## You must create a service account that has access to the objects in GCP buckets
## and upload its key as a JSON file to a secret.
##
## 1. Go to https://console.cloud.google.com/iam-admin/serviceaccounts
## 2. Select your project.
## 3. Create a new service account.
## 4. Give the service account permission to access the bucket. For example,
## the "Storage Object Admin" role.
## 5. Create a key for the service account and save it as a JSON file.
## 6. Do one of the two steps below:
## * Save the JSON file in a k8s secret:
## kubectl create secret generic pulsar-gcp-sa-secret \
## --from-file=account-223201-f12856532197.json \
## --namespace pulsar
## OR
## * Set storageOffload.gcsServiceAccountJsonFileContent to the
## base64-encoded content of the JSON file:
## helm install pulsar \
## --set storageOffload.gcsServiceAccountJsonFile=account-223201-f12856532197.json \
## --set storageOffload.gcsServiceAccountJsonFileContent=$(cat account-223201-f12856532197.json | base64) .
## This method is totally equivalent to the previous one.
## In fact, it would generate the same secret.
# driver: google-cloud-storage
# gcsServiceAccountSecret: pulsar-gcp-sa-secret # pragma: allowlist secret
# gcsServiceAccountJsonFile: account-223201-f12856532197.json
## Pulsar: Zookeeper cluster
## templates/zookeeper-statefulset.yaml
##
zookeeper:
component: zookeeper
replicaCount: 3
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
# nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
# nodeAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# preference:
# matchExpressions:
# - key: failure-domain.beta.kubernetes.io/region
# operator: In
# values:
# - region1
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8000"
tolerations: []
gracePeriod: 60
probe:
enabled: true
initial: 20
period: 30
timeout: 30
resources:
requests:
memory: 1Gi
cpu: 0.3
# limits:
# memory: 1Gi
# cpu: 0.3
volumes:
data:
name: data
size: 5Gi
# You can override the default_storage class for this volume.
# To use an existing storage class, set existingStorageClassName.
# To have the chart create a storage class, leave existingStorageClassName
# unset and specify the storage class parameters under storageClass. The
# appropriate parameters will vary by cloud provider. See default_storage above for examples.
#
# existingStorageClassName: <name>
#
# To use the default storage class of the k8s cluster, use the name "default"
# existingStorageClassName: default
#
# OR (GCP example)
#
# storageClass:
# provisioner: kubernetes.io/gce-pd
# type: pd-ssd
# fsType: ext4
# extraParams:
# replication-type: none
## Zookeeper configmap
## templates/zookeeper-configmap.yaml
##
configData:
PULSAR_MEM: "-Xms1g -Xmx1g -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760"
PULSAR_GC: "-XX:+UseG1GC"
PULSAR_LOG_LEVEL: "info"
PULSAR_LOG_ROOT_LEVEL: "info"
PULSAR_EXTRA_OPTS: "-Dzookeeper.tcpKeepAlive=true -Dzookeeper.clientTcpKeepAlive=true -Dpulsar.log.root.level=info"
## Zookeeper service
## templates/zookeeper-service.yaml
##
service:
annotations:
ports:
- name: server
port: 2888
- name: leader-election
port: 3888
- name: client
port: 2181
## Zookeeper PodDisruptionBudget
## templates/zookeeper-pdb.yaml
##
pdb:
usePolicy: true
maxUnavailable: 1
## Pulsar: Zookeeper cluster with non-persistent storage
## Used as floating ZooKeeper nodes to maintain quorum during AZ failures
## templates/zookeepernp-statefulset.yaml
##
zookeepernp:
component: zookeepernp
# Keep count at 0 unless you are using the non-persistent zookeeper
replicaCount: 0
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
# nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
# nodeAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# preference:
# matchExpressions:
# - key: failure-domain.beta.kubernetes.io/region
# operator: In
# values:
# - region1
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8000"
tolerations: []
gracePeriod: 60
probe:
enabled: true
initial: 20
period: 30
timeout: 30
resources:
requests:
memory: 1Gi
cpu: 0.3
# limits:
# memory: 1Gi
# cpu: 0.3
# Volume is emptyDir
volumes:
data:
name: data
## Zookeeper configmap
## templates/zookeepernp-configmap.yaml
##
configData:
PULSAR_MEM: "-Xms1g -Xmx1g -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760"
PULSAR_GC: "-XX:+UseG1GC"
PULSAR_LOG_LEVEL: "info"
PULSAR_LOG_ROOT_LEVEL: "info"
PULSAR_EXTRA_OPTS: "-Dzookeeper.tcpKeepAlive=true -Dzookeeper.clientTcpKeepAlive=true -Dpulsar.log.root.level=info"
## Zookeeper service
## templates/zookeepernp-service.yaml
##
service:
annotations:
ports:
- name: server
port: 2888
- name: leader-election
port: 3888
- name: client
port: 2181
## Zookeeper PodDisruptionBudget
## templates/zookeepernp-pdb.yaml
##
pdb:
usePolicy: true
maxUnavailable: 1
## Pulsar ZooKeeper metadata. The metadata will be deployed as
## soon as the last zookeeper node is reachable. The deployment
## of other components that depend on zookeeper, such as the
## bookkeeper nodes, broker nodes, etc., will only start once the
## zookeeper cluster is ready and the metadata has been deployed
zookeeperMetadata:
component: zookeeper-metadata
## timeout (in seconds) for running "bin/pulsar initialize-cluster-metadata" command
initTimeout: 60
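# A hedged sketch of that command; the exact arguments are generated by the chart, and the
# cluster name and hostnames below are placeholders:
#   bin/pulsar initialize-cluster-metadata \
#     --cluster pulsar \
#     --zookeeper pulsar-zookeeper:2181 \
#     --configuration-store pulsar-zookeeper:2181 \
#     --web-service-url http://pulsar-broker:8080 \
#     --broker-service-url pulsar://pulsar-broker:6650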
## Pulsar: Bookkeeper cluster
## templates/bookkeeper-statefulset.yaml
##
bookkeeper:
component: bookkeeper
replicaCount: 3
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
# nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: nodepool
# operator: In
# values:
# - pulsar
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8000"
tolerations: []
pvcPrexix: ''
probe:
enabled: true
port: 3181
initial: 10
period: 30
timeout: 5
gracePeriod: 60
resources:
requests:
memory: 2Gi
cpu: 1
# limits:
# memory: 2Gi
# cpu: 1
volumes:
journal:
name: journal
size: 20Gi
# To override the default_storage class for this volume, set storageClass.
# To use an existing storage class, set existingStorageClassName.
# To have the chart create a storage class, leave existingStorageClassName
# unset and specify the storage class parameters. The appropriate parameters
# will vary by cloud provider. See default_storage above for examples.
#
# existingStorageClassName: <name>
#
# To use the default storage class of the k8s cluster, use the name "default"
# existingStorageClassName: default
#
# OR (GCP example)
#
# storageClass:
# provisioner: kubernetes.io/gce-pd
# type: pd-ssd
# fsType: ext4
# extraParams:
# replication-type: none
ledgers:
name: ledgers
size: 50Gi
# To override the default_storage class for this volume, set storageClass.
# To use an existing storage class, set existingStorageClassName.
# To have the chart create a storage class, leave existingStorageClassName
# unset and specify the storage class parameters. The appropriate parameters
# will vary by cloud provider. See default_storage above for examples.
#
# existingStorageClassName: <name>
#
# To use the default storage class of the k8s cluster, use the name "default"
# existingStorageClassName: default
#
# OR (GCP example)
#
# storageClass:
# provisioner: kubernetes.io/gce-pd
# type: pd-ssd
# fsType: ext4
# extraParams:
# replication-type: none
# If you enable state storage on BookKeeper, a persistent volume
# is needed to hold the state files (ranges)
ranges:
name: ranges
size: 5Gi
# To override the default_storage class for this volume, set storageClass.
# To use an existing storage class, set existingStorageClassName.
# To have the chart create a storage class, leave existingStorageClassName
# unset and specify the storage class parameters. The appropriate parameters
# will vary by cloud provider. See default_storage above for examples.
#
# existingStorageClassName: <name>
#
# To use the default storage class of the k8s cluster, use the name "default"
# existingStorageClassName: default
#
# OR (GCP example)
#
# storageClass:
# provisioner: kubernetes.io/gce-pd
# type: pd-ssd
# fsType: ext4
# extraParams:
# replication-type: none
## Bookkeeper configmap
## templates/bookkeeper-configmap.yaml
##
configData:
BOOKIE_MEM: "-Xms2g -Xmx2g -XX:MaxDirectMemorySize=2g -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ExitOnOutOfMemoryError"
BOOKIE_GC: "-XX:+UseG1GC"
PULSAR_LOG_LEVEL: "info"
PULSAR_LOG_ROOT_LEVEL: "info"
PULSAR_EXTRA_OPTS: "-Dpulsar.log.root.level=info"
statsProviderClass: org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider
## Bookkeeper configmap
## templates/bookkeeper-service.yaml
##
service:
annotations:
ports:
- name: server
port: 3181
## Bookkeeper PodDisruptionBudget
## templates/bookkeeper-pdb.yaml
##
pdb:
usePolicy: true
maxUnavailable: 1
## Pulsar: Broker cluster
## templates/broker-deployment.yaml
##
broker:
component: broker
replicaCount: 3
# ledger:
# defaultEnsembleSize: 2
# defaultAckQuorum: 2
# defaultWriteQuorum: 2
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
# nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
#
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: nodepool
# operator: In
# values:
# - pulsar
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: "component"
# operator: In
# values:
# - bookkeeper
# topologyKey: "kubernetes.io/hostname"
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"
tolerations: []
gracePeriod: 60
# Enable extra services in the broker
# These can also be enabled as standalone services
# See extra section above
functionsWorkerEnabled: false
webSocketServiceEnabled: false
# Enable Transactions on the Pulsar cluster.
# Enabling this starts the transaction coordinator module inside the broker
# and runs the transaction coordinator metadata setup job.
transactionCoordinator:
enabled: false
# set to true to manually initialize transaction coordinator on existing cluster
initialize: false
# Count of partitions used for the transaction coordinator.
initialCount: 16
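# A hedged example (release and chart names are placeholders) of turning on transactions
# for an existing cluster at upgrade time:
#   helm upgrade pulsar datastax/pulsar \
#     --set broker.transactionCoordinator.enabled=true \
#     --set broker.transactionCoordinator.initialize=true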
probe:
enabled: true
port: 8080
initial: 10
period: 30
timeout: 5
resources:
requests:
memory: 2Gi
cpu: 1
# limits:
# memory: 2Gi
# cpu: 1
# Init container to add files to image
initContainer: {}
# initContainer:
# repository: repository/image
# tag: latest
# pullPolicy: IfNotPresent
# command: ["cp", "-r", "/pulsar-libs", "/jars" ]
# args: []
# emptyDirPath: "/jars"
# env:
# - name:
# value:
# envFrom:
# - configMapRef:
# name:
# Comma delimited list of authentication providers
authenticationProviders: "org.apache.pulsar.broker.authentication.AuthenticationProviderToken"
# extraAuthProvider: this is now a deprecated field, use the `.Values.broker.authenticationProviders` field instead
## Broker configmap
## templates/broker-configmap.yaml
##
configData:
PULSAR_MEM: "-Xms2g -Xmx2g -XX:MaxDirectMemorySize=2g -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ExitOnOutOfMemoryError"
PULSAR_GC: "-XX:+UseG1GC"
PULSAR_LOG_LEVEL: "info"
PULSAR_LOG_ROOT_LEVEL: "info"
PULSAR_EXTRA_OPTS: "-Dpulsar.log.root.level=info"
brokerDeduplicationEnabled: "false"
exposeTopicLevelMetricsInPrometheus: "true"
exposeConsumerLevelMetricsInPrometheus: "false"
backlogQuotaDefaultRetentionPolicy: "producer_exception"
## Broker service
## templates/broker-service.yaml
##
# If you are enabling TLS, make sure these ports are on the port list:
# - name: https
# port: 8443
# protocol: TCP
# - name: pulsarssl
# port: 6651
# To only allow TLS connections, remove the plain-text ports (http, pulsar)
service:
annotations: {}
type: ClusterIP
headless: false
ports:
- name: http
port: 8080
- name: pulsar
port: 6650
- name: https
port: 8443
- name: pulsarssl
port: 6651
ingress:
enabled: false
## Broker PodDisruptionBudget
## templates/broker-pdb.yaml
##
pdb:
usePolicy: true
maxUnavailable: 1
## Pulsar: Broker cluster
## templates/broker-deployment.yaml
##
brokerSts:
component: brokersts
replicaCount: 3
# ledger:
# defaultEnsembleSize: 2
# defaultAckQuorum: 2
# defaultWriteQuorum: 2
# nodeSelector:
# cloud.google.com/gke-nodepool: default-pool
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution: