---
  1. # Default values for flink.
  2. # This is a YAML-formatted file.
  3. # Declare variables to be passed into your templates.
  4. nameOverride: ""
  5. fullnameOverride: ""
  6. image:
  7. repository: flink
  8. tag: 1.11.2-scala_2.12
  9. pullPolicy: IfNotPresent
  10. imagePullSecrets: []
  11. # For general configuration
  12. flink:
  13. # logging, log4j configuration copied from Flink distribution
  14. logging:
  15. log4j_properties: |+
  16. # This affects logging for both user code and Flink
  17. rootLogger.level = INFO
  18. rootLogger.appenderRef.file.ref = MainAppender
  19. # Uncomment this if you want to _only_ change Flink's logging
  20. #logger.flink.name = org.apache.flink
  21. #logger.flink.level = INFO
  22. # The following lines keep the log level of common libraries/connectors on
  23. # log level INFO. The root logger does not override this. You have to manually
  24. # change the log levels here.
  25. logger.akka.name = akka
  26. logger.akka.level = INFO
  27. logger.kafka.name= org.apache.kafka
  28. logger.kafka.level = INFO
  29. logger.hadoop.name = org.apache.hadoop
  30. logger.hadoop.level = INFO
  31. logger.zookeeper.name = org.apache.zookeeper
  32. logger.zookeeper.level = INFO
  33. # Log all infos in the given file
  34. appender.main.name = MainAppender
  35. appender.main.type = File
  36. appender.main.append = false
  37. appender.main.fileName = ${sys:log.file}
  38. appender.main.layout.type = PatternLayout
  39. appender.main.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
  40. # Suppress the irrelevant (wrong) warnings from the Netty channel handler
  41. logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
  42. logger.netty.level = OFF
  43. # monitoring is exporting metrics in Prometheus format
  44. monitoring:
  45. enabled: true
  46. # port for metrics
  47. port: 9999
  48. # latency monitoring
  49. latency:
  50. enabled: false
  51. probingInterval: 1000
  52. # system is additional system metrics
  53. system:
  54. enabled: true
  55. probingInterval: 5000
  56. rocksdb:
  57. enabled: false
  58. workDir: /opt/flink
  59. # In case of issue - Metaspace OOM increase this param according to your memory limits
  60. # params: |+
  61. # taskmanager.memory.jvm-metaspace.size: 256mb
  62. params: ""
  63. state:
  64. # backend for state. Available options: filesystem, rocksdb, memory; empty - for default(memory)
  65. backend:
  66. # These values are default excludes file pathes
  67. # https://ci.apache.org/projects/flink/flink-docs-stable/dev/stream/state/checkpointing.html#related-config-options
  68. params: |+
  69. state.checkpoints.dir: file:///flink_state/checkpoints
  70. state.savepoints.dir: file:///flink_state/savepoints
  71. state.backend.async: true
  72. state.backend.fs.memory-threshold: 1024
  73. state.backend.fs.write-buffer-size: 4096
  74. state.backend.incremental: false
  75. state.backend.local-recovery: false
  76. state.checkpoints.num-retained: 1
  77. taskmanager.state.local.root-dirs: file:///flink_state/local-recovery
  78. # https://ci.apache.org/projects/flink/flink-docs-stable/ops/state/state_backends.html#rocksdb-state-backend-config-options
  79. # * state.backend.rocksdb.localdir doesn't have a prefix - file://
  80. rocksdb: |+
  81. state.backend.rocksdb.checkpoint.transfer.thread.num: 1
  82. state.backend.rocksdb.localdir: /flink_state/rocksdb
  83. state.backend.rocksdb.options-factory: org.apache.flink.contrib.streaming.state.DefaultConfigurableOptionsFactory
  84. state.backend.rocksdb.predefined-options: DEFAULT
  85. state.backend.rocksdb.timer-service.factory: HEAP
  86. state.backend.rocksdb.ttl.compaction.filter.enabled: false
  87. # extraEnvs passes envs to both Jobmanagers and Taskmanager
  88. # for example
  89. # extraEnvs:
  90. # - name: KAFKA_BOOTSTRAP_SERVERS
  91. # value: dest-kafka-bootstrap:9092
  92. #
  93. extraEnvs: []
  94. jobmanager:
  95. # Statefulset option will create Jobmanager as a StatefulSet
  96. statefulset: false
  97. # Init containers
  98. initContainers: {}
  99. # Example
  100. # test:
  101. # image: busybox:1.28
  102. # command:
  103. # - /bin/sh
  104. # - -c
  105. # - "echo test"
  106. # highAvailability configuration based on zookeeper
  107. highAvailability:
  108. # enabled also will enable zookeeper Dependency
  109. enabled: false
  110. zookeeperConnect: "{{ .Release.Name }}-zookeeper:{{ .Values.zookeeper.env.ZOO_PORT }}"
  111. zookeeperRootPath: /flink
  112. clusterId: /flink
  113. # storageDir for Jobmanagers. DFS expected.
  114. # Docs - Storage directory (required): JobManager metadata is persisted in the file system storageDir and only a pointer to this state is stored in ZooKeeper
  115. storageDir:
  116. # syncPort is a rpc port in HA configuration
  117. syncPort: 6150
  118. # command for HA configuration
  119. # this trick with sed required because taskmanagers read jobmanager.rpc.address from Zookeeper.
  120. # For configuration with one jobmanager (it's enough stable because Kubernetes will restart Jobmanager on falures)
  121. # 'sed' can be changed to use flink service name, e.g. {{ include "flink.name" . }}-jobmanager
  122. command: >-
  123. sed 's/REPLACE_HOSTNAME/'${FLINK_POD_IP}'/'
  124. $FLINK_HOME/conf/flink-conf.yaml.tpl > $FLINK_HOME/conf/flink-conf.yaml &&
  125. $FLINK_HOME/bin/jobmanager.sh start;
  126. while :;
  127. do
  128. if [[ -f $(find log -name '*taskexecutor*.log' -print -quit) ]];
  129. then tail -f -n +1 log/*taskexecutor*.log;
  130. fi;
  131. done
  132. # Additional param for JVM to support security.properties override
  133. # check configMap for more information
  134. jvmArgs: "-Djava.security.properties={{ .Values.flink.workDir }}/conf/security.properties"
  135. # extraEnvs passes envs to Jobmanagers
  136. extraEnvs: []
  137. ports:
  138. rpc: 6123
  139. # blob port uses for Liveness probe
  140. blob: 6124
  141. ui: 8081
  142. replicaCount: 1
  143. # heapSize params for Jobmanager
  144. # keep in mind that Flink can use offheap memory
  145. # e.g. in case of checkpoint usage
  146. heapSize: 1g
  147. resources: {}
  148. # Example
  149. # limits:
  150. # cpu: 3800m
  151. # memory: 8000Mi
  152. additionalCommand: >-
  153. cp /opt/flink/opt/flink-s3-fs-presto-*.jar
  154. /opt/flink/lib/ &&
  155. wget https://repo1.maven.org/maven2/com/github/oshi/oshi-core/3.4.0/oshi-core-3.4.0.jar
  156. -O /opt/flink/lib/oshi-core-3.4.0.jar &&
  157. wget https://repo1.maven.org/maven2/net/java/dev/jna/jna/5.4.0/jna-5.4.0.jar
  158. -O /opt/flink/lib/jna-5.4.0.jar &&
  159. wget https://repo1.maven.org/maven2/net/java/dev/jna/jna-platform/5.4.0/jna-platform-5.4.0.jar
  160. -O /opt/flink/lib/jna-platform-5.4.0.jar
  161. command: >-
  162. cp $FLINK_HOME/conf/flink-conf.yaml.tpl $FLINK_HOME/conf/flink-conf.yaml &&
  163. $FLINK_HOME/bin/jobmanager.sh start;
  164. while :;
  165. do
  166. if [[ -f $(find log -name '*standalonesession*.log' -print -quit) ]];
  167. then tail -f -n +1 log/*standalonesession*.log;
  168. fi;
  169. done
  170. service:
  171. type: ClusterIP
  172. annotations: {}
  173. # rest is additional service which exposes only HTTP port
  174. # can be using for cases of using exposeController
  175. rest:
  176. enabled: true
  177. annotations: {}
  178. headless:
  179. annotations: {}
  180. nodeSelector: {}
  181. affinity: {}
  182. tolerations: []
  183. persistent:
  184. enabled: false
  185. # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
  186. storageClass:
  187. size: 8Gi
  188. mountPath: "/flink_state"
  189. podManagementPolicy: Parallel
  190. annotations: {}
  191. # Example
  192. # "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
  193. serviceAccount:
  194. # Specifies whether a ServiceAccount should be created
  195. create: true
  196. # The name of the ServiceAccount to use.
  197. # If not set and create is true, a name is generated using the fullname template
  198. name:
  199. #livenessProbe will conduct checks for rpc port as tcpSocket probe
  200. livenessProbe:
  201. initialDelaySeconds: 10
  202. periodSeconds: 15
  203. readinessProbe:
  204. periodSeconds: 10
  205. initialDelaySeconds: 20
  206. podAnnotations: {}
  207. taskmanager:
  208. # Statefulset option will create Taskmanager as a StatefulSet
  209. # A necessary option for Persistent
  210. statefulset: true
  211. # Additional param for JVM to support security.properties override
  212. # check configMap for more information
  213. jvmArgs: "-Djava.security.properties={{ .Values.flink.workDir }}/conf/security.properties"
  214. # extraEnvs passes envs to Taskmanagers
  215. extraEnvs: []
  216. ports:
  217. rpc: 6122
  218. replicaCount: 4
  219. numberOfTaskSlots: 1
  220. memoryProcessSize: 1g
  221. memoryFlinkSize:
  222. resources: {}
  223. # Example
  224. # limits:
  225. # cpu: 3800m
  226. # memory: 8000Mi
  227. additionalCommand: >-
  228. cp /opt/flink/opt/flink-metrics-prometheus-*.jar
  229. /opt/flink/opt/flink-s3-fs-presto-*.jar
  230. /opt/flink/lib/ &&
  231. wget https://repo1.maven.org/maven2/com/github/oshi/oshi-core/3.4.0/oshi-core-3.4.0.jar
  232. -O /opt/flink/lib/oshi-core-3.4.0.jar &&
  233. wget https://repo1.maven.org/maven2/net/java/dev/jna/jna/5.4.0/jna-5.4.0.jar
  234. -O /opt/flink/lib/jna-5.4.0.jar &&
  235. wget https://repo1.maven.org/maven2/net/java/dev/jna/jna-platform/5.4.0/jna-platform-5.4.0.jar
  236. -O /opt/flink/lib/jna-platform-5.4.0.jar
  237. command: >-
  238. $FLINK_HOME/bin/taskmanager.sh start;
  239. while :;
  240. do
  241. if [[ -f $(find log -name '*taskmanager*.log' -print -quit) ]];
  242. then tail -f -n +1 log/*taskmanager*.log;
  243. fi;
  244. done
  245. service:
  246. type: ClusterIP
  247. nodeSelector: {}
  248. affinity: {}
  249. tolerations: []
  250. persistent:
  251. enabled: false
  252. # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
  253. storageClass:
  254. size: 8Gi
  255. mountPath: "/flink_state"
  256. podManagementPolicy: Parallel
  257. annotations:
  258. "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
  259. serviceAccount:
  260. # Specifies whether a ServiceAccount should be created
  261. create: true
  262. # The name of the ServiceAccount to use.
  263. # If not set and create is true, a name is generated using the fullname template
  264. name:
  265. #livenessProbe will conduct checks for rpc port as tcpSocket probe
  266. livenessProbe:
  267. initialDelaySeconds: 30
  268. periodSeconds: 60
  269. podAnnotations: {}
  270. ingress:
  271. enabled: false
  272. annotations: {}
  273. path: /
  274. hosts: []
  275. tls: []
  276. prometheus:
  277. # serviceMonitor provides service discovery for prometheus operatored installations
  278. serviceMonitor:
  279. enabled: false
  280. namespace:
  281. interval: 5s
  282. selector:
  283. # According to default selector for prometheus operator
  284. prometheus: kube-prometheus
  285. zookeeper:
  286. enabled: false
  287. replicaCount: 3
  288. env:
  289. ZK_HEAP_SIZE: "1G"
  290. ZOO_PORT: 2181
  291. resources:
  292. limits:
  293. cpu: 400m
  294. memory: 1256Mi
  295. persistence:
  296. enabled: true
  297. secrets:
  298. # Plain predefined secrets example
  299. # kubernetesSecrets:
  300. # - name: kerberos
  301. # mountPath: /kerberos
  302. bitnamiSealedSecrets:
  303. enabled: false
  304. # The encrypted raw file sealed secrets generated for example with
  305. # kubeseal --raw --from-file=... --controller-name sealed-secrets --scope namespace-wide
  306. sealedSecretFiles: {}
  307. # file1: encypted_file1
  308. # file2: encypted_file2
  309. sealedSecretFilesPath: /etc/sealed
  310. sealedSecretFilesAnnotations:
  311. sealedsecrets.bitnami.com/namespace-wide: true
  312. # The encrypted raw env sealed secrets generated for example with
  313. # echo -n password | kubeseal --raw --from-file=/dev/stdin --controller-name sealed-secrets --scope namespace-wide
  314. sealedSecretEnvs: {}
  315. # env1: encypted_env1
  316. # env2: encypted_env2
  317. sealedSecretEnvsAnnotations:
  318. sealedsecrets.bitnami.com/namespace-wide: true