---
# Default values for flink.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

nameOverride: ""
fullnameOverride: ""

image:
  repository: flink
  tag: 1.11.2-scala_2.12
  pullPolicy: IfNotPresent

imagePullSecrets: []

# Pod Security Context
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext: {}
# securityContext:
#   fsGroup: 1000
#   runAsUser: 1000
#   runAsNonRoot: true

# For general configuration
flink:
  # logging, log4j configuration copied from Flink distribution
  logging:
    log4j_properties: |+
      # This affects logging for both user code and Flink
      rootLogger.level = INFO
      rootLogger.appenderRef.console.ref = ConsoleAppender
      rootLogger.appenderRef.rolling.ref = RollingFileAppender

      # Uncomment this if you want to _only_ change Flink's logging
      #logger.flink.name = org.apache.flink
      #logger.flink.level = INFO

      # The following lines keep the log level of common libraries/connectors on
      # log level INFO. The root logger does not override this. You have to manually
      # change the log levels here.
      logger.akka.name = akka
      logger.akka.level = INFO
      logger.kafka.name= org.apache.kafka
      logger.kafka.level = INFO
      logger.hadoop.name = org.apache.hadoop
      logger.hadoop.level = INFO
      logger.zookeeper.name = org.apache.zookeeper
      logger.zookeeper.level = INFO

      # Log all infos to the console
      appender.console.name = ConsoleAppender
      appender.console.type = CONSOLE
      appender.console.layout.type = PatternLayout
      appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n

      # Log all infos in the given rolling file
      appender.rolling.name = RollingFileAppender
      appender.rolling.type = RollingFile
      appender.rolling.append = false
      appender.rolling.fileName = ${sys:log.file}
      appender.rolling.filePattern = ${sys:log.file}.%i
      appender.rolling.layout.type = PatternLayout
      appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
      appender.rolling.policies.type = Policies
      appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
      appender.rolling.policies.size.size=100MB
      appender.rolling.strategy.type = DefaultRolloverStrategy
      appender.rolling.strategy.max = 10

      # Suppress the irrelevant (wrong) warnings from the Netty channel handler
      logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
      logger.netty.level = OFF
  # monitoring is exporting metrics in Prometheus format
  monitoring:
    enabled: true
    # port for metrics
    port: 9999
    # latency monitoring
    latency:
      enabled: false
      probingInterval: 1000
    # system is additional system metrics
    system:
      enabled: true
      probingInterval: 5000
    rocksdb:
      enabled: false
  workDir: /opt/flink
  # In case of issue - Metaspace OOM increase this param according to your memory limits
  # params: |+
  #   taskmanager.memory.jvm-metaspace.size: 256mb
  params: ""
  state:
    # backend for state. Available options: filesystem, rocksdb, memory; empty - for default(memory)
    backend:
    # These values are default excludes file paths
    # https://ci.apache.org/projects/flink/flink-docs-stable/dev/stream/state/checkpointing.html#related-config-options
    params: |+
      state.checkpoints.dir: file:///flink_state/checkpoints
      state.savepoints.dir: file:///flink_state/savepoints
      state.backend.async: true
      state.backend.fs.memory-threshold: 1024
      state.backend.fs.write-buffer-size: 4096
      state.backend.incremental: false
      state.backend.local-recovery: false
      state.checkpoints.num-retained: 1
      taskmanager.state.local.root-dirs: file:///flink_state/local-recovery
    # https://ci.apache.org/projects/flink/flink-docs-stable/ops/state/state_backends.html#rocksdb-state-backend-config-options
    # * state.backend.rocksdb.localdir doesn't have a prefix - file://
    rocksdb: |+
      state.backend.rocksdb.checkpoint.transfer.thread.num: 1
      state.backend.rocksdb.localdir: /flink_state/rocksdb
      state.backend.rocksdb.options-factory: org.apache.flink.contrib.streaming.state.DefaultConfigurableOptionsFactory
      state.backend.rocksdb.predefined-options: DEFAULT
      state.backend.rocksdb.timer-service.factory: HEAP
      state.backend.rocksdb.ttl.compaction.filter.enabled: false

# extraEnvs passes envs to both Jobmanagers and Taskmanager
# for example
# extraEnvs:
#   - name: KAFKA_BOOTSTRAP_SERVERS
#     value: dest-kafka-bootstrap:9092
#
extraEnvs: []

jobmanager:
  # Statefulset option will create Jobmanager as a StatefulSet
  statefulset: false
  # Init containers
  initContainers: {}
  # Example
  # test:
  #   image: busybox:1.28
  #   command:
  #     - /bin/sh
  #     - -c
  #     - "echo test"
  # highAvailability configuration based on zookeeper
  highAvailability:
    # enabled also will enable zookeeper Dependency
    enabled: false
    zookeeperConnect: "{{ .Release.Name }}-zookeeper:{{ .Values.zookeeper.env.ZOO_PORT }}"
    zookeeperRootPath: /flink
    clusterId: /flink
    # storageDir for Jobmanagers. DFS expected.
    # Docs - Storage directory (required): JobManager metadata is persisted in the file system storageDir and only a pointer to this state is stored in ZooKeeper
    storageDir:
    # syncPort is a rpc port in HA configuration
    syncPort: 6150
    # command for HA configuration
    # this trick with sed is required because taskmanagers read jobmanager.rpc.address from Zookeeper.
    # For a configuration with one jobmanager (it is stable enough because Kubernetes will restart the Jobmanager on failures)
    # 'sed' can be changed to use the flink service name, e.g. {{ include "flink.name" . }}-jobmanager
    command: >-
      sed 's/REPLACE_HOSTNAME/'${FLINK_POD_IP}'/'
      $FLINK_HOME/conf/flink-conf.yaml.tpl > $FLINK_HOME/conf/flink-conf.yaml &&
      $FLINK_HOME/bin/jobmanager.sh start-foreground;
  # Additional param for JVM to support security.properties override
  # check configMap for more information
  jvmArgs: "-Djava.security.properties={{ .Values.flink.workDir }}/conf/security.properties"
  # extraEnvs passes envs to Jobmanagers
  extraEnvs: []
  ports:
    rpc: 6123
    # blob port is used for the Liveness probe
    blob: 6124
    ui: 8081
  replicaCount: 1
  # heapSize params for Jobmanager
  # keep in mind that Flink can use offheap memory
  # e.g. in case of checkpoint usage
  heapSize: 1g
  resources: {}
  # Example
  # limits:
  #   cpu: 3800m
  #   memory: 8000Mi
  additionalCommand: >-
    cp /opt/flink/opt/flink-s3-fs-presto-*.jar
    /opt/flink/lib/ &&
    wget https://repo1.maven.org/maven2/com/github/oshi/oshi-core/3.4.0/oshi-core-3.4.0.jar
    -O /opt/flink/lib/oshi-core-3.4.0.jar &&
    wget https://repo1.maven.org/maven2/net/java/dev/jna/jna/5.4.0/jna-5.4.0.jar
    -O /opt/flink/lib/jna-5.4.0.jar &&
    wget https://repo1.maven.org/maven2/net/java/dev/jna/jna-platform/5.4.0/jna-platform-5.4.0.jar
    -O /opt/flink/lib/jna-platform-5.4.0.jar
  command: >-
    cp $FLINK_HOME/conf/flink-conf.yaml.tpl $FLINK_HOME/conf/flink-conf.yaml &&
    $FLINK_HOME/bin/jobmanager.sh start-foreground;
  service:
    type: ClusterIP
    annotations: {}
    # rest is an additional service which exposes only the HTTP port
    # can be used for cases of using an exposeController
    rest:
      enabled: true
      annotations: {}
    headless:
      annotations: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  persistent:
    enabled: false
    # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
    storageClass:
    size: 8Gi
    mountPath: "/flink_state"
  podManagementPolicy: Parallel
  annotations: {}
  # Example
  # "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
  serviceAccount:
    # Specifies whether a ServiceAccount should be created
    create: true
    # The name of the ServiceAccount to use.
    # If not set and create is true, a name is generated using the fullname template
    name:
    annotations: {}
  # livenessProbe will conduct checks for rpc port as tcpSocket probe
  livenessProbe:
    initialDelaySeconds: 10
    periodSeconds: 15
  readinessProbe:
    periodSeconds: 10
    initialDelaySeconds: 20
  podAnnotations: {}

taskmanager:
  # Statefulset option will create Taskmanager as a StatefulSet
  # A necessary option for Persistent
  statefulset: true
  # Additional param for JVM to support security.properties override
  # check configMap for more information
  jvmArgs: "-Djava.security.properties={{ .Values.flink.workDir }}/conf/security.properties"
  # extraEnvs passes envs to Taskmanagers
  extraEnvs: []
  ports:
    rpc: 6122
  replicaCount: 4
  numberOfTaskSlots: 1
  memoryProcessSize: 1g
  memoryFlinkSize:
  resources: {}
  # Example
  # limits:
  #   cpu: 3800m
  #   memory: 8000Mi
  additionalCommand: >-
    cp /opt/flink/opt/flink-s3-fs-presto-*.jar
    /opt/flink/lib/ &&
    wget https://repo1.maven.org/maven2/com/github/oshi/oshi-core/3.4.0/oshi-core-3.4.0.jar
    -O /opt/flink/lib/oshi-core-3.4.0.jar &&
    wget https://repo1.maven.org/maven2/net/java/dev/jna/jna/5.4.0/jna-5.4.0.jar
    -O /opt/flink/lib/jna-5.4.0.jar &&
    wget https://repo1.maven.org/maven2/net/java/dev/jna/jna-platform/5.4.0/jna-platform-5.4.0.jar
    -O /opt/flink/lib/jna-platform-5.4.0.jar
  command: >-
    $FLINK_HOME/bin/taskmanager.sh start-foreground;
  service:
    type: ClusterIP
  nodeSelector: {}
  affinity: {}
  tolerations: []
  persistent:
    enabled: false
    # https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
    storageClass:
    size: 8Gi
    mountPath: "/flink_state"
  podManagementPolicy: Parallel
  annotations:
    "cluster-autoscaler.kubernetes.io/safe-to-evict": "false"
  serviceAccount:
    # Specifies whether a ServiceAccount should be created
    create: true
    # The name of the ServiceAccount to use.
    # If not set and create is true, a name is generated using the fullname template
    name:
    annotations: {}
  # livenessProbe will conduct checks for rpc port as tcpSocket probe
  livenessProbe:
    initialDelaySeconds: 30
    periodSeconds: 60
  podAnnotations: {}

ingress:
  enabled: false
  annotations: {}
  path: /
  hosts: []
  tls: []

prometheus:
  # serviceMonitor provides service discovery for Prometheus Operator installations
  serviceMonitor:
    enabled: false
    namespace:
    interval: 5s
    selector:
      # According to default selector for prometheus operator
      prometheus: kube-prometheus

zookeeper:
  enabled: false
  replicaCount: 3
  env:
    ZK_HEAP_SIZE: "1G"
    ZOO_PORT: 2181
  resources:
    limits:
      cpu: 400m
      memory: 1256Mi
  persistence:
    enabled: true

secrets:
  # Plain predefined secrets example
  # kubernetesSecrets:
  #   - name: kerberos
  #     mountPath: /kerberos
  bitnamiSealedSecrets:
    enabled: false
    # The encrypted raw file sealed secrets generated for example with
    # kubeseal --raw --from-file=... --controller-name sealed-secrets --scope namespace-wide
    sealedSecretFiles: {}
    # file1: encrypted_file1
    # file2: encrypted_file2
    sealedSecretFilesPath: /etc/sealed
    sealedSecretFilesAnnotations:
      # Kubernetes annotation values must be strings; an unquoted `true` is a
      # YAML boolean and fails API validation when rendered into the manifest.
      sealedsecrets.bitnami.com/namespace-wide: "true"
    # The encrypted raw env sealed secrets generated for example with
    # echo -n password | kubeseal --raw --from-file=/dev/stdin --controller-name sealed-secrets --scope namespace-wide
    sealedSecretEnvs: {}
    # env1: encrypted_env1
    # env2: encrypted_env2
    sealedSecretEnvsAnnotations:
      sealedsecrets.bitnami.com/namespace-wide: "true"