Unverified commit 1c15a7d6 authored by 爱穿格子裤, committed by GitHub

[fix-11404]: make common.properties configurable via values.yaml (#11441)

and set the resource.storage.type default to hdfs
Parent 35165330
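With this change, every key placed under conf.common in values.yaml is rendered into common.properties, so storage-related settings can be overridden per deployment. A minimal override sketch, assuming a file named custom-values.yaml and an S3/MinIO backend (the endpoint host and credentials below are placeholders, not values defined by this PR):

conf:
  common:
    resource.storage.type: S3
    resource.upload.path: /dolphinscheduler
    fs.defaultFS: s3a://dolphinscheduler
    aws.access.key.id: minioadmin
    aws.secret.access.key: minioadmin
    aws.region: us-east-1
    aws.endpoint: http://minio:9000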
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{{- if index .Values.conf "common" }}
{{- range $key, $value := index .Values.conf "common" }}
{{ $key }}={{ $value }}
{{- end }}
{{- end }}
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "dolphinscheduler.fullname" . }}-configs
  labels:
    app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-common
    {{- include "dolphinscheduler.common.labels" . | nindent 4 }}
data:
  common_properties: |-
{{ tpl (.Files.Get "resources/config/common.properties") . | indent 4 }}
\ No newline at end of file
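To see what the template above produces, the chart can be rendered locally or the live ConfigMap inspected. The release name dolphinscheduler and the resulting ConfigMap name dolphinscheduler-configs are assumptions based on the fullname helper; the data key common_properties comes from the template itself:

# render the chart locally with the override file and locate the generated ConfigMap data
helm template dolphinscheduler . -f custom-values.yaml | grep -A 30 'common_properties: |-'

# after installation, read the rendered properties straight from the cluster
kubectl get configmap dolphinscheduler-configs -o jsonpath='{.data.common_properties}'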
@@ -104,6 +104,9 @@ spec:
          volumeMounts:
            - mountPath: "/opt/dolphinscheduler/logs"
              name: {{ include "dolphinscheduler.fullname" . }}-alert
            - name: config-volume
              mountPath: /opt/dolphinscheduler/conf/common.properties
              subPath: common_properties
      volumes:
        - name: {{ include "dolphinscheduler.fullname" . }}-alert
          {{- if .Values.alert.persistentVolumeClaim.enabled }}
@@ -112,3 +115,6 @@ spec:
          {{- else }}
          emptyDir: {}
          {{- end }}
        - name: config-volume
          configMap:
            name: {{ include "dolphinscheduler.fullname" . }}-configs
\ No newline at end of file
@@ -106,6 +106,9 @@ spec:
          volumeMounts:
            - mountPath: "/opt/dolphinscheduler/logs"
              name: {{ include "dolphinscheduler.fullname" . }}-api
            - name: config-volume
              mountPath: /opt/dolphinscheduler/conf/common.properties
              subPath: common_properties
            {{- include "dolphinscheduler.sharedStorage.volumeMount" . | nindent 12 }}
            {{- include "dolphinscheduler.fsFileResource.volumeMount" . | nindent 12 }}
      volumes:
@@ -116,5 +119,8 @@ spec:
          {{- else }}
          emptyDir: {}
          {{- end }}
        - name: config-volume
          configMap:
            name: {{ include "dolphinscheduler.fullname" . }}-configs
        {{- include "dolphinscheduler.sharedStorage.volume" . | nindent 8 }}
        {{- include "dolphinscheduler.fsFileResource.volume" . | nindent 8 }}
@@ -102,6 +102,9 @@ spec:
            - mountPath: "/opt/dolphinscheduler/logs"
              name: {{ include "dolphinscheduler.fullname" . }}-master
            {{- include "dolphinscheduler.sharedStorage.volumeMount" . | nindent 12 }}
            - name: config-volume
              mountPath: /opt/dolphinscheduler/conf/common.properties
              subPath: common_properties
      volumes:
        - name: {{ include "dolphinscheduler.fullname" . }}-master
          {{- if .Values.master.persistentVolumeClaim.enabled }}
@@ -111,6 +114,9 @@ spec:
          emptyDir: {}
          {{- end }}
        {{- include "dolphinscheduler.sharedStorage.volume" . | nindent 8 }}
        - name: config-volume
          configMap:
            name: {{ include "dolphinscheduler.fullname" . }}-configs
  {{- if .Values.master.persistentVolumeClaim.enabled }}
  volumeClaimTemplates:
    - metadata:
@@ -105,6 +105,9 @@ spec:
              name: {{ include "dolphinscheduler.fullname" . }}-worker-data
            - mountPath: "/opt/dolphinscheduler/logs"
              name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
            - name: config-volume
              mountPath: /opt/dolphinscheduler/conf/common.properties
              subPath: common_properties
            {{- include "dolphinscheduler.sharedStorage.volumeMount" . | nindent 12 }}
            {{- include "dolphinscheduler.fsFileResource.volumeMount" . | nindent 12 }}
      volumes:
@@ -122,6 +125,9 @@ spec:
          {{- else }}
          emptyDir: {}
          {{- end }}
        - name: config-volume
          configMap:
            name: {{ include "dolphinscheduler.fullname" . }}-configs
        {{- include "dolphinscheduler.sharedStorage.volume" . | nindent 8 }}
        {{- include "dolphinscheduler.fsFileResource.volume" . | nindent 8 }}
  {{- if .Values.worker.persistentVolumeClaim.enabled }}
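Each component above mounts the ConfigMap key common_properties over /opt/dolphinscheduler/conf/common.properties via subPath, so the effective file can be verified inside any running pod; the worker pod name below is illustrative:

kubectl exec dolphinscheduler-worker-0 -- cat /opt/dolphinscheduler/conf/common.properties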
@@ -66,6 +66,84 @@ externalRegistry:
  registryPluginName: "zookeeper"
  registryServers: "127.0.0.1:2181"

conf:
  common:
    # user data local directory path, please make sure the directory exists and have read write permissions
    data.basedir.path: /tmp/dolphinscheduler
    # resource storage type: HDFS, S3, NONE
    resource.storage.type: HDFS
    # resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
    resource.upload.path: /dolphinscheduler
    # whether to startup kerberos
    hadoop.security.authentication.startup.state: false
    # java.security.krb5.conf path
    java.security.krb5.conf.path: /opt/krb5.conf
    # login user from keytab username
    login.user.keytab.username: hdfs-mycluster@ESZ.COM
    # login user from keytab path
    login.user.keytab.path: /opt/hdfs.headless.keytab
    # kerberos expire time, the unit is hour
    kerberos.expire.time: 2
    # resource view suffixs
    #resource.view.suffixs: txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js
    # if resource.storage.type: HDFS, the user must have the permission to create directories under the HDFS root path
    hdfs.root.user: hdfs
    # if resource.storage.type: S3, the value like: s3a://dolphinscheduler; if resource.storage.type: HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
    fs.defaultFS: file:///
    aws.access.key.id: minioadmin
    aws.secret.access.key: minioadmin
    aws.region: us-east-1
    aws.endpoint: http://localhost:9000
    # resourcemanager port, the default value is 8088 if not specified
    resource.manager.httpaddress.port: 8088
    # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
    yarn.resourcemanager.ha.rm.ids: 192.168.xx.xx,192.168.xx.xx
    # if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
    yarn.application.status.address: http://ds1:%s/ws/v1/cluster/apps/%s
    # job history status url when application number threshold is reached(default 10000, maybe it was set to 1000)
    yarn.job.history.status.address: http://ds1:19888/ws/v1/history/mapreduce/jobs/%s
    # datasource encryption enable
    datasource.encryption.enable: false
    # datasource encryption salt
    datasource.encryption.salt: '!@#$%^&*'
    # data quality option
    data-quality.jar.name: dolphinscheduler-data-quality-dev-SNAPSHOT.jar
    #data-quality.error.output.path: /tmp/data-quality-error-data
    # Network IP gets priority, default inner outer
    # Whether hive SQL is executed in the same session
    support.hive.oneSession: false
    # use sudo or not, if set true, executing user is tenant user and deploy user needs sudo permissions; if set false, executing user is the deploy user and doesn't need sudo permissions
    sudo.enable: true
    # network interface preferred like eth0, default: empty
    #dolphin.scheduler.network.interface.preferred:
    # network IP gets priority, default: inner outer
    #dolphin.scheduler.network.priority.strategy: default
    # system env path
    #dolphinscheduler.env.path: dolphinscheduler_env.sh
    # development state
    development.state: false
    # rpc port
    alert.rpc.port: 50052
    # Url endpoint for zeppelin RESTful API
    zeppelin.rest.url: http://localhost:8080

common:
  ## Configmap
  configmap:
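With the defaults above in place, an install or upgrade only needs the override file; if --set is used instead, the dots inside a property name must be escaped. The release name and chart path below are assumptions:

helm upgrade --install dolphinscheduler . -f custom-values.yaml
# equivalent single-key override; note the escaped dots in the property name
helm upgrade --install dolphinscheduler . --set 'conf.common.resource\.storage\.type=S3'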