Unverified commit d84e56c9, authored by Rongfeng Fu, committed by GitHub

V2.2.0 (#176)

Parent 156416e1
......@@ -132,7 +132,3 @@ A:You can use the `obd update` command to update OBD. When you are done with t
## Protocol
OBD complies with [GPL-3.0](/LICENSE).
## Sysbench benchmark
- [Run the Sysbench benchmark test in OceanBase Database (Paetica, VLDB 2023)](https://github.com/oceanbase/oceanbase-doc/blob/V4.1.0/en-US/7.reference/3.performance-tuning-guide/6.performance-whitepaper/3.run-the-sysbench-benchmark-test-in-oceanbase-database.md)
......@@ -44,6 +44,7 @@ ROOT_IO = IO(1)
OBD_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), '.obd')
OBDIAG_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), 'oceanbase-diagnostic-tool')
COMMAND_ENV.load(os.path.join(OBD_HOME_PATH, '.obd_environ'), ROOT_IO)
ROOT_IO.default_confirm = COMMAND_ENV.get(ENV.ENV_DEFAULT_CONFIRM, '0') == '1'
class OptionHelpFormatter(IndentedHelpFormatter):
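For reference, a minimal sketch of what the new `IO_DEFAULT_CONFIRM` wiring does. `COMMAND_ENV` and OBD's `IO` class are stubbed here; only the `== '1'` parsing and the auto-confirm short-circuit come from this diff:

```python
class IO(object):
    def __init__(self):
        self.default_confirm = False

    def confirm(self, msg):
        print('%s [y/n]: ' % msg, end='')
        if self.default_confirm:
            print('y')          # auto-answer, no prompt
            return True
        return input().strip().lower() == 'y'

env = {'IO_DEFAULT_CONFIRM': '1'}                    # stand-in for COMMAND_ENV
io = IO()
io.default_confirm = env.get('IO_DEFAULT_CONFIRM', '0') == '1'
print(io.confirm('Continue?'))                       # True, without blocking
```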
......@@ -871,10 +872,11 @@ class ClusterRedeployCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterRedeployCommand, self).__init__('redeploy', 'Redeploy a started cluster.')
self.parser.add_option('-f', '--force-kill', action='store_true', help="Force kill the running observer process in the working directory.")
self.parser.add_option('--confirm', action='store_true', help='Confirm to redeploy.')
def _do_command(self, obd):
if self.cmds:
res = obd.redeploy_cluster(self.cmds[0])
res = obd.redeploy_cluster(self.cmds[0], need_confirm=not getattr(self.opts, 'confirm', False))
self.background_telemetry_task(obd)
return res
else:
......
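A condensed sketch of the new `--confirm` flow above: passing the flag flips `need_confirm` off, so `redeploy_cluster` skips the prompt entirely. Option parsing and the actual redeploy steps are stubbed:

```python
def confirm(msg):
    return input('%s [y/n]: ' % msg).strip().lower() == 'y'

def redeploy_cluster(name, need_confirm=False):
    if need_confirm and not confirm('Are you sure to destroy the "%s" cluster and rebuild it?' % name):
        return False
    print('redeploying %s ...' % name)               # destroy + deploy + start
    return True

class Opts:
    confirm = True                                   # as if --confirm was passed

redeploy_cluster('demo', need_confirm=not getattr(Opts, 'confirm', False))
```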
......@@ -820,15 +820,20 @@ class ClusterConfig(object):
self._cache_server[server] = self._apply_temp_conf(self._get_unprocessed_server_conf(server))
return self._cache_server[server]
def get_original_global_conf(self):
return deepcopy(self._original_global_conf)
def get_original_global_conf(self, format_conf=False):
conf = deepcopy(self._original_global_conf)
format_conf and self._apply_temp_conf(conf)
return conf
def get_original_server_conf(self, server):
return self._server_conf.get(server)
def get_original_server_conf(self, server, format_conf=False):
conf = deepcopy(self._server_conf.get(server))
format_conf and self._apply_temp_conf(conf)
return conf
def get_original_server_conf_with_global(self, server):
config = self.get_original_global_conf()
def get_original_server_conf_with_global(self, server, format_conf=False):
config = deepcopy(self.get_original_global_conf())
config.update(self._server_conf.get(server, {}))
format_conf and self._apply_temp_conf(config)
return config
......
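The `format_conf and self._apply_temp_conf(conf)` lines use short-circuit evaluation as a one-line conditional: `_apply_temp_conf` only runs when the caller asks for formatted values, and it mutates the copied dict in place. A sketch of the idiom, with `_apply_temp_conf` replaced by a toy normalizer:

```python
from copy import deepcopy

def _apply_temp_conf(conf):                 # toy stand-in for OBD's normalizer
    for key, value in conf.items():
        if isinstance(value, str) and value.isdigit():
            conf[key] = int(value)          # coerce in place
    return conf

_original_global_conf = {'cpu_count': '16', 'home_path': '/root/ob'}

def get_original_global_conf(format_conf=False):
    conf = deepcopy(_original_global_conf)
    format_conf and _apply_temp_conf(conf)  # runs only when format_conf is truthy
    return conf

print(get_original_global_conf())                   # values stay raw strings
print(get_original_global_conf(format_conf=True))   # cpu_count becomes 16
```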
......@@ -42,3 +42,6 @@ TELEMETRY_MODE = "TELEMETRY_MODE"
# telemetry log mode. 0 - disable, 1 - enable.
TELEMETRY_LOG_MODE = "TELEMETRY_LOG_MODE"
# ROOT IO DEFAULT CONFIRM. 0 - disable, 1 - enable.
ENV_DEFAULT_CONFIRM = "IO_DEFAULT_CONFIRM"
\ No newline at end of file
......@@ -175,6 +175,9 @@ EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK_AVAILABLE = OBDErrorCodeTemplate(4305
EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK = OBDErrorCodeTemplate(4305, 'There is not enough log disk for ocp meta tenant.')
EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_MEM = OBDErrorCodeTemplate(4305, 'There is not enough memory for ocp meta tenant')
EC_OCP_EXPRESS_ADMIN_PASSWD_ERROR = OBDErrorCodeTemplate(4306, '({ip}) ocp-express admin_passwd invalid.(Current :{current})')
# 4350-4399 have been used by ocp
# sql
EC_SQL_EXECUTE_FAILED = OBDErrorCodeTemplate(5000, "{sql} execute failed")
......
......@@ -271,9 +271,9 @@ class RemoteMirrorRepository(MirrorRepository):
self._load_repo_age()
if self.enabled:
repo_age = ConfigUtil.get_value_from_dict(meta_data, 'repo_age', 0, int)
if repo_age > self.repo_age or int(time.time()) - 86400 > self.repo_age:
self.repo_age = repo_age
self.update_mirror()
if (repo_age > self.repo_age or int(time.time()) - 86400 > self.repo_age) and self.available:
if self.update_mirror():
self.repo_age = repo_age
@property
def available(self):
......
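The mirror hunk changes two things: the refresh is now gated on `self.available`, and `repo_age` only advances when `update_mirror()` reports success, so a failed download is retried on the next pass. A sketch of that guard (the repository class and the `refresh` wrapper are stubs):

```python
import time

class Mirror(object):
    def __init__(self):
        self.repo_age = 0
        self.available = True

    def update_mirror(self):
        return True                          # stand-in for the metadata fetch

    def refresh(self, remote_repo_age):
        # refresh when the remote is newer or the local age is over a day old
        if (remote_repo_age > self.repo_age or int(time.time()) - 86400 > self.repo_age) and self.available:
            if self.update_mirror():
                self.repo_age = remote_repo_age   # advance only on success

m = Mirror()
m.refresh(int(time.time()))
print(m.repo_age != 0)                       # True
```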
......@@ -32,6 +32,7 @@ from _manager import Manager
from _rpm import Version
from ssh import ConcurrentExecutor
from tool import ConfigUtil, DynamicLoading, YamlLoader, FileUtil
from _types import *
yaml = YamlLoader()
......@@ -360,225 +361,9 @@ class Null(object):
def __init__(self):
pass
class ParamPlugin(Plugin):
class ConfigItemType(object):
TYPE_STR = None
NULL = Null()
def __init__(self, s):
try:
self._origin = s
self._value = 0
self.value = self.NULL
self._format()
if self.value == self.NULL:
self.value = self._origin
except:
raise Exception("'%s' is not %s" % (self._origin, self._type_str))
@property
def _type_str(self):
if self.TYPE_STR is None:
self.TYPE_STR = str(self.__class__.__name__).split('.')[-1]
return self.TYPE_STR
def _format(self):
raise NotImplementedError
def __str__(self):
return str(self._origin)
def __hash__(self):
return self._origin.__hash__()
@property
def __cmp_value__(self):
return self._value
def __eq__(self, value):
if value is None:
return False
return self.__cmp_value__ == value.__cmp_value__
def __gt__(self, value):
if value is None:
return True
return self.__cmp_value__ > value.__cmp_value__
def __ge__(self, value):
if value is None:
return True
return self.__eq__(value) or self.__gt__(value)
def __lt__(self, value):
if value is None:
return False
return self.__cmp_value__ < value.__cmp_value__
def __le__(self, value):
if value is None:
return False
return self.__eq__(value) or self.__lt__(value)
class Moment(ConfigItemType):
def _format(self):
if self._origin:
if self._origin.upper() == 'DISABLE':
self._value = 0
else:
r = re.match('^(\d{1,2}):(\d{1,2})$', self._origin)
h, m = r.groups()
h, m = int(h), int(m)
if 0 <= h <= 23 and 0 <= m <= 60:
self._value = h * 60 + m
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Time(ConfigItemType):
UNITS = {
'ns': 0.000000001,
'us': 0.000001,
'ms': 0.001,
's': 1,
'm': 60,
'h': 3600,
'd': 86400
}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['s']
else:
r = re.match('^(\d+)(\w+)$', self._origin.lower())
n, u = r.groups()
unit = self.UNITS.get(u.lower())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Capacity(ConfigItemType):
UNITS = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40, 'P': 1 << 50}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['M']
else:
r = re.match('^(\d+)(\w)B?$', self._origin.upper())
n, u = r.groups()
unit = self.UNITS.get(u.upper())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class StringList(ConfigItemType):
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
self._value = self._origin.split(';')
else:
self._value = []
class Dict(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, dict):
raise Exception("Invalid Value")
self._value = self._origin
else:
self._value = self.value = {}
class List(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, list):
raise Exception("Invalid value: {} is not a list.".format(self._origin))
self._value = self._origin
else:
self._value = self.value = []
class StringOrKvList(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, list):
raise Exception("Invalid value: {} is not a list.".format(self._origin))
for item in self._origin:
if not item:
continue
if not isinstance(item, (str, dict)):
raise Exception("Invalid value: {} should be string or key-value format.".format(item))
if isinstance(item, dict):
if len(item.keys()) != 1:
raise Exception("Invalid value: {} should be single key-value format".format(item))
self._value = self._origin
else:
self._value = self.value = []
class Double(ConfigItemType):
def _format(self):
self.value = self._value = float(self._origin) if self._origin else 0
class Boolean(ConfigItemType):
def _format(self):
if isinstance(self._origin, bool):
self._value = self._origin
else:
_origin = str(self._origin).lower()
if _origin == 'true':
self._value = True
elif _origin == 'false':
self._value = False
elif _origin.isdigit():
self._value = bool(self._origin)
else:
raise Exception('%s is not Boolean' % _origin)
self.value = self._value
class Integer(ConfigItemType):
def _format(self):
if self._origin is None:
self._value = 0
self._origin = 0
else:
_origin = str(self._origin)
try:
self.value = self._value = int(_origin)
except:
raise Exception('%s is not Integer' % _origin)
class String(ConfigItemType):
def _format(self):
self.value = self._value = str(self._origin) if self._origin else ''
class ConfigItem(object):
def __init__(
......@@ -667,17 +452,18 @@ class ParamPlugin(Plugin):
if self._src_data is None:
try:
TYPES = {
'DOUBLE': ParamPlugin.Double,
'BOOL': ParamPlugin.Boolean,
'INT': ParamPlugin.Integer,
'STRING': ParamPlugin.String,
'MOMENT': ParamPlugin.Moment,
'TIME': ParamPlugin.Time,
'CAPACITY': ParamPlugin.Capacity,
'STRING_LIST': ParamPlugin.StringList,
'DICT': ParamPlugin.Dict,
'LIST': ParamPlugin.List,
'PARAM_LIST': ParamPlugin.StringOrKvList
'DOUBLE': Double,
'BOOL': Boolean,
'INT': Integer,
'STRING': String,
'MOMENT': Moment,
'TIME': Time,
'CAPACITY': Capacity,
'CAPACITY_MB': CapacityMB,
'STRING_LIST': StringList,
'DICT': Dict,
'LIST': List,
'PARAM_LIST': StringOrKvList
}
self._src_data = {}
with open(self.def_param_yaml_path, 'rb') as f:
......@@ -688,7 +474,7 @@ class ParamPlugin(Plugin):
if param_type in TYPES:
param_type = TYPES[param_type]
else:
param_type = ParamPlugin.String
param_type = String
self._src_data[conf['name']] = ParamPlugin.ConfigItem(
name=conf['name'],
......
......@@ -232,7 +232,7 @@ class ParallerExtractor(object):
pool.close()
pool = None
except:
self.stdio and getattr(self.stdio, 'exception', print)()
self.stdio and getattr(self.stdio, 'exception', print)('')
finally:
pool and pool.close()
return False
......
......@@ -379,6 +379,7 @@ class IO(object):
):
self.level = level
self.msg_lv = msg_lv
self.default_confirm = False
self._log_path = None
self._trace_id = None
self._log_name = 'default'
......@@ -672,6 +673,8 @@ class IO(object):
def confirm(self, msg):
msg = '%s [y/n]: ' % msg
self.print(msg, end='')
if self.default_confirm:
return True
if self._input_is_tty:
while True:
try:
......@@ -748,7 +751,7 @@ class IO(object):
self._print(MsgLevel.VERBOSE, '%s %s' % (self._verbose_prefix, msg), *args, **kwargs)
if sys.version_info.major == 2:
def exception(self, msg, *args, **kwargs):
def exception(self, msg='', *args, **kwargs):
import linecache
exception_msg = []
ei = sys.exc_info()
......@@ -780,7 +783,7 @@ class IO(object):
msg and self.error(msg)
print_stack('\n'.join(exception_msg))
else:
def exception(self, msg, *args, **kwargs):
def exception(self, msg='', *args, **kwargs):
ei = sys.exc_info()
traceback_e = traceback.TracebackException(type(ei[1]), ei[1], ei[2], limit=None)
pre_stach = traceback.extract_stack()[self.track_limit:-2]
......
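Giving `exception` a default `msg=''` matters for call sites like the `getattr(self.stdio, 'exception', print)('')` line above: `print('')` stays valid as the fallback, and `stdio.exception()` can now be called bare. A toy equivalent:

```python
import sys
import traceback

def exception(msg='', *args, **kwargs):
    msg and print(msg, file=sys.stderr)      # message is optional now
    traceback.print_exc()

try:
    1 / 0
except ZeroDivisionError:
    exception()                              # previously a TypeError: missing msg
```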
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import re
__all__ = ("Moment", "Time", "Capacity", "CapacityMB", "StringList", "Dict", "List", "StringOrKvList", "Double", "Boolean", "Integer", "String")
class Null(object):
def __init__(self):
pass
class ConfigItemType(object):
TYPE_STR = None
NULL = Null()
def __init__(self, s):
try:
self._origin = s
self._value = 0
self.value = self.NULL
self._format()
if self.value == self.NULL:
self.value = self._origin
except:
raise Exception("'%s' is not %s" % (self._origin, self._type_str))
@property
def _type_str(self):
if self.TYPE_STR is None:
self.TYPE_STR = str(self.__class__.__name__).split('.')[-1]
return self.TYPE_STR
def _format(self):
raise NotImplementedError
def __str__(self):
return str(self._origin)
def __hash__(self):
return self._origin.__hash__()
@property
def __cmp_value__(self):
return self._value
def __eq__(self, value):
if value is None:
return False
return self.__cmp_value__ == value.__cmp_value__
def __gt__(self, value):
if value is None:
return True
return self.__cmp_value__ > value.__cmp_value__
def __ge__(self, value):
if value is None:
return True
return self.__eq__(value) or self.__gt__(value)
def __lt__(self, value):
if value is None:
return False
return self.__cmp_value__ < value.__cmp_value__
def __le__(self, value):
if value is None:
return False
return self.__eq__(value) or self.__lt__(value)
class Moment(ConfigItemType):
def _format(self):
if self._origin:
if self._origin.upper() == 'DISABLE':
self._value = 0
else:
r = re.match('^(\d{1,2}):(\d{1,2})$', self._origin)
h, m = r.groups()
h, m = int(h), int(m)
if 0 <= h <= 23 and 0 <= m <= 60:
self._value = h * 60 + m
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Time(ConfigItemType):
UNITS = {
'ns': 0.000000001,
'us': 0.000001,
'ms': 0.001,
's': 1,
'm': 60,
'h': 3600,
'd': 86400
}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['s']
else:
r = re.match('^(\d+)(\w+)$', self._origin.lower())
n, u = r.groups()
unit = self.UNITS.get(u.lower())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Capacity(ConfigItemType):
UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, 'P': 1 << 50}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['M']
else:
r = re.match('^(\d+)(\w)B?$', self._origin.upper())
n, u = r.groups()
unit = self.UNITS.get(u.upper())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class CapacityMB(Capacity):
def _format(self):
super(CapacityMB, self)._format()
if isinstance(self._origin, str) and self._origin.isdigit():
self.value = self._origin + 'M'
if not self._origin:
self.value = '0M'
class StringList(ConfigItemType):
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
self._value = self._origin.split(';')
else:
self._value = []
class Dict(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, dict):
raise Exception("Invalid Value")
self._value = self._origin
else:
self._value = self.value = {}
class List(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, list):
raise Exception("Invalid value: {} is not a list.".format(self._origin))
self._value = self._origin
else:
self._value = self.value = []
class StringOrKvList(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, list):
raise Exception("Invalid value: {} is not a list.".format(self._origin))
for item in self._origin:
if not item:
continue
if not isinstance(item, (str, dict)):
raise Exception("Invalid value: {} should be string or key-value format.".format(item))
if isinstance(item, dict):
if len(item.keys()) != 1:
raise Exception("Invalid value: {} should be single key-value format".format(item))
self._value = self._origin
else:
self._value = self.value = []
class Double(ConfigItemType):
def _format(self):
self.value = self._value = float(self._origin) if self._origin else 0
class Boolean(ConfigItemType):
def _format(self):
if isinstance(self._origin, bool):
self._value = self._origin
else:
_origin = str(self._origin).lower()
if _origin == 'true':
self._value = True
elif _origin == 'false':
self._value = False
elif _origin.isdigit():
self._value = bool(self._origin)
else:
raise Exception('%s is not Boolean' % _origin)
self.value = self._value
class Integer(ConfigItemType):
def _format(self):
if self._origin is None:
self._value = 0
self._origin = 0
else:
_origin = str(self._origin)
try:
self.value = self._value = int(_origin)
except:
raise Exception('%s is not Integer' % _origin)
class String(ConfigItemType):
def _format(self):
self.value = self._value = str(self._origin) if self._origin else ''
\ No newline at end of file
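A quick sanity check of the relocated types, assuming the new `_types` module shown above is on the import path:

```python
from _types import Boolean, Capacity, CapacityMB, Time

print(Capacity('2G') == Capacity('2048M'))   # True: both normalize to bytes
print(CapacityMB('10').value)                # '10M': bare digits get an M suffix
print(CapacityMB(None).value)                # '0M': empty input defaults to 0M
print(Time('2m') > Time('90s'))              # True: 120s vs 90s
print(Boolean('true').value)                 # True
```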
......@@ -2284,13 +2284,16 @@ class ObdHome(object):
self._call_stdio('stop_loading', 'succeed')
return False
def redeploy_cluster(self, name, search_repo=True):
def redeploy_cluster(self, name, search_repo=True, need_confirm=False):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
self.set_deploy(deploy)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
if need_confirm and not self._call_stdio('confirm', 'Are you sure to destroy the "%s" cluster and rebuild it?' % name):
return False
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Get deploy configuration')
......@@ -2630,9 +2633,9 @@ class ObdHome(object):
route = []
use_images = []
upgrade_route_plugins = self.search_py_script_plugin([current_repository], 'upgrade_route', no_found_act='warn')
if current_repository in upgrade_route_plugins:
ret = self.call_plugin(upgrade_route_plugins[current_repository], current_repository , current_repository=current_repository, dest_repository=dest_repository)
upgrade_route_plugins = self.search_py_script_plugin([dest_repository], 'upgrade_route', no_found_act='warn')
if dest_repository in upgrade_route_plugins:
ret = self.call_plugin(upgrade_route_plugins[dest_repository], current_repository , current_repository=current_repository, dest_repository=dest_repository)
route = ret.get_return('route')
if not route:
return False
......@@ -2742,9 +2745,6 @@ class ObdHome(object):
if not install_plugins:
return False
if not self.install_repositories_to_servers(deploy_config, upgrade_repositories[1:], install_plugins, ssh_clients, self.options):
return False
script_query_timeout = getattr(self.options, 'script_query_timeout', '')
n = len(upgrade_repositories)
while upgrade_ctx['index'] < n:
......@@ -4006,7 +4006,7 @@ class ObdHome(object):
cluster_config = deploy_config.components[component_name]
if not cluster_config.servers:
self._call_stdio('error', '%s server list is empty' % allow_components[0])
self._call_stdio('error', '%s server list is empty' % allow_components)
return False
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
......@@ -4015,8 +4015,9 @@ class ObdHome(object):
self._call_stdio('stop_loading', 'succeed')
target_repository = None
for repository in repositories:
if repository.name == allow_components[0]:
if repository.name == component_name:
target_repository = repository
break
if gather_type in ['gather_plan_monitor']:
setattr(opts, 'connect_cluster', True)
obdiag_path = getattr(opts, 'obdiag_dir', None)
......
......@@ -181,7 +181,7 @@ grafana:
- prometheus
global:
home_path: /root/grafana
login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
login_password: xxxxxxxxx # Grafana login password.
# data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
# logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
# plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
......@@ -181,7 +181,7 @@ grafana:
- prometheus
global:
home_path: /root/grafana
login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
login_password: xxxxxxxxx # Grafana login password.
# data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
# logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
# plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
......@@ -223,7 +223,7 @@ grafana:
- prometheus
global:
home_path: /root/grafana
login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
login_password: xxxxxxxxx # Grafana login password.
# data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
# logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
# plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
......@@ -28,8 +28,6 @@ oceanbase-ce:
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
skip_proxy_sys_private_check: true
enable_strict_kernel_release: false
# root_password: # root user password
# In this example , support multiple ob process in single node, so different process use different ports.
# If deploy ob cluster in multiple nodes, the port and path setting can be same.
......
......@@ -210,7 +210,7 @@ grafana:
- prometheus
global:
home_path: /root/grafana
login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
login_password: xxxxxxxxx # Grafana login password.
# data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
# logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
# plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
......@@ -10,7 +10,7 @@ grafana:
- 192.168.1.5
global:
home_path: /root/grafana
login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
login_password: xxxxxxxxx # Grafana login password.
# data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
# logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
# plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
......@@ -77,7 +77,7 @@ grafana:
- prometheus
global:
home_path: /root/grafana
login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
login_password: xxxxxxxxx # Grafana login password.
# data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
# logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
# plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
......@@ -23,8 +23,10 @@ oceanbase-ce:
# please set memory limit to a suitable value which is matching resource.
memory_limit: 6G # The maximum running memory for an observer
system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
datafile_size: 20G # Size of the data file.
log_disk_size: 15G # The size of disk space used by the clog files.
datafile_size: 2G # Size of the data file.
datafile_next: 2G # The auto-extend step. Please enter a capacity, such as 2G
datafile_maxsize: 20G # The auto-extend max size. Please enter a capacity, such as 20G
log_disk_size: 13G # The size of disk space used by the clog files.
cpu_count: 16
production_mode: false
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
......@@ -23,8 +23,10 @@ oceanbase-ce:
# please set memory limit to a suitable value which is matching resource.
memory_limit: 6G # The maximum running memory for an observer
system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
datafile_size: 20G # Size of the data file.
log_disk_size: 15G # The size of disk space used by the clog files.
datafile_size: 2G # Size of the data file.
datafile_next: 2G # The auto-extend step. Please enter a capacity, such as 2G
datafile_maxsize: 20G # The auto-extend max size. Please enter a capacity, such as 20G
log_disk_size: 13G # The size of disk space used by the clog files.
cpu_count: 16
production_mode: false
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
......@@ -20,8 +20,10 @@ oceanbase-ce:
# please set memory limit to a suitable value which is matching resource.
memory_limit: 6G # The maximum running memory for an observer
system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
datafile_size: 20G # Size of the data file.
log_disk_size: 15G # The size of disk space used by the clog files.
datafile_size: 2G # Size of the data file.
datafile_next: 2G # The auto-extend step. Please enter a capacity, such as 2G
datafile_maxsize: 20G # The auto-extend max size. Please enter a capacity, such as 20G
log_disk_size: 13G # The size of disk space used by the clog files.
cpu_count: 16
production_mode: false
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
......@@ -27,8 +27,10 @@ oceanbase-ce:
# please set memory limit to a suitable value which is matching resource.
memory_limit: 6G # The maximum running memory for an observer
system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
datafile_size: 20G # Size of the data file.
log_disk_size: 15G # The size of disk space used by the clog files.
datafile_size: 2G # Size of the data file.
datafile_next: 2G # The auto-extend step. Please enter a capacity, such as 2G
datafile_maxsize: 20G # The auto-extend max size. Please enter a capacity, such as 20G
log_disk_size: 13G # The size of disk space used by the clog files.
cpu_count: 16
production_mode: false
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
......@@ -27,8 +27,10 @@ oceanbase-ce:
# please set memory limit to a suitable value which is matching resource.
memory_limit: 6G # The maximum running memory for an observer
system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
datafile_size: 20G # Size of the data file.
log_disk_size: 15G # The size of disk space used by the clog files.
datafile_size: 2G # Size of the data file.
datafile_next: 2G # The auto-extend step. Please enter a capacity, such as 2G
datafile_maxsize: 20G # The auto-extend max size. Please enter a capacity, such as 20G
log_disk_size: 13G # The size of disk space used by the clog files.
cpu_count: 16
production_mode: false
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
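The example configs now start the data file small and let it grow: `datafile_size` is the initial size, `datafile_next` the auto-extend step, and `datafile_maxsize` the ceiling. A rough illustration of the growth, with semantics taken from the config comments rather than the observer's actual allocator:

```python
size_gb, step_gb, max_gb = 2, 2, 20    # datafile_size / datafile_next / datafile_maxsize
growth = [size_gb]
while growth[-1] < max_gb:
    growth.append(min(growth[-1] + step_gb, max_gb))
print(growth)                          # [2, 4, 6, ..., 20]
```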
......@@ -22,10 +22,11 @@ test:
value: 'true'
optimizer: tenant
- name: memory_chunk_cache_size
value: '0'
value_type: STRING
value: '0M'
value_type: CAPACITY_MB
- name: syslog_io_bandwidth_limit
value: 30m
value_type: CAPACITY_MB
- name: enable_async_syslog
value: 'true'
- name: large_query_worker_percentage
......@@ -55,6 +56,7 @@ test:
value: 1m
- name: cache_wash_threshold
value: 10G
value_type: CAPACITY_MB
- name: plan_cache_evict_interval
value: 30s
- name: bf_cache_miss_count_threshold
......
......@@ -25,10 +25,11 @@ build:
value: 'true'
optimizer: tenant
- name: memory_chunk_cache_size
value: '0'
value_type: STRING
value: '0M'
value_type: CAPACITY_MB
- name: syslog_io_bandwidth_limit
value: 30m
value_type: CAPACITY_MB
- name: enable_async_syslog
value: 'true'
- name: large_query_worker_percentage
......@@ -58,6 +59,7 @@ build:
value: 1m
- name: cache_wash_threshold
value: 10G
value_type: CAPACITY_MB
- name: plan_cache_evict_interval
value: 30s
- name: bf_cache_miss_count_threshold
......
......@@ -26,9 +26,11 @@ test:
value: false
value_type: BOOL
- name: memory_chunk_cache_size
value: 0
value: 0M
value_type: CAPACITY_MB
- name: cache_wash_threshold
value: 30g
value_type: CAPACITY_MB
- name: ob_enable_batched_multi_statement
value: true
optimizer: tenant
......@@ -47,6 +49,7 @@ test:
value: 4
- name: syslog_io_bandwidth_limit
value: 30m
value_type: CAPACITY_MB
- name: enable_async_syslog
value: true
- name: large_query_worker_percentage
......
test:
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 3
optimizer: sleep
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: _enable_newsort
value: 'false'
- name: _enable_adaptive_compaction
value: 'false'
optimizer: tenant
- name: enable_record_trace_log
value: 'false'
\ No newline at end of file
build:
variables:
- name: ob_query_timeout
value: 36000000000
- name: ob_trx_timeout
value: 36000000000
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 5
optimizer: sleep
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: enable_record_trace_log
value: 'false'
- name: _enable_defensive_check
value: false
- name: default_auto_increment_mode
value: 'NOORDER'
optimizer: tenant
- name: _rowsets_enabled
value: false
optimizer: tenant
- name: freeze_trigger_percentage
value: 40
optimizer: tenant
\ No newline at end of file
......@@ -22,10 +22,11 @@ test:
value: 'true'
optimizer: tenant
- name: memory_chunk_cache_size
value: '0'
value_type: STRING
value: '0M'
value_type: CAPACITY_MB
- name: syslog_io_bandwidth_limit
value: 30m
value_type: CAPACITY_MB
- name: enable_async_syslog
value: 'true'
- name: large_query_worker_percentage
......@@ -55,6 +56,7 @@ test:
value: 1m
- name: cache_wash_threshold
value: 10G
value_type: CAPACITY_MB
- name: plan_cache_evict_interval
value: 30s
- name: bf_cache_miss_count_threshold
......
......@@ -25,10 +25,11 @@ build:
value: 'true'
optimizer: tenant
- name: memory_chunk_cache_size
value: '0'
value_type: STRING
value: '0M'
value_type: CAPACITY_MB
- name: syslog_io_bandwidth_limit
value: 30m
value_type: CAPACITY_MB
- name: enable_async_syslog
value: 'true'
- name: large_query_worker_percentage
......@@ -58,6 +59,7 @@ build:
value: 1m
- name: cache_wash_threshold
value: 10G
value_type: CAPACITY_MB
- name: plan_cache_evict_interval
value: 30s
- name: bf_cache_miss_count_threshold
......
......@@ -26,9 +26,10 @@ test:
value: false
value_type: BOOL
- name: memory_chunk_cache_size
value: 0
value: 0M
- name: cache_wash_threshold
value: 30g
value_type: CAPACITY_MB
- name: ob_enable_batched_multi_statement
value: true
optimizer: tenant
......@@ -47,6 +48,7 @@ test:
value: 4
- name: syslog_io_bandwidth_limit
value: 30m
value_type: CAPACITY_MB
- name: enable_async_syslog
value: true
- name: large_query_worker_percentage
......
test:
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 3
optimizer: sleep
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: _enable_newsort
value: 'false'
- name: _enable_adaptive_compaction
value: 'false'
optimizer: tenant
- name: enable_record_trace_log
value: 'false'
\ No newline at end of file
build:
variables:
- name: ob_query_timeout
value: 36000000000
- name: ob_trx_timeout
value: 36000000000
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 5
optimizer: sleep
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: enable_record_trace_log
value: 'false'
- name: _enable_defensive_check
value: false
- name: default_auto_increment_mode
value: 'NOORDER'
optimizer: tenant
- name: _rowsets_enabled
value: false
optimizer: tenant
- name: freeze_trigger_percentage
value: 40
optimizer: tenant
\ No newline at end of file
......@@ -23,6 +23,7 @@ from __future__ import absolute_import, division, print_function
import re
import time
from copy import deepcopy
from _types import *
from _stdio import SafeStdio
......@@ -32,179 +33,6 @@ VARIABLES = 'variables'
SYSTEM_CONFIG = 'system_config'
class OptimizeItem(object):
class OptimizeItemType(object):
TYPE_STR = None
def __init__(self, s):
try:
self._origin = s
self._value = 0
self._format()
except:
raise Exception("'%s' is not %s" % (self._origin, self._type_str))
@property
def _type_str(self):
if self.TYPE_STR is None:
self.TYPE_STR = str(self.__class__.__name__).split('.')[-1]
return self.TYPE_STR
def _format(self):
raise NotImplementedError
def __str__(self):
return str(self._origin)
def __repr__(self):
return self.__str__()
def __hash__(self):
return self._origin.__hash__()
@property
def __cmp_value__(self):
return self._value
def __eq__(self, value):
if value is None:
return False
return self.__cmp_value__ == value.__cmp_value__
def __gt__(self, value):
if value is None:
return True
return self.__cmp_value__ > value.__cmp_value__
def __ge__(self, value):
if value is None:
return True
return self.__eq__(value) or self.__gt__(value)
def __lt__(self, value):
if value is None:
return False
return self.__cmp_value__ < value.__cmp_value__
def __le__(self, value):
if value is None:
return False
return self.__eq__(value) or self.__lt__(value)
class Moment(OptimizeItemType):
def _format(self):
if self._origin:
if self._origin.upper() == 'DISABLE':
self._value = 0
else:
r = re.match('^(\d{1,2}):(\d{1,2})$', self._origin)
h, m = r.groups()
h, m = int(h), int(m)
if 0 <= h <= 23 and 0 <= m <= 60:
self._value = h * 60 + m
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Time(OptimizeItemType):
UNITS = {
'ns': 0.000000001,
'us': 0.000001,
'ms': 0.001,
's': 1,
'm': 60,
'h': 3600,
'd': 86400
}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['s']
else:
r = re.match('^(\d+)(\w+)$', self._origin.lower())
n, u = r.groups()
unit = self.UNITS.get(u.lower())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Capacity(OptimizeItemType):
UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, 'P': 1 << 50}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['M']
else:
r = re.match('^(\d+)(\w)B?$', self._origin.upper())
n, u = r.groups()
unit = self.UNITS.get(u.upper())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class StringList(OptimizeItemType):
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
self._value = self._origin.split(';')
else:
self._value = []
class Double(OptimizeItemType):
def _format(self):
self._value = float(self._origin) if self._origin else 0
class Boolean(OptimizeItemType):
def _format(self):
if isinstance(self._origin, bool):
self._value = self._origin
else:
_origin = str(self._origin).lower()
if _origin == 'true':
self._value = True
elif _origin == 'false':
self._value = False
elif _origin.isdigit():
self._value = bool(self._origin)
else:
raise Exception('%s is not Boolean' % _origin)
class Integer(OptimizeItemType):
def _format(self):
if self._origin is None:
self._value = 0
self._origin = 0
else:
_origin = str(self._origin)
try:
self.value = self._value = int(_origin)
except:
raise Exception('%s is not Integer' % _origin)
class String(OptimizeItemType):
def _format(self):
self._value = str(self._origin) if self._origin else ''
class SqlFile(object):
def __init__(self, path, entrance, sys=False, **kwargs):
......@@ -245,14 +73,15 @@ class SqlFile(object):
class Variable(object):
TYPES = {
'DOUBLE': OptimizeItem.Double,
'BOOL': OptimizeItem.Boolean,
'INT': OptimizeItem.Integer,
'STRING': OptimizeItem.String,
'MOMENT': OptimizeItem.Moment,
'TIME': OptimizeItem.Time,
'CAPACITY': OptimizeItem.Capacity,
'STRING_LIST': OptimizeItem.StringList
'DOUBLE': Double,
'BOOL': Boolean,
'INT': Integer,
'STRING': String,
'MOMENT': Moment,
'TIME': Time,
'CAPACITY': Capacity,
'CAPACITY_MB': CapacityMB,
'STRING_LIST': StringList
}
def __init__(self, value, entrance, name=None, value_type=None, condition="lambda n, o: n != o",
......
......@@ -38,7 +38,6 @@ shell_command_map = {
"cpu_logical_cores": 'cat /proc/cpuinfo | grep "processor" | wc -l',
"cpu_model_name": 'cat /proc/cpuinfo | grep name | cut -f2 -d: | uniq',
"cpu_frequency": 'cat /proc/cpuinfo | grep MHz | cut -f2 -d: | uniq',
"cpu_flags": 'cat /proc/cpuinfo | grep flags | cut -f2 -d: | uniq',
"memory_total": 'cat /proc/meminfo | grep MemTotal | cut -f2 -d: | uniq',
"memory_free": 'cat /proc/meminfo | grep MemFree | cut -f2 -d: | uniq',
"memory_avaiable": 'cat /proc/meminfo | grep MemAvailable | cut -f2 -d: | uniq',
......@@ -121,11 +120,6 @@ class CpuInfo:
def cpu_frequency(*args, **kwargs):
return kwargs["bash_result"]
@staticmethod
@shell_command
def cpu_flags(*args, **kwargs):
return kwargs["bash_result"]
class MemInfo:
@staticmethod
......@@ -237,7 +231,6 @@ def telemetry_machine_data(data):
_hosts['cpu']['logicalCores'] = CpuInfo.cpu_logical_cores()
_hosts['cpu']['modelName'] = CpuInfo.cpu_model_name()
_hosts['cpu']['frequency'] = CpuInfo.cpu_frequency()
_hosts['cpu']['flags'] = CpuInfo.cpu_flags()
_hosts['memory']['total'] = MemInfo.memory_total()
_hosts['memory']['free'] = MemInfo.memory_free()
......
......@@ -216,7 +216,7 @@
"targets": [
{
"exemplar": true,
"expr": "(sum(rate(ob_sysstat{stat_id=\"40003\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40001\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))\n/\n(sum(rate(ob_sysstat{stat_id=\"40002\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40004\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40008\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40000\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))",
"expr": "(sum(rate(ob_sysstat{stat_id=\"40003\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40007\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40001\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))\n/\n(sum(rate(ob_sysstat{stat_id=\"40002\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40004\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40008\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40000\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))",
"interval": "",
"legendFormat": "sql latency {{$group}}",
"refId": "A"
......
......@@ -37,7 +37,7 @@ def call_plugin(plugin, plugin_context, repositories, *args, **kwargs):
stdio, *args, **kwargs)
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs):
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
cluster_config = plugin_context.cluster_config
clients = plugin_context.clients
......@@ -57,7 +57,8 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository)
if not call_plugin(stop_plugin, plugin_context, [cur_repository], *args, **kwargs):
return
return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
apply_param_plugin(dest_repository)
if not call_plugin(start_plugin, plugin_context, [dest_repository], *args, **kwargs):
......
......@@ -37,7 +37,7 @@ def call_plugin(plugin, plugin_context, repositories, *args, **kwargs):
stdio, *args, **kwargs)
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs):
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
def summit_config():
generate_global_config = generate_configs['global']
......@@ -71,6 +71,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository)
if not call_plugin(stop_plugin, plugin_context, repositories=[cur_repository], *args, **kwargs):
return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
# clean useless config
clean_files = [
"conf/config_properties/monagent_basic_auth.yaml",
......
......@@ -242,7 +242,7 @@ def start(plugin_context, need_bootstrap=False, *args, **kwargs):
stdio.start_loading('obproxy program health check')
failed = []
servers = cluster_config.servers
count = 20
count = 300
while servers and count:
count -= 1
tmp_servers = []
......
......@@ -21,7 +21,7 @@
from __future__ import absolute_import, division, print_function
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs):
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
namespace = plugin_context.namespace
namespaces = plugin_context.namespaces
deploy_name = plugin_context.deploy_name
......@@ -53,8 +53,8 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository)
if not stop_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs):
return
return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
apply_param_plugin(dest_repository)
if not start_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, need_bootstrap=True, *args, **kwargs):
return
......
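All three upgrade plugins gain the same step: after stopping the old version, the destination repository's binaries are pushed to the servers before the new version is started. Condensed, the flow looks like this; every dependency is stubbed via parameters:

```python
def upgrade(stop, start, apply_param_plugin, install_repository_to_servers,
            cluster_config, cur_repository, dest_repository, clients):
    apply_param_plugin(cur_repository)
    if not stop():
        return False
    # new in this commit: install the new binaries between stop and start
    install_repository_to_servers(cluster_config.name, cluster_config,
                                  dest_repository, clients)
    apply_param_plugin(dest_repository)
    return start()

demo_cluster = type('Cfg', (), {'name': 'demo'})()
print(upgrade(lambda: True, lambda: True, lambda repo: None,
              lambda *args: None, demo_cluster, 'cur', 'dest', {}))  # True
```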
......@@ -134,8 +134,9 @@ def connect(plugin_context, target_server=None, *args, **kwargs):
server_config = cluster_config.get_server_conf(server)
password = server_config.get('root_password', '') if count % 2 else ''
cursor = Cursor(ip=server.ip, port=server_config['mysql_port'], tenant='', password=password if password is not None else '', stdio=stdio)
stdio.stop_loading('succeed')
return plugin_context.return_true(connect=cursor.db, cursor=cursor, server=server)
if cursor.execute('select 1', raise_exception=True):
stdio.stop_loading('succeed')
return plugin_context.return_true(connect=cursor.db, cursor=cursor, server=server)
except:
if count == 0:
stdio.exception('')
......
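The connect plugin no longer trusts a freshly built cursor; it probes the server with `select 1` before returning it. A sketch with `Cursor` stubbed:

```python
class Cursor(object):                        # stand-in for the plugin's Cursor
    def __init__(self, healthy):
        self.healthy = healthy
        self.db = object()

    def execute(self, sql, raise_exception=False):
        if not self.healthy and raise_exception:
            raise RuntimeError('server not ready')
        return self.healthy

cursor = Cursor(healthy=True)
if cursor.execute('select 1', raise_exception=True):
    print('connection verified before being returned')
```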
......@@ -96,12 +96,14 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
def summit_config():
generate_global_config = generate_configs['global']
for key in generate_global_config:
stdio.verbose('Update global config %s to %s' % (key, generate_global_config[key]))
cluster_config.update_global_conf(key, generate_global_config[key], False)
for server in cluster_config.servers:
if server not in generate_configs:
continue
generate_server_config = generate_configs[server]
for key in generate_server_config:
stdio.verbose('Update server %s config %s to %s' % (server, key, generate_server_config[key]))
cluster_config.update_server_conf(server, key, generate_server_config[key], False)
clients = plugin_context.clients
......@@ -145,7 +147,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
ip = server.ip
client = clients[server]
server_config = cluster_config.get_server_conf_with_default(server)
user_server_config = cluster_config.get_original_server_conf_with_global(server)
user_server_config = cluster_config.get_original_server_conf_with_global(server, format_conf=True)
if user_server_config.get('devname') is None:
if client.is_localhost():
......
......@@ -148,7 +148,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
if server_memory_config[server]['system_memory']:
memory_limit = server_memory_config[server]['num']
if not memory_limit:
memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total']
memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total'] / 100
factor = 0.7
suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
......
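The memory fix is a plain unit error: `percentage` is stored as a whole number, so multiplying without dividing by 100 inflated the limit a hundredfold. A worked check:

```python
total = 64 << 30                      # 64 GiB of physical memory, in bytes
percentage = 80                       # memory_limit_percentage, whole number
buggy = percentage * total            # 5120 GiB: 100x too large
fixed = percentage * total / 100      # 51.2 GiB: the intended limit
print(buggy / (1 << 30), fixed / (1 << 30))
```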
......@@ -158,15 +158,15 @@ class ObVersionGraph(object):
res.insert(0, start_node)
if res and res[-1].deprecated:
raise Exception('upgrade destination version:{}{} is deprecated, not support upgrade.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else ''))
return format_route(res)
return format_route(res, current_repository)
def format_route(routes):
def format_route(routes, repository):
route_res = []
for node in routes:
require_from_binary = getattr(node, 'require_from_binary', False)
if node.when_come_from:
require_from_binary = require_from_binary and routes[0].version in node.when_come_from
if getattr(node, 'when_come_from', False):
require_from_binary = require_from_binary and (repository.version in node.when_come_from or '%s-%s' % (repository.version, repository.release.split('.')[0]) in node.when_come_from)
route_res.append({
'version': node.version,
'release': None if node.release == VersionNode.RELEASE_NULL else node.release,
......@@ -180,17 +180,13 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, **
stdio = plugin_context.stdio
repository_dir = dest_repository.repository_dir
if dest_repository.version > Version("4.1.0.0"):
stdio.error('upgrade observer to version {} is not support, please upgrade obd first.'.format(dest_repository.version))
return
if current_repository.version == dest_repository.version:
return plugin_context.return_true(route=format_route([current_repository, dest_repository]))
return plugin_context.return_true(route=format_route([current_repository, dest_repository], current_repository))
upgrade_dep_name = 'etc/oceanbase_upgrade_dep.yml'
upgrade_dep_path = os.path.join(repository_dir, upgrade_dep_name)
if not os.path.isfile(upgrade_dep_path):
stdio.error('%s No such file: %s' % (dest_repository, upgrade_dep_name))
stdio.error('%s No such file: %s. \n No upgrade route available' % (dest_repository, upgrade_dep_name))
return
version_dep = {}
......
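`format_route` now matches `when_come_from` against the running repository rather than the first route node, and accepts either the bare version or a `version-build` form. A sketch of that match (the release string here is hypothetical):

```python
class Repo(object):                               # hypothetical repository stub
    version = '4.1.0.0'
    release = '100000192023032010.el7'

when_come_from = ['4.1.0.0-100000192023032010']   # as read from the route node
key = '%s-%s' % (Repo.version, Repo.release.split('.')[0])
print(Repo.version in when_come_from or key in when_come_from)   # True
```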
......@@ -119,9 +119,10 @@ def bootstrap(plugin_context, cursor, *args, **kwargs):
has_ocp = True
if has_ocp:
global_conf_with_default = deepcopy(cluster_config.get_global_conf_with_default())
original_global_conf = cluster_config.get_original_global_conf()
ocp_meta_tenant_prefix = 'ocp_meta_tenant_'
for key in global_conf_with_default:
if key.startswith(ocp_meta_tenant_prefix):
if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None):
global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key]
tenant_info = global_conf_with_default["ocp_meta_tenant"]
tenant_info["variables"] = "ob_tcp_invited_nodes='%'"
......
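Bootstrap now forwards an `ocp_meta_tenant_*` key into the tenant dict only when the user actually set it, so untouched defaults no longer override the tenant template. A sketch with toy dicts:

```python
prefix = 'ocp_meta_tenant_'
conf_with_default = {                         # defaults merged with user conf
    'ocp_meta_tenant_memory_size': '4G',
    'ocp_meta_tenant_log_disk_size': '6656M',
    'ocp_meta_tenant': {},
}
original = {'ocp_meta_tenant_memory_size': '4G'}   # only what the user set

for key in list(conf_with_default):
    if key.startswith(prefix) and original.get(key, None):
        conf_with_default['ocp_meta_tenant'][key.replace(prefix, '', 1)] = conf_with_default[key]

print(conf_with_default['ocp_meta_tenant'])   # {'memory_size': '4G'} only
```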
......@@ -104,12 +104,14 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
def summit_config():
generate_global_config = generate_configs['global']
for key in generate_global_config:
stdio.verbose('Update global config %s to %s' % (key, generate_global_config[key]))
cluster_config.update_global_conf(key, generate_global_config[key], False)
for server in cluster_config.servers:
if server not in generate_configs:
continue
generate_server_config = generate_configs[server]
for key in generate_server_config:
stdio.verbose('Update server %s config %s to %s' % (server, key, generate_server_config[key]))
cluster_config.update_server_conf(server, key, generate_server_config[key], False)
clients = plugin_context.clients
......@@ -147,7 +149,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
ip = server.ip
client = clients[server]
server_config = cluster_config.get_server_conf_with_default(server)
user_server_config = cluster_config.get_original_server_conf_with_global(server)
user_server_config = cluster_config.get_original_server_conf_with_global(server, format_conf=True)
if user_server_config.get('devname') is None:
if client.is_localhost():
......@@ -308,7 +310,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
if not datafile_size:
datafile_disk_percentage = int(user_server_config.get('datafile_disk_percentage', 0))
if datafile_disk_percentage:
datafile_size = data_dir_mount['total'] * datafile_disk_percentage / 100
datafile_size = data_dir_disk['total'] * datafile_disk_percentage / 100
elif generate_config_mini:
datafile_size = MINI_DATA_FILE_SIZE
update_server_conf(server, 'datafile_size', format_size(datafile_size, 0))
......
......@@ -256,7 +256,7 @@
name_local: 数据文件大小
require: false
essential: true
type: CAPACITY
type: CAPACITY_MB
default: 0
min_value: 0M
max_value: NULL
......@@ -277,7 +277,7 @@
name_local: Redo 日志大小
require: false
essential: true
type: CAPACITY
type: CAPACITY_MB
default: 0
min_value: 0M
max_value: NULL
......@@ -295,7 +295,7 @@
description_local: 合并时候数据列统计信息的采样率
- name: cache_wash_threshold
require: false
type: CAPACITY
type: CAPACITY_MB
default: 4GB
min_value: 0B
max_value: NULL
......@@ -385,7 +385,7 @@
description_local: 系统可以使用的最小CPU配额,将会预留
- name: memory_reserved
require: false
type: CAPACITY
type: CAPACITY_MB
default: 500M
min_value: 10M
max_value: NULL
......@@ -475,7 +475,7 @@
description_local: 升级模式开关。在升级模式中,会暂停部分系统后台功能。
- name: multiblock_read_size
require: false
type: CAPACITY
type: CAPACITY_MB
default: 128K
min_value: 0K
max_value: 2M
......@@ -495,7 +495,7 @@
description_local: 因磁盘满等原因导致某个节点数据迁入失败时,暂停迁入时长
- name: tablet_size
require: false
type: CAPACITY
type: CAPACITY_MB
default: 128M
min_value: NULL
max_value: NULL
......@@ -594,7 +594,7 @@
description_local: 数据块缓存在缓存系统中的优先级
- name: syslog_io_bandwidth_limit
require: false
type: CAPACITY
type: CAPACITY_MB
default: 30MB
min_value: NULL
max_value: NULL
......@@ -656,7 +656,7 @@
description_local: 系统日志自动回收复用时,最多保留多少个。值0表示不自动清理。
- name: px_task_size
require: false
type: CAPACITY
type: CAPACITY_MB
default: 2M
min_value: 2M
max_value: NULL
......@@ -1017,7 +1017,7 @@
description_local: 控制租户CPU调度中每次预留多少比例的空闲token数给租户
- name: stack_size
require: false
type: CAPACITY
type: CAPACITY_MB
default: 1M
min_value: 512K
max_value: 20M
......@@ -1039,7 +1039,7 @@
name_local: 最大运行内存
require: false
essential: true
type: CAPACITY
type: CAPACITY_MB
default: 0
min_value: NULL
max_value: NULL
......@@ -1051,7 +1051,7 @@
- name: system_memory
name_local: 集群系统内存
essential: true
type: CAPACITY
type: CAPACITY_MB
default: 30G
min_value: 0M
max_value: NULL
......@@ -1180,7 +1180,7 @@
description_local: OB内置本地磁盘RAID特性。暂勿使用
- name: rootservice_memory_limit
require: false
type: CAPACITY
type: CAPACITY_MB
default: 2G
min_value: 2G
max_value: NULL
......@@ -1190,7 +1190,7 @@
description_local: RootService最大内存限制
- name: plan_cache_low_watermark
require: false
type: CAPACITY
type: CAPACITY_MB
default: 1500M
min_value: NULL
max_value: NULL
......@@ -1252,7 +1252,7 @@
description_local: 控制内存大页的行为,"true"表示在操作系统开启内存大页并且有空闲大页时,数据库总是申请内存大页,否则申请普通内存页, "false"表示数据库不使用大页, "only"表示数据库总是分配大页
- name: dtl_buffer_size
require: false
type: CAPACITY
type: CAPACITY_MB
default: 64K
min_value: 4K
max_value: 2M
......@@ -1522,7 +1522,7 @@
description_local: MySQL模式下,建表时使用的默认压缩算法
- name: memory_chunk_cache_size
require: false
type: CAPACITY
type: CAPACITY_MB
default: 0M
min_value: 0M
max_value: NULL
......@@ -1699,7 +1699,7 @@
description_local: 系统内部执行 schema 多版本记录回收任务的时间间隔。
- name: backup_data_file_size
require: false
type: CAPACITY
type: CAPACITY_MB
default: 4G
min_value: 512M
max_value: 4G
......@@ -1863,7 +1863,7 @@
name_local: OCP express元数据库租户内存
essential: true
require: false
type: CAPACITY
type: CAPACITY_MB
default: 2G
need_redeploy: true
description_en: The tenant memory size for ocp meta db
......@@ -1872,7 +1872,7 @@
name_local: OCP express元数据库租户日志磁盘大小
essential: true
require: false
type: CAPACITY
type: CAPACITY_MB
default: 6656M
need_redeploy: true
description_en: The tenant log disk size for ocp meta db
......
......@@ -176,7 +176,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
if server_memory_config[server]['system_memory']:
memory_limit = server_memory_config[server]['num']
if not memory_limit:
server_memory_config[server]['num'] = memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total']
server_memory_config[server]['num'] = memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total'] / 100
factor = 0.75
suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {})
......@@ -586,9 +586,10 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
has_ocp = True
if has_ocp and need_bootstrap:
global_conf_with_default = copy.deepcopy(cluster_config.get_global_conf_with_default())
original_global_conf = cluster_config.get_original_global_conf()
ocp_meta_tenant_prefix = 'ocp_meta_tenant_'
for key in global_conf_with_default:
if key.startswith(ocp_meta_tenant_prefix):
if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None):
global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key]
meta_db_memory_size = parse_size(global_conf_with_default['ocp_meta_tenant'].get('memory_size'))
servers_sys_memory = {}
......
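The new guard copies an `ocp_meta_tenant_*` value into the nested ocp_meta_tenant dict only when the user actually set that key, so built-in defaults no longer leak in. A self-contained illustration (values hypothetical):

prefix = 'ocp_meta_tenant_'
conf = {'ocp_meta_tenant_memory_size': '2G', 'ocp_meta_tenant': {}}
original = {}  # the user set no ocp_meta_tenant_* key explicitly
for key in list(conf):
    if key.startswith(prefix) and original.get(key, None):
        conf['ocp_meta_tenant'][key.replace(prefix, '', 1)] = conf[key]
print(conf['ocp_meta_tenant'])  # {} -- the default was skipped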
......@@ -390,23 +390,39 @@ class Upgrader(object):
time.sleep(3)
# major freeze
# 1. check merge status
pre_global_broadcast_scn = 0
while True:
merge_status = self.execute_sql("select max(global_broadcast_scn) as global_broadcast_scn, max(global_broadcast_scn > last_scn) as is_merging from CDB_OB_MAJOR_COMPACTION")
if merge_status['is_merging'] == 0:
pre_global_broadcast_scn = merge_status['global_broadcast_scn']
break
time.sleep(3)
# 1. wait all tenant global_broadcast_scn = last_scn, record tenant_id, global_broadcast_scn
pre_tenant_scn_dict = {}
tenant_ids = []
for tenant_info in self.execute_sql("select tenant_id from CDB_OB_MAJOR_COMPACTION", one=False):
tenant_ids.append(tenant_info['tenant_id'])
while tenant_ids:
pre_tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn, last_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
tenant_ids = []
for pre_tenant_scn in pre_tenant_scn_list:
if pre_tenant_scn['global_broadcast_scn'] > pre_tenant_scn['last_scn']:
tenant_ids.append(pre_tenant_scn['tenant_id'])
continue
pre_tenant_scn_dict[pre_tenant_scn['tenant_id']] = pre_tenant_scn['global_broadcast_scn']
time.sleep(1)
# 2. begin merge
self.execute_sql("alter system major freeze tenant = all", error=False)
# 3. wait merge start
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn <= %s", [pre_global_broadcast_scn]):
tenant_ids = pre_tenant_scn_dict.keys()
while tenant_ids:
tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
tenant_ids = []
for tenant_scn in tenant_scn_list:
if pre_tenant_scn_dict[tenant_scn['tenant_id']] >= tenant_scn['global_broadcast_scn']:
tenant_ids.append(tenant_scn['tenant_id'])
continue
time.sleep(3)
# 4.wait merge finsh
# 4. wait merge finish
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn > last_scn"):
time.sleep(3)
self.stdio.stop_loading('succeed')
return True
......
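The rewritten wait replaces the old cluster-wide max(global_broadcast_scn) bookmark with one recorded SCN per tenant, so every tenant must individually advance past its own pre-freeze SCN before the merge counts as started. A minimal sketch of the phase-3 predicate:

# Sketch only: rows shaped like CDB_OB_MAJOR_COMPACTION results.
def merge_started(pre_scn, rows):
    return all(r['global_broadcast_scn'] > pre_scn[r['tenant_id']] for r in rows)

pre_scn = {1001: 5, 1002: 7}  # recorded before 'alter system major freeze'
rows = [{'tenant_id': 1001, 'global_broadcast_scn': 6},
        {'tenant_id': 1002, 'global_broadcast_scn': 8}]
print(merge_started(pre_scn, rows))  # True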
......@@ -158,15 +158,15 @@ class ObVersionGraph(object):
res.insert(0, start_node)
if len(res) > 0 and res[-1].deprecated:
raise Exception('upgrade destination version: {}{} is deprecated, upgrade is not supported.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else ''))
return format_route(res)
return format_route(res, current_repository)
def format_route(routes):
def format_route(routes, repository):
route_res = []
for i, node in enumerate(routes):
require_from_binary = getattr(node, 'require_from_binary', False)
if getattr(node, 'when_come_from', False):
require_from_binary = require_from_binary and routes[0].version in node.when_come_from
require_from_binary = require_from_binary and (repository.version in node.when_come_from or '%s-%s' % (repository.version, repository.release.split('.')[0]) in node.when_come_from)
route_res.append({
'version': node.version,
......@@ -194,17 +194,17 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, **
stdio = plugin_context.stdio
repository_dir = dest_repository.repository_dir
if dest_repository.version >= Version("4.2"):
if dest_repository.version >= Version("4.3"):
stdio.error('upgrade observer to version {} is not supported, please upgrade obd first.'.format(dest_repository.version))
return
if current_repository.version == dest_repository.version:
return plugin_context.return_true(route=format_route([current_repository, dest_repository]))
return plugin_context.return_true(route=format_route([current_repository, dest_repository], current_repository))
upgrade_dep_name = 'etc/oceanbase_upgrade_dep.yml'
upgrade_dep_path = os.path.join(repository_dir, upgrade_dep_name)
if not os.path.isfile(upgrade_dep_path):
stdio.error('%s No such file: %s' % (dest_repository, upgrade_dep_name))
stdio.error('%s No such file: %s. \n No upgrade route available' % (dest_repository, upgrade_dep_name))
return
version_dep = {}
......
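format_route now matches when_come_from against either the bare version or `version-<release major>`; an illustration with made-up values:

class Repo:  # hypothetical repository, version/release values illustrative
    version = '4.1.0.0'
    release = '102.20230522'

repo = Repo()
candidate = '%s-%s' % (repo.version, repo.release.split('.')[0])
print(candidate)  # 4.1.0.0-102
when_come_from = ['4.1.0.0-102']
print(repo.version in when_come_from or candidate in when_come_from)  # True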
......@@ -374,20 +374,36 @@ class Upgrader(object):
time.sleep(3)
# major freeze
# 1. check merge status
pre_global_broadcast_scn = 0
while True:
merge_status = self.execute_sql("select max(global_broadcast_scn) as global_broadcast_scn, max(global_broadcast_scn > last_scn) as is_merging from CDB_OB_MAJOR_COMPACTION")
if merge_status['is_merging'] == 0:
pre_global_broadcast_scn = merge_status['global_broadcast_scn']
break
time.sleep(3)
# 1. wait all tenant global_broadcast_scn = last_scn, record tenant_id, global_broadcast_scn
pre_tenant_scn_dict = {}
tenant_ids = []
for tenant_info in self.execute_sql("select tenant_id from CDB_OB_MAJOR_COMPACTION", one=False):
tenant_ids.append(tenant_info['tenant_id'])
while tenant_ids:
pre_tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn, last_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
tenant_ids = []
for pre_tenant_scn in pre_tenant_scn_list:
if pre_tenant_scn['global_broadcast_scn'] > pre_tenant_scn['last_scn']:
tenant_ids.append(pre_tenant_scn['tenant_id'])
continue
pre_tenant_scn_dict[pre_tenant_scn['tenant_id']] = pre_tenant_scn['global_broadcast_scn']
time.sleep(1)
# 2. begin merge
self.execute_sql("alter system major freeze tenant = all", error=False)
# 3. wait merge start
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn <= %s", [pre_global_broadcast_scn]):
tenant_ids = pre_tenant_scn_dict.keys()
while tenant_ids:
tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
tenant_ids = []
for tenant_scn in tenant_scn_list:
if pre_tenant_scn_dict[tenant_scn['tenant_id']] >= tenant_scn['global_broadcast_scn']:
tenant_ids.append(tenant_scn['tenant_id'])
continue
time.sleep(3)
# 4.wait merge finsh
# 4. wait merge finish
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn > last_scn"):
time.sleep(3)
......
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import re, os
from math import sqrt
from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED, EC_OBSERVER_GET_MEMINFO_FAIL
import _errno as err
from tool import ConfigUtil
def parse_size(size):
_bytes = 0
if not isinstance(size, str) or size.isdigit():
_bytes = int(size)
else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'(0|[1-9][0-9]*)\s*([BKMGT])', size.upper())  # an integer followed by a unit letter
_bytes = int(match.group(1)) * units[match.group(2)]
return _bytes
def format_size(size, precision=1):
units = ['B', 'K', 'M', 'G']
units_num = len(units) - 1
idx = 0
if precision:
div = 1024.0
format = '%.' + str(precision) + 'f%s'
limit = 1024
else:
div = 1024
limit = 1024
format = '%d%s'
while idx < units_num and size >= limit:
size /= div
idx += 1
return format % (size, units[idx])
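# Usage sketch for the two helpers above (results shown for reference):
#   parse_size('2G')            -> 2147483648
#   parse_size(1024)            -> 1024 (bare numbers are taken as bytes)
#   format_size(2147483648, 0)  -> '2G'
#   format_size(1536 << 20)     -> '1.5G'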
def get_system_memory(memory_limit, min_pool_memory, generate_config_mini):
if generate_config_mini and memory_limit <= 6 << 30:
system_memory = 1 << 30
elif memory_limit <= 8 << 30:
system_memory = 2 << 30
elif memory_limit <= 16 << 30:
system_memory = 3 << 30
elif memory_limit <= 32 << 30:
system_memory = 5 << 30
elif memory_limit <= 48 << 30:
system_memory = 7 << 30
elif memory_limit <= 64 << 30:
system_memory = 10 << 30
else:
memory_limit_gb = memory_limit >> 30
system_memory = int(3 * (sqrt(memory_limit_gb) - 3)) << 30
return max(system_memory, min_pool_memory)
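# Illustration of the tiering above (assuming min_pool_memory = 2G, non-mini):
#   get_system_memory(8 << 30, 2 << 30, False)    -> 2G
#   get_system_memory(32 << 30, 2 << 30, False)   -> 5G
#   get_system_memory(64 << 30, 2 << 30, False)   -> 10G
#   get_system_memory(100 << 30, 2 << 30, False)  -> 21G, from 3 * (sqrt(100) - 3)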
def generate_config(plugin_context, generate_config_mini=False, generate_check=True, return_generate_keys=False, generate_consistent_config=False, only_generate_password=False, generate_password=True, *args, **kwargs):
if return_generate_keys:
generate_keys = []
if not only_generate_password:
generate_keys += [
'memory_limit', 'datafile_size', 'log_disk_size', 'devname', 'system_memory', 'cpu_count', 'production_mode',
'syslog_level', 'enable_syslog_recycle', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'ocp_meta_tenant_log_disk_size',
'datafile_next', 'datafile_maxsize'
]
if generate_password:
generate_keys += ['root_password', 'proxyro_password', 'ocp_meta_password', 'ocp_agent_monitor_password']
return plugin_context.return_true(generate_keys=generate_keys)
cluster_config = plugin_context.cluster_config
if generate_password:
generate_random_password(plugin_context, cluster_config)
if only_generate_password:
return plugin_context.return_true()
def update_server_conf(server, key, value):
if server not in generate_configs:
generate_configs[server] = {}
generate_configs[server][key] = value
def update_global_conf(key, value):
generate_configs['global'][key] = value
def summit_config():
generate_global_config = generate_configs['global']
for key in generate_global_config:
stdio.verbose('Update global config %s to %s' % (key, generate_global_config[key]))
cluster_config.update_global_conf(key, generate_global_config[key], False)
for server in cluster_config.servers:
if server not in generate_configs:
continue
generate_server_config = generate_configs[server]
for key in generate_server_config:
stdio.verbose('Update server %s config %s to %s' % (server, key, generate_server_config[key]))
cluster_config.update_server_conf(server, key, generate_server_config[key], False)
clients = plugin_context.clients
stdio = plugin_context.stdio
success = True
generate_configs = {'global': {}}
plugin_context.set_variable('generate_configs', generate_configs)
stdio.start_loading('Generate observer configuration')
global_config = cluster_config.get_global_conf()
max_syslog_file_count_default = 4
if global_config.get('enable_syslog_recycle') is None:
update_global_conf('enable_syslog_recycle', True)
if global_config.get('enable_syslog_wf') is None:
update_global_conf('enable_syslog_wf', False)
if global_config.get('max_syslog_file_count') is None:
update_global_conf('max_syslog_file_count', max_syslog_file_count_default)
if global_config.get('cluster_id') is None:
update_global_conf('cluster_id', 1)
MIN_MEMORY = 6 << 30
PRO_MEMORY_MIN = 16 << 30
SLOG_SIZE = 4 << 30
MIN_CPU_COUNT = 16
START_NEED_MEMORY = 3 << 30
MINI_MEMORY_SIZE = MIN_MEMORY
MINI_DATA_FILE_SIZE = 2 << 30
MINI_DATA_FILE_NEXT = 2 << 30
MINI_DATA_FILE_MAX_SIZE = 8 << 30
MINI_LOG_DISK_SIZE = 13 << 30
has_ocp = 'ocp-express' in [repo.name for repo in plugin_context.repositories]
ip_server_memory_info = {}
servers_info = {}
for server in cluster_config.servers:
ip = server.ip
client = clients[server]
server_config = cluster_config.get_server_conf_with_default(server)
user_server_config = cluster_config.get_original_server_conf_with_global(server, format_conf=True)
if user_server_config.get('devname') is None:
if client.is_localhost():
update_server_conf(server, 'devname', 'lo')
else:
devinfo = client.execute_command('cat /proc/net/dev').stdout
interfaces = re.findall('\n\s+(\w+):', devinfo)
for interface in interfaces:
if interface == 'lo':
continue
if client.execute_command('ping -W 1 -c 1 -I %s %s' % (interface, ip)):
update_server_conf(server, 'devname', interface)
break
dirs = {"home_path": server_config['home_path']}
dirs["data_dir"] = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store')
dirs["redo_dir"] = server_config['redo_dir'] if server_config.get('redo_dir') else dirs["data_dir"]
dirs["clog_dir"] = server_config['clog_dir'] if server_config.get('clog_dir') else os.path.join(dirs["redo_dir"], 'clog')
# memory
auto_set_memory = False
auto_set_system_memory = False
auto_set_min_pool_memory = False
system_memory = 0
if user_server_config.get('system_memory'):
system_memory = parse_size(user_server_config.get('system_memory'))
if generate_config_mini and '__min_full_resource_pool_memory' not in user_server_config:
auto_set_min_pool_memory = True
min_pool_memory = server_config['__min_full_resource_pool_memory']
min_memory = max(system_memory, MIN_MEMORY)
if ip not in ip_server_memory_info:
ret = client.execute_command('cat /proc/meminfo')
if ret:
ip_server_memory_info[ip] = server_memory_stats = {}
memory_key_map = {
'MemTotal': 'total',
'MemFree': 'free',
'MemAvailable': 'available',
'Buffers': 'buffers',
'Cached': 'cached'
}
for key in memory_key_map:
server_memory_stats[memory_key_map[key]] = 0
for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
if k in memory_key_map:
key = memory_key_map[k]
server_memory_stats[key] = parse_size(str(v))
if user_server_config.get('memory_limit_percentage'):
if ip in ip_server_memory_info:
total_memory = parse_size(ip_server_memory_info[ip]['total'])
memory_limit = int(total_memory * user_server_config.get('memory_limit_percentage') / 100)
elif generate_check:
stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server))
success = False
continue
else:
memory_limit = MIN_MEMORY
elif not server_config.get('memory_limit'):
if generate_config_mini:
memory_limit = MINI_MEMORY_SIZE
update_server_conf(server, 'memory_limit', format_size(memory_limit, 0))
update_server_conf(server, 'production_mode', False)
if auto_set_min_pool_memory:
min_pool_memory = 1073741824
update_server_conf(server, '__min_full_resource_pool_memory', min_pool_memory)
else:
if ip in ip_server_memory_info:
server_memory_stats = ip_server_memory_info[ip]
if generate_check:
if server_memory_stats['available'] < START_NEED_MEMORY:
stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(START_NEED_MEMORY)))
success = False
continue
if server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached'] < MIN_MEMORY:
stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(MIN_MEMORY)))
success = False
continue
memory_limit = max(MIN_MEMORY, int(server_memory_stats['available'] * 0.9))
update_server_conf(server, 'memory_limit', format_size(memory_limit, 0))
auto_set_memory = True
elif generate_check:
stdio.error(EC_OBSERVER_GET_MEMINFO_FAIL.format(server=server))
success = False
continue
else:
memory_limit = MIN_MEMORY
else:
memory_limit = parse_size(server_config.get('memory_limit'))
if system_memory == 0:
auto_set_system_memory = True
system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
update_server_conf(server, 'system_memory', format_size(system_memory, 0))
# cpu
if not server_config.get('cpu_count'):
ret = client.execute_command("grep -e 'processor\s*:' /proc/cpuinfo | wc -l")
if ret and ret.stdout.strip().isdigit():
cpu_num = int(ret.stdout)
server_config['cpu_count'] = max(MIN_CPU_COUNT, int(cpu_num - 2))
else:
server_config['cpu_count'] = MIN_CPU_COUNT
update_server_conf(server, 'cpu_count', server_config['cpu_count'])
elif server_config['cpu_count'] < MIN_CPU_COUNT:
update_server_conf(server, 'cpu_count', MIN_CPU_COUNT)
stdio.warn('(%s): automatically adjust the cpu_count %s' % (server, MIN_CPU_COUNT))
# disk
datafile_size = parse_size(server_config.get('datafile_size', 0))
log_disk_size = parse_size(server_config.get('log_disk_size', 0))
if not server_config.get('datafile_size') or not server_config.get('log_disk_size'):
disk = {'/': 0}
ret = client.execute_command('df --block-size=1024')
if ret:
for total, used, avail, puse, path in re.findall('(\d+)\s+(\d+)\s+(\d+)\s+(\d+%)\s+(.+)', ret.stdout):
disk[path] = {
'total': int(total) << 10,
'avail': int(avail) << 10,
'need': 0,
}
for include_dir in dirs.values():
while include_dir not in disk:
ret = client.execute_command('df --block-size=1024 %s' % include_dir)
if ret:
for total, used, avail, puse, path in re.findall('(\d+)\s+(\d+)\s+(\d+)\s+(\d+%)\s+(.+)', ret.stdout):
disk[path] = {
'total': int(total) << 10,
'avail': int(avail) << 10,
'need': 0,
}
break
else:
include_dir = os.path.dirname(include_dir)
mounts = {}
for key in dirs:
path = dirs[key]
kp = '/'
for p in disk:
if p in path:
if len(p) > len(kp):
kp = p
mounts[path] = kp
home_path_mount = mounts[dirs['home_path']]
home_path_disk = disk[home_path_mount]
data_dir_mount = mounts[dirs['data_dir']]
data_dir_disk = disk[data_dir_mount]
clog_dir_mount = mounts[dirs['clog_dir']]
clog_dir_disk = disk[clog_dir_mount]
auto_set_datafile_size = False
auto_set_log_disk_size = False
if not datafile_size:
datafile_disk_percentage = int(user_server_config.get('datafile_disk_percentage', 0))
if datafile_disk_percentage:
datafile_size = data_dir_disk['total'] * datafile_disk_percentage / 100
elif generate_config_mini:
datafile_size = MINI_DATA_FILE_SIZE
update_server_conf(server, 'datafile_size', format_size(datafile_size, 0))
if 'datafile_maxsize' not in user_server_config:
update_server_conf(server, 'datafile_maxsize', format_size(MINI_DATA_FILE_MAX_SIZE, 0))
if 'datafile_next' not in user_server_config:
update_server_conf(server, 'datafile_next', format_size(MINI_DATA_FILE_NEXT, 0))
else:
auto_set_datafile_size = True
if not log_disk_size:
log_disk_percentage = int(user_server_config.get('log_disk_percentage', 0))
if log_disk_percentage:
log_disk_size = clog_dir_disk['total'] * log_disk_percentage / 100
elif generate_config_mini:
log_disk_size = MINI_LOG_DISK_SIZE
update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0))
else:
auto_set_log_disk_size = True
if user_server_config.get('enable_syslog_recycle') is False:
log_size = 1 << 30  # reserve 1G for ordinary syslog by default
else:
log_size = (256 << 20) * int(user_server_config.get('max_syslog_file_count', max_syslog_file_count_default)) * 4
if clog_dir_mount == data_dir_mount:
min_log_size = log_size if clog_dir_mount == home_path_mount else 0
MIN_NEED = min_log_size + SLOG_SIZE
if auto_set_datafile_size:
datafile_size = min_datafile_size = MINI_DATA_FILE_SIZE
else:
min_datafile_size = datafile_size
MIN_NEED += min_datafile_size
if auto_set_log_disk_size:
log_disk_size = min_log_disk_size = (memory_limit - system_memory) * 3 + system_memory
else:
min_log_disk_size = log_disk_size
MIN_NEED += min_log_disk_size
min_need = min_log_size + min_datafile_size + min_log_disk_size
disk_free = data_dir_disk['avail']
if MIN_NEED > disk_free:
if generate_check:
stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(MIN_NEED)))
success = False
continue
else:
if auto_set_memory:
memory_limit = MIN_MEMORY
update_server_conf(server, 'memory_limit', format_size(memory_limit, 0))
if auto_set_system_memory:
system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
update_server_conf(server, 'system_memory', format_size(system_memory, 0))
if auto_set_datafile_size:
datafile_size = MINI_DATA_FILE_SIZE
if auto_set_log_disk_size:
log_disk_size = (memory_limit - system_memory) * 3 + system_memory
elif min_need > disk_free:
if generate_check and not auto_set_memory:
stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need)))
success = False
continue
disk_free = disk_free - log_size - SLOG_SIZE
memory_factor = 6
if auto_set_datafile_size is False:
disk_free -= min_datafile_size
memory_factor -= 3
if auto_set_log_disk_size is False:
disk_free -= min_log_disk_size
memory_factor -= 3
memory_limit = format_size(disk_free / max(1, memory_factor), 0)
update_server_conf(server, 'memory_limit', memory_limit)
memory_limit = parse_size(memory_limit)
if auto_set_system_memory:
system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
update_server_conf(server, 'system_memory', format_size(system_memory, 0))
if auto_set_log_disk_size:
log_disk_size = (memory_limit - system_memory) * 3 + system_memory
if auto_set_datafile_size:
datafile_size = max(disk_free - log_disk_size, memory_limit * 3)
else:
if auto_set_log_disk_size:
log_disk_size = (memory_limit - system_memory) * 3 + system_memory
if auto_set_datafile_size:
datafile_size = max(disk_free - log_size - SLOG_SIZE - log_disk_size, memory_limit * 3)
if auto_set_datafile_size:
update_server_conf(server, 'datafile_size', format_size(datafile_size, 0))
if auto_set_log_disk_size:
update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0))
else:
datafile_min_memory_limit = memory_limit
if auto_set_datafile_size:
datafile_size = 3 * datafile_min_memory_limit
min_log_size = log_size if data_dir_mount == home_path_mount else 0
disk_free = data_dir_disk['avail']
min_need = min_log_size + datafile_size + SLOG_SIZE
if generate_check and min_need > disk_free:
if not auto_set_memory:
stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need)))
success = False
continue
datafile_min_memory_limit = (disk_free - min_log_size - SLOG_SIZE) / 3
if datafile_min_memory_limit < min_memory:
stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need)))
success = False
continue
datafile_min_memory_limit = parse_size(format_size(datafile_min_memory_limit, 0))
datafile_size = datafile_min_memory_limit * 3
log_disk_min_memory_limit = memory_limit
if auto_set_log_disk_size:
log_disk_size = 3 * memory_limit
min_log_size = log_size if clog_dir_mount == home_path_mount else 0
disk_free = clog_dir_disk['avail']
min_need = min_log_size + log_disk_size
if generate_check and min_need > disk_free:
if not auto_set_memory:
stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need)))
success = False
continue
log_disk_min_memory_limit = (disk_free - log_size) / 3
if log_disk_min_memory_limit < min_memory:
stdio.error(err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=data_dir_mount, avail=format_size(disk_free), need=format_size(min_need)))
success = False
continue
log_disk_min_memory_limit = parse_size(format_size(log_disk_min_memory_limit, 0))
log_disk_size = log_disk_min_memory_limit * 3
if auto_set_memory:
update_server_conf(server, 'memory_limit', format_size(memory_limit, 0))
if auto_set_system_memory:
system_memory = get_system_memory(memory_limit, min_pool_memory, generate_config_mini)
update_server_conf(server, 'system_memory', system_memory)
if auto_set_datafile_size:
update_server_conf(server, 'datafile_size', format_size(datafile_size, 0))
if auto_set_log_disk_size:
update_server_conf(server, 'log_disk_size', format_size(log_disk_size, 0))
if memory_limit < PRO_MEMORY_MIN:
update_server_conf(server, 'production_mode', False)
servers_info[server] = {
"memory_limit": memory_limit,
"system_memory": system_memory,
"min_pool_memory": min_pool_memory,
"log_disk_size": log_disk_size
}
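# Aside (illustration only, not plugin logic): the auto log-disk rule used
# above is log_disk_size = (memory_limit - system_memory) * 3 + system_memory,
# e.g. a 16G memory_limit with 3G system_memory gives (16G - 3G) * 3 + 3G = 42G:
#   format_size(((16 << 30) - (3 << 30)) * 3 + (3 << 30), 0) == '42G'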
# ocp meta db
SYS_TENANT_LOG_DISK_SCALE = 1
if has_ocp:
if 'ocp_meta_tenant_log_disk_size' not in global_config and 'log_disk_size' not in global_config.get('ocp_meta_tenant', {}):
if generate_config_mini:
update_global_conf('ocp_meta_tenant_log_disk_size', '6656M')
else:
meta_min_log_disk_size = 6 << 30
expect_log_disk_size = (9 * 512 + 512 * len(cluster_config.servers) + 512 * 3) << 20
max_available = 0
sys_memory_size = None
sys_log_disk_size = None
if 'sys_tenant' in global_config:
if 'memory_size' in global_config['sys_tenant']:
sys_memory_size = global_config['sys_tenant']['memory_size']
if 'log_disk_size' in global_config['sys_tenant']:
sys_log_disk_size = global_config['sys_tenant']['log_disk_size']
for server in cluster_config.servers:
# server_config = cluster_config.get_server_conf_with_default(server)
server_info = servers_info.get(server)
if not server_info:
continue
memory_limit = server_info['memory_limit']
system_memory = server_info['system_memory']
log_disk_size = server_info['log_disk_size']
min_pool_memory = server_info['min_pool_memory']
if not sys_log_disk_size:
if not sys_memory_size:
sys_memory_size = max(min_pool_memory, min(int((memory_limit - system_memory) * 0.25), 16 << 30))
sys_log_disk_size = sys_memory_size * SYS_TENANT_LOG_DISK_SCALE
max_available = max(max_available, log_disk_size - sys_log_disk_size)
if expect_log_disk_size > max_available:
expect_log_disk_size = meta_min_log_disk_size
if expect_log_disk_size > max_available and generate_check:
stdio.error(err.EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK_AVAILABLE.format(avail=max_available, need=expect_log_disk_size))
success = False
cluster_config.update_global_conf('ocp_meta_tenant_log_disk_size', format_size(expect_log_disk_size, 0))
if generate_config_mini and 'ocp_meta_tenant_memory_size' not in global_config and 'memory_size' not in global_config.get('ocp_meta_tenant', {}):
update_global_conf('ocp_meta_tenant_memory_size', '1536M')
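# Aside (illustration only): for a 3-server cluster the expectation above is
# expect_log_disk_size = (9 * 512 + 512 * 3 + 512 * 3) << 20, i.e. 7680M,
# which format_size(expect_log_disk_size, 0) prints as '7G'.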
if generate_password:
generate_random_password(plugin_context, cluster_config)
if generate_consistent_config:
generate_global_config = generate_configs['global']
server_num = len(cluster_config.servers)
keys = ['memory_limit', 'datafile_size', 'system_memory', 'log_disk_size', 'cpu_count', 'production_mode']
for key in keys:
servers = []
values = []
is_capacity_key = (key != 'cpu_count' and key != 'production_mode')
for server in cluster_config.servers:
if key in generate_configs.get(server, {}):
value = generate_configs[server][key]
servers.append(server)
values.append(parse_size(value) if is_capacity_key else value)
if values:
if len(values) != server_num and key in generate_global_config:
continue
value = min(values)
generate_global_config[key] = format_size(value, 0) if is_capacity_key else value
for server in servers:
del generate_configs[server][key]
# merge_generate_config
merge_config = {}
generate_global_config = generate_configs['global']
count_base = len(cluster_config.servers) - 1
if count_base < 1:
for server in cluster_config.servers:
if server not in generate_configs:
continue
generate_global_config.update(generate_configs[server])
generate_configs[server] = {}
else:
for server in cluster_config.servers:
if server not in generate_configs:
continue
generate_server_config = generate_configs[server]
merged_server_config = {}
for key in generate_server_config:
if key in generate_global_config:
if generate_global_config[key] != generate_server_config[key]:
merged_server_config[key] = generate_server_config[key]
elif key in merge_config:
if merge_config[key]['value'] != generate_server_config[key]:
merged_server_config[key] = generate_server_config[key]
elif count_base == merge_config[key]['count']:
generate_global_config[key] = generate_server_config[key]
del merge_config[key]
else:
merge_config[key]['servers'].append(server)
merge_config[key]['count'] += 1
else:
merge_config[key] = {'value': generate_server_config[key], 'servers': [server], 'count': 1}
generate_configs[server] = merged_server_config
for key in merge_config:
config_st = merge_config[key]
for server in config_st['servers']:
if server not in generate_configs:
continue
generate_server_config = generate_configs[server]
generate_server_config[key] = config_st['value']
# summit_config
summit_config()
if success:
stdio.stop_loading('succeed')
return plugin_context.return_true()
stdio.stop_loading('fail')
def generate_random_password(plugin_context, cluster_config):
global_config = cluster_config.get_original_global_conf()
if 'root_password' not in global_config:
cluster_config.update_global_conf('root_password', ConfigUtil.get_random_pwd_by_total_length(20))
components_name_list = [repo.name for repo in plugin_context.repositories]
if 'obagent' in components_name_list and 'ocp_agent_monitor_password' not in global_config:
cluster_config.update_global_conf('ocp_agent_monitor_password', ConfigUtil.get_random_pwd_by_total_length())
if ('obproxy' in components_name_list or 'obproxy-ce' in components_name_list) and 'proxyro_password' not in global_config:
cluster_config.update_global_conf('proxyro_password', ConfigUtil.get_random_pwd_by_total_length())
if 'ocp-express' in components_name_list and 'ocp_meta_password' not in global_config:
cluster_config.update_global_conf('ocp_meta_password', ConfigUtil.get_random_pwd_by_total_length())
\ No newline at end of file
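One note on the parenthesized obproxy check above: Python's `and` binds tighter than `or`, so without the parentheses the proxyro_password branch would run for any deployment containing obproxy, even when the password is already set:

print(True or False and False)    # True  -- parsed as True or (False and False)
print((True or False) and False)  # False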
- name: home_path
name_local: 工作目录
require: true
essential: true
type: STRING
min_value: NULL
max_value: NULL
need_redeploy: true
description_en: the directory for the work data file
description_local: OceanBase工作目录
- name: cluster_id
name_local: 集群ID
require: true
essential: true
type: INT
default: 1
min_value: 1
max_value: 4294901759
modify_limit: modify
need_redeploy: true
description_en: ID of the cluster
description_local: 本OceanBase集群ID
- name: data_dir
name_local: 数据目录
essential: true
type: STRING
min_value: NULL
max_value: NULL
need_redeploy: true
description_en: the directory for the data file
description_local: 存储sstable等数据的目录
- name: redo_dir
name_local: 日志目录
essential: true
type: STRING
min_value: NULL
max_value: NULL
need_redeploy: true
description_en: the directory for the redo file
description_local: 存储clog, iclog, slog数据的目录
- name: clog_dir
type: STRING
min_value: NULL
max_value: NULL
need_redeploy: true
description_en: the directory for the clog file
description_local: 存储clog数据的目录, clog 应该与 ilog 同盘
- name: slog_dir
type: STRING
min_value: NULL
max_value: NULL
need_redeploy: true
description_en: the directory for the slog file
description_local: 存储slog数据的目录. 4.0版本开始不支持配置该项
- name: ilog_dir
type: STRING
min_value: NULL
max_value: NULL
need_redeploy: true
description_en: the directory for the ilog file
description_local: 存储ilog数据的目录
- name: devname
name_local: 网卡名
essential: true
type: STRING
min_value: NULL
max_value: NULL
need_restart: true
description_en: name of network adapter
description_local: 服务进程绑定的网卡设备名
- name: rpc_port
name_local: 内部通信端口
require: true
essential: true
type: INT
default: 2882
min_value: 1025
max_value: 65535
modify_limit: modify
need_restart: true
description_en: the port number for RPC protocol.
description_local: 集群内部通信的端口号
- name: mysql_port
name_local: 服务端口
require: true
essential: true
type: INT
default: 2881
min_value: 1025
max_value: 65535
modify_limit: modify
need_restart: true
description_en: port number for mysql connection
description_local: SQL服务协议端口号
- name: zone
require: true
type: STRING
default: zone1
min_value: NULL
max_value: NULL
section: OBSERVER
need_redeploy: true
description_en: specifies the zone name
description_local: 节点所在的zone的名字。
- name: sys_cpu_limit_trigger
require: false
type: INT
default: 80
min_value: 50
max_value: NULL
section: OBSERVER
need_restart: false
description_en: when the cpu usage percentage exceed the trigger, will limit the sys cpu usage
description_local: 当CPU利用率超过该阈值的时候,将暂停系统后台任务的执行
- name: memory_limit_percentage
require: false
type: INT
default: 80
min_value: 10
max_value: 90
modify_limit: decrease
section: OBSERVER
need_restart: false
description_en: memory limit percentage of the total physical memory
description_local: 系统总可用内存大小占总内存大小的百分比
- name: sys_bkgd_migration_retry_num
require: false
type: INT
default: 3
min_value: 3
max_value: 100
section: OBSERVER
need_restart: false
description_en: retry num limit during migration.
description_local: 副本迁移失败时最多重试次数
- name: tableapi_transport_compress_func
require: false
type: STRING
default: none
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: compressor used for tableAPI query result.
description_local: tableAPI查询结果传输使用的压缩算法
- name: disk_io_thread_count
require: false
type: INT
default: 8
min_value: 2
max_value: 32
section: OBSERVER
need_restart: false
description_en: The number of io threads on each disk.
description_local: 磁盘IO线程数。必须为偶数。
- name: location_cache_refresh_min_interval
require: false
type: TIME
default: 100ms
min_value: 0s
max_value: NULL
section: LOCATION_CACHE
need_restart: false
description_en: the time interval in which no request for location cache renewal will be executed.
description_local: 位置缓存刷新请求的最小间隔,防止产生过多刷新请求造成系统压力过大
- name: trace_log_slow_query_watermark
type: TIME
default: 1s
min_value: 1ms
max_value: NULL
section: OBSERVER
need_restart: false
description_en: the threshold of execution time (in milliseconds) of a query beyond which it is considered to be a slow query.
description_local: 执行时间超过该阈值的查询会被认为是慢查询,慢查询的追踪日志会被打印到系统日志中
- name: max_string_print_length
require: false
type: INT
default: 500
min_value: 0
max_value: NULL
section: OBSERVER
need_restart: false
description_en: truncate very long strings when printing to the log file
description_local: 打印系统日志时,单行日志最大长度
- name: row_compaction_update_limit
require: false
type: INT
default: 6
min_value: 1
max_value: 6400
section: TRANS
need_restart: false
description_en: maximum update count before trigger row compaction
description_local: 触发内存中行内数据合并的修改次数
- name: enable_rereplication
require: false
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: LOAD_BALANCE
need_restart: false
description_en: specifies whether the partition auto-replication is turned on.
description_local: 自动补副本开关
- name: rootservice_async_task_thread_count
require: false
type: INT
default: 4
min_value: 1
max_value: 10
section: ROOT_SERVICE
need_restart: false
description_en: maximum of threads allowed for executing asynchronous task at rootserver.
description_local: RootService内部异步任务使用的线程池大小
- name: major_compact_trigger
require: false
type: INT
default: 5
min_value: 0
max_value: 65535
section: TENANT
need_restart: false
description_en: major_compact_trigger is an alias of minor_freeze_times
description_local: 多少次小合并触发一次全局合并。值为0时,表示关闭小合并
- name: default_compress
require: false
type: STRING
default: archive
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: default compress function name for creating new tables
description_local: Oracle模式下,建表时使用的默认压缩策略
- name: ssl_client_authentication
require: false
type: BOOL
default: false
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: true
description_en: enable server supports SSL connection, takes effect only after server restart with all ca/cert/key file.
description_local: 是否开启SSL连接功能
- name: datafile_size
name_local: 数据文件大小
require: false
essential: true
type: CAPACITY_MB
default: 0
min_value: 0M
max_value: NULL
modify_limit: decrease
section: SSTABLE
need_restart: false
description_en: size of the data file. Please enter a capacity, such as 20G
description_local: 数据文件大小。请输入带容量带单位的整数,如20G
- name: datafile_maxsize
name_local: 数据文件最大空间
require: false
essential: true
type: CAPACITY_MB
default: 0
min_value: 0M
max_value: NULL
modify_limit: decrease
section: SSTABLE
need_restart: false
description_en: the maximum size for auto extend. Please enter a capacity, such as 20G
description_local: 数据文件最大空间。请输入带容量带单位的整数,如20G
- name: datafile_next
name_local: 数据文件自增步长
require: false
essential: true
type: CAPACITY_MB
default: 0
min_value: 0M
max_value: NULL
modify_limit: decrease
section: SSTABLE
need_restart: false
description_en: the auto extend step. Please enter a capacity, such as 2G
description_local: 数据文件自增步长。请输入带容量带单位的整数,如2G
- name: log_disk_percentage
require: false
type: INT
default: 0
min_value: 0
max_value: 99
description_en: the percentage of disk space used by the clog files.
description_local: Redo 日志占用其所在磁盘总空间的百分比。
- name: log_disk_size
name_local: Redo 日志大小
require: false
essential: true
type: CAPACITY_MB
default: 0
min_value: 0M
max_value: NULL
description_en: the size of disk space used by the clog files. Please enter a capacity, such as 20G
description_local: Redo 日志磁盘的大小。请输入带容量带单位的整数,如24G
- name: merge_stat_sampling_ratio
require: false
type: INT
default: 100
min_value: 0
max_value: 100
section: OBSERVER
need_restart: false
description_en: column stats sampling ratio during daily merge.
description_local: 合并时候数据列统计信息的采样率
- name: cache_wash_threshold
require: false
type: CAPACITY_MB
default: 4GB
min_value: 0B
max_value: NULL
section: OBSERVER
need_restart: false
description_en: size of remaining memory at which cache eviction will be triggered.
description_local: 触发缓存清理的容量阈值
- name: user_iort_up_percentage
require: false
type: INT
default: 100
min_value: 0
max_value: NULL
section: OBSERVER
need_restart: false
description_en: controls sys IO throttling; the percentage that user IO rt is allowed to rise before background IO tasks are throttled
description_local: 用户磁盘IO时延超过该阈值后,系统后台IO任务将被限流
- name: high_priority_net_thread_count
require: false
type: INT
default: 0
min_value: 0
max_value: 100
section: OBSERVER
need_restart: true
description_en: the number of rpc I/O threads for high priority messages, 0 means set off
description_local: 高优先级网络线程数,值0表示关闭
- name: max_kept_major_version_number
require: false
type: INT
default: 2
min_value: 1
max_value: 16
section: DAILY_MERGE
need_restart: false
description_en: the maximum number of kept major versions
description_local: 数据保留多少个冻结版本
- name: enable_sys_unit_standalone
require: false
type: BOOL
default: false
min_value: NULL
max_value: NULL
section: LOAD_BALANCE
need_restart: false
description_en: specifies whether sys unit standalone deployment is turned on.
description_local: 系统租户UNIT是否独占节点
- name: freeze_trigger_percentage
require: false
type: INT
default: 50
min_value: 1
max_value: 99
section: TENANT
need_restart: false
description_en: the threshold of the size of the mem store when freeze will be triggered.
description_local: 触发全局冻结的租户使用内存阈值。另见enable_global_freeze_trigger。
- name: enable_major_freeze
require: false
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: ROOT_SERVICE
need_restart: false
description_en: specifies whether major_freeze function is turned on.
description_local: 自动全局冻结开关
- name: balancer_tolerance_percentage
require: false
type: INT
default: 10
min_value: 1
max_value: 99
section: LOAD_BALANCE
need_restart: false
description_en: specifies the tolerance (in percentage) of the unbalance of the disk space utilization among all units.
description_local: 租户内多个UNIT间磁盘不均衡程度的宽容度,在均值+-宽容度范围之内的不均衡不会触发执行均衡动作
- name: server_cpu_quota_min
require: false
type: DOUBLE
default: 2.5
min_value: 0
max_value: 16
section: TENANT
need_restart: true
description_en: the minimum number of vCPUs allocated to the server tenant (a special internal tenant that exists on every observer)
description_local: 系统可以使用的最小CPU配额,将会预留
- name: memory_reserved
require: false
type: CAPACITY_MB
default: 500M
min_value: 10M
max_value: NULL
section: SSTABLE
need_restart: false
description_en: the size of the system memory reserved for emergency internal use.
description_local: 系统预留内存大小
- name: server_cpu_quota_max
require: false
type: DOUBLE
default: 5
min_value: 0
max_value: 16
section: TENANT
need_restart: true
description_en: the maximum number of vCPUs allocated to the server tenant
description_local: 系统可以使用的最大CPU配额
- name: rootservice_ready_check_interval
require: false
type: TIME
default: 3s
min_value: 100000us
max_value: 1m
section: ROOT_SERVICE
need_restart: false
description_en: the interval between the schedule of the task that checks on the status of the ZONE during restarting.
description_local: RootService启动后等待和检查集群状态的时间间隔
- name: debug_sync_timeout
require: false
type: TIME
default: 0
min_value: 0
max_value: NULL
section: OBSERVER
need_restart: false
description_en: Enable the debug sync facility and optionally specify a default wait timeout in microseconds. A zero value keeps the facility disabled
description_local: 打开debug sync调试开关,并设置其超时时间;值为0时,则关闭。
- name: syslog_level
require: false
type: STRING
default: INFO
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies the current level of logging.
description_local: 系统日志级别
- name: resource_hard_limit
require: false
type: INT
default: 100
min_value: 1
max_value: 10000
section: LOAD_BALANCE
need_restart: false
description_en: Used along with resource_soft_limit in unit allocation. If server utilization is less than resource_soft_limit, a policy of best fit will be used for unit allocation; otherwise, a least load policy will be employed. Ultimately, system utilization should not be larger than resource_hard_limit.
description_local: CPU和内存等资源进行分配的时候,资源总量是实际数量乘以该百分比的值
- name: leak_mod_to_check
require: false
type: STRING
default: NONE
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: the name of the module under memory leak checks
description_local: 内存泄露检查,用于内部调试目的
- name: balancer_task_timeout
require: false
type: TIME
default: 20m
min_value: 1s
max_value: NULL
section: LOAD_BALANCE
need_restart: false
description_en: the time to execute the load-balancing task before it is terminated.
description_local: 负载均衡等后台任务的超时时间
- name: enable_upgrade_mode
require: false
type: BOOL
default: false
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies whether upgrade mode is turned on. If turned on, daily merger and balancer will be disabled.
description_local: 升级模式开关。在升级模式中,会暂停部分系统后台功能。
- name: multiblock_read_size
require: false
type: CAPACITY_MB
default: 128K
min_value: 0K
max_value: 2M
section: SSTABLE
need_restart: false
description_en: multiple block batch read size in one read io request.
description_local: 读取数据时IO聚合大小
- name: migration_disable_time
require: false
type: TIME
default: 3600s
min_value: 1s
max_value: NULL
section: ROOT_SERVICE
need_restart: false
description_en: the duration in which the observer stays in the block_migrate_in status, which means no partition is allowed to migrate into the server.
description_local: 因磁盘满等原因导致某个节点数据迁入失败时,暂停迁入时长
- name: tablet_size
require: false
type: CAPACITY_MB
default: 128M
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: default tablet size, has to be a multiple of 2M
description_local: 分区内部并行处理(合并、查询等)时每个分片的大小
- name: dead_socket_detection_timeout
require: false
type: TIME
default: 10s
min_value: 0s
max_value: 2h
section: OBSERVER
need_restart: false
description_en: specify a tcp_user_timeout for RFC5482. A zero value makes the option disabled
description_local: 失效socket检测超时时间
- name: server_check_interval
require: false
type: TIME
default: 30s
min_value: 1s
max_value: NULL
section: ROOT_SERVICE
need_restart: false
description_en: the time interval between schedules of a task that examines the __all_server table.
description_local: server表一致性检查的时间间隔
- name: lease_time
require: false
type: TIME
default: 10s
min_value: 1s
max_value: 5m
section: ROOT_SERVICE
need_restart: false
description_en: Lease for current heartbeat. If the root server does not receive any heartbeat from an observer in lease_time seconds, that observer is considered to be offline.
description_local: RootService与其他服务节点之间的租约时长。一般请勿修改。
- name: rootservice_async_task_queue_size
require: false
type: INT
default: 16384
min_value: 8
max_value: 131072
section: ROOT_SERVICE
need_restart: false
description_en: the size of the queue for all asynchronous tasks at rootserver.
description_local: RootService内部异步任务队列的容量
- name: location_refresh_thread_count
require: false
type: INT
default: 4
min_value: 2
max_value: 64
section: LOCATION_CACHE
need_restart: false
description_en: the number of threads that fetch the partition location information from the root service.
description_local: 用于位置缓存刷新的线程数
- name: minor_compact_trigger
require: false
type: INT
default: 2
min_value: 0
max_value: 16
section: TENANT
need_restart: false
description_en: minor_compact_trigger
description_local: 触发小合并的迷你合并次数
- name: major_freeze_duty_time
type: MOMENT
default: Disable
min_value: 00:00
max_value: 23:59
section: DAILY_MERGE
need_restart: false
description_en: the start time of system daily merge procedure.
description_local: 每日定时冻结和合并的触发时刻
- name: ignore_replay_checksum_error
require: false
type: BOOL
default: false
min_value: NULL
max_value: NULL
section: TRANS
need_restart: false
description_en: specifies whether error raised from the memtable replay checksum validation can be ignored.
description_local: 是否忽略回放事务日志时发生的校验和错误
- name: user_block_cache_priority
require: false
type: INT
default: 1
min_value: 1
max_value: NULL
section: CACHE
need_restart: false
description_en: user block cache priority
description_local: 数据块缓存在缓存系统中的优先级
- name: syslog_io_bandwidth_limit
require: false
type: CAPACITY_MB
default: 30MB
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: Syslog IO bandwidth limit; syslog exceeding this bandwidth will be truncated. Use 0 to disable ERROR log.
description_local: 系统日志所能占用的磁盘IO带宽上限,超过带宽的系统日志将被丢弃
- name: workers_per_cpu_quota
require: false
type: INT
default: 10
min_value: 2
max_value: 20
section: TENANT
need_restart: false
description_en: the ratio (integer) between the number of system-allocated workers and the maximum number of threads that can be scheduled concurrently.
description_local: 每个CPU配额分配多少个工作线程
- name: enable_record_trace_id
require: false
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies whether record app trace id is turned on.
description_local: 是否记录应用端设置的追踪ID
- name: config_additional_dir
require: false
type: STRING_LIST
default: etc2;etc3
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: additional directories of configure file
description_local: 本地存储配置文件的多个目录,为了冗余存储多份配置文件
- name: enable_syslog_recycle
require: false
essential: true
type: BOOL
default: false
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies whether log file recycling is turned on
description_local: 是否自动回收系统日志
- name: max_syslog_file_count
require: false
essential: true
type: INT
default: 0
min_value: 0
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies the maximum number of the log files that can co-exist before the log file recycling kicks in. Each log file can occupy at most 256MB disk space. When this value is set to 0, no log file will be removed.
description_local: 系统日志自动回收复用时,最多保留多少个。值0表示不自动清理。
- name: px_task_size
require: false
type: CAPACITY_MB
default: 2M
min_value: 2M
max_value: NULL
section: OBSERVER
need_restart: false
description_en: min task access size of px task
description_local: SQL并行查询引擎每个任务处理的数据量大小
- name: replica_safe_remove_time
require: false
type: TIME
default: 2h
min_value: 1m
max_value: NULL
section: ROOT_SERVICE
need_restart: false
description_en: the time interval beyond which a replica that no longer exists and has not been modified is considered safe to remove
description_local: 已删除副本可以被清理的安全保留时间
- name: builtin_db_data_verify_cycle
require: false
type: INT
default: 20
min_value: 0
max_value: 360
section: OBSERVER
need_restart: false
description_en: check cycle of db data.
description_local: 数据坏块自检周期,单位为天。值0表示不检查。
- name: system_cpu_quota
require: false
type: DOUBLE
default: 10
min_value: 0
max_value: 16
section: TENANT
need_restart: false
description_en: the number of vCPUs allocated to the server tenant
description_local: 系统后台任务可使用CPU配额
- name: enable_sys_table_ddl
require: false
type: BOOL
default: false
min_value: NULL
max_value: NULL
section: ROOT_SERVICE
need_restart: false
description_en: specifies whether a system table is allowed be to created manually.
description_local: 是否允许新建和修改系统表。主要在系统升级过程中使用。
- name: merge_thread_count
require: false
type: INT
default: 0
min_value: 0
max_value: 256
section: OBSERVER
need_restart: false
description_en: worker thread num for compaction
description_local: 用于合并的线程数
- name: net_thread_count
require: false
type: INT
default: 0
min_value: 0
max_value: 128
section: OBSERVER
need_restart: true
description_en: the number of rpc/mysql I/O threads for Libeasy.
description_local: 网络IO线程数
- name: max_stale_time_for_weak_consistency
require: false
type: TIME
default: 5s
min_value: 5s
max_value: NULL
section: OBSERVER
need_restart: false
description_en: the max data stale time that observer can provide service when its parent is invalid.
description_local: 弱一致性读允许读到多旧的数据
- name: backup_log_archive_option
require: false
type: STRING
default: OPTIONAL
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: backup log archive options; supports MANDATORY/OPTIONAL and COMPRESSION
description_local: 日志备份的参数
- name: backup_concurrency
require: false
type: INT
default: 0
min_value: 0
max_value: 100
section: OBSERVER
need_restart: false
description_en: backup concurrency limit.
description_local: observer备份基线的并发度
- name: balancer_log_interval
require: false
type: TIME
default: 1m
min_value: 1s
max_value: NULL
section: LOAD_BALANCE
need_restart: false
description_en: the time interval between logging the load-balancing tasks statistics.
description_local: 负载均衡等后台任务线程打印统计日志的间隔时间
- name: restore_concurrency
require: false
type: INT
default: 0
min_value: 0
max_value: 512
section: OBSERVER
need_restart: false
description_en: the current work thread num of restore macro block.
description_local: 从备份恢复租户数据时最大并发度
- name: micro_block_merge_verify_level
require: false
type: INT
default: 2
min_value: 0
max_value: 3
section: OBSERVER
need_restart: false
description_en: specify what kind of verification should be done when merging micro block. 0, no verification will be done; 1, verify encoding algorithm, encoded micro block will be read to ensure data is correct; 2, verify encoding and compression algorithm, besides encoding verification, compressed block will be decompressed to ensure data is correct; 3, verify encoding, compression algorithm and lost write protect
description_local: 控制合并时宏块的校验级别
- name: bf_cache_miss_count_threshold
require: false
type: INT
default: 100
min_value: 0
max_value: NULL
section: CACHE
need_restart: false
description_en: bf cache miss count threshold, 0 means disable bf cache
description_local: 用于控制bloomfilter cache的触发次数,当宏块未命中次数达到这个值时,给创建bloomfilter缓存。0表示关闭。
- name: weak_read_version_refresh_interval
require: false
type: TIME
default: 50ms
min_value: 0ms
max_value: NULL
section: OBSERVER
need_restart: false
description_en: the time interval to refresh cluster weak read version
description_local: 弱一致性读版本号的刷新周期,影响弱一致性读数据的延时;值为0时,表示不再刷新弱一致性读版本号,不提供单调读功能
- name: large_query_worker_percentage
require: false
type: DOUBLE
default: 30
min_value: 0
max_value: 100
section: TENANT
need_restart: false
description_en: the percentage of the workers reserved to serve large query request.
description_local: 预留给大查询处理的工作线程百分比
- name: clog_transport_compress_all
require: false
type: BOOL
default: false
min_value: NULL
max_value: NULL
section: TRANS
need_restart: false
description_en: If this option is set to true, use compression for clog transport. The default is false(no compression)
description_local: 事务日志传输时是否压缩
- name: flush_log_at_trx_commit
require: false
type: INT
default: 1
min_value: 0
max_value: 2
section: TRANS
need_restart: false
description_en: 0 means commit transactions without waiting clog write to buffer cache, 1 means commit transactions after clog flush to disk, 2 means commit transactions after clog write to buffer cache
description_local: 事务提交时写事务日志策略。0表示不等待日志写入缓冲区,1表示等待日志写入磁盘,2表示等待日志写入缓冲区而不等落盘
- name: global_major_freeze_residual_memory
require: false
type: INT
default: 40
min_value: 1
max_value: 99
section: OBSERVER
need_restart: false
description_en: post a global major freeze when observer memstore free memory (plus memory held by frozen memstore and blockcache) reaches this limit. The limit is calculated as memory_limit * (1 - system_memory_percentage/100) * global_major_freeze_residual_memory/100
description_local: 当剩余内存小于这个百分比时,触发全局冻结
- name: enable_sql_audit
require: false
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies whether SQL audit is turned on.
description_local: SQL审计功能开关
- name: merger_switch_leader_duration_time
require: false
type: TIME
default: 3m
min_value: 0s
max_value: 30m
section: ROOT_SERVICE
need_restart: false
description_en: switch leader duration time for daily merge.
description_local: 合并时,批量切主的时间间隔
- name: enable_record_trace_log
require: false
type: BOOL
default: false
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies whether to always record the trace log.
description_local: 是否记录追踪日志
- name: sys_bkgd_migration_change_member_list_timeout
require: false
type: TIME
default: 1h
min_value: 0s
max_value: 24h
section: OBSERVER
need_restart: false
description_en: the timeout for migration change member list retry.
description_local: 副本迁移时变更Paxos成员组操作的超时时间
- name: rootservice_list
require: false
type: STRING_LIST
default:
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: a list of servers which contains rootservice
description_local: RootService及其副本所在的机器列表
- name: enable_syslog_wf
require: false
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies whether any log message with a log level higher than WARN would be printed into a separate file with a suffix of wf
description_local: 是否把WARN以上级别的系统日志打印到一个单独的日志文件中
- name: global_index_build_single_replica_timeout
require: false
type: TIME
default: 48h
min_value: 1h
max_value: NULL
section: ROOT_SERVICE
need_restart: false
description_en: timeout for building a single replica when rootservice schedules a global index build.
description_local: 建全局索引时,每个副本构建的超时时间
- name: memstore_limit_percentage
require: false
type: INT
default: 50
min_value: 1
max_value: 99
section: TENANT
need_restart: false
description_en: used in calculating the value of MEMSTORE_LIMIT
description_local: 租户用于memstore的内存占其总可用内存的百分比
- name: minor_deferred_gc_time
require: false
type: TIME
default: 0s
min_value: 0s
max_value: 24h
section: OBSERVER
need_restart: false
description_en: sstable deferred gc time after merge
description_local: 合并之后SSTable延迟回收间隔
- name: data_disk_usage_limit_percentage
require: false
type: INT
default: 90
min_value: 50
max_value: 100
section: OBSERVER
need_restart: false
description_en: the safe use percentage of data disk
description_local: 数据文件最大可以写入的百分比,超过这个阈值后,禁止数据迁入
- name: enable_perf_event
require: false
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies whether to enable perf event feature.
description_local: perf event调试特性开关
- name: obconfig_url
require: false
type: STRING
default:
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: URL for OBConfig service
description_local: OBConfig服务的URL地址
- name: cpu_quota_concurrency
require: false
type: DOUBLE
default: 4
min_value: 1
max_value: 10
section: TENANT
need_restart: false
description_en: max allowed concurrency for 1 CPU quota
description_local: 租户每个CPU配额允许的最大并发数
- name: zone_merge_order
require: false
type: STRING
default:
min_value: NULL
max_value: NULL
section: DAILY_MERGE
need_restart: false
description_en: the order of zone start merge in daily merge
description_local: 轮转合并的时候,多个Zone的顺序。不指定的时候,由系统自动决定。
- name: backup_recovery_window
require: false
type: TIME
default: 0
min_value: 0
max_value: NULL
section: OBSERVER
need_restart: false
description_en: backup recovery window; 0 means backups never expire
description_local: 恢复窗口大小
- name: default_row_format
require: false
type: STRING
default: compact
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: default row format in mysql mode
description_local: MySQL模式下,建表时使用的默认行格式
- name: stack_size
require: false
type: CAPACITY_MB
default: 1M
min_value: 512K
max_value: 20M
section: OBSERVER
need_restart: true
description_en: the size of routine execution stack
description_local: 程序函数调用栈大小
- name: balancer_idle_time
require: false
type: TIME
default: 5m
min_value: 10s
max_value: NULL
section: LOAD_BALANCE
need_restart: false
description_en: the time interval between the schedules of the partition load-balancing task.
description_local: 负载均衡等后台任务线程空闲时的唤醒间隔时间
- name: memory_limit
name_local: 最大运行内存
require: false
essential: true
type: CAPACITY_MB
default: 0
min_value: NULL
max_value: NULL
modify_limit: decrease
section: OBSERVER
need_restart: false
description_en: the total size of memory available to the observer process. Please enter a capacity, such as 8G
description_local: 可用总内存大小。请输入带容量单位的整数,如8G
- name: system_memory
name_local: 集群系统内存
essential: true
type: CAPACITY_MB
default: 30G
min_value: 0M
max_value: NULL
section: OBSERVER
need_restart: false
description_en: the memory reserved for internal use which cannot be allocated to any outer-tenant, and should be determined to guarantee every server functions normally. Please enter a capacity, such as 2G
description_local: 系统预留内存大小,不能分配给普通租户使用。请输入带容量单位的整数,如2G
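# Illustrative example (not part of this file): in a user deploy config, the
# essential memory parameters above would typically appear under the
# component's global section, e.g.
#   oceanbase-ce:
#     global:
#       memory_limit: 8G
#       system_memory: 2G
# The component name and the values here are assumptions for illustration.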
- name: __min_full_resource_pool_memory
require: true
type: INT
default: 2147483648
min_value: 1073741824
max_value: NULL
need_restart: false
description_en: the minimum memory limit of the resource pool
description_local: 资源池最小内存限制
- name: virtual_table_location_cache_expire_time
require: false
type: TIME
default: 8s
min_value: 1s
max_value: NULL
section: LOCATION_CACHE
need_restart: false
description_en: expiration time for virtual table location info in partition location cache.
description_local: 虚拟表的位置信息缓存过期时间
- name: ssl_external_kms_info
require: false
type: STRING
default:
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: when using the external key management center for ssl, this parameter will store some key management information
description_local: 配置ssl使用的主密钥管理服务
- name: enable_sql_operator_dump
require: false
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies whether sql operators (sort/hash join/material/window function/interm result/...) are allowed to write to disk
description_local: 是否允许SQL处理过程的中间结果写入磁盘以释放内存
- name: enable_rich_error_msg
require: false
type: BOOL
default: false
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies whether to add ip:port, time and trace id to user error messages.
description_local: 是否在客户端消息中添加服务器地址、时间、追踪ID等调试信息
- name: log_archive_concurrency
require: false
type: INT
default: 0
min_value: 0
max_value: NULL
section: OBSERVER
need_restart: false
description_en: concurrency for log_archive_sender and log_archive_spiter
description_local: 日志归档并发度
- name: server_balance_disk_tolerance_percent
require: false
type: INT
default: 1
min_value: 1
max_value: 100
section: LOAD_BALANCE
need_restart: false
description_en: specifies the tolerance (in percentage) of the unbalance of the disk space utilization among all servers. The average disk space utilization is calculated by dividing the total space by the number of servers. server balancer will start a rebalancing task when the deviation between the average usage and some server load is greater than this tolerance
description_local: 节点负载均衡策略中,磁盘资源不均衡的容忍度
- name: user_tab_col_stat_cache_priority
require: false
type: INT
default: 1
min_value: 1
max_value: NULL
section: CACHE
need_restart: false
description_en: user tab col stat cache priority
description_local: 统计数据缓存在缓存系统中的优先级
- name: recyclebin_object_expire_time
require: false
type: TIME
default: 0s
min_value: 0s
max_value: NULL
section: ROOT_SERVICE
need_restart: false
description_en: recyclebin object expire time, default 0 that means auto purge recyclebin off.
description_local: 回收站对象的有效期,超过有效的对象将被回收;0表示关闭回收功能;
- name: minor_warm_up_duration_time
require: false
type: TIME
default: 30s
min_value: 0s
max_value: 60m
section: OBSERVER
need_restart: false
description_en: warm up duration time for minor freeze.
description_local: 小合并产生新转储文件的预热时间
- name: migrate_concurrency
require: false
type: INT
default: 10
min_value: 0
max_value: 64
section: OBSERVER
need_restart: false
description_en: set concurrency of migration, set upper limit to migrate_concurrency and set lower limit to migrate_concurrency/2
description_local: 控制内部数据迁移的并发度
- name: redundancy_level
require: false
type: STRING
default: NORMAL
min_value: NULL
max_value: NULL
section: SSTABLE
need_restart: false
description_en: EXTERNAL, use external redundancy; NORMAL, tolerate one disk failure; HIGH, tolerate two disk failures if disk count is enough
description_local: OB内置本地磁盘RAID特性。暂勿使用
- name: trx_2pc_retry_interval
require: false
type: TIME
default: 100ms
min_value: 1ms
max_value: 5000ms
section: TRANS
need_restart: false
description_en: the time interval between the retries in case of failure during a transaction's two-phase commit phase
description_local: 两阶段提交失败时候自动重试的间隔
- name: cpu_count
name_local: 系统CPU总数
require: false
essential: true
type: INT
default: 0
min_value: 0
max_value: NULL
section: OBSERVER
need_restart: true
description_en: the number of CPUs in the system. If this parameter is set to zero, the number will be set according to sysconf; otherwise, this parameter is used.
description_local: 系统CPU总数,如果设置为0,将自动检测
- name: appname
require: false
type: STRING
default: obcluster
min_value: NULL
max_value: NULL
section: OBSERVER
need_redeploy: true
description_en: Name of the cluster
description_local: 本OceanBase集群名
- name: use_large_pages
require: false
type: STRING
default: false
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: true
description_en: used to manage the database's use of large pages; valid values are false, true, only
description_local: 控制内存大页的行为,"true"表示在操作系统开启内存大页并且有空闲大页时,数据库总是申请内存大页,否则申请普通内存页, "false"表示数据库不使用大页, "only"表示数据库总是分配大页
- name: dtl_buffer_size
require: false
type: CAPACITY_MB
default: 64K
min_value: 4K
max_value: 2M
section: OBSERVER
need_restart: false
description_en: buffer size for DTL
description_local: SQL数据传输模块使用的缓存大小
- name: server_balance_critical_disk_waterlevel
require: false
type: INT
default: 80
min_value: 0
max_value: 100
section: LOAD_BALANCE
need_restart: false
description_en: disk water level to determine server balance strategy
description_local: 磁盘水位线超过该阈值时,负载均衡策略将倾向于优先考虑磁盘均衡
- name: location_fetch_concurrency
require: false
type: INT
default: 20
min_value: 1
max_value: 1000
section: LOCATION_CACHE
need_restart: false
description_en: the maximum number of the tasks which fetch the partition location information concurrently.
description_local: 位置缓存信息刷新的最大并发度
- name: enable_async_syslog
require: false
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: specifies whether to use async syslog
description_local: 是否启用系统日志异步写
- name: clog_sync_time_warn_threshold
require: false
type: TIME
default: 1s
min_value: 1ms
max_value: 10000ms
section: TRANS
need_restart: false
description_en: the time given to the commit log synchronization between a leader and its followers before a warning message is printed in the log file.
description_local: 事务日志同步耗时告警阈值,同步耗时超过该值产生WARN日志
- name: location_cache_cpu_quota
require: false
type: DOUBLE
default: 5
min_value: 0
max_value: 10
section: TENANT
need_restart: false
description_en: the number of vCPUs allocated for the requests regarding location info of the core tables.
description_local: 位置缓存模块使用的CPU配额
- name: bf_cache_priority
require: false
type: INT
default: 1
min_value: 1
max_value: NULL
section: CACHE
need_restart: false
description_en: bloomfilter cache priority
description_local: 布隆过滤器占用缓存的优先级
- name: merger_check_interval
require: false
type: TIME
default: 10m
min_value: 10s
max_value: 60m
section: DAILY_MERGE
need_restart: false
description_en: the time interval between the schedules of the task that checks on the progress of MERGE for each zone.
description_local: 合并状态检查线程的调度间隔
- name: enable_rootservice_standalone
require: false
type: BOOL
default: false
min_value: NULL
max_value: NULL
section: ROOT_SERVICE
need_restart: false
description_en: specifies whether the SYS tenant is allowed to occupy an observer exclusively, thus running in the standalone mode.
description_local: 是否让系统租户和RootService独占observer节点
- name: px_workers_per_cpu_quota
require: false
type: INT
default: 10
min_value: 0
max_value: 20
section: TENANT
need_restart: false
description_en: the ratio between the number of system allocated px workers vs the maximum number of threads that can be scheduled concurrently.
description_local: 并行执行工作线程数的比例
- name: large_query_threshold
require: false
type: TIME
default: 100ms
min_value: 1ms
max_value: NULL
section: TENANT
need_restart: false
description_en: threshold for execution time beyond which a request may be paused and rescheduled as a large request
description_local: 一个查询执行时间超过该阈值会被判断为大查询,执行大查询调度策略
- name: sys_bkgd_net_percentage
require: false
type: INT
default: 60
min_value: 0
max_value: 100
section: OBSERVER
need_restart: false
description_en: the net percentage of sys background net.
description_local: 后台系统任务可占用网络带宽百分比
- name: fuse_row_cache_priority
require: false
type: INT
default: 1
min_value: 1
max_value: NULL
section: CACHE
need_restart: false
description_en: fuse row cache priority
description_local: 融合行缓存在缓存系统中的优先级
- name: rpc_timeout
require: false
type: TIME
default: 2s
min_value: NULL
max_value: NULL
section: RPC
need_restart: false
description_en: the time during which an RPC request is permitted to execute before it is terminated
description_local: 集群内部请求的超时时间
- name: tenant_task_queue_size
require: false
type: INT
default: 65536
min_value: 1024
max_value: NULL
section: OBSERVER
need_restart: false
description_en: the size of the task queue for each tenant.
description_local: 每个租户的请求队列大小
- name: resource_soft_limit
require: false
type: INT
default: 50
min_value: 1
max_value: 10000
section: LOAD_BALANCE
need_restart: false
description_en: Used along with resource_hard_limit in unit allocation. If server utilization is less than resource_soft_limit, a policy of best fit will be used for unit allocation; otherwise, a least-load policy will be employed. Ultimately, system utilization should not be larger than resource_hard_limit.
description_local: 当所有节点的资源水位低于该阈值时,不执行负载均衡
- name: plan_cache_evict_interval
require: false
type: TIME
default: 1s
min_value: 0s
max_value: NULL
section: TENANT
need_restart: false
description_en: time interval for periodic plan cache eviction.
description_local: 执行计划缓存的淘汰间隔
- name: server_balance_cpu_mem_tolerance_percent
require: false
type: INT
default: 5
min_value: 1
max_value: 100
section: LOAD_BALANCE
need_restart: false
description_en: specifies the tolerance (in percentage) of the unbalance of the cpu/memory utilization among all servers. The average cpu/memory utilization is calculated by dividing the total cpu/memory by the number of servers. server balancer will start a rebalancing task when the deviation between the average usage and some server load is greater than this tolerance
description_local: 节点负载均衡策略中,CPU和内存资源不均衡的容忍度
- name: autoinc_cache_refresh_interval
require: false
type: TIME
default: 3600s
min_value: 100ms
max_value: NULL
section: OBSERVER
need_restart: false
description_en: the interval at which the auto-increment service cache refreshes sync_value
description_local: 自动刷新自增列值的时间间隔
- name: all_server_list
require: false
type: STRING
default:
min_value: NULL
max_value: NULL
section: LOCATION_CACHE
need_restart: false
description_en: all server addr in cluster
description_local: 集群中所有机器的列表,不建议人工修改
- name: enable_rebalance
require: false
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: LOAD_BALANCE
need_restart: false
description_en: specifies whether the partition load-balancing is turned on.
description_local: 自动负载均衡开关
- name: internal_sql_execute_timeout
require: false
type: TIME
default: 30s
min_value: 1000us
max_value: 10m
section: OBSERVER
need_restart: false
description_en: the number of microseconds an internal DML request is permitted to execute before it is terminated.
description_local: 系统内部SQL请求的超时时间
- name: user_row_cache_priority
require: false
type: INT
default: 1
min_value: 1
max_value: NULL
section: CACHE
need_restart: false
description_en: user row cache priority
description_local: 基线数据行缓存在缓存系统中的优先级
- name: server_permanent_offline_time
require: false
type: TIME
default: 3600s
min_value: 20s
max_value: NULL
section: ROOT_SERVICE
need_restart: false
description_en: the time interval between any two heartbeats beyond which a server is considered to be permanently offline.
description_local: 节点心跳中断多久后认为其被“永久下线”,“永久下线”的节点上的数据副本需要被自动补足
- name: schema_history_expire_time
require: false
type: TIME
default: 7d
min_value: 1m
max_value: 30d
section: OBSERVER
need_restart: false
description_en: the expiration time for schema history data
description_local: 元数据历史数据过期时间
- name: datafile_disk_percentage
require: false
type: INT
min_value: 0
max_value: 99
modify_limit: decrease
section: SSTABLE
need_restart: false
description_en: the percentage of disk space used by the data files.
description_local: data_dir所在磁盘将被OceanBase系统初始化用于存储数据,本配置项表示占用该磁盘总空间的百分比
- name: default_compress_func
require: false
type: STRING
default: zstd_1.3.8
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
description_en: default compress function name for creating new tables
description_local: MySQL模式下,建表时使用的默认压缩算法
- name: memory_chunk_cache_size
require: false
type: CAPACITY_MB
default: 0M
min_value: 0M
max_value: NULL
section: OBSERVER
need_restart: false
description_en: the maximum size of memory cached by memory chunk cache.
description_local: 内存分配器缓存的内存块容量。值为0的时候表示系统自适应。
- name: ob_event_history_recycle_interval
require: false
type: TIME
default: 7d
min_value: 1d
max_value: 180d
section: ROOT_SERVICE
need_restart: false
description_en: the time to recycle event history.
description_local: OB事件表中事件条目的保存期限
- name: enable_ddl
require: false
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: ROOT_SERVICE
need_restart: false
description_en: specifies whether DDL operation is turned on.
description_local: 是否允许执行DDL
- name: balance_blacklist_failure_threshold
require: false
type: INT
default: 5
min_value: 0
max_value: 1000
section: LOAD_BALANCE
need_restart: false
description_en: the number of consecutive failures after which a balance task is put into the blacklist
description_local: 副本迁移等后台任务连续失败超过该阈值后,将被放入黑名单
- name: wait_leader_batch_count
require: false
type: INT
default: 1024
min_value: 128
max_value: 5000
section: ROOT_SERVICE
need_restart: false
description_en: the batch size of leader-switch commands sent by RootService each time the leader coordinator waits.
description_local: RootService发送切主命令的批次大小
- name: proxyro_password
require: false
type: STRING
default: ''
min_value: NULL
max_value: NULL
need_restart: false
description_en: password of observer proxyro user
description_local: proxyro用户的密码
- name: root_password
require: false
type: STRING
default: ''
min_value: NULL
max_value: NULL
need_restart: false
description_en: password of observer root user
description_local: sys租户root用户的密码
# todo: 等文档更新
- name: sql_login_thread_count
require: false
type: INT
default: 0
min_value: 0
max_value: 32
section: OBSERVER
need_restart: false
need_redeploy: false
description_en: 'the number of threads for sql login request. Range: [0, 32] in integer, 0 stands for using the default thread count defined in TG. The default thread count for login request in TG is normal: 6, mini-mode: 2'
description_local: ''
- name: tcp_keepcnt
require: false
type: INT
default: 10
min_value: 1
max_value: NULL
section: OBSERVER
need_restart: false
need_redeploy: false
description_en: 'The maximum number of keepalive probes TCP should send before dropping the connection. Takes effect for newly established connections. Range: [1, +∞)'
description_local: 关闭一个非活跃连接之前的最大重试次数。
- name: tcp_keepintvl
require: false
type: TIME
default: 6s
min_value: 1s
max_value: NULL
section: OBSERVER
need_restart: false
need_redeploy: false
description_en: 'The time (in seconds) between individual keepalive probes. Takes effect for newly established connections. Range: [1s, +∞)'
description_local: 开启客户端连接的探活机制后,前后两次探测之间的时间间隔,单位为秒。
- name: tcp_keepidle
require: false
type: TIME
default: 7200s
min_value: 1s
max_value: NULL
section: OBSERVER
need_restart: false
need_redeploy: false
description_en: 'The time (in seconds) the connection needs to remain idle before TCP starts sending keepalive probes. Takes effect for newly established connections. Range: [1s, +∞)'
description_local: 客户端连接上服务器后,如果没有数据发送,多久后会发送 Keepalive 探测分组,单位为秒。
- name: enable_tcp_keepalive
require: false
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
need_redeploy: false
description_en: enable TCP keepalive for the TCP connection of sql protocol. Takes effect for newly established connections.
description_local: 开启或关闭客户端连接的探活机制。
- name: ob_ratelimit_stat_period
require: false
type: TIME
default: 1s
min_value: 100ms
max_value: NULL
section: OBSERVER
need_restart: false
need_redeploy: false
description_en: "the time interval to update observer's maximum bandwidth to a certain region. "
description_local: OBServer 计算和更新最大带宽的时间间隔。
- name: enable_ob_ratelimit
require: false
type: BOOL
default: false
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
need_redeploy: false
description_en: enable ratelimit between regions for RPC connection.
description_local: 开启或关闭 Region 之间 RPC 连接的限流机制。
- name: schema_history_recycle_interval
require: false
type: TIME
default: 10m
min_value: 0s
max_value: NULL
section: LOAD_BALANCE
need_restart: false
need_redeploy: false
description_en: 'the time interval between the schedules of schema history recyle task. Range: [0s, +∞)'
description_local: 系统内部执行 schema 多版本记录回收任务的时间间隔。
- name: backup_data_file_size
require: false
type: CAPACITY_MB
default: 4G
min_value: 512M
max_value: 4G
section: OBSERVER
need_restart: false
need_redeploy: false
description_en: 'backup data file size. Range: [512M, 4G] in integer'
description_local: 备份数据文件的容量。
- name: data_storage_error_tolerance_time
require: false
type: TIME
default: 300s
min_value: 10s
max_value: 7200s
section: OBSERVER
need_restart: false
need_redeploy: false
description_en: time to tolerate disk read failure, after that, the disk status will be set error. Range [10s,7200s]. The default value is 300s
description_local: 数据盘状态设为 ERROR 状态的容忍时间。
- name: data_storage_warning_tolerance_time
require: false
type: TIME
default: 30s
min_value: 10s
max_value: 300s
section: OBSERVER
need_restart: false
need_redeploy: false
description_en: time to tolerate disk read failure, after that, the disk status will be set warning. Range [10s,300s]. The default value is 30s
description_local: 数据盘状态设为 WARNING 状态的容忍时间。
- name: index_block_cache_priority
require: false
type: INT
default: 10
min_value: 1
max_value: NULL
section: CACHE
need_restart: false
need_redeploy: false
description_en: index block cache priority. Range: [1, +∞)
description_local: 元数据索引微块缓存优先级。
- name: opt_tab_stat_cache_priority
require: false
type: INT
default: 1
min_value: 1
max_value: NULL
section: CACHE
need_restart: false
need_redeploy: false
description_en: tab stat cache priority. Range: [1, +∞)
description_local: 统计信息缓存优先级。
- name: tablet_ls_cache_priority
require: false
type: INT
default: 1000
min_value: 1
max_value: NULL
section: CACHE
need_restart: false
need_redeploy: false
description_en: tablet ls cache priority. Range: [1, +∞)
description_local: Tablet 映射缓存优先级。
- name: location_cache_refresh_sql_timeout
require: false
type: TIME
default: 1s
min_value: 1ms
max_value: NULL
section: LOCATION_CACHE
need_restart: false
need_redeploy: false
description_en: 'The timeout used for refreshing location cache by SQL. Range: [1ms, +∞)'
description_local: 通过 SQL 刷新 location_cache 的超时时间。
- name: location_cache_refresh_rpc_timeout
require: false
type: TIME
default: 500ms
min_value: 1ms
max_value: NULL
section: LOCATION_CACHE
need_restart: false
need_redeploy: false
description_en: 'The timeout used for refreshing location cache by RPC. Range: [1ms, +∞)'
description_local: 通过 RPC 刷新 location_cache 的超时时间。
- name: tablet_meta_table_check_interval
require: false
type: TIME
default: 30m
min_value: 1m
max_value: NULL
section: ROOT_SERVICE
need_restart: false
need_redeploy: false
description_en: 'the time interval that observer compares tablet meta table with local ls replica info and make adjustments to ensure the correctness of tablet meta table. Range: [1m,+∞)'
description_local: DBA_OB_TABLET_REPLICAS/CDB_OB_TABLET_REPLICAS 视图的后台巡检线程的检查间隔。
- name: ls_meta_table_check_interval
require: false
type: TIME
default: 1s
min_value: 1ms
max_value: NULL
section: ROOT_SERVICE
need_restart: false
need_redeploy: false
description_en: 'the time interval that observer compares ls meta table with local ls replica info and make adjustments to ensure the correctness of ls meta table. Range: [1ms,+∞)'
description_local: DBA_OB_LS_LOCATIONS/CDB_OB_LS_LOCATIONS 视图的后台巡检线程的检查间隔。
- name: tablet_meta_table_scan_batch_count
require: false
type: INT
default: 999
min_value: 1
max_value: 65536
section: ROOT_SERVICE
need_restart: false
need_redeploy: false
description_en: the number of tablet replica info entries read by each request on the tablet-related system tables during procedures such as load balancing, daily merge, election, etc. Range: (0, 65536]
description_local: Tablet meta table 迭代器使用过程中在内存里缓存的 Tablet 数量。
- name: rdma_io_thread_count
require: false
type: INT
default: 0
min_value: 0
max_value: 8
section: OBSERVER
need_restart: true
need_redeploy: false
description_en: 'the number of RDMA I/O threads for Libreasy. Range: [0, 8] in integer, 0 stands for RDMA being disabled.'
description_local: Libreasy 的 RDMA I/O 线程数。
- name: production_mode
require: true
type: BOOL
default: true
min_value: NULL
max_value: NULL
section: OBSERVER
need_restart: false
need_redeploy: false
description_en: Production mode switch, enabled by default. When enabled, the lower bounds of memory_limit and __min_full_resource_pool_memory are raised to 16G and 2147483648 respectively.
description_local: 生产模式开关,默认开启。开启后,memory_limit 和 __min_full_resource_pool_memory 的下界分别调整为 16G 和 2147483648。
- name: ocp_meta_tenant
require: false
type: DICT
default:
tenant_name: ocp
max_cpu: 1
memory_size: 2147483648
need_redeploy: true
description_en: The tenant specifications for ocp meta db
description_local: ocp express的元数据库使用的租户规格
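# Illustrative example (values are assumptions): overriding the meta tenant
# spec in a user deploy config would look like
#   ocp_meta_tenant:
#     tenant_name: ocp
#     max_cpu: 2
#     memory_size: 4294967296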
- name: ocp_meta_tenant_max_cpu
name_local: OCP express元数据库租户的CPU数
essential: true
require: false
type: INT
default: 1
need_redeploy: true
description_en: The tenant cpu count for ocp meta db
description_local: ocp express的元数据库使用的CPU数量
- name: ocp_meta_tenant_memory_size
name_local: OCP express元数据库租户内存
essential: true
require: false
type: CAPACITY_MB
default: 2G
need_redeploy: true
description_en: The tenant memory size for ocp meta db
description_local: ocp express的元数据库使用的租户内存大小
- name: ocp_meta_tenant_log_disk_size
name_local: OCP express元数据库租户日志磁盘大小
essential: true
require: false
type: CAPACITY_MB
default: 6656M
need_redeploy: true
description_en: The tenant log disk size for ocp meta db
description_local: ocp express的元数据库使用的租户日志磁盘大小
- name: ocp_meta_db
require: false
type: STRING
default: ocp_express
need_redeploy: true
description_en: The database name for ocp meta db
description_local: ocp express的元数据库使用的数据库名
- name: ocp_meta_username
require: false
type: STRING
default: meta
need_redeploy: true
description_en: The username for ocp meta db
description_local: ocp express的元数据库使用的用户名
- name: ocp_meta_password
require: false
type: STRING
default: oceanbase
need_redeploy: true
description_en: The password for ocp meta db
description_local: ocp express的元数据库使用的密码
- name: ocp_agent_monitor_password
require: false
type: STRING
default: ''
need_redeploy: true
description_en: The password for obagent monitor user
description_local: obagent 监控用户的密码
\ No newline at end of file
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import json
import time
import requests
from copy import deepcopy
from _errno import EC_OBSERVER_FAIL_TO_START, EC_OBSERVER_FAIL_TO_START_WITH_ERR, EC_OBSERVER_FAILED_TO_REGISTER, EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS
from collections import OrderedDict
def config_url(ocp_config_server, appname, cid):
cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname)
proxy_cfg_url = '%s&Action=GetObProxyConfig&ObRegionGroup=%s' % (ocp_config_server, appname)
# Command that clears the URL content for the cluster
cleanup_config_url_content = '%s&Action=DeleteObRootServiceInfoByClusterName&ClusterName=%s' % (ocp_config_server, appname)
# Command that register the cluster information to the Config URL
register_to_config_url = '%s&Action=ObRootServiceRegister&ObCluster=%s&ObClusterId=%s' % (ocp_config_server, appname, cid)
return cfg_url, cleanup_config_url_content, register_to_config_url
def init_config_server(ocp_config_server, appname, cid, force_delete, stdio):
def post(url):
stdio.verbose('post %s' % url)
response = requests.post(url)
if response.status_code != 200:
raise Exception('%s status code %s' % (url, response.status_code))
return json.loads(response.text)['Code']
cfg_url, cleanup_config_url_content, register_to_config_url = config_url(ocp_config_server, appname, cid)
ret = post(register_to_config_url)
if ret != 200:
if not force_delete:
raise Exception('%s may have been registered in %s' % (appname, ocp_config_server))
ret = post(cleanup_config_url_content)
if ret != 200 :
raise Exception('failed to clean up the config url content, return code %s' % ret)
if post(register_to_config_url) != 200:
return False
return cfg_url
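# Usage sketch (the config server URL and appname below are illustrative
# assumptions): for ocp_config_server 'http://1.1.1.1:8080/services?UID=x'
# and appname 'obcluster', config_url() yields
#   cfg_url = 'http://1.1.1.1:8080/services?UID=x&Action=ObRootServiceInfo&ObCluster=obcluster'
# init_config_server() first tries to register the cluster; if the name is
# already registered it raises unless force_delete is set, in which case it
# cleans up the stale entry and registers again before returning cfg_url.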
class EnvVariables(object):
def __init__(self, environments, client):
self.environments = environments
self.client = client
self.env_done = {}
def __enter__(self):
for env_key, env_value in self.environments.items():
self.env_done[env_key] = self.client.get_env(env_key)
self.client.add_env(env_key, env_value, True)
def __exit__(self, *args, **kwargs):
for env_key, env_value in self.env_done.items():
if env_value is not None:
self.client.add_env(env_key, env_value, True)
else:
self.client.del_env(env_key)
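# Minimal usage sketch (the path is an illustrative assumption): temporarily
# export environment variables on the remote client for the duration of a
# command; on exit, previous values are restored and new keys are removed.
#
#   with EnvVariables({'LD_LIBRARY_PATH': '/home/admin/oceanbase/lib:'}, client):
#       client.execute_command('bin/observer -V')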
def start(plugin_context, *args, **kwargs):
cluster_config = plugin_context.cluster_config
options = plugin_context.options
clients = plugin_context.clients
stdio = plugin_context.stdio
clusters_cmd = {}
need_bootstrap = True
root_servers = {}
global_config = cluster_config.get_global_conf()
appname = global_config['appname'] if 'appname' in global_config else None
cluster_id = global_config['cluster_id'] if 'cluster_id' in global_config else None
obconfig_url = global_config['obconfig_url'] if 'obconfig_url' in global_config else None
cfg_url = ''
if obconfig_url:
if not appname or not cluster_id:
stdio.error('need appname and cluster_id')
return
try:
cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio)
if not cfg_url:
stdio.error(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url))
return
except:
stdio.exception(EC_OBSERVER_FAILED_TO_REGISTER.format())
return
stdio.start_loading('Start observer')
for server in cluster_config.original_servers:
config = cluster_config.get_server_conf(server)
zone = config['zone']
if zone not in root_servers:
root_servers[zone] = '%s:%s:%s' % (server.ip, config['rpc_port'], config['mysql_port'])
rs_list_opt = '-r \'%s\'' % ';'.join([root_servers[zone] for zone in root_servers])
for server in cluster_config.servers:
client = clients[server]
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
if not server_config.get('data_dir'):
server_config['data_dir'] = '%s/store' % home_path
if client.execute_command('ls %s/clog/tenant_1/' % server_config['data_dir']).stdout.strip():
need_bootstrap = False
remote_pid_path = '%s/run/observer.pid' % home_path
remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip()
if remote_pid:
if client.execute_command('ls /proc/%s' % remote_pid):
continue
stdio.verbose('%s start command construction' % server)
if getattr(options, 'without_parameter', False) and client.execute_command('ls %s/etc/observer.config.bin' % home_path):
use_parameter = False
else:
use_parameter = True
cmd = []
if use_parameter:
not_opt_str = OrderedDict({
'mysql_port': '-p',
'rpc_port': '-P',
'zone': '-z',
'nodaemon': '-N',
'appname': '-n',
'cluster_id': '-c',
'data_dir': '-d',
'syslog_level': '-l',
'ipv6': '-6',
'mode': '-m',
'scn': '-f'
})
not_cmd_opt = [
'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode',
'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password'
]
get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
opt_str = []
for key in server_config:
if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'):
value = get_value(key)
opt_str.append('%s=%s' % (key, value))
if cfg_url:
opt_str.append('obconfig_url=\'%s\'' % cfg_url)
else:
cmd.append(rs_list_opt)
for key in not_opt_str:
if key in server_config:
value = get_value(key)
cmd.append('%s %s' % (not_opt_str[key], value))
cmd.append('-I %s' % server.ip)
cmd.append('-o %s' % ','.join(opt_str))
else:
cmd.append('-p %s' % server_config['mysql_port'])
clusters_cmd[server] = 'cd %s; %s/bin/observer %s' % (home_path, home_path, ' '.join(cmd))
for server in clusters_cmd:
environments = deepcopy(cluster_config.get_environments())
client = clients[server]
server_config = cluster_config.get_server_conf(server)
stdio.verbose('starting %s observer', server)
if 'LD_LIBRARY_PATH' not in environments:
environments['LD_LIBRARY_PATH'] = '%s/lib:' % server_config['home_path']
with EnvVariables(environments, client):
ret = client.execute_command(clusters_cmd[server])
if not ret:
stdio.stop_loading('fail')
stdio.error(EC_OBSERVER_FAIL_TO_START_WITH_ERR.format(server=server, stderr=ret.stderr))
return
stdio.stop_loading('succeed')
stdio.start_loading('observer program health check')
time.sleep(3)
failed = []
for server in cluster_config.servers:
client = clients[server]
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
remote_pid_path = '%s/run/observer.pid' % home_path
stdio.verbose('%s program health check' % server)
remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip()
if remote_pid and client.execute_command('ls /proc/%s' % remote_pid):
stdio.verbose('%s observer[pid: %s] started', server, remote_pid)
else:
failed.append(EC_OBSERVER_FAIL_TO_START.format(server=server))
if failed:
stdio.stop_loading('fail')
for msg in failed:
stdio.warn(msg)
return plugin_context.return_false()
else:
stdio.stop_loading('succeed')
return plugin_context.return_true(need_bootstrap=need_bootstrap)
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
import re
import time
import copy
from math import sqrt
import _errno as err
stdio = None
success = True
def get_port_socket_inode(client, port):
port = hex(port)[2:].zfill(4).upper()
cmd = "bash -c 'cat /proc/net/{tcp*,udp*}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port
res = client.execute_command(cmd)
if not res or not res.stdout.strip():
return False
stdio.verbose(res.stdout)
return res.stdout.strip().split('\n')
def parse_size(size):
_bytes = 0
if not isinstance(size, str) or size.isdigit():
_bytes = int(size)
else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)]
return _bytes
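# Examples: parse_size('8G') == 8 << 30, parse_size('512M') == 512 << 20,
# and a plain digit string such as parse_size('1024') is taken as bytes.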
def format_size(size):
units = ['B', 'K', 'M', 'G', 'T', 'P']
idx = 0
while idx < 5 and size >= 1024:
size /= 1024.0
idx += 1
return '%.1f%s' % (size, units[idx])
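# Examples: format_size(1536) == '1.5K', format_size(8 << 30) == '8.0G'.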
def time_delta(client):
time_st = time.time() * 1000
time_srv = int(client.execute_command('date +%s%N').stdout) / 1000000
time_ed = time.time() * 1000
time_it = time_ed - time_st
time_srv -= time_it
return time_srv - time_st
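# time_delta() estimates the remote-minus-local clock offset in milliseconds,
# compensating for the local round-trip cost of the call; the ntp check later
# in this plugin fails when the spread across servers exceeds 500 ms.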
def get_mount_path(disk, _path):
_mount_path = '/'
for p in disk:
if p in _path:
if len(p) > len(_mount_path):
_mount_path = p
return _mount_path
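# get_mount_path() picks the longest known mount point (a key of the df
# result) contained in _path, falling back to '/'.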
def get_system_memory(memory_limit, min_pool_memory):
if memory_limit <= 8 << 30:
system_memory = 2 << 30
elif memory_limit <= 16 << 30:
system_memory = 3 << 30
elif memory_limit <= 32 << 30:
system_memory = 5 << 30
elif memory_limit <= 48 << 30:
system_memory = 7 << 30
elif memory_limit <= 64 << 30:
system_memory = 10 << 30
else:
memory_limit_gb = memory_limit >> 30
system_memory = int(3 * (sqrt(memory_limit_gb) - 3)) << 30
return max(system_memory, min_pool_memory)
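# Reservation examples implied by the branches above: an 8G memory_limit
# reserves 2G, 64G reserves 10G, and beyond 64G the reservation grows as
# 3 * (sqrt(GB) - 3) GB, never dropping below min_pool_memory.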
def get_disk_info_by_path(path, client, stdio):
disk_info = {}
ret = client.execute_command('df --block-size=1024 {}'.format(path))
if ret:
for total, used, avail, puse, path in re.findall(r'(\d+)\s+(\d+)\s+(\d+)\s+(\d+%)\s+(.+)', ret.stdout):
disk_info[path] = {'total': int(total) << 10, 'avail': int(avail) << 10, 'need': 0}
stdio.verbose('get disk info for path {}, total: {} avail: {}'.format(path, disk_info[path]['total'], disk_info[path]['avail']))
return disk_info
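# Parses `df --block-size=1024 <path>` output into
# {mount_path: {'total', 'avail', 'need'}} with sizes in bytes; an empty
# dict means df failed for that path.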
def get_disk_info(all_paths, client, stdio):
overview_ret = True
disk_info = get_disk_info_by_path('', client, stdio)
if not disk_info:
overview_ret = False
disk_info = get_disk_info_by_path('/', client, stdio)
if not disk_info:
disk_info['/'] = {'total': 0, 'avail': 0, 'need': 0}
all_path_success = {}
for path in all_paths:
all_path_success[path] = False
cur_path = path
while cur_path not in disk_info:
disk_info_for_current_path = get_disk_info_by_path(cur_path, client, stdio)
if disk_info_for_current_path:
disk_info.update(disk_info_for_current_path)
all_path_success[path] = True
break
else:
cur_path = os.path.dirname(cur_path)
if overview_ret or all(all_path_success.values()):
return disk_info
def start_check(plugin_context, init_check_status=False, strict_check=False, work_dir_check=False, work_dir_empty_check=True, generate_configs={}, precheck=False, *args, **kwargs):
def check_pass(item):
status = check_status[server]
if status[item].status == err.CheckStatus.WAIT:
status[item].status = err.CheckStatus.PASS
def check_fail(item, error, suggests=[]):
status = check_status[server][item]
if status.status == err.CheckStatus.WAIT:
status.error = error
status.suggests = suggests
status.status = err.CheckStatus.FAIL
def wait_2_pass():
status = check_status[server]
for item in status:
check_pass(item)
def alert(item, error, suggests=[]):
global success
if strict_check:
success = False
check_fail(item, error, suggests)
stdio.error(error)
else:
stdio.warn(error)
def error(item, _error, suggests=[]):
global success
if plugin_context.dev_mode:
stdio.warn(_error)
else:
success = False
check_fail(item, _error, suggests)
stdio.error(_error)
def critical(item, error, suggests=[]):
global success
success = False
check_fail(item, error, suggests)
stdio.error(error)
def system_memory_check():
server_memory_config = server_memory_stat['servers']
for server in server_memory_config:
if server_memory_config[server]['system_memory']:
memory_limit = server_memory_config[server]['num']
if not memory_limit:
server_memory_config[server]['num'] = memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total'] / 100
factor = 0.75
suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {})
if memory_limit < server_memory_config[server]['system_memory']:
critical('mem', err.EC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server), [suggest])
elif memory_limit * factor < server_memory_config[server]['system_memory']:
alert('mem', err.WC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server, factor=factor), [suggest])
global stdio, success
success = True
check_status = {}
cluster_config = plugin_context.cluster_config
plugin_context.set_variable('start_check_status', check_status)
for server in cluster_config.servers:
check_status[server] = {
'port': err.CheckStatus(),
'mem': err.CheckStatus(),
'disk': err.CheckStatus(),
'ulimit': err.CheckStatus(),
'aio': err.CheckStatus(),
'net': err.CheckStatus(),
'ntp': err.CheckStatus(),
'ocp meta db': err.CheckStatus()
}
if work_dir_check:
check_status[server]['dir'] = err.CheckStatus()
if init_check_status:
return plugin_context.return_true(start_check_status=check_status)
clients = plugin_context.clients
stdio = plugin_context.stdio
servers_clients = {}
servers_port = {}
servers_memory = {}
servers_disk = {}
servers_clog_mount = {}
servers_net_inferface = {}
servers_dirs = {}
servers_check_dirs = {}
servers_log_disk_size = {}
servers_min_pool_memory = {}
PRO_MEMORY_MIN = 16 << 30
PRO_POOL_MEM_MIN = 2147483648
START_NEED_MEMORY = 3 << 30
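# Thresholds used below: production mode requires memory_limit >= 16G and
# __min_full_resource_pool_memory >= 2147483648 (2G); starting one observer
# needs about 3G of available memory.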
global_generate_config = generate_configs.get('global', {})
stdio.start_loading('Check before start observer')
need_bootstrap = True
for server in cluster_config.servers:
ip = server.ip
client = clients[server]
server_generate_config = generate_configs.get(server, {})
servers_clients[ip] = client
server_config = cluster_config.get_server_conf_with_default(server)
home_path = server_config['home_path']
if not precheck:
if need_bootstrap:
data_dir = server_config['data_dir'] if server_config.get('data_dir') else '%s/store' % home_path
if client.execute_command('ls %s/clog/tenant_1/' % data_dir).stdout.strip():
need_bootstrap = False
remote_pid_path = '%s/run/observer.pid' % home_path
remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip()
if remote_pid:
if client.execute_command('ls /proc/%s' % remote_pid):
stdio.verbose('%s is running, skip' % server)
continue
if work_dir_check:
stdio.verbose('%s dir check' % server)
if ip not in servers_dirs:
servers_dirs[ip] = {}
servers_check_dirs[ip] = {}
dirs = servers_dirs[ip]
check_dirs = servers_check_dirs[ip]
original_server_conf = cluster_config.get_server_conf(server)
if not server_config.get('data_dir'):
server_config['data_dir'] = '%s/store' % home_path
if not server_config.get('redo_dir'):
server_config['redo_dir'] = server_config['data_dir']
if not server_config.get('clog_dir'):
server_config['clog_dir'] = '%s/clog' % server_config['redo_dir']
if not server_config.get('ilog_dir'):
server_config['ilog_dir'] = '%s/ilog' % server_config['redo_dir']
if not server_config.get('slog_dir'):
server_config['slog_dir'] = '%s/slog' % server_config['redo_dir']
if server_config['redo_dir'] == server_config['data_dir']:
keys = ['home_path', 'data_dir', 'clog_dir', 'ilog_dir', 'slog_dir']
else:
keys = ['home_path', 'data_dir', 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir']
for key in keys:
path = server_config.get(key)
suggests = [err.SUG_CONFIG_CONFLICT_DIR.format(key=key, server=server)]
if path in dirs and dirs[path]:
critical('dir', err.EC_CONFIG_CONFLICT_DIR.format(server1=server, path=path, server2=dirs[path]['server'], key=dirs[path]['key']), suggests)
dirs[path] = {
'server': server,
'key': key,
}
if key not in original_server_conf:
continue
empty_check = work_dir_empty_check
while True:
if path in check_dirs:
if check_dirs[path] != True:
critical('dir', check_dirs[path], suggests)
break
if client.execute_command('bash -c "[ -a %s ]"' % path):
is_dir = client.execute_command('[ -d {} ]'.format(path))
has_write_permission = client.execute_command('[ -w {} ]'.format(path))
if is_dir and has_write_permission:
if empty_check:
ret = client.execute_command('ls %s' % path)
if not ret or ret.stdout.strip():
check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_EMPTY.format(path=path))
else:
check_dirs[path] = True
else:
check_dirs[path] = True
else:
if not is_dir:
check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.NOT_DIR.format(path=path))
else:
check_dirs[path] = err.EC_FAIL_TO_INIT_PATH.format(server=server, key=key, msg=err.InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=path))
else:
path = os.path.dirname(path)
empty_check = False
if ip not in servers_port:
servers_disk[ip] = {}
servers_port[ip] = {}
servers_clog_mount[ip] = {}
servers_net_inferface[ip] = {}
servers_memory[ip] = {'num': 0, 'percentage': 0, 'servers': {}}
memory = servers_memory[ip]
ports = servers_port[ip]
disk = servers_disk[ip]
clog_mount = servers_clog_mount[ip]
inferfaces = servers_net_inferface[ip]
stdio.verbose('%s port check' % server)
for key in ['mysql_port', 'rpc_port']:
port = int(server_config[key])
if port in ports:
critical(
'port',
err.EC_CONFIG_CONFLICT_PORT.format(server1=server, port=port, server2=ports[port]['server'], key=ports[port]['key']),
[err.SUG_PORT_CONFLICTS.format()]
)
continue
ports[port] = {
'server': server,
'key': key
}
if get_port_socket_inode(client, port):
critical('port', err.EC_CONFLICT_PORT.format(server=ip, port=port), [err.SUG_USE_OTHER_PORT.format()])
servers_min_pool_memory[server] = __min_full_resource_pool_memory = server_config.get('__min_full_resource_pool_memory')
if server_config.get('production_mode') and __min_full_resource_pool_memory < PRO_POOL_MEM_MIN:
error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key="__min_full_resource_pool_memory", limit=PRO_POOL_MEM_MIN), [err.SUB_SET_NO_PRODUCTION_MODE.format()])
memory_limit = 0
percentage = 0
if server_config.get('memory_limit'):
memory_limit = parse_size(server_config['memory_limit'])
if server_config.get('production_mode') and memory_limit < PRO_MEMORY_MIN:
error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=format_size(PRO_MEMORY_MIN)), [err.SUB_SET_NO_PRODUCTION_MODE.format()])
memory['num'] += memory_limit
elif 'memory_limit_percentage' in server_config:
percentage = int(parse_size(server_config['memory_limit_percentage']))
memory['percentage'] += percentage
else:
percentage = 80
memory['percentage'] += percentage
memory['servers'][server] = {
'num': memory_limit,
'percentage': percentage,
'system_memory': parse_size(server_config.get('system_memory', 0))
}
data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store')
redo_dir = server_config['redo_dir'] if server_config.get('redo_dir') else data_path
clog_dir = server_config['clog_dir'] if server_config.get('clog_dir') else os.path.join(redo_dir, 'clog')
if not client.execute_command('ls %s/sstable/block_file' % data_path):
disk[data_path] = {'server': server}
clog_mount[clog_dir] = {'server': server}
if 'datafile_size' in server_config and server_config['datafile_size'] and parse_size(server_config['datafile_size']):
# if need is string, it means use datafile_size
disk[data_path]['need'] = server_config['datafile_size']
elif 'datafile_disk_percentage' in server_config and server_config['datafile_disk_percentage']:
# if need is integer, it means use datafile_disk_percentage
disk[data_path]['need'] = int(server_config['datafile_disk_percentage'])
if 'log_disk_size' in server_config and server_config['log_disk_size'] and parse_size(server_config['log_disk_size']):
# if need is string, it means use log_disk_size
clog_mount[clog_dir]['need'] = server_config['log_disk_size']
elif 'log_disk_percentage' in server_config and server_config['log_disk_percentage']:
# if need is integer, it means use log_disk_percentage
clog_mount[clog_dir]['need'] = int(server_config['log_disk_percentage'])
devname = server_config.get('devname')
if devname:
if not client.execute_command("grep -e '^ *%s:' /proc/net/dev" % devname):
suggest = err.SUG_NO_SUCH_NET_DEVIC.format(ip=ip)
suggest.auto_fix = 'devname' not in global_generate_config and 'devname' not in server_generate_config
critical('net', err.EC_NO_SUCH_NET_DEVICE.format(server=server, devname=devname), suggests=[suggest])
if devname not in inferfaces:
inferfaces[devname] = []
inferfaces[devname].append(ip)
ip_server_memory_info = {}
for ip in servers_disk:
ip_servers = servers_memory[ip]['servers'].keys()
server_num = len(ip_servers)
client = servers_clients[ip]
ret = client.execute_command('cat /proc/sys/fs/aio-max-nr /proc/sys/fs/aio-nr')
if not ret:
for server in ip_servers:
alert('aio', err.EC_FAILED_TO_GET_AIO_NR.format(ip=ip), [err.SUG_CONNECT_EXCEPT.format()])
else:
try:
max_nr, nr = ret.stdout.strip().split('\n')
max_nr, nr = int(max_nr), int(nr)
need = server_num * 20000
RECD_AIO = 1048576
if need > max_nr - nr:
for server in ip_servers:
critical('aio', err.EC_AIO_NOT_ENOUGH.format(ip=ip, avail=max_nr - nr, need=need), [err.SUG_SYSCTL.format(var='fs.aio-max-nr', value=max(RECD_AIO, need), ip=ip)])
elif int(max_nr) < RECD_AIO:
for server in ip_servers:
alert('aio', err.WC_AIO_NOT_ENOUGH.format(ip=ip, current=max_nr), [err.SUG_SYSCTL.format(var='fs.aio-max-nr', value=RECD_AIO, ip=ip)])
except:
for server in ip_servers:
alert('aio', err.EC_FAILED_TO_GET_AIO_NR.format(ip=ip), [err.SUG_UNSUPPORT_OS.format()])
stdio.exception('')
ret = client.execute_command('ulimit -a')
ulimits_min = {
'open files': {
'need': lambda x: 20000 * x,
'recd': lambda x: 655350,
'name': 'nofile'
},
'max user processes': {
'need': lambda x: 4096,
'recd': lambda x: 4096 * x,
'name': 'nproc'
},
}
ulimits = {}
src_data = re.findall('\s?([a-zA-Z\s]+[a-zA-Z])\s+\([a-zA-Z\-,\s]+\)\s+([\d[a-zA-Z]+)', ret.stdout) if ret else []
for key, value in src_data:
ulimits[key] = value
for key in ulimits_min:
value = ulimits.get(key)
if value == 'unlimited':
continue
if not value or not (value.strip().isdigit()):
for server in ip_servers:
alert('ulimit', '(%s) failed to get %s' % (ip, key), [err.SUG_UNSUPPORT_OS.format()])
else:
value = int(value)
need = ulimits_min[key]['need'](server_num)
if need > value:
for server in ip_servers:
critical('ulimit', err.EC_ULIMIT_CHECK.format(server=ip, key=key, need=need, now=value), [err.SUG_ULIMIT.format(name=ulimits_min[key]['name'], value=need, ip=ip)])
else:
need = ulimits_min[key]['recd'](server_num)
if need > value:
for server in ip_servers:
alert('ulimit', err.WC_ULIMIT_CHECK.format(server=ip, key=key, need=need, now=value), [err.SUG_ULIMIT.format(name=ulimits_min[key]['name'], value=need, ip=ip)])
# memory
ret = client.execute_command('cat /proc/meminfo')
if ret:
server_memory_stats = {}
memory_key_map = {
'MemTotal': 'total',
'MemFree': 'free',
'MemAvailable': 'available',
'Buffers': 'buffers',
'Cached': 'cached'
}
for key in memory_key_map:
server_memory_stats[memory_key_map[key]] = 0
for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
if k in memory_key_map:
key = memory_key_map[k]
server_memory_stats[key] = parse_size(str(v))
ip_server_memory_info[ip] = server_memory_stats
server_memory_stat = servers_memory[ip]
min_start_need = server_num * START_NEED_MEMORY
total_use = server_memory_stat['percentage'] * server_memory_stats['total'] / 100 + server_memory_stat['num']
if min_start_need > server_memory_stats['available']:
for server in ip_servers:
error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(min_start_need)), [err.SUG_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip)])
elif total_use > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']:
for server in ip_servers:
server_generate_config = generate_configs.get(server, {})
suggest = err.SUG_OBSERVER_REDUCE_MEM.format()
suggest.auto_fix = True
for key in ['memory_limit', 'memory_limit_percentage']:
if key in global_generate_config or key in server_generate_config:
suggest.auto_fix = False
break
error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(total_use)), [suggest])
elif total_use > server_memory_stats['free']:
system_memory_check()
for server in ip_servers:
alert('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(total_use)), [err.SUG_OBSERVER_REDUCE_MEM.format()])
else:
system_memory_check()
# disk
all_path = set(list(servers_disk[ip].keys()) + list(servers_clog_mount[ip].keys()))
disk = get_disk_info(all_paths=all_path, client=client, stdio=stdio)
stdio.verbose('disk: {}'.format(disk))
for path in servers_disk[ip]:
mount_path = get_mount_path(disk, path)
need = servers_disk[ip][path].get('need')
if not need:
for clog_path in servers_clog_mount[ip]:
clog_mount_path = get_mount_path(disk, clog_path)
if clog_mount_path == mount_path:
need = 60
stdio.verbose('clog and data use the same disk, datadisk percentage: {}'.format(need))
break
else:
need = 90
stdio.verbose('datadisk percentage: {}'.format(need))
slog_size = float(4 << 30)
if isinstance(need, int):
# slog need 4G
disk[mount_path]['need'] += max(disk[mount_path]['total'] - slog_size, 0) * need / 100
else:
disk[mount_path]['need'] += parse_size(need)
disk[mount_path]['need'] += slog_size
disk[mount_path]['is_data_disk'] = True
for path in servers_clog_mount[ip]:
mount_path = get_mount_path(disk, path)
if 'need' in servers_clog_mount[ip][path]:
need = servers_clog_mount[ip][path]['need']
elif disk[mount_path].get('is_data_disk'):
# hard code
need = 30
stdio.verbose('clog and data use the same disk, clog percentage: {}'.format(need))
else:
need = 90
stdio.verbose('clog percentage: {}'.format(need))
if isinstance(need, int):
# log_disk_percentage
log_disk_size = disk[mount_path]['total'] * need / 100
else:
# log_disk_size
log_disk_size = parse_size(need)
servers_log_disk_size[servers_clog_mount[ip][path]['server']] = log_disk_size
disk[mount_path]['need'] += log_disk_size
disk[mount_path]['is_clog_disk'] = True
for p in disk:
avail = disk[p]['avail']
need = disk[p]['need']
suggests = []
if disk[p].get('is_data_disk') and disk[p].get('is_clog_disk'):
suggests.append(err.SUG_OBSERVER_SAME_DISK.format())
for server in ip_servers:
alert('disk', err.WC_OBSERVER_SAME_DISK.format(ip=ip, disk=p), suggests)
if need > avail:
suggest_temps = {
'data': {
'tmplate': err.SUG_OBSERVER_NOT_ENOUGH_DISK,
'keys': ['datafile_size', 'datafile_disk_percentage']
}
}
if suggests:
suggest_temps['mem'] = {
'tmplate': err.SUG_OBSERVER_REDUCE_MEM,
'keys': ['memory_limit', 'memory_limit_percentage']
}
suggest_temps['redo'] = {
'tmplate': err.SUG_OBSERVER_REDUCE_REDO,
'keys': ['log_disk_size', 'log_disk_percentage']
}
for server in ip_servers:
tmp_suggests = []
server_generate_config = generate_configs.get(server, {})
for item in suggest_temps:
suggest = suggest_temps[item]['tmplate'].format()
suggest.auto_fix = True
for key in suggest_temps[item]['keys']:
if key in global_generate_config or key in server_generate_config:
suggest.auto_fix = False
break
tmp_suggests.append(suggest)
tmp_suggests = sorted(tmp_suggests, key=lambda suggest: suggest.auto_fix, reverse=True)
critical('disk', err.EC_OBSERVER_NOT_ENOUGH_DISK.format(ip=ip, disk=p, avail=format_size(avail), need=format_size(need)), tmp_suggests + suggests)
global_conf = cluster_config.get_global_conf()
has_ocp = 'ocp-express' in plugin_context.components
if not has_ocp and any([key.startswith('ocp_meta') for key in global_conf]):
has_ocp = True
if has_ocp and need_bootstrap:
global_conf_with_default = copy.deepcopy(cluster_config.get_global_conf_with_default())
original_global_conf = cluster_config.get_original_global_conf()
ocp_meta_tenant_prefix = 'ocp_meta_tenant_'
for key in global_conf_with_default:
if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None):
global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key]
meta_db_memory_size = parse_size(global_conf_with_default['ocp_meta_tenant'].get('memory_size'))
servers_sys_memory = {}
if meta_db_memory_size:
sys_memory_size = None
if 'sys_tenant' in global_conf and 'memory_size' in global_conf['sys_tenant']:
sys_memory_size = global_conf['sys_tenant']['memory_size']
for server in cluster_config.servers:
if server.ip not in servers_memory or server not in servers_memory[server.ip]['servers'] or server not in servers_min_pool_memory:
stdio.verbose('skip server {} for missing some memory info.'.format(server))
continue
memory_limit = servers_memory[server.ip]['servers'][server]['num']
system_memory = servers_memory[server.ip]['servers'][server]['system_memory']
min_pool_memory = servers_min_pool_memory[server]
if system_memory == 0:
system_memory = get_system_memory(memory_limit, min_pool_memory)
if not sys_memory_size:
sys_memory_size = servers_sys_memory[server] = max(min_pool_memory, min((memory_limit - system_memory) * 0.25, parse_size('16G')))
if meta_db_memory_size + system_memory + sys_memory_size <= memory_limit:
break
else:
suggest = err.SUG_OCP_EXPRESS_REDUCE_META_DB_MEM.format()
suggest.auto_fix = True
if 'ocp_meta_tenant_memory_size' in global_generate_config:
suggest.auto_fix = False
error('ocp meta db', err.EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_MEM.format(), [suggest])
meta_db_log_disk_size = global_conf_with_default['ocp_meta_tenant'].get('log_disk_size')
meta_db_log_disk_size = parse_size(meta_db_log_disk_size) if meta_db_log_disk_size else meta_db_log_disk_size
if not meta_db_log_disk_size and meta_db_memory_size:
meta_db_log_disk_size = meta_db_memory_size * 3
if meta_db_log_disk_size:
for server in cluster_config.servers:
log_disk_size = servers_log_disk_size[server]
sys_log_disk_size = servers_sys_memory.get(server, 0)
if meta_db_log_disk_size + sys_log_disk_size <= log_disk_size:
break
else:
suggest = err.SUG_OCP_EXPRESS_REDUCE_META_DB_LOG_DISK.format()
suggest.auto_fix = True
if 'ocp_meta_tenant_log_disk_size' in global_generate_config:
suggest.auto_fix = False
error('ocp meta db', err.EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK.format(), [suggest])
if success:
for ip in servers_net_inferface:
if servers_net_inferface[ip].get(None):
client = servers_clients[ip]
ip_servers = servers_memory[ip]['servers'].keys()  # recompute; values left over from the earlier loop would be stale here
devinfo = client.execute_command('cat /proc/net/dev').stdout
interfaces = []
for interface in re.findall('\n\s+(\w+):', devinfo):
if interface != 'lo':
interfaces.append(interface)
if not interfaces:
interfaces = ['lo']
if len(interfaces) > 1:
servers = ','.join(str(server) for server in servers_net_inferface[ip][None])
suggest = err.SUG_NO_SUCH_NET_DEVIC.format(ip=ip)
for server in ip_servers:
critical('net', err.EC_OBSERVER_MULTI_NET_DEVICE.format(ip=ip, server=servers), [suggest])
else:
servers_net_inferface[ip][interfaces[0]] = servers_net_inferface[ip][None]
del servers_net_inferface[ip][None]
if success:
for ip in servers_net_inferface:
client = servers_clients[ip]
ip_servers = servers_memory[ip]['servers'].keys()  # recompute per ip; the loop variable from the earlier check would be stale
for devname in servers_net_inferface[ip]:
if client.is_localhost() and devname != 'lo' or (not client.is_localhost() and devname == 'lo'):
suggest = err.SUG_NO_SUCH_NET_DEVIC.format(ip=ip)
suggest.auto_fix = client.is_localhost() and 'devname' not in global_generate_config and 'devname' not in server_generate_config
for server in ip_servers:
critical('net', err.EC_OBSERVER_PING_FAILED.format(ip1=ip, devname=devname, ip2=ip), [suggest])
continue
for _ip in servers_clients:
if ip == _ip:
continue
if not client.execute_command('ping -W 1 -c 1 -I %s %s' % (devname, _ip)):
suggest = err.SUG_NO_SUCH_NET_DEVIC.format(ip=ip)
suggest.auto_fix = 'devname' not in global_generate_config and 'devname' not in server_generate_config
for server in ip_servers:
critical('net', err.EC_OBSERVER_PING_FAILED.format(ip1=ip, devname=devname, ip2=_ip), [suggest])
break
if success:
times = []
for ip in servers_clients:
client = servers_clients[ip]
delta = time_delta(client)
stdio.verbose('%s time delta %s' % (ip, delta))
times.append(delta)
if times and max(times) - min(times) > 500:
critical('ntp', err.EC_OBSERVER_TIME_OUT_OF_SYNC.format(), [err.SUG_OBSERVER_TIME_OUT_OF_SYNC.format()])
for server in cluster_config.servers:
status = check_status[server]
for key in status:
if status[key].status == err.CheckStatus.WAIT:
status[key].status = err.CheckStatus.PASS
if success:
stdio.stop_loading('succeed')
plugin_context.return_true()
else:
stdio.stop_loading('fail')
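The memory and log-disk checks above lean on Python's for/else: the else branch runs only when the loop never hit `break`, i.e. when no server had enough headroom for the ocp meta tenant. A minimal, self-contained sketch of that pattern (the field names are hypothetical, not the plugin's real structures):

```python
def check_meta_db_memory(servers, meta_db_memory_size):
    # for/else: the else clause runs only if no `break` occurred,
    # meaning no single server can host the ocp meta tenant.
    for s in servers:
        if meta_db_memory_size + s['system_memory'] + s['sys_memory_size'] <= s['memory_limit']:
            break  # one server has enough headroom; the check passes
    else:
        raise RuntimeError('There is not enough memory for ocp meta tenant')

# usage with hypothetical numbers (bytes): 16G limit, 4G system, 2G sys tenant, 2G meta
check_meta_db_memory(
    [{'memory_limit': 16 << 30, 'system_memory': 4 << 30, 'sys_memory_size': 2 << 30}],
    meta_db_memory_size=2 << 30,
)
```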
......@@ -27,7 +27,7 @@
name_local: 进程内存
require: true
essential: true
type: CAPACITY
type: CAPACITY_MB
min_value: 512M
need_restart: true
description_en: the memory size of ocp express server. Please enter a capacity, such as 2G
......
......@@ -202,14 +202,15 @@ def prepare_parameters(cluster_config, stdio):
if value is not None:
depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info:
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
zone = ob_server_conf['zone']
if zone not in ob_zones:
ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values()
break
for comp in ['obproxy', 'obproxy-ce']:
......@@ -266,7 +267,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer:
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'],
depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
......@@ -333,26 +339,37 @@ def start(plugin_context, start_env=None, *args, **kwargs):
else:
use_parameter = True
# check meta db connect before start
matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url)
if matched:
if jdbc_url:
matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url)
if not matched:
stdio.error("Invalid jdbc url: %s" % jdbc_url)
return
ip = matched.group(1)
sql_port = matched.group(2)[1:]
database = matched.group(3)
connected = False
retries = 300
while not connected and retries:
connect_infos = [[ip, sql_port]]
else:
connect_infos = server_config.get('connect_infos') or []
database = server_config.get('ocp_meta_db', '')
connected = False
retries = 300
while not connected and retries:
for connect_info in connect_infos:
retries -= 1
server_ip = connect_info[0]
server_port = connect_info[-1]
try:
Cursor(ip=ip, port=sql_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio)
Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio)
jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database)
connected = True
break
except:
time.sleep(1)
if not connected:
success = False
stdio.error("{}: failed to connect meta db".format(server))
continue
else:
stdio.verbose('unmatched jdbc url, skip meta db connection check')
if not connected:
success = False
stdio.error("{}: failed to connect meta db".format(server))
continue
if server_config.get('encrypt_password', False):
private_key, public_key = get_key(client, os.path.join(home_path, 'conf'), stdio)
public_key_str = get_plain_public_key(public_key)
......
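The start-time check above accepts either an explicit jdbc_url or the (ip, port) candidates collected from the depended observer, retrying until one connection succeeds. A simplified sketch of that fallback, with a plain callable standing in for the plugin's Cursor helper (an assumption, not the real class), plus a guard for the empty candidate list that the retry loop above would otherwise spin on:

```python
import re
import time

JDBC_RE = re.compile(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)")

def resolve_connect_infos(jdbc_url, fallback_infos):
    """Return [(ip, port), ...] parsed from a jdbc url, else the fallback list."""
    if jdbc_url:
        matched = JDBC_RE.match(jdbc_url)
        if not matched:
            raise ValueError('Invalid jdbc url: %s' % jdbc_url)
        ip, port = matched.group(1), matched.group(2)[1:]
        return [(ip, port)]
    return fallback_infos or []

def wait_for_meta_db(connect_infos, connect, retries=300):
    """Round-robin over candidates until one connect() succeeds; returns the jdbc url."""
    if not connect_infos:
        return None  # avoid spinning forever on an empty list
    while retries:
        for ip, port in connect_infos:
            retries -= 1
            try:
                connect(ip, port)  # stand-in for Cursor(ip=..., port=..., ...)
                return 'jdbc:oceanbase://{}:{}'.format(ip, port)
            except Exception:
                time.sleep(1)
    return None
```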
......@@ -145,14 +145,14 @@ def prepare_parameters(cluster_config, stdio):
if value is not None:
depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info:
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
zone = ob_server_conf['zone']
if zone not in ob_zones:
ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values()
break
for comp in ['obproxy', 'obproxy-ce']:
......@@ -209,7 +209,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer:
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
depends_key_maps = {
......
......@@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function
from tool import ConfigUtil
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs):
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
namespace = plugin_context.namespace
namespaces = plugin_context.namespaces
deploy_name = plugin_context.deploy_name
......@@ -51,8 +51,8 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository)
if not stop_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs):
return
return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
apply_param_plugin(dest_repository)
warns = {}
not_support = ['system_password']
......
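The change above threads install_repository_to_servers into the upgrade flow so the new binaries are pushed out after the stop and before parameters are re-applied for the destination repository. A runnable sketch of just that ordering, with callables standing in for the real plugins (an illustration of the sequence, not OBD's actual signatures):

```python
def upgrade(cur_repository, dest_repository, steps):
    # steps: dict of callables; only the ordering matters here
    steps['apply_param'](cur_repository)          # old defaults, for a clean stop
    steps['stop']()
    steps['install_to_servers'](dest_repository)  # the fix: ship new binaries first
    steps['apply_param'](dest_repository)         # then switch to the new defaults
    steps['start']()

# usage: trace the call order
trace = []
upgrade('v1', 'v2', {
    'apply_param': lambda r: trace.append(('apply_param', r)),
    'stop': lambda: trace.append('stop'),
    'install_to_servers': lambda r: trace.append(('install', r)),
    'start': lambda: trace.append('start'),
})
print(trace)  # [('apply_param', 'v1'), 'stop', ('install', 'v2'), ('apply_param', 'v2'), 'start']
```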
......@@ -27,7 +27,7 @@
name_local: 进程内存
require: true
essential: true
type: CAPACITY
type: CAPACITY_MB
min_value: 512M
need_restart: true
description_en: the memory size of ocp express server. Please enter a capacity, such as 2G
......
......@@ -201,14 +201,15 @@ def prepare_parameters(cluster_config, stdio):
if value is not None:
depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info:
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
zone = ob_server_conf['zone']
if zone not in ob_zones:
ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values()
break
for comp in ['obproxy', 'obproxy-ce']:
......@@ -265,7 +266,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer:
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'],
depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
......@@ -333,26 +339,37 @@ def start(plugin_context, start_env=None, *args, **kwargs):
else:
use_parameter = True
# check meta db connect before start
matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url)
if matched:
if jdbc_url:
matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url)
if not matched:
stdio.error("Invalid jdbc url: %s" % jdbc_url)
return
ip = matched.group(1)
sql_port = matched.group(2)[1:]
database = matched.group(3)
connected = False
retries = 300
while not connected and retries:
connect_infos = [[ip, sql_port]]
else:
connect_infos = server_config.get('connect_infos') or []
database = server_config.get('ocp_meta_db', '')
connected = False
retries = 300
while not connected and retries:
for connect_info in connect_infos:
retries -= 1
server_ip = connect_info[0]
server_port = connect_info[-1]
try:
Cursor(ip=ip, port=sql_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio)
Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio)
jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database)
connected = True
break
except:
time.sleep(1)
if not connected:
success = False
stdio.error("{}: failed to connect meta db".format(server))
continue
else:
stdio.verbose('unmatched jdbc url, skip meta db connection check')
if not connected:
success = False
stdio.error("{}: failed to connect meta db".format(server))
continue
if server_config.get('encrypt_password', False):
private_key, public_key = get_key(client, os.path.join(home_path, 'conf'), stdio)
public_key_str = get_plain_public_key(public_key)
......
......@@ -146,14 +146,14 @@ def prepare_parameters(cluster_config, stdio):
if value is not None:
depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info:
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
zone = ob_server_conf['zone']
if zone not in ob_zones:
ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values()
break
for comp in ['obproxy', 'obproxy-ce']:
......@@ -210,7 +210,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer:
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
depends_key_maps = {
......
- name: home_path
name_local: 工作目录
require: true
essential: true
type: STRING
need_redeploy: true
description_en: The working directory of the OCP Express server
description_local: OCP express server工作目录
- name: log_dir
name_local: 日志目录
type: STRING
require: false
essential: true
need_redeploy: true
description_en: The directory for log files. The default value is $home_path/log.
description_local: OCP express server日志目录, 默认为工作目录下的log
- name: java_bin
name_local: java路径
type: STRING
require: true
essential: true
default: java
need_restart: true
description_en: The path of the java binary
description_local: OCP express 使用的java可执行文件的路径
- name: memory_size
name_local: 进程内存
require: true
essential: true
type: CAPACITY_MB
min_value: 512M
need_restart: true
description_en: the memory size of ocp express server. Please enter a capacity, such as 2G
description_local: OCP express server进程内存大小。请输入带容量带单位的整数,如2G
- name: logging_file_max_size
name_local: 单个日志文件大小
type: STRING
require: false
essential: true
default: 100MB
need_restart: true
description_local: 单个日志文件大小
description_en: When logging_file_name is configured, specify the log file size through this configuration
- name: logging_file_total_size_cap
name_local: 日志总大小
type: STRING
require: true
essential: true
default: 1GB
need_restart: true
description_local: 日志文件总大小
description_en: When logging_file_name is configured, specify the total log file size through this configuration
- name: port
name_local: 端口
require: true
essential: true
type: INT
default: 8180
need_restart: true
description_en: The port of the OCP Express server.
description_local: OCP server使用的端口
- name: jdbc_url
require: false
type: STRING
need_redeploy: true
description_en: The jdbc connection url for ocp meta db
description_local: OCP使用的元数据库的jdbc连接串
- name: jdbc_username
require: false
type: STRING
need_redeploy: true
description_en: The username for ocp meta db
description_local: OCP使用的元数据库的用户名
- name: jdbc_password
require: false
type: STRING
default:
need_redeploy: true
description_en: The password for ocp meta db
description_local: OCP使用的元数据库的密码
- name: admin_passwd
require: true
type: STRING
modify_limit: modify
default:
need_redeploy: true
description_en: The password for the ocp web admin user. The password must be 8 to 32 characters in length, and must contain at least two digits, two uppercase letters, two lowercase letters, and two of the following special characters:~!@#%^&*_-+=|(){}[]:;,.?/
description_local: OCP登录页面的admin账户密码(密码长度8~32位,至少包含2位数字、2位大写字母、2位小写字母和2位特殊字符(~!@#%^&*_-+=|(){}[]:;,.?/))
# bootstrap parameters
- name: cluster_name
require: false
type: STRING
default: obcluster
need_restart: true
description_en: The cluster name of the OceanBase database
description_local: Oceanbase数据库的集群名称
- name: ob_cluster_id
require: false
type: INT
min_value: 1
max_value: 4294901759
need_restart: true
description_en: ID of the cluster
description_local: OceanBase集群ID
- name: root_sys_password
require: false
type: STRING
default:
need_restart: true
description_en: password of observer root user
description_local: sys租户root用户的密码
- name: server_addresses
require: false
type: LIST
need_restart: true
description_en: The server info of the OceanBase cluster
description_local: Oceanbase集群的节点信息
- name: 'session_timeout'
type: 'STRING'
require: false
need_restart: true
description_local: '登陆会话/Session超时的时间,默认是30m,最少60s。如果不加后缀单位,则默认是秒。重启生效。'
description_en: 'Session timeout interval, default is 30m, at least 60s. If the suffix unit is not added, the default is seconds. Restart OCP to take effect.'
- name: 'login_encrypt_enabled'
type: 'STRING'
require: false
need_restart: true
description_local: '登录信息是否开启加密传输,默认开启,重启生效'
description_en: 'Switch to enable encrypted transmission of login information, enabled by default. Restart OCP to take effect.'
- name: 'login_encrypt_public_key'
type: 'STRING'
require: false
need_restart: true
description_local: '加密登录信息的公钥,建议部署后修改此配置,修改后重启生效'
description_en: 'The public key for login encryption, It is recommended to modify this configuration after deployment. Restart OCP to take effect.'
- name: 'login_encrypt_private_key'
type: 'STRING'
require: false
need_restart: true
description_local: '加密登录信息的私钥,建议部署后修改此配置,修改后重启生效'
description_en: 'The private key for encryption. It is recommended to modify this configuration after deployment. Restart OCP to take effect.'
- name: 'enable_basic_auth'
type: 'STRING'
require: false
need_restart: true
description_local: '是否启用Basic Auth登陆模式,通常供程序和SDK等客户端场景使用,默认true。本配置与ocp.iam.auth可同时开启。重启生效。'
description_en: 'Whether to enable Basic Authentication, usually for client programs and SDKs to call server APIs. The default is true. This configuration and ocp.iam.auth can be enabled together. Restart OCP to take effect.'
- name: 'enable_csrf'
type: 'STRING'
require: false
need_restart: true
description_local: '是否启用CSRF跨站点请求伪造安全保护,通常基于网页登陆的方式都推荐要启用,默认true。重启生效。'
description_en: 'Whether to enable CSRF cross-site request forgery security protection. It is recommended to enable it, the default is true. Restart OCP to take effect.'
- name: 'vault_key'
type: 'STRING'
require: false
need_restart: true
description_local: '密码箱加密密钥'
description_en: 'vault secret key'
- name: 'druid_name'
type: 'STRING'
require: false
need_restart: true
description_local: 'metadb的druid连接池名称。重启生效'
description_en: 'metadb druid connection pool name. Restart to take effect'
- name: 'druid_init_size'
type: 'STRING'
require: false
need_restart: true
description_local: '初始化时建立物理连接的个数。重启生效'
description_en: 'The number of physical connections established during initialization. Restart to take effect'
- name: 'druid_min_idle'
type: 'STRING'
require: false
need_restart: true
description_local: '最小连接池数量。重启生效'
description_en: 'Minimum number of connections. Restart to take effect'
- name: 'druid_max_active'
type: 'STRING'
require: false
need_restart: true
description_local: '最大连接池数量。重启生效'
description_en: 'The maximum number of connections. Restart to take effect'
- name: 'druid_test_while_idle'
type: 'STRING'
require: false
need_restart: true
description_local: '建议配置为true,不影响性能,并且保证安全性。申请连接的时候检测。重启生效'
description_en: 'It is recommended to set it to true, which will not affect performance and ensure safety. Detect when applying for connection. Restart to take effect'
- name: 'druid_validation_query'
type: 'STRING'
require: false
need_restart: true
description_local: '用来检测连接是否有效的sql。重启生效'
description_en: 'SQL used to detect whether the connection is valid. Restart to take effect'
- name: 'druid_max_wait'
type: 'STRING'
require: false
need_restart: true
description_local: '获取连接时最大等待时间,单位毫秒。重启生效'
description_en: 'Maximum waiting time when getting a connection, in milliseconds. Restart to take effect'
- name: 'druid_keep_alive'
type: 'STRING'
require: false
need_restart: true
description_local: '连接池中的minIdle数量以内的连接,空闲时间超过minEvictableIdleTimeMillis(缺省值1800秒),则会执行keepAlive操作。重启生效'
description_en: 'For connections within the number of minIdle in the connection pool, if the idle time exceeds minEvictableIdleTimeMillis (the default value is 1800 seconds), the keepAlive operation will be performed. Restart to take effect'
- name: 'logging_pattern_console'
type: 'STRING'
require: false
need_restart: true
description_local: '用于控制台输出的日志格式'
description_en: 'Log format for console output'
- name: 'logging_pattern_file'
type: 'STRING'
require: false
need_restart: true
description_local: '用于文件输出的日志格式'
description_en: 'Log format used for file output'
- name: 'logging_file_clean_when_start'
type: 'STRING'
require: false
need_restart: true
description_local: '启动时删除压缩的日志文件'
description_en: 'Clean the archive log files on startup'
- name: 'logging_file_max_history'
name_local: 日志保留天数
type: INT
require: false
essential: true
need_restart: true
min_value: 1
max_value: 2147483647
description_local: '最多保留的归档日志文件的天数,默认不限制'
description_en: 'When logging.file is configured, set the maximum number of days to keep archived log files. The default value is unlimited'
- name: 'ocp.idempotent.client-token.expire.time'
type: 'STRING'
require: false
need_restart: true
description_local: '幂等请求token的缓存过期时间,默认14d'
description_en: 'Expire time of idempotent client token, the default is 14d'
- name: 'obsdk_sql_query_limit'
type: 'STRING'
require: false
need_restart: true
description_local: '基于 obsdk 的采集查询,SQL 查询行数限制,默认 10000'
description_en: 'SQL query row limit for obsdk-based collection. The default value is 10000'
- name: 'ocp.monitor.host.exporters'
type: 'STRING'
require: false
need_restart: true
description_local: '主机监控exporter'
description_en: 'exporters of ocp host'
- name: 'ocp.monitor.ob.exporters'
type: 'STRING'
require: false
need_restart: true
description_local: 'OB监控exporter'
description_en: 'exporters of ob'
- name: 'monitor_collect_interval'
type: 'STRING'
require: false
need_restart: true
description_local: '秒级别监控采集间隔,默认 1s,支持配置选项是 1s, 5s, 10s, 15s'
description_en: 'The parameter determines the second-level monitoring and collection interval. The supported configuration options are 1s, 5s, 10s, 15s. Default value is 1s'
- name: 'montior_retention_days'
type: 'STRING'
require: false
need_restart: true
description_local: '监控数据保存天数,key 是监控数据的表名,value 是保存的天数,修改后重启生效.'
description_en: 'Retention days for monitor data, key is table name for monitor data, value is the retention days. Restart to take effect.'
- name: 'obsdk_cache_size'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk连接器池容量,取值范围10~200,默认值100'
description_en: 'Obsdk connector holder capacity, value range 10~200, default value 100'
- name: 'obsdk_max_idle'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk空闲连接器的过期时间,单位秒,取值范围300~18000,默认值3600'
description_en: 'The expiration time of the obsdk idle connector, in seconds, the value range is 300~18000, and the default value is 3600'
- name: 'obsdk_cleanup_period'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk过期连接器的清理周期,单位秒,取值范围30~1800,默认值300'
description_en: 'The interval for obsdk to clean up the expired connector, in seconds, the value range is 30~1800, and the default value is 300'
- name: 'obsdk_print_sql'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk中sql打印开关,默认开启'
description_en: 'Sql print switch in obsdk, enabled by default'
- name: 'obsdk_slow_query_threshold'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk中慢查询日志阈值,单位毫秒,默认值 1000'
description_en: 'Slow query log threshold in obsdk, in milliseconds, the default value is 1000'
- name: 'obsdk_init_timeout'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk中连接器初始化超时时间,单位毫秒,默认值 3000'
description_en: 'Timeout of connector initialization in obsdk, in milliseconds, the default value is 5000'
- name: 'obsdk_init_core_size'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk中连接器初始化的线程个数'
description_en: 'The thread count of connector initialization in obsdk, the default value is 16'
- name: 'obsdk_global_timeout'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk中运维命令全局超时时间,单位毫秒,取值范围10000~7200000,默认值 300000'
description_en: 'Global timeout of operation in obsdk, in milliseconds, the value range is 10000~7200000, and the default value is 300000'
- name: 'obsdk_connect_timeout'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk建立Socket连接的超时时间,单位:ms'
description_en: 'The timeout period for obsdk to connect to ob, unit: ms'
- name: 'obsdk_read_timeout'
type: 'STRING'
require: false
need_restart: true
description_local: 'Obsdk的Socket读取数据的超时时间,单位:ms'
description_en: 'Obsdk socket read data timeout time, unit: ms'
\ No newline at end of file
......@@ -14,4 +14,5 @@ pycryptodome==3.10.1
inspect2==0.1.2
six==1.16.0
pyinstaller==3.6
bcrypt==3.1.7
\ No newline at end of file
bcrypt==3.1.7
zstandard==0.14.1
\ No newline at end of file
......@@ -12,4 +12,5 @@ inspect2==0.1.2
six==1.16.0
pyinstaller>=4.3
bcrypt==4.0.0
configparser>=5.2.0
\ No newline at end of file
configparser>=5.2.0
zstandard==0.21.0
\ No newline at end of file
......@@ -26,7 +26,7 @@ MINIMAL_CONFIG = '''
'''
PKG_ESTIMATED_SIZE = defaultdict(lambda:0)
PKG_ESTIMATED_SIZE.update({"oceanbase-ce":314142720, "obproxy-ce":45424640, "obagent": 25124864})
PKG_ESTIMATED_SIZE.update({"oceanbase-ce": 347142720, "oceanbase": 358142928, "obproxy-ce": 45424640, "obproxy": 56428687, "obagent": 76124864, "ocp-express": 95924680})
OCEANBASE_CE = 'oceanbase-ce'
......
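PKG_ESTIMATED_SIZE feeds the web UI's estimated-space hint; as a defaultdict, components without an entry simply contribute 0 bytes. A small sketch of the lookup plus the bytes-to-MB conversion the frontend applies (values copied from the updated table above):

```python
from collections import defaultdict

PKG_ESTIMATED_SIZE = defaultdict(lambda: 0)
PKG_ESTIMATED_SIZE.update({
    "oceanbase-ce": 347142720,
    "obproxy-ce": 45424640,
    "obagent": 76124864,
    "ocp-express": 95924680,
})

def estimated_mb(components):
    """Sum package size estimates and convert bytes to MB, two decimals."""
    total = sum(PKG_ESTIMATED_SIZE[name] for name in components)
    return round(total / 1024 / 1024, 2)

print(estimated_mb(["oceanbase-ce", "obproxy-ce", "obagent"]))  # 446.98
```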
......@@ -12,4 +12,5 @@ Start the dev server,
```bash
$ yarn start
$ yarn dev
```
......@@ -32,5 +32,5 @@ export default defineConfig({
`!function(modules){function __webpack_require__(moduleId){if(installedModules[moduleId])return installedModules[moduleId].exports;var module=installedModules[moduleId]={exports:{},id:moduleId,loaded:!1};return modules[moduleId].call(module.exports,module,module.exports,__webpack_require__),module.loaded=!0,module.exports}var installedModules={};return __webpack_require__.m=modules,__webpack_require__.c=installedModules,__webpack_require__.p="",__webpack_require__(0)}([function(module,exports){"use strict";!function(){if(!window.Tracert){for(var Tracert={_isInit:!0,_readyToRun:[],_guid:function(){return"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g,function(c){var r=16*Math.random()|0,v="x"===c?r:3&r|8;return v.toString(16)})},get:function(key){if("pageId"===key){if(window._tracert_loader_cfg=window._tracert_loader_cfg||{},window._tracert_loader_cfg.pageId)return window._tracert_loader_cfg.pageId;var metaa=document.querySelectorAll("meta[name=data-aspm]"),spma=metaa&&metaa[0].getAttribute("content"),spmb=document.body&&document.body.getAttribute("data-aspm"),pageId=spma&&spmb?spma+"."+spmb+"_"+Tracert._guid()+"_"+Date.now():"-_"+Tracert._guid()+"_"+Date.now();return window._tracert_loader_cfg.pageId=pageId,pageId}return this[key]},call:function(){var argsList,args=arguments;try{argsList=[].slice.call(args,0)}catch(ex){var argsLen=args.length;argsList=[];for(var i=0;i<argsLen;i++)argsList.push(args[i])}Tracert.addToRun(function(){Tracert.call.apply(Tracert,argsList)})},addToRun:function(_fn){var fn=_fn;"function"==typeof fn&&(fn._logTimer=new Date-0,Tracert._readyToRun.push(fn))}},fnlist=["config","logPv","info","err","click","expo","pageName","pageState","time","timeEnd","parse","checkExpo","stringify","report"],i=0;i<fnlist.length;i++){var fn=fnlist[i];!function(fn){Tracert[fn]=function(){var argsList,args=arguments;try{argsList=[].slice.call(args,0)}catch(ex){var argsLen=args.length;argsList=[];for(var i=0;i<argsLen;i++)argsList.push(args[i])}argsList.unshift(fn),Tracert.addToRun(function(){Tracert.call.apply(Tracert,argsList)})}}(fn)}window.Tracert=Tracert}}()}]);`,
'https://gw.alipayobjects.com/as/g/component/tracert/4.4.9/index.js',
],
plugins: ['./config/plugin.ts'],
plugins: ['./config/plugin.ts']
});
......@@ -28,7 +28,7 @@
"antd": "5.0.7",
"copy-to-clipboard": "3.3.3",
"cross-env": "7.0.3",
"i18next": "22.4.15",
"i18next": "^23.2.11",
"lottie-web": "5.10.2",
"moment": "2.29.4",
"number-precision": "1.6.0",
......@@ -51,5 +51,6 @@
},
"gitHooks": {
"pre-commit": "lint-staged"
}
},
"repository": "git@gitlab.alibaba-inc.com:oceanbase/ob-deploy.git"
}
......@@ -285,5 +285,6 @@
"OBD.pages.components.NodeConfig.OnlyOneNodeCanBe": "Only one node can be selected or entered",
"OBD.pages.components.NodeConfig.NodeConfigurationPreviousStep": "Node Configuration-Previous Step",
"OBD.pages.components.NodeConfig.AreYouSureYouWant": "Are you sure to delete the configuration of this Zone?",
"OBD.pages.components.ClusterConfig.PortObproxyOfExporterIs": "Used for Prometheus to pull OBProxy monitoring data."
"OBD.pages.components.ClusterConfig.PortObproxyOfExporterIs": "Used for Prometheus to pull OBProxy monitoring data.",
"OBD.pages.components.InstallConfig.OptionalComponents": "Optional components"
}
......@@ -285,5 +285,6 @@
"OBD.pages.components.NodeConfig.NodeConfigurationPreviousStep": "节点配置-上一步",
"OBD.pages.components.NodeConfig.AreYouSureYouWant": "确定删除该条 Zone 的相关配置吗?",
"OBD.pages.components.ClusterConfig.PortObproxyOfExporterIs": "OBProxy 的 Exporter 端口,用于 Prometheus 拉取 OBProxy 监控数据。",
"OBD.pages.components.InstallProcess.Deploying": "部署中..."
"OBD.pages.components.InstallProcess.Deploying": "部署中...",
"OBD.pages.components.InstallConfig.OptionalComponents": "可选组件"
}
......@@ -6,6 +6,7 @@ import { getErrorInfo } from '@/utils';
export default () => {
const initAppName = 'myoceanbase';
const [selectedConfig, setSelectedConfig] = useState(['obproxy', 'ocp-express', 'obagent']); // selecting ocp-express always implies obagent
const [currentStep, setCurrentStep] = useState<number>(0);
const [configData, setConfigData] = useState<any>({});
const [currentType, setCurrentType] = useState('all');
......@@ -41,13 +42,13 @@ export default () => {
});
return {
selectedConfig,
setSelectedConfig,
initAppName,
currentStep,
setCurrentStep,
configData,
setConfigData,
currentType,
setCurrentType,
checkOK,
setCheckOK,
installStatus,
......
......@@ -31,13 +31,13 @@ interface ComponentsNodeConfig {
export default function CheckInfo() {
const {
configData,
currentType,
setCheckOK,
lowVersion,
setCurrentStep,
handleQuitProgress,
setErrorVisible,
setErrorsList,
selectedConfig,
errorsList,
} = useModel('global');
const { components = {}, auth, home_path } = configData || {};
......@@ -91,8 +91,15 @@ export default function CheckInfo() {
const getComponentsNodeConfigList = () => {
const componentsNodeConfigList: ComponentsNodeConfig[] = [];
// todo: to be optimized
let _selectedConfig = [...selectedConfig];
_selectedConfig.forEach((item, idx) => {
if (item === 'ocp-express') {
_selectedConfig[idx] = 'ocpexpress';
}
});
let currentOnlyComponentsKeys = onlyComponentsKeys.filter(
(key) => key !== 'obagent',
(key) => key !== 'obagent' && _selectedConfig.includes(key),
);
if (lowVersion) {
......@@ -271,39 +278,58 @@ export default function CheckInfo() {
},
];
if (currentType === 'all') {
const content = [
{
label: intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.ObproxyServicePort',
defaultMessage: 'OBProxy 服务端口',
}),
value: obproxy?.listen_port,
},
{
label: intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.PortObproxyExporter',
defaultMessage: 'OBProxy Exporter 端口',
}),
value: obproxy?.prometheus_listen_port,
},
{
label: intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.ObagentMonitoringServicePort',
defaultMessage: 'OBAgent 监控服务端口',
}),
value: obagent?.monagent_http_port,
},
{
label: intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.ObagentManageServicePorts',
defaultMessage: 'OBAgent 管理服务端口',
}),
value: obagent?.mgragent_http_port,
},
];
if (selectedConfig.length) {
let content: any[] = [],
more: any = [];
if (selectedConfig.includes('obproxy')) {
content = content.concat(
{
label: intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.ObproxyServicePort',
defaultMessage: 'OBProxy 服务端口',
}),
value: obproxy?.listen_port,
},
{
label: intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.PortObproxyExporter',
defaultMessage: 'OBProxy Exporter 端口',
}),
value: obproxy?.prometheus_listen_port,
},
);
obproxy?.parameters?.length &&
more.push({
label: componentsConfig['obproxy'].labelName,
parameters: obproxy?.parameters,
});
}
if (!lowVersion) {
if (selectedConfig.includes('obagent')) {
content = content.concat(
{
label: intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.ObagentMonitoringServicePort',
defaultMessage: 'OBAgent 监控服务端口',
}),
value: obagent?.monagent_http_port,
},
{
label: intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.ObagentManageServicePorts',
defaultMessage: 'OBAgent 管理服务端口',
}),
value: obagent?.mgragent_http_port,
},
);
obagent?.parameters?.length &&
more.push({
label: componentsConfig['obagent'].labelName,
parameters: obagent?.parameters,
});
}
// whether `more` has data depends on whether the "more configurations" switch was turned on earlier
if (!lowVersion && selectedConfig.includes('ocp-express')) {
content.push({
label: intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.PortOcpExpress',
......@@ -311,28 +337,13 @@ export default function CheckInfo() {
}),
value: ocpexpress?.port,
});
}
let more: any = [];
if (obproxy?.parameters?.length) {
more = [
{
label: componentsConfig['obproxy'].labelName,
parameters: obproxy?.parameters,
},
{
label: componentsConfig['obagent'].labelName,
parameters: obagent?.parameters,
},
];
if (!lowVersion) {
ocpexpress?.parameters?.length &&
more.push({
label: componentsConfig['ocpexpress'].labelName,
parameters: ocpexpress?.parameters,
});
}
}
clusterConfigInfo.push({
key: 'components',
group: intl.formatMessage({
......@@ -342,6 +353,7 @@ export default function CheckInfo() {
content,
more,
});
console.log('clusterConfigInfo', clusterConfigInfo);
}
return (
......@@ -380,23 +392,6 @@ export default function CheckInfo() {
>
{oceanbase?.appname}
</ProCard>
<ProCard
colSpan={14}
title={intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.DeploymentType',
defaultMessage: '部署类型',
})}
>
{currentType === 'all'
? intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.FullyDeployed',
defaultMessage: '完全部署',
})
: intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.ThinDeployment',
defaultMessage: '精简部署',
})}
</ProCard>
</ProCard>
</Col>
</ProCard>
......@@ -479,7 +474,7 @@ export default function CheckInfo() {
/>
</ProCard>
</ProCard>
{currentType === 'all' ? (
{selectedConfig.length ? (
<ProCard
title={intl.formatMessage({
id: 'OBD.pages.components.CheckInfo.ComponentNodeConfiguration',
......@@ -612,31 +607,31 @@ export default function CheckInfo() {
))}
</ProCard>
</Col>
<Space
direction="vertical"
size="middle"
style={{ marginTop: 16 }}
>
{item?.more?.length
? item?.more.map((moreItem) => (
<ProCard
className={styles.infoSubCard}
style={{ border: '1px solid #e2e8f3' }}
split="vertical"
key={moreItem.label}
>
<Table
className={`${styles.infoCheckTable} ob-table`}
columns={getMoreColumns(moreItem.label)}
dataSource={moreItem?.parameters}
pagination={false}
scroll={{ y: 300 }}
rowKey="key"
/>
</ProCard>
))
: null}
</Space>
{item?.more?.length ? (
<Space
direction="vertical"
size="middle"
style={{ marginTop: 16 }}
>
{item?.more.map((moreItem) => (
<ProCard
className={styles.infoSubCard}
style={{ border: '1px solid #e2e8f3' }}
split="vertical"
key={moreItem.label}
>
<Table
className={`${styles.infoCheckTable} ob-table`}
columns={getMoreColumns(moreItem.label)}
dataSource={moreItem?.parameters}
pagination={false}
scroll={{ y: 300 }}
rowKey="key"
/>
</ProCard>
))}
</Space>
) : null}
</ProCard>
))}
</Row>
......
......@@ -69,10 +69,10 @@ const showConfigKeys = {
export default function ClusterConfig() {
const {
selectedConfig,
setCurrentStep,
configData,
setConfigData,
currentType,
lowVersion,
clusterMore,
setClusterMore,
......@@ -122,19 +122,21 @@ export default function ClusterConfig() {
const setData = (dataSource: FormValues) => {
let newComponents: API.Components = { ...components };
if (currentType === 'all') {
if (selectedConfig.includes('obproxy')) {
newComponents.obproxy = {
...(components.obproxy || {}),
...dataSource.obproxy,
parameters: formatParameters(dataSource.obproxy?.parameters),
};
if (!lowVersion) {
newComponents.ocpexpress = {
...(components.ocpexpress || {}),
...dataSource.ocpexpress,
parameters: formatParameters(dataSource.ocpexpress?.parameters),
};
}
}
if (selectedConfig.includes('ocp-express') && !lowVersion) {
newComponents.ocpexpress = {
...(components.ocpexpress || {}),
...dataSource.ocpexpress,
parameters: formatParameters(dataSource.ocpexpress?.parameters),
};
}
if (selectedConfig.includes('obagent')) {
newComponents.obagent = {
...(components.obagent || {}),
...dataSource.obagent,
......@@ -504,13 +506,13 @@ export default function ClusterConfig() {
};
useEffect(() => {
if (clusterMore && !clusterMoreConfig?.length) {
if (clusterMore) {
getClusterMoreParamsters();
}
if (componentsMore && !componentsMoreConfig?.length) {
if (componentsMore) {
getComponentsMoreParamsters();
}
}, []);
}, [selectedConfig]);
const initPassword = getRandomPassword();
......@@ -764,7 +766,7 @@ export default function ClusterConfig() {
: null}
</ProCard>
</ProCard>
{currentType === 'all' ? (
{selectedConfig.length ? (
<ProCard className={styles.pageCard} split="horizontal">
<ProCard
title={intl.formatMessage({
......@@ -773,119 +775,123 @@ export default function ClusterConfig() {
})}
className="card-padding-bottom-24"
>
<Row>
<Space size="middle">
<ProFormDigit
name={['obproxy', 'listen_port']}
label={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.ObproxyServicePort',
defaultMessage: 'OBProxy 服务端口',
})}
fieldProps={{ style: commonStyle }}
placeholder={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
})}
rules={[
{
required: true,
message: intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
}),
},
{ validator: portValidator },
]}
/>
<ProFormDigit
name={['obproxy', 'prometheus_listen_port']}
label={
<>
{intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PortObproxyExporter',
defaultMessage: 'OBProxy Exporter 端口',
})}
<Tooltip
title={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PortObproxyOfExporterIs',
defaultMessage:
'OBProxy 的 Exporter 端口,用于 Prometheus 拉取 OBProxy 监控数据。',
{selectedConfig.includes('obproxy') && (
<Row>
<Space size="middle">
<ProFormDigit
name={['obproxy', 'listen_port']}
label={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.ObproxyServicePort',
defaultMessage: 'OBProxy 服务端口',
})}
fieldProps={{ style: commonStyle }}
placeholder={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
})}
rules={[
{
required: true,
message: intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
}),
},
{ validator: portValidator },
]}
/>
<ProFormDigit
name={['obproxy', 'prometheus_listen_port']}
label={
<>
{intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PortObproxyExporter',
defaultMessage: 'OBProxy Exporter 端口',
})}
>
<QuestionCircleOutlined className="ml-10" />
</Tooltip>
</>
}
fieldProps={{ style: commonStyle }}
placeholder={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
})}
rules={[
{
required: true,
message: intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
}),
},
{ validator: portValidator },
]}
/>
</Space>
</Row>
<Row>
<Space size="middle">
<ProFormDigit
name={['obagent', 'monagent_http_port']}
label={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.ObagentMonitoringServicePort',
defaultMessage: 'OBAgent 监控服务端口',
})}
fieldProps={{ style: commonStyle }}
placeholder={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
})}
rules={[
{
required: true,
message: intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
}),
},
{ validator: portValidator },
]}
/>
<ProFormDigit
name={['obagent', 'mgragent_http_port']}
label={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.ObagentManageServicePorts',
defaultMessage: 'OBAgent 管理服务端口',
})}
fieldProps={{ style: commonStyle }}
placeholder={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
})}
rules={[
{
required: true,
message: intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
}),
},
{ validator: portValidator },
]}
/>
</Space>
</Row>
{!lowVersion ? (
<Tooltip
title={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PortObproxyOfExporterIs',
defaultMessage:
'OBProxy 的 Exporter 端口,用于 Prometheus 拉取 OBProxy 监控数据。',
})}
>
<QuestionCircleOutlined className="ml-10" />
</Tooltip>
</>
}
fieldProps={{ style: commonStyle }}
placeholder={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
})}
rules={[
{
required: true,
message: intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
}),
},
{ validator: portValidator },
]}
/>
</Space>
</Row>
)}
{selectedConfig.includes('obagent') && (
<Row>
<Space size="middle">
<ProFormDigit
name={['obagent', 'monagent_http_port']}
label={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.ObagentMonitoringServicePort',
defaultMessage: 'OBAgent 监控服务端口',
})}
fieldProps={{ style: commonStyle }}
placeholder={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
})}
rules={[
{
required: true,
message: intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
}),
},
{ validator: portValidator },
]}
/>
<ProFormDigit
name={['obagent', 'mgragent_http_port']}
label={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.ObagentManageServicePorts',
defaultMessage: 'OBAgent 管理服务端口',
})}
fieldProps={{ style: commonStyle }}
placeholder={intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
})}
rules={[
{
required: true,
message: intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.PleaseEnter',
defaultMessage: '请输入',
}),
},
{ validator: portValidator },
]}
/>
</Space>
</Row>
)}
{selectedConfig.includes('ocp-express') && !lowVersion && (
<Row>
<ProFormDigit
name={['ocpexpress', 'port']}
......@@ -910,7 +916,7 @@ export default function ClusterConfig() {
]}
/>
</Row>
) : null}
)}
<div className={styles.moreSwitch}>
{intl.formatMessage({
id: 'OBD.pages.components.ClusterConfig.MoreConfigurations',
......
......@@ -37,13 +37,14 @@ export default function DeleteDeployModal({
setOBVersionValue,
}: Props) {
const {
selectedConfig,
setSelectedConfig,
setConfigData,
setIsDraft,
setClusterMore,
setComponentsMore,
componentsVersionInfo,
setComponentsVersionInfo,
setCurrentType,
getInfoByName,
setLowVersion,
setErrorVisible,
......@@ -95,14 +96,21 @@ export default function DeleteDeployModal({
if (nameSuccess) {
const { config } = nameData;
const { components = {} } = config;
const newSelectedConfig: string[] = [];
Object.keys(components).forEach((key) => {
if (selectedConfig.includes(key) && components[key]) {
newSelectedConfig.push(key);
} else if (key === 'ocpexpress' && components[key]) {
// todo: unify the key as ocpexpress
newSelectedConfig.push('ocp-express');
}
});
setSelectedConfig(newSelectedConfig);
setConfigData(config || {});
setLowVersion(checkLowVersion(components?.oceanbase?.version));
setClusterMore(!!components?.oceanbase?.parameters?.length);
setComponentsMore(!!components?.obproxy?.parameters?.length);
setIsDraft(true);
setCurrentType(
components?.oceanbase && !components?.obproxy ? 'ob' : 'all',
);
const newSelectedVersionInfo = componentsVersionInfo?.[
oceanbaseComponent
......
......@@ -47,36 +47,39 @@ import { getLocale } from 'umi';
import EnStyles from './indexEn.less';
import ZhStyles from './indexZh.less';
type rowDataType = {
key: string;
name: string;
onlyAll: boolean;
desc: string;
doc: string;
};
const locale = getLocale();
const styles = locale === 'zh-CN' ? ZhStyles : EnStyles;
interface FormValues {
type?: string;
}
const appnameReg = /^[a-zA-Z]([a-zA-Z0-9]{0,19})$/;
const oceanBaseInfo = {
group: intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.Database',
defaultMessage: '数据库',
}),
key: 'database',
content: [
{
key: oceanbaseComponent,
name: 'OceanBase Database',
onlyAll: false,
desc: intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.ItIsAFinancialLevel',
defaultMessage:
'是金融级分布式数据库,具备数据强一致、高扩展、高可用、高性价比、稳定可靠等特征。',
}),
doc: 'https://www.oceanbase.com/docs/oceanbase-database-cn',
},
],
};
const componentsGroupInfo = [
{
group: intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.Database',
defaultMessage: '数据库',
}),
key: 'database',
content: [
{
key: oceanbaseComponent,
name: 'OceanBase Database',
onlyAll: false,
desc: intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.ItIsAFinancialLevel',
defaultMessage:
'是金融级分布式数据库,具备数据强一致、高扩展、高可用、高性价比、稳定可靠等特征。',
}),
doc: 'https://www.oceanbase.com/docs/oceanbase-database-cn',
},
],
},
{
group: intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.Proxy',
......@@ -103,7 +106,7 @@ const componentsGroupInfo = [
id: 'OBD.pages.components.InstallConfig.Tools',
defaultMessage: '工具',
}),
key: 'tool',
key: 'ocpexpressTool',
onlyAll: true,
content: [
{
......@@ -117,6 +120,16 @@ const componentsGroupInfo = [
}),
doc: 'https://www.oceanbase.com/docs/common-oceanbase-database-cn-0000000001626262',
},
],
},
{
group: intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.Tools',
defaultMessage: '工具',
}),
key: 'obagentTool',
onlyAll: true,
content: [
{
key: obagentComponent,
name: 'OBAgent',
......@@ -140,8 +153,6 @@ export default function InstallConfig() {
setCurrentStep,
configData,
setConfigData,
currentType,
setCurrentType,
lowVersion,
isFirstTime,
setIsFirstTime,
......@@ -155,6 +166,8 @@ export default function InstallConfig() {
setErrorVisible,
errorsList,
setErrorsList,
selectedConfig,
setSelectedConfig,
} = useModel('global');
const { components, home_path } = configData || {};
const { oceanbase } = components || {};
......@@ -166,7 +179,8 @@ export default function InstallConfig() {
const [hasDraft, setHasDraft] = useState(false);
const [deleteLoadingVisible, setDeleteLoadingVisible] = useState(false);
const [deleteName, setDeleteName] = useState('');
const [installMemory, setInstallMemory] = useState(0);
const [deployMemory, setDeployMemory] = useState(0);
const [componentsMemory, setComponentsMemory] = useState(0);
const [form] = ProForm.useForm();
const [unavailableList, setUnavailableList] = useState<string[]>([]);
const [componentLoading, setComponentLoading] = useState(false);
......@@ -204,21 +218,11 @@ export default function InstallConfig() {
},
});
const judgVersions = (type: string, source: API.ComponentsVersionInfo) => {
if (type === 'all') {
if (Object.keys(source).length !== allComponentsName.length) {
setExistNoVersion(true);
} else {
setExistNoVersion(false);
}
const judgVersions = (source: API.ComponentsVersionInfo) => {
if (Object.keys(source).length !== allComponentsName.length) {
setExistNoVersion(true);
} else {
if (
!(source?.[oceanbaseComponent] && source?.[oceanbaseComponent]?.version)
) {
setExistNoVersion(true);
} else {
setExistNoVersion(false);
}
setExistNoVersion(false);
}
};
......@@ -230,12 +234,16 @@ export default function InstallConfig() {
}: API.OBResponseDataListComponent_) => {
if (success) {
const newComponentsVersionInfo = {};
const oceanbaseVersionsData = data?.items?.filter(item => item.name === oceanbaseComponent);
const oceanbaseVersionsData = data?.items?.filter(
(item) => item.name === oceanbaseComponent,
);
const initOceanbaseVersionInfo = oceanbaseVersionsData[0]?.info[0] || {};
const newSelectedOceanbaseVersionInfo = oceanbaseVersionsData[0]?.info?.filter(
(item) => item.md5 === oceanbase?.package_hash,
)?.[0];
const initOceanbaseVersionInfo =
oceanbaseVersionsData[0]?.info[0] || {};
const newSelectedOceanbaseVersionInfo =
oceanbaseVersionsData[0]?.info?.filter(
(item) => item.md5 === oceanbase?.package_hash,
)?.[0];
const currentOceanbaseVersionInfo =
newSelectedOceanbaseVersionInfo || initOceanbaseVersionInfo;
......@@ -254,12 +262,15 @@ export default function InstallConfig() {
};
} else if (item.name === obproxyComponent) {
let currentObproxyVersionInfo = {};
item?.info?.some(subItem => {
if (subItem?.version_type === currentOceanbaseVersionInfo?.version_type) {
item?.info?.some((subItem) => {
if (
subItem?.version_type ===
currentOceanbaseVersionInfo?.version_type
) {
currentObproxyVersionInfo = subItem;
return true;
}
return false
return false;
});
newComponentsVersionInfo[item.name] = {
...currentObproxyVersionInfo,
......@@ -278,7 +289,7 @@ export default function InstallConfig() {
const noVersion =
Object.keys(newComponentsVersionInfo).length !==
allComponentsName.length;
judgVersions(currentType, newComponentsVersionInfo);
judgVersions(newComponentsVersionInfo);
setComponentsVersionInfo(newComponentsVersionInfo);
if (noVersion) {
......@@ -328,13 +339,6 @@ export default function InstallConfig() {
},
});
const onValuesChange = (values: FormValues) => {
if (values?.type) {
setCurrentType(values?.type);
judgVersions(values?.type, componentsVersionInfo);
}
};
const nameValidator = async (_: any, value: string) => {
if (value) {
if (hasDraft || isDraft) {
......@@ -402,7 +406,7 @@ export default function InstallConfig() {
package_hash: componentsVersionInfo?.[oceanbaseComponent]?.md5,
},
};
if (currentType === 'all') {
if (selectedConfig.includes('obproxy')) {
newComponents.obproxy = {
...(components?.obproxy || {}),
component:
......@@ -413,15 +417,8 @@ export default function InstallConfig() {
release: componentsVersionInfo?.[obproxyComponent]?.release,
package_hash: componentsVersionInfo?.[obproxyComponent]?.md5,
};
if (!lowVersion) {
newComponents.ocpexpress = {
...(components?.ocpexpress || {}),
component: ocpexpressComponent,
version: componentsVersionInfo?.[ocpexpressComponent]?.version,
release: componentsVersionInfo?.[ocpexpressComponent]?.release,
package_hash: componentsVersionInfo?.[ocpexpressComponent]?.md5,
};
}
}
if (selectedConfig.includes('obagent')) {
newComponents.obagent = {
...(components?.obagent || {}),
component: obagentComponent,
......@@ -430,6 +427,16 @@ export default function InstallConfig() {
package_hash: componentsVersionInfo?.[obagentComponent]?.md5,
};
}
if (!lowVersion && selectedConfig.includes('ocp-express')) {
newComponents.ocpexpress = {
...(components?.ocpexpress || {}),
component: ocpexpressComponent,
version: componentsVersionInfo?.[ocpexpressComponent]?.version,
release: componentsVersionInfo?.[ocpexpressComponent]?.release,
package_hash: componentsVersionInfo?.[ocpexpressComponent]?.md5,
};
}
setConfigData({
...configData,
components: newComponents,
......@@ -454,15 +461,15 @@ export default function InstallConfig() {
)[0];
let currentObproxyVersionInfo = {};
componentsVersionInfo?.[
obproxyComponent
]?.dataSource?.some((item: API.service_model_components_ComponentInfo) => {
if (item?.version_type === newSelectedVersionInfo?.version_type) {
currentObproxyVersionInfo = item;
return true;
}
return false
});
componentsVersionInfo?.[obproxyComponent]?.dataSource?.some(
(item: API.service_model_components_ComponentInfo) => {
if (item?.version_type === newSelectedVersionInfo?.version_type) {
currentObproxyVersionInfo = item;
return true;
}
return false;
},
);
setComponentsVersionInfo({
...componentsVersionInfo,
[oceanbaseComponent]: {
......@@ -471,8 +478,8 @@ export default function InstallConfig() {
},
[obproxyComponent]: {
...componentsVersionInfo[obproxyComponent],
...currentObproxyVersionInfo
}
...currentObproxyVersionInfo,
},
});
setLowVersion(
!!(
......@@ -491,45 +498,42 @@ export default function InstallConfig() {
}
};
const getColumns = (group: string) => {
const getColumns = (group: string, supportCheckbox: boolean) => {
const columns: ColumnsType<API.TableComponentInfo> = [
{
title: group,
dataIndex: 'name',
width: 195,
width: supportCheckbox ? 147 : 195,
render: (text, record) => {
if (currentType === 'all') {
return (
<>
{text}
{record.key === ocpexpressComponent && lowVersion ? (
<Tooltip
title={intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.OcpExpressOnlySupportsAnd',
defaultMessage:
'OCP Express 仅支持 4.0 及以上版本 OceanBase Database。',
})}
>
<span className={`${styles.iconContainer} warning-color`}>
<InfoOutlined className={styles.icon} />
</span>
</Tooltip>
) : !componentsVersionInfo[record.key]?.version ? (
<Tooltip
title={intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.UnableToObtainTheInstallation',
defaultMessage: '无法获取安装包,请检查安装程序配置。',
})}
>
<span className={`${styles.iconContainer} error-color`}>
<CloseOutlined className={styles.icon} />
</span>
</Tooltip>
) : null}
</>
);
}
return text;
return (
<>
{text}
{record.key === ocpexpressComponent && lowVersion ? (
<Tooltip
title={intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.OcpExpressOnlySupportsAnd',
defaultMessage:
'OCP Express 仅支持 4.0 及以上版本 OceanBase Database。',
})}
>
<span className={`${styles.iconContainer} warning-color`}>
<InfoOutlined className={styles.icon} />
</span>
</Tooltip>
) : !componentsVersionInfo[record.key]?.version ? (
<Tooltip
title={intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.UnableToObtainTheInstallation',
defaultMessage: '无法获取安装包,请检查安装程序配置。',
})}
>
<span className={`${styles.iconContainer} error-color`}>
<CloseOutlined className={styles.icon} />
</span>
</Tooltip>
) : null}
</>
);
},
},
{
......@@ -619,8 +623,8 @@ export default function InstallConfig() {
render: (text, record) => {
let disabled = false;
if (
(record.key === ocpexpressComponent && lowVersion) ||
(currentType === 'ob' && record.onlyAll)
record.key === ocpexpressComponent &&
lowVersion
) {
disabled = true;
}
......@@ -658,6 +662,36 @@ export default function InstallConfig() {
);
};
/**
* tip: selecting OCP Express auto-selects OBAgent, no prompt needed
* deselecting OBAgent auto-deselects OCP Express, no prompt needed
*/
const handleSelect = (record: rowDataType, selected: boolean) => {
if (!selected) {
let newConfig = [],
target = false;
target =
record.key === 'obagent' && selectedConfig.includes('ocp-express');
for (let val of selectedConfig) {
if (target && val === 'ocp-express') continue;
if (val !== record.key) {
newConfig.push(val);
}
}
setSelectedConfig(newConfig);
} else {
if (record.key === 'ocp-express' && !selectedConfig.includes('obagent')) {
setSelectedConfig([...selectedConfig, record.key, 'obagent']);
} else {
setSelectedConfig([...selectedConfig, record.key]);
}
}
};
const calculateSize = (originSize: number): string => {
return NP.divide(NP.divide(originSize, 1024), 1024).toFixed(2);
};
useEffect(() => {
setComponentLoading(true);
if (isFirstTime) {
......@@ -740,23 +774,18 @@ export default function InstallConfig() {
}, []);
useEffect(() => {
let newInstallMemory = 0;
if (currentType === 'ob') {
newInstallMemory =
componentsVersionInfo?.[oceanbaseComponent]?.estimated_size;
} else {
const keys = Object.keys(componentsVersionInfo);
keys.forEach((key) => {
newInstallMemory =
newInstallMemory + componentsVersionInfo[key]?.estimated_size;
});
}
setInstallMemory(newInstallMemory);
}, [componentsVersionInfo, currentType]);
useEffect(() => {
form.setFieldsValue({ type: currentType });
}, [currentType]);
let newDeployMemory: number =
componentsVersionInfo?.[oceanbaseComponent]?.estimated_size || 0;
let newComponentsMemory: number = 0;
const keys = Object.keys(componentsVersionInfo);
keys.forEach((key) => {
if (key !== oceanbaseComponent && selectedConfig.includes(key)) {
newComponentsMemory += componentsVersionInfo[key]?.estimated_size;
}
});
setDeployMemory(newDeployMemory);
setComponentsMemory(newComponentsMemory);
}, [componentsVersionInfo, selectedConfig]);
useEffect(() => {
form.setFieldsValue({
......@@ -764,8 +793,6 @@ export default function InstallConfig() {
});
}, [configData]);
const size = NP.divide(NP.divide(installMemory, 1024), 1024).toFixed(2);
return (
<Spin spinning={loading}>
<Space className={styles.spaceWidth} direction="vertical" size="middle">
......@@ -782,9 +809,7 @@ export default function InstallConfig() {
submitter={false}
initialValues={{
appname: oceanbase?.appname || initAppName,
type: currentType,
}}
onValuesChange={onValuesChange}
>
<ProFormText
name="appname"
......@@ -821,16 +846,6 @@ export default function InstallConfig() {
]}
/>
<Form.Item
name="type"
label={intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.DeploymentType',
defaultMessage: '部署类型',
})}
className="form-item-no-bottom"
>
<DeployType />
</Form.Item>
</ProForm>
</ProCard>
<ProCard
......@@ -848,7 +863,7 @@ export default function InstallConfig() {
id: 'OBD.pages.components.InstallConfig.EstimatedInstallationRequiresSizeMb',
defaultMessage: '预计安装需要 {size}MB 空间',
},
{ size: size },
{ size: calculateSize(deployMemory) },
)}
</span>
</>
......@@ -934,35 +949,81 @@ export default function InstallConfig() {
)
) : null}
<Spin spinning={componentLoading}>
{componentsGroupInfo.map((info) => (
<ProCard
type="inner"
className={`${styles.componentCard} ${currentType === 'ob' && info.onlyAll
? styles.disabledCard
: ''
}`}
key={info.group}
>
<Table
className={styles.componentTable}
columns={getColumns(info.group)}
rowKey="key"
dataSource={info.content}
pagination={false}
rowClassName={(record) => {
if (
(record.key === ocpexpressComponent && lowVersion) ||
(currentType === 'ob' && record?.onlyAll)
) {
return styles.disabledRow;
}
}}
/>
</ProCard>
))}
<ProCard
type="inner"
className={`${styles.componentCard}`}
key={oceanBaseInfo.group}
>
<Table
className={styles.componentTable}
columns={getColumns(oceanBaseInfo.group, false)}
rowKey="key"
dataSource={oceanBaseInfo.content}
pagination={false}
rowClassName={(record) => {
if (record.key === ocpexpressComponent && lowVersion) {
return styles.disabledRow;
}
}}
/>
</ProCard>
</Spin>
</Space>
</ProCard>
<ProCard
title={
<>
{intl.formatMessage({
id: 'OBD.pages.components.InstallConfig.OptionalComponents',
defaultMessage: '可选组件',
})}
<span className={styles.titleExtra}>
<InfoCircleOutlined />{' '}
{intl.formatMessage(
{
id: 'OBD.pages.components.InstallConfig.EstimatedInstallationRequiresSizeMb',
defaultMessage: '预计部署需要 {size}MB 空间',
},
{ size: caculateSize(componentsMemory) },
)}
</span>
</>
}
className="card-header-padding-top-0 card-padding-bottom-24 card-padding-top-0"
>
{componentsGroupInfo.map((componentInfo) => (
<Space
className={styles.spaceWidth}
direction="vertical"
size="middle"
key={componentInfo.group}
>
<ProCard
type="inner"
className={`${styles.componentCard}`}
key={componentInfo.group}
>
<Table
rowSelection={{
hideSelectAll: true,
selectedRowKeys: selectedConfig,
onSelect: handleSelect,
}}
className={styles.componentTable}
columns={getColumns(componentInfo.group, true)}
rowKey="key"
dataSource={componentInfo.content}
pagination={false}
rowClassName={(record) => {
if (record.key === ocpexpressComponent && lowVersion) {
return styles.disabledRow;
}
}}
/>
</ProCard>
</Space>
))}
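{/* hideSelectAll keeps the header checkbox off, so optional components are (de)selected one row at a time through handleSelect. */}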
</ProCard>
</ProCard>
<footer className={styles.pageFooterContainer}>
<div className={styles.pageFooter}>
......
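The install-size labels in this file are driven by two figures: the OceanBase package estimate (deployMemory) and the sum of the ticked optional components (componentsMemory), both converted to MB by caculateSize. A minimal sketch of that logic, assuming componentsVersionInfo maps component keys to objects carrying estimated_size in bytes and that oceanbaseComponent resolves to the 'oceanbase' key (both are assumptions, not taken from this diff); NP is the number-precision package, matching the NP.divide calls above:

```typescript
// Sketch only: names mirror the diff, but the data shapes and the
// 'oceanbase' key are assumptions.
import NP from 'number-precision';

type VersionInfo = Record<string, { estimated_size?: number }>;

const oceanbaseComponent = 'oceanbase'; // assumed constant value

// Bytes -> MB (two divisions by 1024), formatted to two decimals,
// mirroring caculateSize in the diff.
const toMegabytes = (originSize: number): string =>
  NP.divide(NP.divide(originSize, 1024), 1024).toFixed(2);

// Split the estimate into the OceanBase package itself and the sum of
// the optional components the user has selected.
function estimateSizes(info: VersionInfo, selectedConfig: string[]) {
  const deployMemory = info[oceanbaseComponent]?.estimated_size ?? 0;
  let componentsMemory = 0;
  Object.keys(info).forEach((key) => {
    if (key !== oceanbaseComponent && selectedConfig.includes(key)) {
      componentsMemory += info[key]?.estimated_size ?? 0;
    }
  });
  return { deployMemory, componentsMemory };
}
```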
......@@ -47,10 +47,10 @@ interface FormValues extends API.Components {
export default function NodeConfig() {
const {
selectedConfig,
setCurrentStep,
configData,
setConfigData,
currentType,
lowVersion,
handleQuitProgress,
nameIndex,
......@@ -139,17 +139,19 @@ export default function NodeConfig() {
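// Only components whose keys appear in selectedConfig are merged into the payload;
// deselected components are omitted entirely, and ocp-express is additionally
// skipped on low cluster versions.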
const setData = (dataSource: FormValues) => {
let newComponents: API.Components = {};
if (currentType === 'all') {
if (selectedConfig.includes('obproxy')) {
newComponents.obproxy = {
...(components.obproxy || {}),
...dataSource.obproxy,
};
if (!lowVersion) {
newComponents.ocpexpress = {
...(components.ocpexpress || {}),
...dataSource?.ocpexpress,
};
}
}
if (selectedConfig.includes('ocp-express') && !lowVersion) {
newComponents.ocpexpress = {
...(components.ocpexpress || {}),
...dataSource?.ocpexpress,
};
}
if (selectedConfig.includes('obagent')) {
newComponents.obagent = {
...(components.obagent || {}),
servers: allOBServer,
......@@ -827,7 +829,8 @@ export default function NodeConfig() {
}}
/>
</ProCard>
{currentType === 'all' ? (
{selectedConfig.includes('ocp-express') ||
selectedConfig.includes('obproxy') ? (
<ProCard
className={styles.pageCard}
title={intl.formatMessage({
......@@ -837,7 +840,7 @@ export default function NodeConfig() {
bodyStyle={{ paddingBottom: '0' }}
>
<Space size={16}>
{!lowVersion ? (
{selectedConfig.includes('ocp-express') && !lowVersion ? (
<ProFormSelect
mode="tags"
name={['ocpexpress', 'servers']}
......@@ -884,36 +887,39 @@ export default function NodeConfig() {
options={formatOptions(allOBServer)}
/>
) : null}
<ProFormSelect
mode="tags"
name={['obproxy', 'servers']}
label={intl.formatMessage({
id: 'OBD.pages.components.NodeConfig.ObproxyNodes',
defaultMessage: 'OBProxy 节点',
})}
fieldProps={{ style: { width: 504 }, maxTagCount: 3 }}
placeholder={intl.formatMessage({
id: 'OBD.pages.components.NodeConfig.PleaseSelect',
defaultMessage: '请选择',
})}
rules={[
{
required: true,
message: intl.formatMessage({
id: 'OBD.pages.components.NodeConfig.SelectOrEnterObproxyNodes',
defaultMessage: '请选择或输入 OBProxy 节点',
}),
},
{
validator: (_: any, value: string[]) =>
serversValidator(_, value, 'OBProxy'),
},
]}
options={formatOptions(allOBServer)}
/>
{selectedConfig.includes('obproxy') && (
<ProFormSelect
mode="tags"
name={['obproxy', 'servers']}
label={intl.formatMessage({
id: 'OBD.pages.components.NodeConfig.ObproxyNodes',
defaultMessage: 'OBProxy 节点',
})}
fieldProps={{ style: { width: 504 }, maxTagCount: 3 }}
placeholder={intl.formatMessage({
id: 'OBD.pages.components.NodeConfig.PleaseSelect',
defaultMessage: '请选择',
})}
rules={[
{
required: true,
message: intl.formatMessage({
id: 'OBD.pages.components.NodeConfig.SelectOrEnterObproxyNodes',
defaultMessage: '请选择或输入 OBProxy 节点',
}),
},
{
validator: (_: any, value: string[]) =>
serversValidator(_, value, 'OBProxy'),
},
]}
options={formatOptions(allOBServer)}
/>
)}
</Space>
</ProCard>
) : null}
{/* The field name in the design mockup seems to be wrong */}
<ProCard
className={styles.pageCard}
title={intl.formatMessage({
......
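The setData change above replaces the old `currentType === 'all'` branch with per-component guards keyed on selectedConfig. A condensed sketch of that merge, with FormValues/API.Components reduced to simplified stand-in types (the real types live elsewhere in the repo):

```typescript
// Simplified stand-ins for the real API.Components / FormValues types.
interface ComponentConfig { servers?: string[]; [k: string]: unknown }
interface Components {
  obproxy?: ComponentConfig;
  ocpexpress?: ComponentConfig;
  obagent?: ComponentConfig;
}

// Merge form values over existing config, but only for components the
// user kept selected; ocp-express is also gated on cluster version.
function buildComponents(
  selectedConfig: string[],
  lowVersion: boolean,
  current: Components,
  dataSource: Components,
): Components {
  const next: Components = {};
  if (selectedConfig.includes('obproxy')) {
    next.obproxy = { ...(current.obproxy || {}), ...dataSource.obproxy };
  }
  if (selectedConfig.includes('ocp-express') && !lowVersion) {
    next.ocpexpress = { ...(current.ocpexpress || {}), ...dataSource.ocpexpress };
  }
  if (selectedConfig.includes('obagent')) {
    next.obagent = { ...(current.obagent || {}), ...dataSource.obagent };
  }
  return next;
}
```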
......@@ -149,6 +149,7 @@ export default function IndexPage() {
useEffect(() => {
let token = '';
fetchDeploymentInfo({ task_status: 'INSTALLING' }).then(
({ success, data }: API.OBResponse) => {
if (success && data?.items?.length) {
......
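The IndexPage hunk queries for deployments already in the INSTALLING state so a page reload can resume an in-flight installation. A sketch of that check, where the navigate callback is a hypothetical stand-in for whatever the page actually does with the match:

```typescript
// Sketch of the resume check: fetchDeploymentInfo and the response shape
// follow the diff; navigate is a placeholder, not the page's real API.
async function resumeIfInstalling(
  fetchDeploymentInfo: (q: { task_status: string }) => Promise<{
    success: boolean;
    data?: { items?: { name: string }[] };
  }>,
  navigate: (deploymentName: string) => void,
) {
  const { success, data } = await fetchDeploymentInfo({ task_status: 'INSTALLING' });
  if (success && data?.items?.length) {
    navigate(data.items[0].name); // jump straight to the in-flight deployment
  }
}
```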