Unverified commit d84e56c9 authored by Rongfeng Fu, committed by GitHub

V2.2.0 (#176)

Parent 156416e1
@@ -132,7 +132,3 @@ A: You can use the `obd update` command to update OBD. When you are done with t
 ## Protocol
 
 OBD complies with [GPL-3.0](/LICENSE).
-
-## Sysbench benchmark
-
-- [Run the Sysbench benchmark test in OceanBase Database (Paetica, VLDB 2023)](https://github.com/oceanbase/oceanbase-doc/blob/V4.1.0/en-US/7.reference/3.performance-tuning-guide/6.performance-whitepaper/3.run-the-sysbench-benchmark-test-in-oceanbase-database.md)
@@ -44,6 +44,7 @@ ROOT_IO = IO(1)
 OBD_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), '.obd')
 OBDIAG_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), 'oceanbase-diagnostic-tool')
 COMMAND_ENV.load(os.path.join(OBD_HOME_PATH, '.obd_environ'), ROOT_IO)
+ROOT_IO.default_confirm = COMMAND_ENV.get(ENV.ENV_DEFAULT_CONFIRM, '0') == '1'
 
 class OptionHelpFormatter(IndentedHelpFormatter):
@@ -871,10 +872,11 @@ class ClusterRedeployCommand(ClusterMirrorCommand):
 
     def __init__(self):
         super(ClusterRedeployCommand, self).__init__('redeploy', 'Redeploy a started cluster.')
         self.parser.add_option('-f', '--force-kill', action='store_true', help="Force kill the running observer process in the working directory.")
+        self.parser.add_option('--confirm', action='store_true', help='Confirm to redeploy.')
 
     def _do_command(self, obd):
         if self.cmds:
-            res = obd.redeploy_cluster(self.cmds[0])
+            res = obd.redeploy_cluster(self.cmds[0], need_confirm=not getattr(self.opts, 'confirm', False))
             self.background_telemetry_task(obd)
             return res
         else:
......
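Taken together with the `IO_DEFAULT_CONFIRM` environment setting introduced in this commit, the prompt logic composes as below. A minimal sketch with a hypothetical `should_prompt` helper (OBD itself threads `need_confirm` into `redeploy_cluster` and short-circuits inside `IO.confirm`):

```python
# Hypothetical helper, not OBD code: --confirm skips the prompt by passing
# need_confirm=False; IO_DEFAULT_CONFIRM=1 keeps the prompt but auto-answers it.
def should_prompt(confirm_opt, default_confirm):
    need_confirm = not confirm_opt
    return need_confirm and not default_confirm

assert should_prompt(confirm_opt=True, default_confirm=False) is False
assert should_prompt(confirm_opt=False, default_confirm=True) is False
assert should_prompt(confirm_opt=False, default_confirm=False) is True
```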
@@ -820,15 +820,20 @@ class ClusterConfig(object):
             self._cache_server[server] = self._apply_temp_conf(self._get_unprocessed_server_conf(server))
         return self._cache_server[server]
 
-    def get_original_global_conf(self):
-        return deepcopy(self._original_global_conf)
+    def get_original_global_conf(self, format_conf=False):
+        conf = deepcopy(self._original_global_conf)
+        format_conf and self._apply_temp_conf(conf)
+        return conf
 
-    def get_original_server_conf(self, server):
-        return self._server_conf.get(server)
+    def get_original_server_conf(self, server, format_conf=False):
+        conf = deepcopy(self._server_conf.get(server))
+        format_conf and self._apply_temp_conf(conf)
+        return conf
 
-    def get_original_server_conf_with_global(self, server):
-        config = self.get_original_global_conf()
+    def get_original_server_conf_with_global(self, server, format_conf=False):
+        config = deepcopy(self.get_original_global_conf())
         config.update(self._server_conf.get(server, {}))
+        format_conf and self._apply_temp_conf(config)
         return config
......
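The `format_conf and self._apply_temp_conf(conf)` lines rely on `and` short-circuiting: the call runs only when `format_conf` is truthy, purely for its side effect on the copied dict. A standalone illustration with a stand-in function:

```python
def apply_temp_conf(conf):
    conf['formatted'] = True  # stand-in for ClusterConfig._apply_temp_conf

conf = {'formatted': False}
format_conf = False
format_conf and apply_temp_conf(conf)  # right side never evaluated
assert conf['formatted'] is False

format_conf = True
format_conf and apply_temp_conf(conf)  # evaluated for its side effect
assert conf['formatted'] is True
```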
@@ -42,3 +42,6 @@ TELEMETRY_MODE = "TELEMETRY_MODE"
 
 # telemetry log mode. 0 - disable, 1 - enable.
 TELEMETRY_LOG_MODE = "TELEMETRY_LOG_MODE"
+
+# ROOT IO DEFAULT CONFIRM. 0 - disable, 1 - enable.
+ENV_DEFAULT_CONFIRM = "IO_DEFAULT_CONFIRM"
\ No newline at end of file
@@ -175,6 +175,9 @@ EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK_AVAILABLE = OBDErrorCodeTemplate(4305
 EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK = OBDErrorCodeTemplate(4305, 'There is not enough log disk for ocp meta tenant.')
 EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_MEM = OBDErrorCodeTemplate(4305, 'There is not enough memory for ocp meta tenant')
 EC_OCP_EXPRESS_ADMIN_PASSWD_ERROR = OBDErrorCodeTemplate(4306, '({ip}) ocp-express admin_passwd invalid.(Current :{current})')
+
+# 4350-4399 had been used by ocp
+
 # sql
 EC_SQL_EXECUTE_FAILED = OBDErrorCodeTemplate(5000, "{sql} execute failed")
......
@@ -271,9 +271,9 @@ class RemoteMirrorRepository(MirrorRepository):
             self._load_repo_age()
         if self.enabled:
             repo_age = ConfigUtil.get_value_from_dict(meta_data, 'repo_age', 0, int)
-            if repo_age > self.repo_age or int(time.time()) - 86400 > self.repo_age:
-                self.repo_age = repo_age
-                self.update_mirror()
+            if (repo_age > self.repo_age or int(time.time()) - 86400 > self.repo_age) and self.available:
+                if self.update_mirror():
+                    self.repo_age = repo_age
 
     @property
     def available(self):
......
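The net effect of this change, sketched as a standalone predicate (illustrative only, not OBD code): a refresh now also requires the mirror to be available, and `repo_age` advances only when `update_mirror()` succeeds, so a failed sync is retried on the next pass.

```python
import time

def should_refresh(remote_repo_age, cached_repo_age, available, now=None, ttl=86400):
    now = int(time.time()) if now is None else now
    stale = remote_repo_age > cached_repo_age or now - ttl > cached_repo_age
    return stale and available

assert should_refresh(10, 5, available=True, now=100) is True    # remote metadata is newer
assert should_refresh(10, 5, available=False, now=100) is False  # skipped when unavailable
```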
@@ -32,6 +32,7 @@ from _manager import Manager
 from _rpm import Version
 from ssh import ConcurrentExecutor
 from tool import ConfigUtil, DynamicLoading, YamlLoader, FileUtil
+from _types import *
 
 yaml = YamlLoader()
 
......
@@ -360,225 +361,9 @@ class Null(object):
 
     def __init__(self):
         pass
 
 class ParamPlugin(Plugin):
class ConfigItemType(object):
TYPE_STR = None
NULL = Null()
def __init__(self, s):
try:
self._origin = s
self._value = 0
self.value = self.NULL
self._format()
if self.value == self.NULL:
self.value = self._origin
except:
raise Exception("'%s' is not %s" % (self._origin, self._type_str))
@property
def _type_str(self):
if self.TYPE_STR is None:
self.TYPE_STR = str(self.__class__.__name__).split('.')[-1]
return self.TYPE_STR
def _format(self):
raise NotImplementedError
def __str__(self):
return str(self._origin)
def __hash__(self):
return self._origin.__hash__()
@property
def __cmp_value__(self):
return self._value
def __eq__(self, value):
if value is None:
return False
return self.__cmp_value__ == value.__cmp_value__
def __gt__(self, value):
if value is None:
return True
return self.__cmp_value__ > value.__cmp_value__
def __ge__(self, value):
if value is None:
return True
return self.__eq__(value) or self.__gt__(value)
def __lt__(self, value):
if value is None:
return False
return self.__cmp_value__ < value.__cmp_value__
def __le__(self, value):
if value is None:
return False
return self.__eq__(value) or self.__lt__(value)
class Moment(ConfigItemType):
def _format(self):
if self._origin:
if self._origin.upper() == 'DISABLE':
self._value = 0
else:
r = re.match('^(\d{1,2}):(\d{1,2})$', self._origin)
h, m = r.groups()
h, m = int(h), int(m)
if 0 <= h <= 23 and 0 <= m <= 60:
self._value = h * 60 + m
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Time(ConfigItemType):
UNITS = {
'ns': 0.000000001,
'us': 0.000001,
'ms': 0.001,
's': 1,
'm': 60,
'h': 3600,
'd': 86400
}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['s']
else:
r = re.match('^(\d+)(\w+)$', self._origin.lower())
n, u = r.groups()
unit = self.UNITS.get(u.lower())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Capacity(ConfigItemType):
UNITS = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40, 'P': 1 << 50}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['M']
else:
r = re.match('^(\d+)(\w)B?$', self._origin.upper())
n, u = r.groups()
unit = self.UNITS.get(u.upper())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class StringList(ConfigItemType):
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
self._value = self._origin.split(';')
else:
self._value = []
class Dict(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, dict):
raise Exception("Invalid Value")
self._value = self._origin
else:
self._value = self.value = {}
class List(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, list):
raise Exception("Invalid value: {} is not a list.".format(self._origin))
self._value = self._origin
else:
self._value = self.value = []
class StringOrKvList(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, list):
raise Exception("Invalid value: {} is not a list.".format(self._origin))
for item in self._origin:
if not item:
continue
if not isinstance(item, (str, dict)):
raise Exception("Invalid value: {} should be string or key-value format.".format(item))
if isinstance(item, dict):
if len(item.keys()) != 1:
raise Exception("Invalid value: {} should be single key-value format".format(item))
self._value = self._origin
else:
self._value = self.value = []
class Double(ConfigItemType):
def _format(self):
self.value = self._value = float(self._origin) if self._origin else 0
class Boolean(ConfigItemType):
def _format(self):
if isinstance(self._origin, bool):
self._value = self._origin
else:
_origin = str(self._origin).lower()
if _origin == 'true':
self._value = True
elif _origin == 'false':
self._value = False
elif _origin.isdigit():
self._value = bool(self._origin)
else:
raise Exception('%s is not Boolean' % _origin)
self.value = self._value
class Integer(ConfigItemType):
def _format(self):
if self._origin is None:
self._value = 0
self._origin = 0
else:
_origin = str(self._origin)
try:
self.value = self._value = int(_origin)
except:
raise Exception('%s is not Integer' % _origin)
class String(ConfigItemType):
def _format(self):
self.value = self._value = str(self._origin) if self._origin else ''
    class ConfigItem(object):

        def __init__(
@@ -667,17 +452,18 @@ class ParamPlugin(Plugin):
         if self._src_data is None:
             try:
                 TYPES = {
-                    'DOUBLE': ParamPlugin.Double,
-                    'BOOL': ParamPlugin.Boolean,
-                    'INT': ParamPlugin.Integer,
-                    'STRING': ParamPlugin.String,
-                    'MOMENT': ParamPlugin.Moment,
-                    'TIME': ParamPlugin.Time,
-                    'CAPACITY': ParamPlugin.Capacity,
-                    'STRING_LIST': ParamPlugin.StringList,
-                    'DICT': ParamPlugin.Dict,
-                    'LIST': ParamPlugin.List,
-                    'PARAM_LIST': ParamPlugin.StringOrKvList
+                    'DOUBLE': Double,
+                    'BOOL': Boolean,
+                    'INT': Integer,
+                    'STRING': String,
+                    'MOMENT': Moment,
+                    'TIME': Time,
+                    'CAPACITY': Capacity,
+                    'CAPACITY_MB': CapacityMB,
+                    'STRING_LIST': StringList,
+                    'DICT': Dict,
+                    'LIST': List,
+                    'PARAM_LIST': StringOrKvList
                 }
                 self._src_data = {}
                 with open(self.def_param_yaml_path, 'rb') as f:
......
@@ -688,7 +474,7 @@ class ParamPlugin(Plugin):
                     if param_type in TYPES:
                         param_type = TYPES[param_type]
                     else:
-                        param_type = ParamPlugin.String
+                        param_type = String
                     self._src_data[conf['name']] = ParamPlugin.ConfigItem(
                         name=conf['name'],
......
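With the type classes promoted from `ParamPlugin` inner classes to module level in `_types`, a parameter definition's `type` field resolves as sketched below (assumes `_types` is importable; `resolve` is illustrative, not an OBD helper):

```python
from _types import String, Capacity, CapacityMB

TYPES = {'STRING': String, 'CAPACITY': Capacity, 'CAPACITY_MB': CapacityMB}

def resolve(type_name):
    # Unknown names fall back to String, matching the else branch above.
    return TYPES.get(type_name, String)

assert resolve('CAPACITY_MB') is CapacityMB
assert resolve('NO_SUCH_TYPE') is String
```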
@@ -232,7 +232,7 @@ class ParallerExtractor(object):
                 pool.close()
                 pool = None
             except:
-                self.stdio and getattr(self.stdio, 'exception', print)()
+                self.stdio and getattr(self.stdio, 'exception', print)('')
             finally:
                 pool and pool.close()
         return False
......
@@ -379,6 +379,7 @@ class IO(object):
     ):
         self.level = level
         self.msg_lv = msg_lv
+        self.default_confirm = False
         self._log_path = None
         self._trace_id = None
         self._log_name = 'default'
......
@@ -672,6 +673,8 @@ class IO(object):
 
     def confirm(self, msg):
         msg = '%s [y/n]: ' % msg
         self.print(msg, end='')
+        if self.default_confirm:
+            return True
         if self._input_is_tty:
             while True:
                 try:
......
@@ -748,7 +751,7 @@ class IO(object):
             self._print(MsgLevel.VERBOSE, '%s %s' % (self._verbose_prefix, msg), *args, **kwargs)
 
     if sys.version_info.major == 2:
-        def exception(self, msg, *args, **kwargs):
+        def exception(self, msg='', *args, **kwargs):
             import linecache
             exception_msg = []
             ei = sys.exc_info()
......
@@ -780,7 +783,7 @@ class IO(object):
             msg and self.error(msg)
             print_stack('\n'.join(exception_msg))
     else:
-        def exception(self, msg, *args, **kwargs):
+        def exception(self, msg='', *args, **kwargs):
             ei = sys.exc_info()
             traceback_e = traceback.TracebackException(type(ei[1]), ei[1], ei[2], limit=None)
             pre_stach = traceback.extract_stack()[self.track_limit:-2]
......
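A condensed sketch of the new prompt flow (`MiniIO` is illustrative, not the real `IO` class): with `default_confirm` set, `confirm()` prints the question, answers it itself, and never blocks on stdin.

```python
class MiniIO(object):

    def __init__(self, default_confirm=False):
        self.default_confirm = default_confirm

    def confirm(self, msg):
        print('%s [y/n]: ' % msg, end='')
        if self.default_confirm:
            print('y')  # auto-answer, mirroring IO_DEFAULT_CONFIRM=1
            return True
        return input().strip().lower() in ('y', 'yes')

assert MiniIO(default_confirm=True).confirm('Continue?') is True
```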
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import re
__all__ = ("Moment", "Time", "Capacity", "CapacityMB", "StringList", "Dict", "List", "StringOrKvList", "Double", "Boolean", "Integer", "String")
class Null(object):
def __init__(self):
pass
class ConfigItemType(object):
TYPE_STR = None
NULL = Null()
def __init__(self, s):
try:
self._origin = s
self._value = 0
self.value = self.NULL
self._format()
if self.value == self.NULL:
self.value = self._origin
except:
raise Exception("'%s' is not %s" % (self._origin, self._type_str))
@property
def _type_str(self):
if self.TYPE_STR is None:
self.TYPE_STR = str(self.__class__.__name__).split('.')[-1]
return self.TYPE_STR
def _format(self):
raise NotImplementedError
def __str__(self):
return str(self._origin)
def __hash__(self):
return self._origin.__hash__()
@property
def __cmp_value__(self):
return self._value
def __eq__(self, value):
if value is None:
return False
return self.__cmp_value__ == value.__cmp_value__
def __gt__(self, value):
if value is None:
return True
return self.__cmp_value__ > value.__cmp_value__
def __ge__(self, value):
if value is None:
return True
return self.__eq__(value) or self.__gt__(value)
def __lt__(self, value):
if value is None:
return False
return self.__cmp_value__ < value.__cmp_value__
def __le__(self, value):
if value is None:
return False
return self.__eq__(value) or self.__lt__(value)
class Moment(ConfigItemType):
def _format(self):
if self._origin:
if self._origin.upper() == 'DISABLE':
self._value = 0
else:
r = re.match('^(\d{1,2}):(\d{1,2})$', self._origin)
h, m = r.groups()
h, m = int(h), int(m)
if 0 <= h <= 23 and 0 <= m <= 60:
self._value = h * 60 + m
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Time(ConfigItemType):
UNITS = {
'ns': 0.000000001,
'us': 0.000001,
'ms': 0.001,
's': 1,
'm': 60,
'h': 3600,
'd': 86400
}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['s']
else:
r = re.match('^(\d+)(\w+)$', self._origin.lower())
n, u = r.groups()
unit = self.UNITS.get(u.lower())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Capacity(ConfigItemType):
UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, 'P': 1 << 50}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['M']
else:
r = re.match('^(\d+)(\w)B?$', self._origin.upper())
n, u = r.groups()
unit = self.UNITS.get(u.upper())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class CapacityMB(Capacity):
def _format(self):
super(CapacityMB, self)._format()
if isinstance(self._origin, str) and self._origin.isdigit():
self.value = self._origin + 'M'
if not self._origin:
self.value = '0M'
class StringList(ConfigItemType):
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
self._value = self._origin.split(';')
else:
self._value = []
class Dict(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, dict):
raise Exception("Invalid Value")
self._value = self._origin
else:
self._value = self.value = {}
class List(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, list):
raise Exception("Invalid value: {} is not a list.".format(self._origin))
self._value = self._origin
else:
self._value = self.value = []
class StringOrKvList(ConfigItemType):
def _format(self):
if self._origin:
if not isinstance(self._origin, list):
raise Exception("Invalid value: {} is not a list.".format(self._origin))
for item in self._origin:
if not item:
continue
if not isinstance(item, (str, dict)):
raise Exception("Invalid value: {} should be string or key-value format.".format(item))
if isinstance(item, dict):
if len(item.keys()) != 1:
raise Exception("Invalid value: {} should be single key-value format".format(item))
self._value = self._origin
else:
self._value = self.value = []
class Double(ConfigItemType):
def _format(self):
self.value = self._value = float(self._origin) if self._origin else 0
class Boolean(ConfigItemType):
def _format(self):
if isinstance(self._origin, bool):
self._value = self._origin
else:
_origin = str(self._origin).lower()
if _origin == 'true':
self._value = True
elif _origin == 'false':
self._value = False
elif _origin.isdigit():
self._value = bool(self._origin)
else:
raise Exception('%s is not Boolean' % _origin)
self.value = self._value
class Integer(ConfigItemType):
def _format(self):
if self._origin is None:
self._value = 0
self._origin = 0
else:
_origin = str(self._origin)
try:
self.value = self._value = int(_origin)
except:
raise Exception('%s is not Integer' % _origin)
class String(ConfigItemType):
def _format(self):
self.value = self._value = str(self._origin) if self._origin else ''
\ No newline at end of file
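A quick usage check for the new module, derived from the `_format` implementations above (assumes `_types` is on the import path):

```python
from _types import Capacity, CapacityMB

assert Capacity('2G') == Capacity('2048M')   # compared via parsed byte counts
assert CapacityMB('0').value == '0M'         # bare digits gain an 'M' suffix
assert CapacityMB('30m') == Capacity('30M')  # units parse case-insensitively
```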
@@ -2284,13 +2284,16 @@ class ObdHome(object):
             self._call_stdio('stop_loading', 'succeed')
         return False
 
-    def redeploy_cluster(self, name, search_repo=True):
+    def redeploy_cluster(self, name, search_repo=True, need_confirm=False):
         self._call_stdio('verbose', 'Get Deploy by name')
         deploy = self.deploy_manager.get_deploy_config(name)
         self.set_deploy(deploy)
         if not deploy:
             self._call_stdio('error', 'No such deploy: %s.' % name)
             return False
+
+        if need_confirm and not self._call_stdio('confirm', 'Are you sure to destroy the "%s" cluster and rebuild it?' % name):
+            return False
 
         deploy_info = deploy.deploy_info
         self._call_stdio('verbose', 'Get deploy configuration')
@@ -2630,9 +2633,9 @@ class ObdHome(object):
         route = []
         use_images = []
-        upgrade_route_plugins = self.search_py_script_plugin([current_repository], 'upgrade_route', no_found_act='warn')
-        if current_repository in upgrade_route_plugins:
-            ret = self.call_plugin(upgrade_route_plugins[current_repository], current_repository, current_repository=current_repository, dest_repository=dest_repository)
+        upgrade_route_plugins = self.search_py_script_plugin([dest_repository], 'upgrade_route', no_found_act='warn')
+        if dest_repository in upgrade_route_plugins:
+            ret = self.call_plugin(upgrade_route_plugins[dest_repository], current_repository, current_repository=current_repository, dest_repository=dest_repository)
             route = ret.get_return('route')
             if not route:
                 return False
......
@@ -2742,9 +2745,6 @@ class ObdHome(object):
             if not install_plugins:
                 return False
 
-            if not self.install_repositories_to_servers(deploy_config, upgrade_repositories[1:], install_plugins, ssh_clients, self.options):
-                return False
-
         script_query_timeout = getattr(self.options, 'script_query_timeout', '')
         n = len(upgrade_repositories)
         while upgrade_ctx['index'] < n:
......
@@ -4006,7 +4006,7 @@ class ObdHome(object):
             cluster_config = deploy_config.components[component_name]
             if not cluster_config.servers:
-                self._call_stdio('error', '%s server list is empty' % allow_components[0])
+                self._call_stdio('error', '%s server list is empty' % allow_components)
                 return False
         self._call_stdio('start_loading', 'Get local repositories and plugins')
         # Get the repository
......
@@ -4015,8 +4015,9 @@ class ObdHome(object):
         self._call_stdio('stop_loading', 'succeed')
         target_repository = None
         for repository in repositories:
-            if repository.name == allow_components[0]:
+            if repository.name == component_name:
                 target_repository = repository
+                break
 
         if gather_type in ['gather_plan_monitor']:
             setattr(opts, 'connect_cluster', True)
         obdiag_path = getattr(opts, 'obdiag_dir', None)
......
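Sketch of the corrected lookup (illustrative function, not OBD code): match repositories against the component actually being diagnosed and stop at the first hit, instead of always comparing to `allow_components[0]`.

```python
def find_target_repository(repositories, component_name):
    for repository in repositories:
        if repository.name == component_name:
            return repository  # equivalent to the new `break`
    return None
```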
@@ -181,7 +181,7 @@ grafana:
     - prometheus
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -181,7 +181,7 @@ grafana:
     - prometheus
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -223,7 +223,7 @@ grafana:
     - prometheus
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -28,8 +28,6 @@ oceanbase-ce:
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
     enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
     max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
-    skip_proxy_sys_private_check: true
-    enable_strict_kernel_release: false
     # root_password: # root user password
 # In this example , support multiple ob process in single node, so different process use different ports.
 # If deploy ob cluster in multiple nodes, the port and path setting can be same.
......
@@ -210,7 +210,7 @@ grafana:
     - prometheus
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -10,7 +10,7 @@ grafana:
     - 192.168.1.5
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -77,7 +77,7 @@ grafana:
     - prometheus
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -23,8 +23,10 @@ oceanbase-ce:
     # please set memory limit to a suitable value which is matching resource.
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
-    datafile_size: 20G # Size of the data file.
-    log_disk_size: 15G # The size of disk space used by the clog files.
+    datafile_size: 2G # Size of the data file.
+    datafile_next: 2G # The auto-extend step. Please enter a capacity, such as 2G.
+    datafile_maxsize: 20G # The auto-extend max size. Please enter a capacity, such as 20G.
+    log_disk_size: 13G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
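The three settings cooperate as sketched below (a restatement of the comments above plus the documented OceanBase 4.x data-file auto-extend behavior, which is an assumption here rather than something this diff states): the data file starts small and grows on demand up to a cap, so disk is claimed incrementally instead of all at once.

```python
# Illustrative only; OBD reads these keys from the YAML above, not from Python.
datafile_autoextend = {
    'datafile_size': '2G',      # initial data file size
    'datafile_next': '2G',      # growth step once the file fills up
    'datafile_maxsize': '20G',  # ceiling for automatic extension
}
```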
@@ -23,8 +23,10 @@ oceanbase-ce:
     # please set memory limit to a suitable value which is matching resource.
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
-    datafile_size: 20G # Size of the data file.
-    log_disk_size: 15G # The size of disk space used by the clog files.
+    datafile_size: 2G # Size of the data file.
+    datafile_next: 2G # The auto-extend step. Please enter a capacity, such as 2G.
+    datafile_maxsize: 20G # The auto-extend max size. Please enter a capacity, such as 20G.
+    log_disk_size: 13G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
@@ -20,8 +20,10 @@ oceanbase-ce:
     # please set memory limit to a suitable value which is matching resource.
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
-    datafile_size: 20G # Size of the data file.
-    log_disk_size: 15G # The size of disk space used by the clog files.
+    datafile_size: 2G # Size of the data file.
+    datafile_next: 2G # The auto-extend step. Please enter a capacity, such as 2G.
+    datafile_maxsize: 20G # The auto-extend max size. Please enter a capacity, such as 20G.
+    log_disk_size: 13G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
@@ -27,8 +27,10 @@ oceanbase-ce:
     # please set memory limit to a suitable value which is matching resource.
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
-    datafile_size: 20G # Size of the data file.
-    log_disk_size: 15G # The size of disk space used by the clog files.
+    datafile_size: 2G # Size of the data file.
+    datafile_next: 2G # The auto-extend step. Please enter a capacity, such as 2G.
+    datafile_maxsize: 20G # The auto-extend max size. Please enter a capacity, such as 20G.
+    log_disk_size: 13G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
@@ -27,8 +27,10 @@ oceanbase-ce:
     # please set memory limit to a suitable value which is matching resource.
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
-    datafile_size: 20G # Size of the data file.
-    log_disk_size: 15G # The size of disk space used by the clog files.
+    datafile_size: 2G # Size of the data file.
+    datafile_next: 2G # The auto-extend step. Please enter a capacity, such as 2G.
+    datafile_maxsize: 20G # The auto-extend max size. Please enter a capacity, such as 20G.
+    log_disk_size: 13G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
@@ -22,10 +22,11 @@ test:
     value: 'true'
     optimizer: tenant
   - name: memory_chunk_cache_size
-    value: '0'
-    value_type: STRING
+    value: '0M'
+    value_type: CAPACITY_MB
   - name: syslog_io_bandwidth_limit
     value: 30m
+    value_type: CAPACITY_MB
   - name: enable_async_syslog
     value: 'true'
   - name: large_query_worker_percentage
......
@@ -55,6 +56,7 @@ test:
     value: 1m
   - name: cache_wash_threshold
     value: 10G
+    value_type: CAPACITY_MB
   - name: plan_cache_evict_interval
     value: 30s
   - name: bf_cache_miss_count_threshold
......
@@ -25,10 +25,11 @@ build:
     value: 'true'
     optimizer: tenant
   - name: memory_chunk_cache_size
-    value: '0'
-    value_type: STRING
+    value: '0M'
+    value_type: CAPACITY_MB
   - name: syslog_io_bandwidth_limit
     value: 30m
+    value_type: CAPACITY_MB
   - name: enable_async_syslog
     value: 'true'
   - name: large_query_worker_percentage
......
@@ -58,6 +59,7 @@ build:
     value: 1m
   - name: cache_wash_threshold
     value: 10G
+    value_type: CAPACITY_MB
   - name: plan_cache_evict_interval
     value: 30s
   - name: bf_cache_miss_count_threshold
......
@@ -26,9 +26,11 @@ test:
     value: false
     value_type: BOOL
   - name: memory_chunk_cache_size
-    value: 0
+    value: 0M
+    value_type: CAPACITY_MB
   - name: cache_wash_threshold
     value: 30g
+    value_type: CAPACITY_MB
   - name: ob_enable_batched_multi_statement
     value: true
     optimizer: tenant
......
@@ -47,6 +49,7 @@ test:
     value: 4
   - name: syslog_io_bandwidth_limit
     value: 30m
+    value_type: CAPACITY_MB
   - name: enable_async_syslog
     value: true
   - name: large_query_worker_percentage
......
test:
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 3
optimizer: sleep
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: _enable_newsort
value: 'false'
- name: _enable_adaptive_compaction
value: 'false'
optimizer: tenant
- name: enable_record_trace_log
value: 'false'
\ No newline at end of file
build:
variables:
- name: ob_query_timeout
value: 36000000000
- name: ob_trx_timeout
value: 36000000000
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 5
optimizer: sleep
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: enable_record_trace_log
value: 'false'
- name: _enable_defensive_check
value: false
- name: default_auto_increment_mode
value: 'NOORDER'
optimizer: tenant
- name: _rowsets_enabled
value: false
optimizer: tenant
- name: freeze_trigger_percentage
value: 40
optimizer: tenant
\ No newline at end of file
@@ -22,10 +22,11 @@ test:
     value: 'true'
     optimizer: tenant
   - name: memory_chunk_cache_size
-    value: '0'
-    value_type: STRING
+    value: '0M'
+    value_type: CAPACITY_MB
   - name: syslog_io_bandwidth_limit
     value: 30m
+    value_type: CAPACITY_MB
   - name: enable_async_syslog
     value: 'true'
   - name: large_query_worker_percentage
......
@@ -55,6 +56,7 @@ test:
     value: 1m
   - name: cache_wash_threshold
     value: 10G
+    value_type: CAPACITY_MB
   - name: plan_cache_evict_interval
     value: 30s
   - name: bf_cache_miss_count_threshold
......
@@ -25,10 +25,11 @@ build:
     value: 'true'
     optimizer: tenant
   - name: memory_chunk_cache_size
-    value: '0'
-    value_type: STRING
+    value: '0M'
+    value_type: CAPACITY_MB
   - name: syslog_io_bandwidth_limit
     value: 30m
+    value_type: CAPACITY_MB
   - name: enable_async_syslog
     value: 'true'
   - name: large_query_worker_percentage
......
@@ -58,6 +59,7 @@ build:
     value: 1m
   - name: cache_wash_threshold
     value: 10G
+    value_type: CAPACITY_MB
   - name: plan_cache_evict_interval
     value: 30s
   - name: bf_cache_miss_count_threshold
......
@@ -26,9 +26,10 @@ test:
     value: false
     value_type: BOOL
   - name: memory_chunk_cache_size
-    value: 0
+    value: 0M
   - name: cache_wash_threshold
     value: 30g
+    value_type: CAPACITY_MB
   - name: ob_enable_batched_multi_statement
     value: true
     optimizer: tenant
......
@@ -47,6 +48,7 @@ test:
     value: 4
   - name: syslog_io_bandwidth_limit
     value: 30m
+    value_type: CAPACITY_MB
   - name: enable_async_syslog
     value: true
   - name: large_query_worker_percentage
......
test:
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 3
optimizer: sleep
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: _enable_newsort
value: 'false'
- name: _enable_adaptive_compaction
value: 'false'
optimizer: tenant
- name: enable_record_trace_log
value: 'false'
\ No newline at end of file
build:
variables:
- name: ob_query_timeout
value: 36000000000
- name: ob_trx_timeout
value: 36000000000
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 5
optimizer: sleep
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: enable_record_trace_log
value: 'false'
- name: _enable_defensive_check
value: false
- name: default_auto_increment_mode
value: 'NOORDER'
optimizer: tenant
- name: _rowsets_enabled
value: false
optimizer: tenant
- name: freeze_trigger_percentage
value: 40
optimizer: tenant
\ No newline at end of file
@@ -23,6 +23,7 @@ from __future__ import absolute_import, division, print_function
 import re
 import time
 from copy import deepcopy
+from _types import *
 
 from _stdio import SafeStdio
 
......
@@ -32,179 +33,6 @@ VARIABLES = 'variables'
 SYSTEM_CONFIG = 'system_config'
 
class OptimizeItem(object):
class OptimizeItemType(object):
TYPE_STR = None
def __init__(self, s):
try:
self._origin = s
self._value = 0
self._format()
except:
raise Exception("'%s' is not %s" % (self._origin, self._type_str))
@property
def _type_str(self):
if self.TYPE_STR is None:
self.TYPE_STR = str(self.__class__.__name__).split('.')[-1]
return self.TYPE_STR
def _format(self):
raise NotImplementedError
def __str__(self):
return str(self._origin)
def __repr__(self):
return self.__str__()
def __hash__(self):
return self._origin.__hash__()
@property
def __cmp_value__(self):
return self._value
def __eq__(self, value):
if value is None:
return False
return self.__cmp_value__ == value.__cmp_value__
def __gt__(self, value):
if value is None:
return True
return self.__cmp_value__ > value.__cmp_value__
def __ge__(self, value):
if value is None:
return True
return self.__eq__(value) or self.__gt__(value)
def __lt__(self, value):
if value is None:
return False
return self.__cmp_value__ < value.__cmp_value__
def __le__(self, value):
if value is None:
return False
return self.__eq__(value) or self.__lt__(value)
class Moment(OptimizeItemType):
def _format(self):
if self._origin:
if self._origin.upper() == 'DISABLE':
self._value = 0
else:
r = re.match('^(\d{1,2}):(\d{1,2})$', self._origin)
h, m = r.groups()
h, m = int(h), int(m)
if 0 <= h <= 23 and 0 <= m <= 60:
self._value = h * 60 + m
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Time(OptimizeItemType):
UNITS = {
'ns': 0.000000001,
'us': 0.000001,
'ms': 0.001,
's': 1,
'm': 60,
'h': 3600,
'd': 86400
}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['s']
else:
r = re.match('^(\d+)(\w+)$', self._origin.lower())
n, u = r.groups()
unit = self.UNITS.get(u.lower())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class Capacity(OptimizeItemType):
UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, 'P': 1 << 50}
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
if self._origin.isdigit():
n = self._origin
unit = self.UNITS['M']
else:
r = re.match('^(\d+)(\w)B?$', self._origin.upper())
n, u = r.groups()
unit = self.UNITS.get(u.upper())
if unit:
self._value = int(n) * unit
else:
raise Exception('Invalid Value')
else:
self._value = 0
class StringList(OptimizeItemType):
def _format(self):
if self._origin:
self._origin = str(self._origin).strip()
self._value = self._origin.split(';')
else:
self._value = []
class Double(OptimizeItemType):
def _format(self):
self._value = float(self._origin) if self._origin else 0
class Boolean(OptimizeItemType):
def _format(self):
if isinstance(self._origin, bool):
self._value = self._origin
else:
_origin = str(self._origin).lower()
if _origin == 'true':
self._value = True
elif _origin == 'false':
self._value = False
elif _origin.isdigit():
self._value = bool(self._origin)
else:
raise Exception('%s is not Boolean' % _origin)
class Integer(OptimizeItemType):
def _format(self):
if self._origin is None:
self._value = 0
self._origin = 0
else:
_origin = str(self._origin)
try:
self.value = self._value = int(_origin)
except:
raise Exception('%s is not Integer' % _origin)
class String(OptimizeItemType):
def _format(self):
self._value = str(self._origin) if self._origin else ''
class SqlFile(object):

    def __init__(self, path, entrance, sys=False, **kwargs):
......
@@ -245,14 +73,15 @@ class SqlFile(object):
 
 class Variable(object):
 
     TYPES = {
-        'DOUBLE': OptimizeItem.Double,
-        'BOOL': OptimizeItem.Boolean,
-        'INT': OptimizeItem.Integer,
-        'STRING': OptimizeItem.String,
-        'MOMENT': OptimizeItem.Moment,
-        'TIME': OptimizeItem.Time,
-        'CAPACITY': OptimizeItem.Capacity,
-        'STRING_LIST': OptimizeItem.StringList
+        'DOUBLE': Double,
+        'BOOL': Boolean,
+        'INT': Integer,
+        'STRING': String,
+        'MOMENT': Moment,
+        'TIME': Time,
+        'CAPACITY': Capacity,
+        'CAPACITY_MB': CapacityMB,
+        'STRING_LIST': StringList
     }
 
     def __init__(self, value, entrance, name=None, value_type=None, condition="lambda n, o: n != o",
......
@@ -38,7 +38,6 @@ shell_command_map = {
     "cpu_logical_cores": 'cat /proc/cpuinfo | grep "processor" | wc -l',
     "cpu_model_name": 'cat /proc/cpuinfo | grep name | cut -f2 -d: | uniq',
     "cpu_frequency": 'cat /proc/cpuinfo | grep MHz | cut -f2 -d: | uniq',
-    "cpu_flags": 'cat /proc/cpuinfo | grep flags | cut -f2 -d: | uniq',
     "memory_total": 'cat /proc/meminfo | grep MemTotal | cut -f2 -d: | uniq',
     "memory_free": 'cat /proc/meminfo | grep MemFree | cut -f2 -d: | uniq',
     "memory_avaiable": 'cat /proc/meminfo | grep MemAvailable | cut -f2 -d: | uniq',
......
@@ -121,11 +120,6 @@ class CpuInfo:
     def cpu_frequency(*args, **kwargs):
         return kwargs["bash_result"]
 
-    @staticmethod
-    @shell_command
-    def cpu_flags(*args, **kwargs):
-        return kwargs["bash_result"]
-
 class MemInfo:
     @staticmethod
......
@@ -237,7 +231,6 @@ def telemetry_machine_data(data):
     _hosts['cpu']['logicalCores'] = CpuInfo.cpu_logical_cores()
     _hosts['cpu']['modelName'] = CpuInfo.cpu_model_name()
     _hosts['cpu']['frequency'] = CpuInfo.cpu_frequency()
-    _hosts['cpu']['flags'] = CpuInfo.cpu_flags()
     _hosts['memory']['total'] = MemInfo.memory_total()
     _hosts['memory']['free'] = MemInfo.memory_free()
......
@@ -216,7 +216,7 @@
       "targets": [
         {
           "exemplar": true,
-          "expr": "(sum(rate(ob_sysstat{stat_id=\"40003\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40001\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))\n/\n(sum(rate(ob_sysstat{stat_id=\"40002\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40004\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40008\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40000\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))",
+          "expr": "(sum(rate(ob_sysstat{stat_id=\"40003\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40007\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40001\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))\n/\n(sum(rate(ob_sysstat{stat_id=\"40002\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40004\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40008\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40000\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))",
           "interval": "",
           "legendFormat": "sql latency {{$group}}",
           "refId": "A"
......
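The panel computes average SQL latency as total elapsed time over total request count. The old expression summed `stat_id` 40009 twice and omitted 40007; the fix restores the full set. Structurally (a sketch; the `...` elides the label matchers, and the odd=elapsed/even=count reading is an assumption inferred from the formula's shape):

```python
def term(stat_id):
    # '...' stands for the ob_cluster_name/obzone/svr_ip/tenant_name matchers.
    return 'sum(rate(ob_sysstat{stat_id="%s",...}[$__rate_interval])) by ($group)' % stat_id

numerator = ' + '.join(term(s) for s in ('40001', '40003', '40005', '40007', '40009'))
denominator = ' + '.join(term(s) for s in ('40000', '40002', '40004', '40006', '40008'))
expr = '(%s)\n/\n(%s)' % (numerator, denominator)
```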
...@@ -37,7 +37,7 @@ def call_plugin(plugin, plugin_context, repositories, *args, **kwargs): ...@@ -37,7 +37,7 @@ def call_plugin(plugin, plugin_context, repositories, *args, **kwargs):
stdio, *args, **kwargs) stdio, *args, **kwargs)
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs): def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
cluster_config = plugin_context.cluster_config cluster_config = plugin_context.cluster_config
clients = plugin_context.clients clients = plugin_context.clients
...@@ -58,6 +58,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, ...@@ -58,6 +58,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository) apply_param_plugin(cur_repository)
if not call_plugin(stop_plugin, plugin_context, [cur_repository], *args, **kwargs): if not call_plugin(stop_plugin, plugin_context, [cur_repository], *args, **kwargs):
return return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
apply_param_plugin(dest_repository) apply_param_plugin(dest_repository)
if not call_plugin(start_plugin, plugin_context, [dest_repository], *args, **kwargs): if not call_plugin(start_plugin, plugin_context, [dest_repository], *args, **kwargs):
......
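The same upgrade-flow change recurs across the component plugins in this commit: after the old repository's processes are stopped, the new repository is first installed to every server (`install_repository_to_servers`) before the new parameter definitions are applied and the processes are restarted. A minimal sketch of that ordering, with hypothetical callables standing in for OBD's plugin calls:

```python
# Sketch only; stop_old, install_new, apply_params and start_new are
# hypothetical stand-ins for OBD's stop/install/apply_param/start plugins.
def do_upgrade(stop_old, install_new, apply_params, start_new):
    if not stop_old():        # stop processes running from the old repository
        return False
    install_new()             # push the new repository's files to all servers
    apply_params()            # switch parameter definitions to the new version
    return bool(start_new())  # start from the new repository
```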
...@@ -37,7 +37,7 @@ def call_plugin(plugin, plugin_context, repositories, *args, **kwargs): ...@@ -37,7 +37,7 @@ def call_plugin(plugin, plugin_context, repositories, *args, **kwargs):
stdio, *args, **kwargs) stdio, *args, **kwargs)
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs): def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
def summit_config(): def summit_config():
generate_global_config = generate_configs['global'] generate_global_config = generate_configs['global']
...@@ -71,6 +71,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, ...@@ -71,6 +71,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository) apply_param_plugin(cur_repository)
if not call_plugin(stop_plugin, plugin_context, repositories=[cur_repository], *args, **kwargs): if not call_plugin(stop_plugin, plugin_context, repositories=[cur_repository], *args, **kwargs):
return return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
# clean useless config # clean useless config
clean_files = [ clean_files = [
"conf/config_properties/monagent_basic_auth.yaml", "conf/config_properties/monagent_basic_auth.yaml",
......
...@@ -242,7 +242,7 @@ def start(plugin_context, need_bootstrap=False, *args, **kwargs): ...@@ -242,7 +242,7 @@ def start(plugin_context, need_bootstrap=False, *args, **kwargs):
stdio.start_loading('obproxy program health check') stdio.start_loading('obproxy program health check')
failed = [] failed = []
servers = cluster_config.servers servers = cluster_config.servers
count = 20 count = 300
while servers and count: while servers and count:
count -= 1 count -= 1
tmp_servers = [] tmp_servers = []
......
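Raising `count` from 20 to 300 widens the obproxy health-check window roughly fifteen-fold; each pass re-checks only the servers that have not come up yet. The bounded-retry pattern, reduced to its core (the one-second interval is an assumption; the real plugin's pacing is outside this hunk):

```python
import time

def wait_healthy(servers, is_healthy, max_rounds=300, interval=1):
    """Poll until every server passes is_healthy() or the rounds run out."""
    pending = list(servers)
    while pending and max_rounds:
        max_rounds -= 1
        pending = [s for s in pending if not is_healthy(s)]
        if pending:
            time.sleep(interval)
    return pending  # servers still listed here failed the health check
```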
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs): def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
namespace = plugin_context.namespace namespace = plugin_context.namespace
namespaces = plugin_context.namespaces namespaces = plugin_context.namespaces
deploy_name = plugin_context.deploy_name deploy_name = plugin_context.deploy_name
...@@ -54,7 +54,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, ...@@ -54,7 +54,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository) apply_param_plugin(cur_repository)
if not stop_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs): if not stop_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs):
return return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
apply_param_plugin(dest_repository) apply_param_plugin(dest_repository)
if not start_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, need_bootstrap=True, *args, **kwargs): if not start_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, need_bootstrap=True, *args, **kwargs):
return return
......
...@@ -134,6 +134,7 @@ def connect(plugin_context, target_server=None, *args, **kwargs): ...@@ -134,6 +134,7 @@ def connect(plugin_context, target_server=None, *args, **kwargs):
server_config = cluster_config.get_server_conf(server) server_config = cluster_config.get_server_conf(server)
password = server_config.get('root_password', '') if count % 2 else '' password = server_config.get('root_password', '') if count % 2 else ''
cursor = Cursor(ip=server.ip, port=server_config['mysql_port'], tenant='', password=password if password is not None else '', stdio=stdio) cursor = Cursor(ip=server.ip, port=server_config['mysql_port'], tenant='', password=password if password is not None else '', stdio=stdio)
if cursor.execute('select 1', raise_exception=True):
stdio.stop_loading('succeed') stdio.stop_loading('succeed')
return plugin_context.return_true(connect=cursor.db, cursor=cursor, server=server) return plugin_context.return_true(connect=cursor.db, cursor=cursor, server=server)
except: except:
......
...@@ -96,12 +96,14 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T ...@@ -96,12 +96,14 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
def summit_config(): def summit_config():
generate_global_config = generate_configs['global'] generate_global_config = generate_configs['global']
for key in generate_global_config: for key in generate_global_config:
stdio.verbose('Update global config %s to %s' % (key, generate_global_config[key]))
cluster_config.update_global_conf(key, generate_global_config[key], False) cluster_config.update_global_conf(key, generate_global_config[key], False)
for server in cluster_config.servers: for server in cluster_config.servers:
if server not in generate_configs: if server not in generate_configs:
continue continue
generate_server_config = generate_configs[server] generate_server_config = generate_configs[server]
for key in generate_server_config: for key in generate_server_config:
stdio.verbose('Update server %s config %s to %s' % (server, key, generate_server_config[key]))
cluster_config.update_server_conf(server, key, generate_server_config[key], False) cluster_config.update_server_conf(server, key, generate_server_config[key], False)
clients = plugin_context.clients clients = plugin_context.clients
...@@ -145,7 +147,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T ...@@ -145,7 +147,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
ip = server.ip ip = server.ip
client = clients[server] client = clients[server]
server_config = cluster_config.get_server_conf_with_default(server) server_config = cluster_config.get_server_conf_with_default(server)
user_server_config = cluster_config.get_original_server_conf_with_global(server) user_server_config = cluster_config.get_original_server_conf_with_global(server, format_conf=True)
if user_server_config.get('devname') is None: if user_server_config.get('devname') is None:
if client.is_localhost(): if client.is_localhost():
......
...@@ -148,7 +148,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor ...@@ -148,7 +148,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
if server_memory_config[server]['system_memory']: if server_memory_config[server]['system_memory']:
memory_limit = server_memory_config[server]['num'] memory_limit = server_memory_config[server]['num']
if not memory_limit: if not memory_limit:
memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total'] memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total'] / 100
factor = 0.7 factor = 0.7
suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor) suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
......
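The fix above adds the missing division by 100: `percentage` is stored as a whole number (80 means 80%), so the old code computed a memory limit two orders of magnitude too large. Worked through with assumed values:

```python
total = 32 * (1 << 30)                    # 32 GiB of host memory, in bytes
percentage = 80                           # "80" means 80%, not 0.8
memory_limit = percentage * total / 100   # ~25.6 GiB -- the corrected value
buggy_limit = percentage * total          # ~2.5 TiB  -- the pre-fix value
```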
...@@ -158,15 +158,15 @@ class ObVersionGraph(object): ...@@ -158,15 +158,15 @@ class ObVersionGraph(object):
res.insert(0, start_node) res.insert(0, start_node)
if res and res[-1].deprecated: if res and res[-1].deprecated:
raise Exception('upgrade destination version:{}{} is deprecated, not support upgrade.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else '')) raise Exception('upgrade destination version:{}{} is deprecated, not support upgrade.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else ''))
return format_route(res) return format_route(res, current_repository)
def format_route(routes): def format_route(routes, repository):
route_res = [] route_res = []
for node in routes: for node in routes:
require_from_binary = getattr(node, 'require_from_binary', False) require_from_binary = getattr(node, 'require_from_binary', False)
if node.when_come_from: if getattr(node, 'when_come_from', False):
require_from_binary = require_from_binary and routes[0].version in node.when_come_from require_from_binary = require_from_binary and (repository.version in node.when_come_from or '%s-%s' % (repository.version, repository.release.split('.')[0]) in node.when_come_from)
route_res.append({ route_res.append({
'version': node.version, 'version': node.version,
'release': None if node.release == VersionNode.RELEASE_NULL else node.release, 'release': None if node.release == VersionNode.RELEASE_NULL else node.release,
...@@ -180,17 +180,13 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, ** ...@@ -180,17 +180,13 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, **
stdio = plugin_context.stdio stdio = plugin_context.stdio
repository_dir = dest_repository.repository_dir repository_dir = dest_repository.repository_dir
if dest_repository.version > Version("4.1.0.0"):
stdio.error('upgrade observer to version {} is not support, please upgrade obd first.'.format(dest_repository.version))
return
if current_repository.version == dest_repository.version: if current_repository.version == dest_repository.version:
return plugin_context.return_true(route=format_route([current_repository, dest_repository])) return plugin_context.return_true(route=format_route([current_repository, dest_repository], current_repository))
upgrade_dep_name = 'etc/oceanbase_upgrade_dep.yml' upgrade_dep_name = 'etc/oceanbase_upgrade_dep.yml'
upgrade_dep_path = os.path.join(repository_dir, upgrade_dep_name) upgrade_dep_path = os.path.join(repository_dir, upgrade_dep_name)
if not os.path.isfile(upgrade_dep_path): if not os.path.isfile(upgrade_dep_path):
stdio.error('%s No such file: %s' % (dest_repository, upgrade_dep_name)) stdio.error('%s No such file: %s. \n No upgrade route available' % (dest_repository, upgrade_dep_name))
return return
version_dep = {} version_dep = {}
......
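`format_route` now checks the installed repository's version, and its `version-release` form (major release part only), against a node's `when_come_from` list, instead of the first node on the route. A sketch of the new predicate, assuming `when_come_from` holds strings shaped like `"3.2.3.3"` or `"3.2.3.3-100"`:

```python
def needs_binary(node_when_come_from, require_from_binary, version, release):
    """Whether this route node still requires the upgrade binary, given the
    currently installed repository's (version, release)."""
    if not node_when_come_from:
        return require_from_binary
    major_release = release.split('.')[0]             # e.g. '100.el7' -> '100'
    candidates = (version, '%s-%s' % (version, major_release))
    return require_from_binary and any(
        c in node_when_come_from for c in candidates)
```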
...@@ -119,9 +119,10 @@ def bootstrap(plugin_context, cursor, *args, **kwargs): ...@@ -119,9 +119,10 @@ def bootstrap(plugin_context, cursor, *args, **kwargs):
has_ocp = True has_ocp = True
if has_ocp: if has_ocp:
global_conf_with_default = deepcopy(cluster_config.get_global_conf_with_default()) global_conf_with_default = deepcopy(cluster_config.get_global_conf_with_default())
original_global_conf = cluster_config.get_original_global_conf()
ocp_meta_tenant_prefix = 'ocp_meta_tenant_' ocp_meta_tenant_prefix = 'ocp_meta_tenant_'
for key in global_conf_with_default: for key in global_conf_with_default:
if key.startswith(ocp_meta_tenant_prefix): if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None):
global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key] global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key]
tenant_info = global_conf_with_default["ocp_meta_tenant"] tenant_info = global_conf_with_default["ocp_meta_tenant"]
tenant_info["variables"] = "ob_tcp_invited_nodes='%'" tenant_info["variables"] = "ob_tcp_invited_nodes='%'"
......
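`bootstrap` (and the matching `start_check` hunk later in this commit) now folds an `ocp_meta_tenant_*` key into the nested `ocp_meta_tenant` dict only when the user actually set it, i.e. when it appears in the original configuration rather than only among the defaults. A toy version of the folding:

```python
def fold_meta_tenant_keys(conf_with_default, original_conf,
                          prefix='ocp_meta_tenant_'):
    """Copy user-set ocp_meta_tenant_* keys into the nested tenant dict."""
    tenant = conf_with_default.setdefault('ocp_meta_tenant', {})
    for key in list(conf_with_default):
        if key.startswith(prefix) and original_conf.get(key):
            tenant[key[len(prefix):]] = conf_with_default[key]
    return tenant
```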
...@@ -104,12 +104,14 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T ...@@ -104,12 +104,14 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
def summit_config(): def summit_config():
generate_global_config = generate_configs['global'] generate_global_config = generate_configs['global']
for key in generate_global_config: for key in generate_global_config:
stdio.verbose('Update global config %s to %s' % (key, generate_global_config[key]))
cluster_config.update_global_conf(key, generate_global_config[key], False) cluster_config.update_global_conf(key, generate_global_config[key], False)
for server in cluster_config.servers: for server in cluster_config.servers:
if server not in generate_configs: if server not in generate_configs:
continue continue
generate_server_config = generate_configs[server] generate_server_config = generate_configs[server]
for key in generate_server_config: for key in generate_server_config:
stdio.verbose('Update server %s config %s to %s' % (server, key, generate_server_config[key]))
cluster_config.update_server_conf(server, key, generate_server_config[key], False) cluster_config.update_server_conf(server, key, generate_server_config[key], False)
clients = plugin_context.clients clients = plugin_context.clients
...@@ -147,7 +149,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T ...@@ -147,7 +149,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
ip = server.ip ip = server.ip
client = clients[server] client = clients[server]
server_config = cluster_config.get_server_conf_with_default(server) server_config = cluster_config.get_server_conf_with_default(server)
user_server_config = cluster_config.get_original_server_conf_with_global(server) user_server_config = cluster_config.get_original_server_conf_with_global(server, format_conf=True)
if user_server_config.get('devname') is None: if user_server_config.get('devname') is None:
if client.is_localhost(): if client.is_localhost():
...@@ -308,7 +310,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T ...@@ -308,7 +310,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
if not datafile_size: if not datafile_size:
datafile_disk_percentage = int(user_server_config.get('datafile_disk_percentage', 0)) datafile_disk_percentage = int(user_server_config.get('datafile_disk_percentage', 0))
if datafile_disk_percentage: if datafile_disk_percentage:
datafile_size = data_dir_mount['total'] * datafile_disk_percentage / 100 datafile_size = data_dir_disk['total'] * datafile_disk_percentage / 100
elif generate_config_mini: elif generate_config_mini:
datafile_size = MINI_DATA_FILE_SIZE datafile_size = MINI_DATA_FILE_SIZE
update_server_conf(server, 'datafile_size', format_size(datafile_size, 0)) update_server_conf(server, 'datafile_size', format_size(datafile_size, 0))
......
...@@ -256,7 +256,7 @@ ...@@ -256,7 +256,7 @@
name_local: 数据文件大小 name_local: 数据文件大小
require: false require: false
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
default: 0 default: 0
min_value: 0M min_value: 0M
max_value: NULL max_value: NULL
...@@ -277,7 +277,7 @@ ...@@ -277,7 +277,7 @@
name_local: Redo 日志大小 name_local: Redo 日志大小
require: false require: false
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
default: 0 default: 0
min_value: 0M min_value: 0M
max_value: NULL max_value: NULL
...@@ -295,7 +295,7 @@ ...@@ -295,7 +295,7 @@
description_local: 合并时候数据列统计信息的采样率 description_local: 合并时候数据列统计信息的采样率
- name: cache_wash_threshold - name: cache_wash_threshold
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 4GB default: 4GB
min_value: 0B min_value: 0B
max_value: NULL max_value: NULL
...@@ -385,7 +385,7 @@ ...@@ -385,7 +385,7 @@
description_local: 系统可以使用的最小CPU配额,将会预留 description_local: 系统可以使用的最小CPU配额,将会预留
- name: memory_reserved - name: memory_reserved
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 500M default: 500M
min_value: 10M min_value: 10M
max_value: NULL max_value: NULL
...@@ -475,7 +475,7 @@ ...@@ -475,7 +475,7 @@
description_local: 升级模式开关。在升级模式中,会暂停部分系统后台功能。 description_local: 升级模式开关。在升级模式中,会暂停部分系统后台功能。
- name: multiblock_read_size - name: multiblock_read_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 128K default: 128K
min_value: 0K min_value: 0K
max_value: 2M max_value: 2M
...@@ -495,7 +495,7 @@ ...@@ -495,7 +495,7 @@
description_local: 因磁盘满等原因导致某个节点数据迁入失败时,暂停迁入时长 description_local: 因磁盘满等原因导致某个节点数据迁入失败时,暂停迁入时长
- name: tablet_size - name: tablet_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 128M default: 128M
min_value: NULL min_value: NULL
max_value: NULL max_value: NULL
...@@ -594,7 +594,7 @@ ...@@ -594,7 +594,7 @@
description_local: 数据块缓存在缓存系统中的优先级 description_local: 数据块缓存在缓存系统中的优先级
- name: syslog_io_bandwidth_limit - name: syslog_io_bandwidth_limit
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 30MB default: 30MB
min_value: NULL min_value: NULL
max_value: NULL max_value: NULL
...@@ -656,7 +656,7 @@ ...@@ -656,7 +656,7 @@
description_local: 系统日志自动回收复用时,最多保留多少个。值0表示不自动清理。 description_local: 系统日志自动回收复用时,最多保留多少个。值0表示不自动清理。
- name: px_task_size - name: px_task_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 2M default: 2M
min_value: 2M min_value: 2M
max_value: NULL max_value: NULL
...@@ -1017,7 +1017,7 @@ ...@@ -1017,7 +1017,7 @@
description_local: 控制租户CPU调度中每次预留多少比例的空闲token数给租户 description_local: 控制租户CPU调度中每次预留多少比例的空闲token数给租户
- name: stack_size - name: stack_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 1M default: 1M
min_value: 512K min_value: 512K
max_value: 20M max_value: 20M
...@@ -1039,7 +1039,7 @@ ...@@ -1039,7 +1039,7 @@
name_local: 最大运行内存 name_local: 最大运行内存
require: false require: false
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
default: 0 default: 0
min_value: NULL min_value: NULL
max_value: NULL max_value: NULL
...@@ -1051,7 +1051,7 @@ ...@@ -1051,7 +1051,7 @@
- name: system_memory - name: system_memory
name_local: 集群系统内存 name_local: 集群系统内存
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
default: 30G default: 30G
min_value: 0M min_value: 0M
max_value: NULL max_value: NULL
...@@ -1180,7 +1180,7 @@ ...@@ -1180,7 +1180,7 @@
description_local: OB内置本地磁盘RAID特性。暂勿使用 description_local: OB内置本地磁盘RAID特性。暂勿使用
- name: rootservice_memory_limit - name: rootservice_memory_limit
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 2G default: 2G
min_value: 2G min_value: 2G
max_value: NULL max_value: NULL
...@@ -1190,7 +1190,7 @@ ...@@ -1190,7 +1190,7 @@
description_local: RootService最大内存限制 description_local: RootService最大内存限制
- name: plan_cache_low_watermark - name: plan_cache_low_watermark
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 1500M default: 1500M
min_value: NULL min_value: NULL
max_value: NULL max_value: NULL
...@@ -1252,7 +1252,7 @@ ...@@ -1252,7 +1252,7 @@
description_local: 控制内存大页的行为,"true"表示在操作系统开启内存大页并且有空闲大页时,数据库总是申请内存大页,否则申请普通内存页, "false"表示数据库不使用大页, "only"表示数据库总是分配大页 description_local: 控制内存大页的行为,"true"表示在操作系统开启内存大页并且有空闲大页时,数据库总是申请内存大页,否则申请普通内存页, "false"表示数据库不使用大页, "only"表示数据库总是分配大页
- name: dtl_buffer_size - name: dtl_buffer_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 64K default: 64K
min_value: 4K min_value: 4K
max_value: 2M max_value: 2M
...@@ -1522,7 +1522,7 @@ ...@@ -1522,7 +1522,7 @@
description_local: MySQL模式下,建表时使用的默认压缩算法 description_local: MySQL模式下,建表时使用的默认压缩算法
- name: memory_chunk_cache_size - name: memory_chunk_cache_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 0M default: 0M
min_value: 0M min_value: 0M
max_value: NULL max_value: NULL
...@@ -1699,7 +1699,7 @@ ...@@ -1699,7 +1699,7 @@
description_local: 系统内部执行 schema 多版本记录回收任务的时间间隔。 description_local: 系统内部执行 schema 多版本记录回收任务的时间间隔。
- name: backup_data_file_size - name: backup_data_file_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 4G default: 4G
min_value: 512M min_value: 512M
max_value: 4G max_value: 4G
...@@ -1863,7 +1863,7 @@ ...@@ -1863,7 +1863,7 @@
name_local: OCP express元数据库租户内存 name_local: OCP express元数据库租户内存
essential: true essential: true
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 2G default: 2G
need_redeploy: true need_redeploy: true
description_en: The tenant memory size for ocp meta db description_en: The tenant memory size for ocp meta db
...@@ -1872,7 +1872,7 @@ ...@@ -1872,7 +1872,7 @@
name_local: OCP express元数据库租户日志磁盘大小 name_local: OCP express元数据库租户日志磁盘大小
essential: true essential: true
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 6656M default: 6656M
need_redeploy: true need_redeploy: true
description_en: The tenant log disk size for ocp meta db description_en: The tenant log disk size for ocp meta db
......
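This long schema hunk retypes capacity-valued parameters from `CAPACITY` to `CAPACITY_MB`, i.e. values normalized to megabytes. OBD's real parser lives in its parameter type system, not in this diff; a hedged sketch of what such a conversion might look like, assuming a bare number is already in MB:

```python
import re

# MB per unit (assumed semantics for this sketch)
UNITS = {'B': 1.0 / (1 << 20), 'K': 1 / 1024.0, 'M': 1, 'G': 1024, 'T': 1 << 20}

def parse_capacity_mb(value):
    """Parse '4G', '512M', '30MB', '0', ... into megabytes."""
    m = re.match(r'^\s*(\d+(?:\.\d+)?)\s*([BKMGT])?B?\s*$', str(value), re.I)
    if not m:
        raise ValueError('invalid capacity: %r' % (value,))
    num = float(m.group(1))
    unit = (m.group(2) or 'M').upper()
    return num * UNITS[unit]
```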
...@@ -176,7 +176,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor ...@@ -176,7 +176,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
if server_memory_config[server]['system_memory']: if server_memory_config[server]['system_memory']:
memory_limit = server_memory_config[server]['num'] memory_limit = server_memory_config[server]['num']
if not memory_limit: if not memory_limit:
server_memory_config[server]['num'] = memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total'] server_memory_config[server]['num'] = memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total'] / 100
factor = 0.75 factor = 0.75
suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor) suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {}) suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {})
...@@ -586,9 +586,10 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor ...@@ -586,9 +586,10 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
has_ocp = True has_ocp = True
if has_ocp and need_bootstrap: if has_ocp and need_bootstrap:
global_conf_with_default = copy.deepcopy(cluster_config.get_global_conf_with_default()) global_conf_with_default = copy.deepcopy(cluster_config.get_global_conf_with_default())
original_global_conf = cluster_config.get_original_global_conf()
ocp_meta_tenant_prefix = 'ocp_meta_tenant_' ocp_meta_tenant_prefix = 'ocp_meta_tenant_'
for key in global_conf_with_default: for key in global_conf_with_default:
if key.startswith(ocp_meta_tenant_prefix): if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None):
global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key] global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key]
meta_db_memory_size = parse_size(global_conf_with_default['ocp_meta_tenant'].get('memory_size')) meta_db_memory_size = parse_size(global_conf_with_default['ocp_meta_tenant'].get('memory_size'))
servers_sys_memory = {} servers_sys_memory = {}
......
...@@ -390,20 +390,36 @@ class Upgrader(object): ...@@ -390,20 +390,36 @@ class Upgrader(object):
time.sleep(3) time.sleep(3)
# major freeze # major freeze
# 1. check merge status # 1. wait all tenant global_broadcast_scn = last_scn, record tenant_id, global_broadcast_scn
pre_global_broadcast_scn = 0 pre_tenant_scn_dict = {}
while True: tenant_ids = []
merge_status = self.execute_sql("select max(global_broadcast_scn) as global_broadcast_scn, max(global_broadcast_scn > last_scn) as is_merging from CDB_OB_MAJOR_COMPACTION") for tenant_info in self.execute_sql("select tenant_id from CDB_OB_MAJOR_COMPACTION", one=False):
if merge_status['is_merging'] == 0: tenant_ids.append(tenant_info['tenant_id'])
pre_global_broadcast_scn = merge_status['global_broadcast_scn'] while tenant_ids:
break pre_tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn, last_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
time.sleep(3) tenant_ids = []
for pre_tenant_scn in pre_tenant_scn_list:
if pre_tenant_scn['global_broadcast_scn'] > pre_tenant_scn['last_scn']:
tenant_ids.append(pre_tenant_scn['tenant_id'])
continue
pre_tenant_scn_dict[pre_tenant_scn['tenant_id']] = pre_tenant_scn['global_broadcast_scn']
time.sleep(1)
# 2. begin merge # 2. begin merge
self.execute_sql("alter system major freeze tenant = all", error=False) self.execute_sql("alter system major freeze tenant = all", error=False)
# 3. wait merge start # 3. wait merge start
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn <= %s", [pre_global_broadcast_scn]): tenant_ids = pre_tenant_scn_dict.keys()
while tenant_ids:
tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
tenant_ids = []
for tenant_scn in tenant_scn_list:
if pre_tenant_scn_dict[tenant_scn['tenant_id']] >= tenant_scn['global_broadcast_scn']:
tenant_ids.append(tenant_scn['tenant_id'])
continue
time.sleep(3) time.sleep(3)
# 4.wait merge finsh
# 4. wait merge finish
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn > last_scn"): while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn > last_scn"):
time.sleep(3) time.sleep(3)
......
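The rewritten major-freeze step replaces the single `MAX()`-based check with per-tenant tracking: wait until every tenant's `global_broadcast_scn` has caught up with `last_scn` and record that SCN per tenant, issue the freeze, wait until each tenant's broadcast SCN moves past the recorded value, then wait for the merge to finish. Condensed, with hypothetical `query`/`execute` helpers in place of `self.execute_sql`:

```python
import time

def major_freeze(query, execute):
    # 1. wait until no tenant is mid-merge; record each tenant's broadcast SCN
    pre_scn = {}
    pending = [r['tenant_id'] for r in query(
        "select tenant_id from CDB_OB_MAJOR_COMPACTION")]
    while pending:
        rows = query("select tenant_id, global_broadcast_scn, last_scn"
                     " from CDB_OB_MAJOR_COMPACTION where tenant_id in (%s)"
                     % ",".join(str(t) for t in pending))
        pending = []
        for r in rows:
            if r['global_broadcast_scn'] > r['last_scn']:
                pending.append(r['tenant_id'])        # still merging, retry
            else:
                pre_scn[r['tenant_id']] = r['global_broadcast_scn']
        time.sleep(1)
    # 2. start the merge
    execute("alter system major freeze tenant = all")
    # 3. wait until every tenant's broadcast SCN advances past the recorded one
    pending = list(pre_scn)
    while pending:
        rows = query("select tenant_id, global_broadcast_scn"
                     " from CDB_OB_MAJOR_COMPACTION where tenant_id in (%s)"
                     % ",".join(str(t) for t in pending))
        pending = [r['tenant_id'] for r in rows
                   if pre_scn[r['tenant_id']] >= r['global_broadcast_scn']]
        time.sleep(3)
    # 4. wait for the merge to finish everywhere
    while query("select * from CDB_OB_MAJOR_COMPACTION"
                " where global_broadcast_scn > last_scn"):
        time.sleep(3)
```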
...@@ -158,15 +158,15 @@ class ObVersionGraph(object): ...@@ -158,15 +158,15 @@ class ObVersionGraph(object):
res.insert(0, start_node) res.insert(0, start_node)
if len(res) > 0 and res[-1].deprecated: if len(res) > 0 and res[-1].deprecated:
raise Exception('upgrade destination version:{}{} is deprecated, not support upgrade.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else '')) raise Exception('upgrade destination version:{}{} is deprecated, not support upgrade.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else ''))
return format_route(res) return format_route(res, current_repository)
def format_route(routes): def format_route(routes, repository):
route_res = [] route_res = []
for i, node in enumerate(routes): for i, node in enumerate(routes):
require_from_binary = getattr(node, 'require_from_binary', False) require_from_binary = getattr(node, 'require_from_binary', False)
if getattr(node, 'when_come_from', False): if getattr(node, 'when_come_from', False):
require_from_binary = require_from_binary and routes[0].version in node.when_come_from require_from_binary = require_from_binary and (repository.version in node.when_come_from or '%s-%s' % (repository.version, repository.release.split('.')[0]) in node.when_come_from)
route_res.append({ route_res.append({
'version': node.version, 'version': node.version,
...@@ -194,17 +194,17 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, ** ...@@ -194,17 +194,17 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, **
stdio = plugin_context.stdio stdio = plugin_context.stdio
repository_dir = dest_repository.repository_dir repository_dir = dest_repository.repository_dir
if dest_repository.version >= Version("4.2"): if dest_repository.version >= Version("4.3"):
stdio.error('upgrade observer to version {} is not support, please upgrade obd first.'.format(dest_repository.version)) stdio.error('upgrade observer to version {} is not support, please upgrade obd first.'.format(dest_repository.version))
return return
if current_repository.version == dest_repository.version: if current_repository.version == dest_repository.version:
return plugin_context.return_true(route=format_route([current_repository, dest_repository])) return plugin_context.return_true(route=format_route([current_repository, dest_repository], current_repository))
upgrade_dep_name = 'etc/oceanbase_upgrade_dep.yml' upgrade_dep_name = 'etc/oceanbase_upgrade_dep.yml'
upgrade_dep_path = os.path.join(repository_dir, upgrade_dep_name) upgrade_dep_path = os.path.join(repository_dir, upgrade_dep_name)
if not os.path.isfile(upgrade_dep_path): if not os.path.isfile(upgrade_dep_path):
stdio.error('%s No such file: %s' % (dest_repository, upgrade_dep_name)) stdio.error('%s No such file: %s. \n No upgrade route available' % (dest_repository, upgrade_dep_name))
return return
version_dep = {} version_dep = {}
......
...@@ -374,20 +374,36 @@ class Upgrader(object): ...@@ -374,20 +374,36 @@ class Upgrader(object):
time.sleep(3) time.sleep(3)
# major freeze # major freeze
# 1. check merge status # 1. wait all tenant global_broadcast_scn = last_scn, record tenant_id, global_broadcast_scn
pre_global_broadcast_scn = 0 pre_tenant_scn_dict = {}
while True: tenant_ids = []
merge_status = self.execute_sql("select max(global_broadcast_scn) as global_broadcast_scn, max(global_broadcast_scn > last_scn) as is_merging from CDB_OB_MAJOR_COMPACTION") for tenant_info in self.execute_sql("select tenant_id from CDB_OB_MAJOR_COMPACTION", one=False):
if merge_status['is_merging'] == 0: tenant_ids.append(tenant_info['tenant_id'])
pre_global_broadcast_scn = merge_status['global_broadcast_scn'] while tenant_ids:
break pre_tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn, last_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
time.sleep(3) tenant_ids = []
for pre_tenant_scn in pre_tenant_scn_list:
if pre_tenant_scn['global_broadcast_scn'] > pre_tenant_scn['last_scn']:
tenant_ids.append(pre_tenant_scn['tenant_id'])
continue
pre_tenant_scn_dict[pre_tenant_scn['tenant_id']] = pre_tenant_scn['global_broadcast_scn']
time.sleep(1)
# 2. begin merge # 2. begin merge
self.execute_sql("alter system major freeze tenant = all", error=False) self.execute_sql("alter system major freeze tenant = all", error=False)
# 3. wait merge start # 3. wait merge start
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn <= %s", [pre_global_broadcast_scn]): tenant_ids = pre_tenant_scn_dict.keys()
while tenant_ids:
tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
tenant_ids = []
for tenant_scn in tenant_scn_list:
if pre_tenant_scn_dict[tenant_scn['tenant_id']] >= tenant_scn['global_broadcast_scn']:
tenant_ids.append(tenant_scn['tenant_id'])
continue
time.sleep(3) time.sleep(3)
# 4.wait merge finsh
# 4. wait merge finish
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn > last_scn"): while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn > last_scn"):
time.sleep(3) time.sleep(3)
......
This diff is collapsed.
This diff is collapsed.
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import json
import time
import requests
from copy import deepcopy
from _errno import EC_OBSERVER_FAIL_TO_START, EC_OBSERVER_FAIL_TO_START_WITH_ERR, EC_OBSERVER_FAILED_TO_REGISTER, EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS
from collections import OrderedDict
def config_url(ocp_config_server, appname, cid):
cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname)
proxy_cfg_url = '%s&Action=GetObProxyConfig&ObRegionGroup=%s' % (ocp_config_server, appname)
# Command that clears the URL content for the cluster
cleanup_config_url_content = '%s&Action=DeleteObRootServiceInfoByClusterName&ClusterName=%s' % (ocp_config_server, appname)
# Command that registers the cluster information to the Config URL
register_to_config_url = '%s&Action=ObRootServiceRegister&ObCluster=%s&ObClusterId=%s' % (ocp_config_server, appname, cid)
return cfg_url, cleanup_config_url_content, register_to_config_url
def init_config_server(ocp_config_server, appname, cid, force_delete, stdio):
def post(url):
stdio.verbose('post %s' % url)
response = requests.post(url)
if response.status_code != 200:
raise Exception('%s status code %s' % (url, response.status_code))
return json.loads(response.text)['Code']
cfg_url, cleanup_config_url_content, register_to_config_url = config_url(ocp_config_server, appname, cid)
ret = post(register_to_config_url)
if ret != 200:
if not force_delete:
raise Exception('%s may have been registered in %s' % (appname, ocp_config_server))
ret = post(cleanup_config_url_content)
if ret != 200 :
raise Exception('failed to clean up the config url content, return code %s' % ret)
if post(register_to_config_url) != 200:
return False
return cfg_url
class EnvVariables(object):
def __init__(self, environments, client):
self.environments = environments
self.client = client
self.env_done = {}
def __enter__(self):
for env_key, env_value in self.environments.items():
self.env_done[env_key] = self.client.get_env(env_key)
self.client.add_env(env_key, env_value, True)
def __exit__(self, *args, **kwargs):
for env_key, env_value in self.env_done.items():
if env_value is not None:
self.client.add_env(env_key, env_value, True)
else:
self.client.del_env(env_key)
def start(plugin_context, *args, **kwargs):
cluster_config = plugin_context.cluster_config
options = plugin_context.options
clients = plugin_context.clients
stdio = plugin_context.stdio
clusters_cmd = {}
need_bootstrap = True
root_servers = {}
global_config = cluster_config.get_global_conf()
appname = global_config['appname'] if 'appname' in global_config else None
cluster_id = global_config['cluster_id'] if 'cluster_id' in global_config else None
obconfig_url = global_config['obconfig_url'] if 'obconfig_url' in global_config else None
cfg_url = ''
if obconfig_url:
if not appname or not cluster_id:
stdio.error('need appname and cluster_id')
return
try:
cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio)
if not cfg_url:
stdio.error(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url))
return
except:
stdio.exception(EC_OBSERVER_FAILED_TO_REGISTER.format())
return
stdio.start_loading('Start observer')
for server in cluster_config.original_servers:
config = cluster_config.get_server_conf(server)
zone = config['zone']
if zone not in root_servers:
root_servers[zone] = '%s:%s:%s' % (server.ip, config['rpc_port'], config['mysql_port'])
rs_list_opt = '-r \'%s\'' % ';'.join([root_servers[zone] for zone in root_servers])
for server in cluster_config.servers:
client = clients[server]
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
if not server_config.get('data_dir'):
server_config['data_dir'] = '%s/store' % home_path
if client.execute_command('ls %s/clog/tenant_1/' % server_config['data_dir']).stdout.strip():
need_bootstrap = False
remote_pid_path = '%s/run/observer.pid' % home_path
remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip()
if remote_pid:
if client.execute_command('ls /proc/%s' % remote_pid):
continue
stdio.verbose('%s start command construction' % server)
if getattr(options, 'without_parameter', False) and client.execute_command('ls %s/etc/observer.config.bin' % home_path):
use_parameter = False
else:
use_parameter = True
cmd = []
if use_parameter:
not_opt_str = OrderedDict({
'mysql_port': '-p',
'rpc_port': '-P',
'zone': '-z',
'nodaemon': '-N',
'appname': '-n',
'cluster_id': '-c',
'data_dir': '-d',
'syslog_level': '-l',
'ipv6': '-6',
'mode': '-m',
'scn': '-f'
})
not_cmd_opt = [
'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode',
'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password'
]
get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
opt_str = []
for key in server_config:
if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'):
value = get_value(key)
opt_str.append('%s=%s' % (key, value))
if cfg_url:
opt_str.append('obconfig_url=\'%s\'' % cfg_url)
else:
cmd.append(rs_list_opt)
for key in not_opt_str:
if key in server_config:
value = get_value(key)
cmd.append('%s %s' % (not_opt_str[key], value))
cmd.append('-I %s' % server.ip)
cmd.append('-o %s' % ','.join(opt_str))
else:
cmd.append('-p %s' % server_config['mysql_port'])
clusters_cmd[server] = 'cd %s; %s/bin/observer %s' % (home_path, home_path, ' '.join(cmd))
for server in clusters_cmd:
environments = deepcopy(cluster_config.get_environments())
client = clients[server]
server_config = cluster_config.get_server_conf(server)
stdio.verbose('starting %s observer', server)
if 'LD_LIBRARY_PATH' not in environments:
environments['LD_LIBRARY_PATH'] = '%s/lib:' % server_config['home_path']
with EnvVariables(environments, client):
ret = client.execute_command(clusters_cmd[server])
if not ret:
stdio.stop_loading('fail')
stdio.error(EC_OBSERVER_FAIL_TO_START_WITH_ERR.format(server=server, stderr=ret.stderr))
return
stdio.stop_loading('succeed')
stdio.start_loading('observer program health check')
time.sleep(3)
failed = []
for server in cluster_config.servers:
client = clients[server]
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
remote_pid_path = '%s/run/observer.pid' % home_path
stdio.verbose('%s program health check' % server)
remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip()
if remote_pid and client.execute_command('ls /proc/%s' % remote_pid):
stdio.verbose('%s observer[pid: %s] started', server, remote_pid)
else:
failed.append(EC_OBSERVER_FAIL_TO_START.format(server=server))
if failed:
stdio.stop_loading('fail')
for msg in failed:
stdio.warn(msg)
return plugin_context.return_false()
else:
stdio.stop_loading('succeed')
return plugin_context.return_true(need_bootstrap=need_bootstrap)
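The `EnvVariables` context manager in the new file overlays environment variables on the remote client and restores (or deletes) them on exit. A local analogue against `os.environ` makes the save/restore contract easy to see; the `LD_LIBRARY_PATH` value is just an illustrative assumption:

```python
import os

class LocalEnvVariables(object):
    """Local analogue of the plugin's EnvVariables: overlay os.environ,
    then restore (or delete) the previous values on exit."""
    def __init__(self, environments):
        self.environments = environments
        self.saved = {}

    def __enter__(self):
        for key, value in self.environments.items():
            self.saved[key] = os.environ.get(key)
            os.environ[key] = value

    def __exit__(self, *exc):
        for key, value in self.saved.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value

with LocalEnvVariables({'LD_LIBRARY_PATH': '/home/admin/oceanbase/lib:'}):
    pass  # launch the process that needs the overlay here
```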
This diff is collapsed.
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
name_local: 进程内存 name_local: 进程内存
require: true require: true
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
min_value: 512M min_value: 512M
need_restart: true need_restart: true
description_en: the memory size of ocp express server. Please enter a capacity, such as 2G description_en: the memory size of ocp express server. Please enter a capacity, such as 2G
......
...@@ -202,14 +202,15 @@ def prepare_parameters(cluster_config, stdio): ...@@ -202,14 +202,15 @@ def prepare_parameters(cluster_config, stdio):
if value is not None: if value is not None:
depend_info[key] = value depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp) ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers: for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info: connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
zone = ob_server_conf['zone'] zone = ob_server_conf['zone']
if zone not in ob_zones: if zone not in ob_zones:
ob_zones[zone] = ob_server ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values() root_servers = ob_zones.values()
break break
for comp in ['obproxy', 'obproxy-ce']: for comp in ['obproxy', 'obproxy-ce']:
...@@ -266,7 +267,12 @@ def prepare_parameters(cluster_config, stdio): ...@@ -266,7 +267,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config) missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys: if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer: if 'jdbc_url' in missed_keys and depend_observer:
if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db']) server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer: if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'],
depend_info.get('ocp_meta_tenant', {}).get("tenant_name")) depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
...@@ -333,26 +339,37 @@ def start(plugin_context, start_env=None, *args, **kwargs): ...@@ -333,26 +339,37 @@ def start(plugin_context, start_env=None, *args, **kwargs):
else: else:
use_parameter = True use_parameter = True
# check meta db connect before start # check meta db connect before start
if jdbc_url:
matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url) matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url)
if matched: if not matched:
stdio.error("Invalid jdbc url: %s" % jdbc_url)
return
ip = matched.group(1) ip = matched.group(1)
sql_port = matched.group(2)[1:] sql_port = matched.group(2)[1:]
database = matched.group(3) database = matched.group(3)
connect_infos = [[ip, sql_port]]
else:
connect_infos = server_config.get('connect_infos', '')
database = server_config.get('ocp_meta_db', '')
connected = False connected = False
retries = 300 retries = 300
while not connected and retries: while not connected and retries:
for connect_info in connect_infos:
retries -= 1 retries -= 1
server_ip = connect_info[0]
server_port = connect_info[-1]
try: try:
Cursor(ip=ip, port=sql_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio)
jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database)
connected = True connected = True
break
except: except:
time.sleep(1) time.sleep(1)
if not connected: if not connected:
success = False success = False
stdio.error("{}: failed to connect meta db".format(server)) stdio.error("{}: failed to connect meta db".format(server))
continue continue
else:
stdio.verbose('unmatched jdbc url, skip meta db connection check')
if server_config.get('encrypt_password', False): if server_config.get('encrypt_password', False):
private_key, public_key = get_key(client, os.path.join(home_path, 'conf'), stdio) private_key, public_key = get_key(client, os.path.join(home_path, 'conf'), stdio)
public_key_str = get_plain_public_key(public_key) public_key_str = get_plain_public_key(public_key)
......
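ocp-express startup now tolerates a missing `jdbc_url`: when the url is present it is parsed with the regex shown above, otherwise the `[ip, mysql_port]` pairs collected from the depended-on observers are tried in turn and the url is rebuilt from whichever address answers. The selection logic, reduced to a sketch with a hypothetical `try_connect(ip, port)` probe:

```python
import re

def resolve_meta_db(jdbc_url, connect_infos, ocp_meta_db, try_connect):
    """Return a usable 'jdbc:oceanbase://ip:port/db' url, or None."""
    if jdbc_url:
        m = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url)
        if not m:
            raise ValueError('Invalid jdbc url: %s' % jdbc_url)
        candidates, database = [(m.group(1), m.group(2)[1:])], m.group(3)
    else:
        candidates, database = connect_infos, ocp_meta_db
    for ip, port in candidates:
        if try_connect(ip, port):                       # hypothetical probe
            return 'jdbc:oceanbase://%s:%s/%s' % (ip, port, database)
    return None
```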
...@@ -145,14 +145,14 @@ def prepare_parameters(cluster_config, stdio): ...@@ -145,14 +145,14 @@ def prepare_parameters(cluster_config, stdio):
if value is not None: if value is not None:
depend_info[key] = value depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp) ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers: for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info: connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
zone = ob_server_conf['zone'] zone = ob_server_conf['zone']
if zone not in ob_zones: if zone not in ob_zones:
ob_zones[zone] = ob_server ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values() root_servers = ob_zones.values()
break break
for comp in ['obproxy', 'obproxy-ce']: for comp in ['obproxy', 'obproxy-ce']:
...@@ -209,7 +209,12 @@ def prepare_parameters(cluster_config, stdio): ...@@ -209,7 +209,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config) missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys: if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer: if 'jdbc_url' in missed_keys and depend_observer:
if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db']) server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer: if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], depend_info.get('ocp_meta_tenant', {}).get("tenant_name")) server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
depends_key_maps = { depends_key_maps = {
......
...@@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function ...@@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function
from tool import ConfigUtil from tool import ConfigUtil
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs): def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
namespace = plugin_context.namespace namespace = plugin_context.namespace
namespaces = plugin_context.namespaces namespaces = plugin_context.namespaces
deploy_name = plugin_context.deploy_name deploy_name = plugin_context.deploy_name
...@@ -52,7 +52,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, ...@@ -52,7 +52,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository) apply_param_plugin(cur_repository)
if not stop_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs): if not stop_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs):
return return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
apply_param_plugin(dest_repository) apply_param_plugin(dest_repository)
warns = {} warns = {}
not_support = ['system_password'] not_support = ['system_password']
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
name_local: 进程内存 name_local: 进程内存
require: true require: true
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
min_value: 512M min_value: 512M
need_restart: true need_restart: true
description_en: the memory size of ocp express server. Please enter a capacity, such as 2G description_en: the memory size of ocp express server. Please enter a capacity, such as 2G
......
...@@ -201,14 +201,15 @@ def prepare_parameters(cluster_config, stdio): ...@@ -201,14 +201,15 @@ def prepare_parameters(cluster_config, stdio):
if value is not None: if value is not None:
depend_info[key] = value depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp) ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers: for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info: connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
zone = ob_server_conf['zone'] zone = ob_server_conf['zone']
if zone not in ob_zones: if zone not in ob_zones:
ob_zones[zone] = ob_server ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values() root_servers = ob_zones.values()
break break
for comp in ['obproxy', 'obproxy-ce']: for comp in ['obproxy', 'obproxy-ce']:
...@@ -265,7 +266,12 @@ def prepare_parameters(cluster_config, stdio): ...@@ -265,7 +266,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config) missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys: if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer: if 'jdbc_url' in missed_keys and depend_observer:
if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db']) server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer: if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'],
depend_info.get('ocp_meta_tenant', {}).get("tenant_name")) depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
...@@ -333,26 +339,37 @@ def start(plugin_context, start_env=None, *args, **kwargs): ...@@ -333,26 +339,37 @@ def start(plugin_context, start_env=None, *args, **kwargs):
else: else:
use_parameter = True use_parameter = True
# check meta db connect before start # check meta db connect before start
if jdbc_url:
matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url) matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url)
if matched: if not matched:
stdio.error("Invalid jdbc url: %s" % jdbc_url)
return
ip = matched.group(1) ip = matched.group(1)
sql_port = matched.group(2)[1:] sql_port = matched.group(2)[1:]
database = matched.group(3) database = matched.group(3)
connect_infos = [[ip, sql_port]]
else:
connect_infos = server_config.get('connect_infos', '')
database = server_config.get('ocp_meta_db', '')
connected = False connected = False
retries = 300 retries = 300
while not connected and retries: while not connected and retries:
for connect_info in connect_infos:
retries -= 1 retries -= 1
server_ip = connect_info[0]
server_port = connect_info[-1]
try: try:
Cursor(ip=ip, port=sql_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio)
jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database)
connected = True connected = True
break
except: except:
time.sleep(1) time.sleep(1)
if not connected: if not connected:
success = False success = False
stdio.error("{}: failed to connect meta db".format(server)) stdio.error("{}: failed to connect meta db".format(server))
continue continue
else:
stdio.verbose('unmatched jdbc url, skip meta db connection check')
if server_config.get('encrypt_password', False): if server_config.get('encrypt_password', False):
private_key, public_key = get_key(client, os.path.join(home_path, 'conf'), stdio) private_key, public_key = get_key(client, os.path.join(home_path, 'conf'), stdio)
public_key_str = get_plain_public_key(public_key) public_key_str = get_plain_public_key(public_key)
......
...@@ -146,14 +146,14 @@ def prepare_parameters(cluster_config, stdio): ...@@ -146,14 +146,14 @@ def prepare_parameters(cluster_config, stdio):
if value is not None: if value is not None:
depend_info[key] = value depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp) ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers: for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info: connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
zone = ob_server_conf['zone'] zone = ob_server_conf['zone']
if zone not in ob_zones: if zone not in ob_zones:
ob_zones[zone] = ob_server ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values() root_servers = ob_zones.values()
break break
for comp in ['obproxy', 'obproxy-ce']: for comp in ['obproxy', 'obproxy-ce']:
...@@ -210,7 +210,12 @@ def prepare_parameters(cluster_config, stdio): ...@@ -210,7 +210,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config) missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys: if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer: if 'jdbc_url' in missed_keys and depend_observer:
if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db']) server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer: if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], depend_info.get('ocp_meta_tenant', {}).get("tenant_name")) server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
depends_key_maps = { depends_key_maps = {
......
- name: home_path
name_local: 工作目录
require: true
essential: true
type: STRING
need_redeploy: true
description_en: the directory for the work data
description_local: OCP express server工作目录
- name: log_dir
name_local: 日志目录
type: STRING
require: false
essential: true
need_redeploy: true
description_en: The directory for log files. The default value is $home_path/log.
description_local: OCP express server日志目录, 默认为工作目录下的log
- name: java_bin
name_local: java路径
type: STRING
require: true
essential: true
default: java
need_restart: true
description_en: The path of java binary
description_local: OCP express 使用的java可执行文件的路径
- name: memory_size
name_local: 进程内存
require: true
essential: true
type: CAPACITY_MB
min_value: 512M
need_restart: true
description_en: The memory size of the OCP Express server. Please enter a capacity, such as 2G
description_local: OCP express server进程内存大小。请输入带容量带单位的整数,如2G
- name: logging_file_max_size
name_local: 单个日志文件大小
type: STRING
require: false
essential: true
default: 100MB
need_restart: true
description_local: 单个日志文件大小
description_en: When logging_file_name is configured, specify the log file size through this configuration
- name: logging_file_total_size_cap
name_local: 日志总大小
type: STRING
require: true
essential: true
default: 1GB
need_restart: true
description_local: 日志文件总大小
description_en: When logging_file_name is configured, specify the total log file size through this configuration
- name: port
name_local: 端口
require: true
essential: true
type: INT
default: 8180
need_restart: true
description_en: The port of the OCP Express server.
description_local: OCP server使用的端口
- name: jdbc_url
require: false
type: STRING
need_redeploy: true
description_en: The jdbc connection url for ocp meta db
description_local: OCP使用的元数据库的jdbc连接串
- name: jdbc_username
require: false
type: STRING
need_redeploy: true
description_en: The username for ocp meta db
description_local: OCP使用的元数据库的用户名
- name: jdbc_password
require: false
type: STRING
default:
need_redeploy: true
description_en: The password for ocp meta db
description_local: OCP使用的元数据库的密码
- name: admin_passwd
require: true
type: STRING
modify_limit: modify
default:
need_redeploy: true
description_en: The password for the OCP web admin user. The password must be 8 to 32 characters in length, and must contain at least two digits, two uppercase letters, two lowercase letters, and two of the following special characters:~!@#%^&*_-+=|(){}[]:;,.?/
description_local: OCP登录页面的admin账户密码(密码长度8~32位,至少包含2位数字、2位大写字母、2位小写字母和2位特殊字符(~!@#%^&*_-+=|(){}[]:;,.?/))
# bootstrap parameters
- name: cluster_name
require: false
type: STRING
default: obcluster
need_restart: true
description_en: The cluster name of the OceanBase database
description_local: Oceanbase数据库的集群名称
- name: ob_cluster_id
require: false
type: INT
min_value: 1
max_value: 4294901759
need_restart: true
description_en: ID of the cluster
description_local: OceanBase集群ID
- name: root_sys_password
require: false
type: STRING
default:
need_restart: true
description_en: The password of the sys tenant root user
description_local: sys租户root用户的密码
- name: server_addresses
require: false
type: LIST
need_restart: true
description_en: The server information of the OceanBase cluster
description_local: Oceanbase集群的节点信息
- name: 'session_timeout'
type: 'STRING'
require: false
need_restart: true
description_local: '登陆会话/Session超时的时间,默认是30m,最少60s。如果不加后缀单位,则默认是秒。重启生效。'
description_en: 'Session timeout interval, default is 30m, at least 60s. If the suffix unit is not added, the default is seconds. Restart OCP to take effect.'
- name: 'login_encrypt_enabled'
type: 'STRING'
require: false
need_restart: true
description_local: '登录信息是否开启加密传输,默认开启,重启生效'
description_en: 'Switch to enable encrypted transmission of login information, enabled by default. Restart OCP to take effect.'
- name: 'login_encrypt_public_key'
type: 'STRING'
require: false
need_restart: true
description_local: '加密登录信息的公钥,建议部署后修改此配置,修改后重启生效'
description_en: 'The public key for login encryption, It is recommended to modify this configuration after deployment. Restart OCP to take effect.'
- name: 'login_encrypt_private_key'
type: 'STRING'
require: false
need_restart: true
description_local: '加密登录信息的私钥,建议部署后修改此配置,修改后重启生效'
description_en: 'The private key for encryption. It is recommended to modify this configuration after deployment. Restart OCP to take effect.'
- name: 'enable_basic_auth'
type: 'STRING'
require: false
need_restart: true
description_local: '是否启用Basic Auth登陆模式,通常供程序和SDK等客户端场景使用,默认true。本配置与ocp.iam.auth可同时开启。重启生效。'
description_en: 'Whether to enable Basic Authentication, usually for client programs and SDKs to call server APIs. The default is true. This configuration and ocp.iam.auth can be enabled together. Restart OCP to take effect.'
- name: 'enable_csrf'
type: 'STRING'
require: false
need_restart: true
description_local: '是否启用CSRF跨站点请求伪造安全保护,通常基于网页登陆的方式都推荐要启用,默认true。重启生效。'
description_en: 'Whether to enable CSRF cross-site request forgery security protection. It is recommended to enable it, the default is true. Restart OCP to take effect.'
- name: 'vault_key'
type: 'STRING'
require: false
need_restart: true
description_local: '密码箱加密密钥'
description_en: 'vault secret key'
- name: 'druid_name'
type: 'STRING'
require: false
need_restart: true
description_local: 'metadb的druid连接池名称。重启生效'
description_en: 'metadb druid connection pool name. Restart to take effect'
- name: 'druid_init_size'
type: 'STRING'
require: false
need_restart: true
description_local: '初始化时建立物理连接的个数。重启生效'
description_en: 'The number of physical connections established during initialization. Restart to take effect'
- name: 'druid_min_idle'
type: 'STRING'
require: false
need_restart: true
description_local: '最小连接池数量。重启生效'
description_en: 'Minimum number of connections. Restart to take effect'
- name: 'druid_max_active'
type: 'STRING'
require: false
need_restart: true
description_local: '最大连接池数量。重启生效'
description_en: 'The maximum number of connections. Restart to take effect'
- name: 'druid_test_while_idle'
type: 'STRING'
require: false
need_restart: true
description_local: '建议配置为true,不影响性能,并且保证安全性。申请连接的时候检测。重启生效'
description_en: 'It is recommended to set it to true, which will not affect performance and ensure safety. Detect when applying for connection. Restart to take effect'
- name: 'druid_validation_query'
type: 'STRING'
require: false
need_restart: true
description_local: '用来检测连接是否有效的sql。重启生效'
description_en: 'SQL used to detect whether the connection is valid. Restart to take effect'
- name: 'druid_max_wait'
type: 'STRING'
require: false
need_restart: true
description_local: '获取连接时最大等待时间,单位毫秒。重启生效'
description_en: 'Maximum waiting time when getting a connection, in milliseconds. Restart to take effect'
- name: 'druid_keep_alive'
type: 'STRING'
require: false
need_restart: true
description_local: '连接池中的minIdle数量以内的连接,空闲时间超过minEvictableIdleTimeMillis(缺省值1800秒),则会执行keepAlive操作。重启生效'
description_en: 'For connections within the number of minIdle in the connection pool, if the idle time exceeds minEvictableIdleTimeMillis (the default value is 1800 seconds), the keepAlive operation will be performed. Restart to take effect'
- name: 'logging_pattern_console'
type: 'STRING'
require: false
need_restart: true
description_local: '用于控制台输出的日志格式'
description_en: 'Log format for console output'
- name: 'logging_pattern_file'
type: 'STRING'
require: false
need_restart: true
description_local: '用于文件输出的日志格式'
description_en: 'Log format used for file output'
- name: 'logging_file_clean_when_start'
type: 'STRING'
require: false
need_restart: true
description_local: '启动时删除压缩的日志文件'
description_en: 'Clean the archive log files on startup'
- name: 'logging_file_max_history'
name_local: 日志保留天数
type: INT
require: false
essential: true
need_restart: true
min_value: 1
max_value: 2147483647
description_local: '最多保留的归档日志文件的天数,默认不限制'
description_en: 'When logging.file is configured, set the maximum number of days to keep the archived log files. The default value is unlimited'
- name: 'ocp.idempotent.client-token.expire.time'
type: 'STRING'
require: false
need_restart: true
description_local: '幂等请求token的缓存过期时间,默认14d'
description_en: 'Expire time of idempotent client token, the default is 14d'
- name: 'obsdk_sql_query_limit'
type: 'STRING'
require: false
need_restart: true
description_local: '基于 obsdk 的采集查询,SQL 查询行数限制,默认 10000'
description_en: 'SQL query row limit for obsdk-based collection, the default is 10000'
- name: 'ocp.monitor.host.exporters'
type: 'STRING'
require: false
need_restart: true
description_local: '主机监控exporter'
description_en: 'exporters of ocp host'
- name: 'ocp.monitor.ob.exporters'
type: 'STRING'
require: false
need_restart: true
description_local: 'OB监控exporter'
description_en: 'exporters of ob'
- name: 'monitor_collect_interval'
type: 'STRING'
require: false
need_restart: true
description_local: '秒级别监控采集间隔,默认 1s,支持配置选项是 1s, 5s, 10s, 15s'
description_en: 'The parameter determines the second-level monitoring and collection interval. The supported configuration options are 1s, 5s, 10s, 15s. Default value is 1s'
- name: 'montior_retention_days'
type: 'STRING'
require: false
need_restart: true
description_local: '监控数据保存天数,key 是监控数据的表名,value 是保存的天数,修改后重启生效.'
description_en: 'Retention days for monitor data, key is table name for monitor data, value is the retention days. Restart to take effect.'
- name: 'obsdk_cache_size'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk连接器池容量,取值范围10~200,默认值100'
description_en: 'Obsdk connector holder capacity, value range 10~200, default value 100'
- name: 'obsdk_max_idle'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk空闲连接器的过期时间,单位秒,取值范围300~18000,默认值3600'
description_en: 'The expiration time of the obsdk idle connector, in seconds, the value range is 300~18000, and the default value is 3600'
- name: 'obsdk_cleanup_period'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk过期连接器的清理周期,单位秒,取值范围30~1800,默认值300'
description_en: 'The interval for obsdk to clean up the expired connector, in seconds, the value range is 30~1800, and the default value is 300'
- name: 'obsdk_print_sql'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk中sql打印开关,默认开启'
description_en: 'Sql print switch in obsdk, enabled by default'
- name: 'obsdk_slow_query_threshold'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk中慢查询日志阈值,单位毫秒,默认值 1000'
description_en: 'Slow query log threshold in obsdk, in milliseconds, the default value is 1000'
- name: 'obsdk_init_timeout'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk中连接器初始化超时时间,单位毫秒,默认值 3000'
description_en: 'Timeout of connector initialization in obsdk, in milliseconds, the default value is 5000'
- name: 'obsdk_init_core_size'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk中连接器初始化的线程个数'
description_en: 'The thread count of connector initialization in obsdk, the default value is 16'
- name: 'obsdk_global_timeout'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk中运维命令全局超时时间,单位毫秒,取值范围10000~7200000,默认值 300000'
description_en: 'Global timeout of operation in obsdk, in milliseconds, the value range is 10000~7200000, and the default value is 300000'
- name: 'obsdk_connect_timeout'
type: 'STRING'
require: false
need_restart: true
description_local: 'obsdk建立Socket连接的超时时间,单位:ms'
description_en: 'The timeout period for obsdk to connect to ob, unit: ms'
- name: 'obsdk_read_timeout'
type: 'STRING'
require: false
need_restart: true
description_local: 'Obsdk的Socket读取数据的超时时间,单位:ms'
description_en: 'Obsdk socket read data timeout time, unit: ms'
\ No newline at end of file
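One detail in the parameter file above is easy to get wrong by hand: `admin_passwd` encodes a concrete complexity rule (8 to 32 characters, at least two digits, two uppercase letters, two lowercase letters, and two of the listed special characters). Below is a minimal validator sketch of exactly that stated rule; the function name is illustrative and not part of the plugin:

```python
# Sketch of the admin_passwd rule stated above; validate_admin_passwd is an
# illustrative name, not a function shipped with the plugin.
SPECIAL_CHARS = set('~!@#%^&*_-+=|(){}[]:;,.?/')

def validate_admin_passwd(passwd):
    if not 8 <= len(passwd) <= 32:
        return False
    digits = sum(c.isdigit() for c in passwd)
    upper = sum(c.isupper() for c in passwd)
    lower = sum(c.islower() for c in passwd)
    special = sum(c in SPECIAL_CHARS for c in passwd)
    return digits >= 2 and upper >= 2 and lower >= 2 and special >= 2
```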
...@@ -15,3 +15,4 @@ inspect2==0.1.2 ...@@ -15,3 +15,4 @@ inspect2==0.1.2
six==1.16.0 six==1.16.0
pyinstaller==3.6 pyinstaller==3.6
bcrypt==3.1.7 bcrypt==3.1.7
zstandard==0.14.1
\ No newline at end of file
...@@ -13,3 +13,4 @@ six==1.16.0 ...@@ -13,3 +13,4 @@ six==1.16.0
pyinstaller>=4.3 pyinstaller>=4.3
bcrypt==4.0.0 bcrypt==4.0.0
configparser>=5.2.0 configparser>=5.2.0
zstandard==0.21.0
\ No newline at end of file
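Both requirements files now pin `zstandard` (0.14.1 on the Python 2 toolchain, 0.21.0 on Python 3). The diff does not show the call sites; assuming the library's standard round-trip API, a short sketch:

```python
import zstandard as zstd

data = b'OceanBase deploy package payload' * 100

# Compress with a one-shot compressor; level 3 is the library default.
compressed = zstd.ZstdCompressor(level=3).compress(data)

# Decompress and verify the round trip.
assert zstd.ZstdDecompressor().decompress(compressed) == data
print('%d -> %d bytes' % (len(data), len(compressed)))
```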
...@@ -26,7 +26,7 @@ MINIMAL_CONFIG = ''' ...@@ -26,7 +26,7 @@ MINIMAL_CONFIG = '''
''' '''
PKG_ESTIMATED_SIZE = defaultdict(lambda:0) PKG_ESTIMATED_SIZE = defaultdict(lambda:0)
PKG_ESTIMATED_SIZE.update({"oceanbase-ce":314142720, "obproxy-ce":45424640, "obagent": 25124864}) PKG_ESTIMATED_SIZE.update({"oceanbase-ce": 347142720, "oceanbase": 358142928, "obproxy-ce": 45424640, "obproxy": 56428687, "obagent": 76124864, "ocp-express": 95924680})
OCEANBASE_CE = 'oceanbase-ce' OCEANBASE_CE = 'oceanbase-ce'
......
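Because `PKG_ESTIMATED_SIZE` is a `defaultdict(lambda: 0)`, a component missing from the table contributes an estimate of 0 bytes instead of raising `KeyError`; the updated literal also adds estimates for the `oceanbase`, `obproxy`, and `ocp-express` packages. A quick sketch of the lookup behavior (component names beyond those in the diff are illustrative):

```python
from collections import defaultdict

# Mirrors the structure in the diff: unknown packages estimate to 0 bytes.
PKG_ESTIMATED_SIZE = defaultdict(lambda: 0)
PKG_ESTIMATED_SIZE.update({'oceanbase-ce': 347142720, 'obproxy-ce': 45424640})

components = ['oceanbase-ce', 'obproxy-ce', 'some-unknown-plugin']
total = sum(PKG_ESTIMATED_SIZE[name] for name in components)
print('estimated download: %.1f MiB' % (total / 1024.0 / 1024.0))
```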