Unverified commit d84e56c9, authored by Rongfeng Fu, committed by GitHub

V2.2.0 (#176)

Parent 156416e1
@@ -132,7 +132,3 @@ A:You can use the `obd update` command to update OBD. When you are done with t
 ## Protocol

 OBD complies with [GPL-3.0](/LICENSE).
-
-## Sysbench benchmark
-
-- [Run the Sysbench benchmark test in OceanBase Database (Paetica, VLDB 2023)](https://github.com/oceanbase/oceanbase-doc/blob/V4.1.0/en-US/7.reference/3.performance-tuning-guide/6.performance-whitepaper/3.run-the-sysbench-benchmark-test-in-oceanbase-database.md)
@@ -44,6 +44,7 @@ ROOT_IO = IO(1)
 OBD_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), '.obd')
 OBDIAG_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), 'oceanbase-diagnostic-tool')
 COMMAND_ENV.load(os.path.join(OBD_HOME_PATH, '.obd_environ'), ROOT_IO)
+ROOT_IO.default_confirm = COMMAND_ENV.get(ENV.ENV_DEFAULT_CONFIRM, '0') == '1'


 class OptionHelpFormatter(IndentedHelpFormatter):
@@ -871,10 +872,11 @@ class ClusterRedeployCommand(ClusterMirrorCommand):

     def __init__(self):
         super(ClusterRedeployCommand, self).__init__('redeploy', 'Redeploy a started cluster.')
         self.parser.add_option('-f', '--force-kill', action='store_true', help="Force kill the running observer process in the working directory.")
+        self.parser.add_option('--confirm', action='store_true', help='Confirm to redeploy.')

     def _do_command(self, obd):
         if self.cmds:
-            res = obd.redeploy_cluster(self.cmds[0])
+            res = obd.redeploy_cluster(self.cmds[0], need_confirm=not getattr(self.opts, 'confirm', False))
             self.background_telemetry_task(obd)
             return res
         else:
......
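
Note that the flag is inverted at the call site: redeploy destroys the cluster's data, so the prompt now fires unless the user passes --confirm. A minimal sketch of the resulting call (illustrative only; obd is the ObdHome instance, opts the parsed options, and 'demo' a hypothetical deploy name):

# obd cluster redeploy demo            -> need_confirm=True, ObdHome prompts before destroying
# obd cluster redeploy demo --confirm  -> need_confirm=False, no prompt
res = obd.redeploy_cluster('demo', need_confirm=not getattr(opts, 'confirm', False))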
@@ -820,15 +820,20 @@ class ClusterConfig(object):
             self._cache_server[server] = self._apply_temp_conf(self._get_unprocessed_server_conf(server))
         return self._cache_server[server]

-    def get_original_global_conf(self):
-        return deepcopy(self._original_global_conf)
+    def get_original_global_conf(self, format_conf=False):
+        conf = deepcopy(self._original_global_conf)
+        format_conf and self._apply_temp_conf(conf)
+        return conf

-    def get_original_server_conf(self, server):
-        return self._server_conf.get(server)
+    def get_original_server_conf(self, server, format_conf=False):
+        conf = deepcopy(self._server_conf.get(server))
+        format_conf and self._apply_temp_conf(conf)
+        return conf

-    def get_original_server_conf_with_global(self, server):
-        config = self.get_original_global_conf()
+    def get_original_server_conf_with_global(self, server, format_conf=False):
+        config = deepcopy(self.get_original_global_conf())
         config.update(self._server_conf.get(server, {}))
+        format_conf and self._apply_temp_conf(config)
         return config
......
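
The new format_conf flag controls whether the returned copy keeps the raw values from the user's YAML or runs them through _apply_temp_conf, which normalizes each parameter against its declared type. A rough sketch, assuming cluster_config is a populated ClusterConfig:

raw = cluster_config.get_original_global_conf()                     # values exactly as written in the config file
typed = cluster_config.get_original_global_conf(format_conf=True)   # values normalized by _apply_temp_conf
# Both calls return copies, so callers can mutate the result without touching the stored config.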
@@ -42,3 +42,6 @@ TELEMETRY_MODE = "TELEMETRY_MODE"
 # telemetry log mode. 0 - disable, 1 - enable.
 TELEMETRY_LOG_MODE = "TELEMETRY_LOG_MODE"
+
+# ROOT IO DEFAULT CONFIRM. 0 - disable, 1 - enable.
+ENV_DEFAULT_CONFIRM = "IO_DEFAULT_CONFIRM"
\ No newline at end of file
@@ -175,6 +175,9 @@ EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK_AVAILABLE = OBDErrorCodeTemplate(4305
 EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK = OBDErrorCodeTemplate(4305, 'There is not enough log disk for ocp meta tenant.')
 EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_MEM = OBDErrorCodeTemplate(4305, 'There is not enough memory for ocp meta tenant')
 EC_OCP_EXPRESS_ADMIN_PASSWD_ERROR = OBDErrorCodeTemplate(4306, '({ip}) ocp-express admin_passwd invalid.(Current :{current})')
+
+# 4350-4399 are already used by ocp
+
 # sql
 EC_SQL_EXECUTE_FAILED = OBDErrorCodeTemplate(5000, "{sql} execute failed")
......
@@ -271,9 +271,9 @@ class RemoteMirrorRepository(MirrorRepository):
             self._load_repo_age()
             if self.enabled:
                 repo_age = ConfigUtil.get_value_from_dict(meta_data, 'repo_age', 0, int)
-                if repo_age > self.repo_age or int(time.time()) - 86400 > self.repo_age:
-                    self.repo_age = repo_age
-                    self.update_mirror()
+                if (repo_age > self.repo_age or int(time.time()) - 86400 > self.repo_age) and self.available:
+                    if self.update_mirror():
+                        self.repo_age = repo_age

     @property
     def available(self):
......
@@ -32,6 +32,7 @@ from _manager import Manager
 from _rpm import Version
 from ssh import ConcurrentExecutor
 from tool import ConfigUtil, DynamicLoading, YamlLoader, FileUtil
+from _types import *

 yaml = YamlLoader()
@@ -360,225 +361,9 @@ class Null(object):

     def __init__(self):
         pass


 class ParamPlugin(Plugin):
-    class ConfigItemType(object):
-
-        TYPE_STR = None
-        NULL = Null()
-
-        def __init__(self, s):
-            try:
-                self._origin = s
-                self._value = 0
-                self.value = self.NULL
-                self._format()
-                if self.value == self.NULL:
-                    self.value = self._origin
-            except:
-                raise Exception("'%s' is not %s" % (self._origin, self._type_str))
-
-        @property
-        def _type_str(self):
-            if self.TYPE_STR is None:
-                self.TYPE_STR = str(self.__class__.__name__).split('.')[-1]
-            return self.TYPE_STR
-
-        def _format(self):
-            raise NotImplementedError
-
-        def __str__(self):
-            return str(self._origin)
-
-        def __hash__(self):
-            return self._origin.__hash__()
-
-        @property
-        def __cmp_value__(self):
-            return self._value
-
-        def __eq__(self, value):
-            if value is None:
-                return False
-            return self.__cmp_value__ == value.__cmp_value__
-
-        def __gt__(self, value):
-            if value is None:
-                return True
-            return self.__cmp_value__ > value.__cmp_value__
-
-        def __ge__(self, value):
-            if value is None:
-                return True
-            return self.__eq__(value) or self.__gt__(value)
-
-        def __lt__(self, value):
-            if value is None:
-                return False
-            return self.__cmp_value__ < value.__cmp_value__
-
-        def __le__(self, value):
-            if value is None:
-                return False
-            return self.__eq__(value) or self.__lt__(value)
-
-    class Moment(ConfigItemType):
-
-        def _format(self):
-            if self._origin:
-                if self._origin.upper() == 'DISABLE':
-                    self._value = 0
-                else:
-                    r = re.match('^(\d{1,2}):(\d{1,2})$', self._origin)
-                    h, m = r.groups()
-                    h, m = int(h), int(m)
-                    if 0 <= h <= 23 and 0 <= m <= 60:
-                        self._value = h * 60 + m
-                    else:
-                        raise Exception('Invalid Value')
-            else:
-                self._value = 0
-
-    class Time(ConfigItemType):
-
-        UNITS = {
-            'ns': 0.000000001,
-            'us': 0.000001,
-            'ms': 0.001,
-            's': 1,
-            'm': 60,
-            'h': 3600,
-            'd': 86400
-        }
-
-        def _format(self):
-            if self._origin:
-                self._origin = str(self._origin).strip()
-                if self._origin.isdigit():
-                    n = self._origin
-                    unit = self.UNITS['s']
-                else:
-                    r = re.match('^(\d+)(\w+)$', self._origin.lower())
-                    n, u = r.groups()
-                    unit = self.UNITS.get(u.lower())
-                if unit:
-                    self._value = int(n) * unit
-                else:
-                    raise Exception('Invalid Value')
-            else:
-                self._value = 0
-
-    class Capacity(ConfigItemType):
-
-        UNITS = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40, 'P': 1 << 50}
-
-        def _format(self):
-            if self._origin:
-                self._origin = str(self._origin).strip()
-                if self._origin.isdigit():
-                    n = self._origin
-                    unit = self.UNITS['M']
-                else:
-                    r = re.match('^(\d+)(\w)B?$', self._origin.upper())
-                    n, u = r.groups()
-                    unit = self.UNITS.get(u.upper())
-                if unit:
-                    self._value = int(n) * unit
-                else:
-                    raise Exception('Invalid Value')
-            else:
-                self._value = 0
-
-    class StringList(ConfigItemType):
-
-        def _format(self):
-            if self._origin:
-                self._origin = str(self._origin).strip()
-                self._value = self._origin.split(';')
-            else:
-                self._value = []
-
-    class Dict(ConfigItemType):
-
-        def _format(self):
-            if self._origin:
-                if not isinstance(self._origin, dict):
-                    raise Exception("Invalid Value")
-                self._value = self._origin
-            else:
-                self._value = self.value = {}
-
-    class List(ConfigItemType):
-
-        def _format(self):
-            if self._origin:
-                if not isinstance(self._origin, list):
-                    raise Exception("Invalid value: {} is not a list.".format(self._origin))
-                self._value = self._origin
-            else:
-                self._value = self.value = []
-
-    class StringOrKvList(ConfigItemType):
-
-        def _format(self):
-            if self._origin:
-                if not isinstance(self._origin, list):
-                    raise Exception("Invalid value: {} is not a list.".format(self._origin))
-                for item in self._origin:
-                    if not item:
-                        continue
-                    if not isinstance(item, (str, dict)):
-                        raise Exception("Invalid value: {} should be string or key-value format.".format(item))
-                    if isinstance(item, dict):
-                        if len(item.keys()) != 1:
-                            raise Exception("Invalid value: {} should be single key-value format".format(item))
-                self._value = self._origin
-            else:
-                self._value = self.value = []
-
-    class Double(ConfigItemType):
-
-        def _format(self):
-            self.value = self._value = float(self._origin) if self._origin else 0
-
-    class Boolean(ConfigItemType):
-
-        def _format(self):
-            if isinstance(self._origin, bool):
-                self._value = self._origin
-            else:
-                _origin = str(self._origin).lower()
-                if _origin == 'true':
-                    self._value = True
-                elif _origin == 'false':
-                    self._value = False
-                elif _origin.isdigit():
-                    self._value = bool(self._origin)
-                else:
-                    raise Exception('%s is not Boolean' % _origin)
-            self.value = self._value
-
-    class Integer(ConfigItemType):
-
-        def _format(self):
-            if self._origin is None:
-                self._value = 0
-                self._origin = 0
-            else:
-                _origin = str(self._origin)
-                try:
-                    self.value = self._value = int(_origin)
-                except:
-                    raise Exception('%s is not Integer' % _origin)
-
-    class String(ConfigItemType):
-
-        def _format(self):
-            self.value = self._value = str(self._origin) if self._origin else ''
     class ConfigItem(object):

         def __init__(
@@ -667,17 +452,18 @@ class ParamPlugin(Plugin):
         if self._src_data is None:
             try:
                 TYPES = {
-                    'DOUBLE': ParamPlugin.Double,
-                    'BOOL': ParamPlugin.Boolean,
-                    'INT': ParamPlugin.Integer,
-                    'STRING': ParamPlugin.String,
-                    'MOMENT': ParamPlugin.Moment,
-                    'TIME': ParamPlugin.Time,
-                    'CAPACITY': ParamPlugin.Capacity,
-                    'STRING_LIST': ParamPlugin.StringList,
-                    'DICT': ParamPlugin.Dict,
-                    'LIST': ParamPlugin.List,
-                    'PARAM_LIST': ParamPlugin.StringOrKvList
+                    'DOUBLE': Double,
+                    'BOOL': Boolean,
+                    'INT': Integer,
+                    'STRING': String,
+                    'MOMENT': Moment,
+                    'TIME': Time,
+                    'CAPACITY': Capacity,
+                    'CAPACITY_MB': CapacityMB,
+                    'STRING_LIST': StringList,
+                    'DICT': Dict,
+                    'LIST': List,
+                    'PARAM_LIST': StringOrKvList
                 }
                 self._src_data = {}
                 with open(self.def_param_yaml_path, 'rb') as f:
@@ -688,7 +474,7 @@ class ParamPlugin(Plugin):
                         if param_type in TYPES:
                             param_type = TYPES[param_type]
                         else:
-                            param_type = ParamPlugin.String
+                            param_type = String
                         self._src_data[conf['name']] = ParamPlugin.ConfigItem(
                             name=conf['name'],
......
@@ -232,7 +232,7 @@ class ParallerExtractor(object):
                 pool.close()
                 pool = None
             except:
-                self.stdio and getattr(self.stdio, 'exception', print)()
+                self.stdio and getattr(self.stdio, 'exception', print)('')
             finally:
                 pool and pool.close()
         return False
......
@@ -379,6 +379,7 @@ class IO(object):
     ):
         self.level = level
         self.msg_lv = msg_lv
+        self.default_confirm = False
         self._log_path = None
         self._trace_id = None
         self._log_name = 'default'
@@ -672,6 +673,8 @@ class IO(object):
     def confirm(self, msg):
         msg = '%s [y/n]: ' % msg
         self.print(msg, end='')
+        if self.default_confirm:
+            return True
         if self._input_is_tty:
             while True:
                 try:
@@ -748,7 +751,7 @@ class IO(object):
         self._print(MsgLevel.VERBOSE, '%s %s' % (self._verbose_prefix, msg), *args, **kwargs)

     if sys.version_info.major == 2:
-        def exception(self, msg, *args, **kwargs):
+        def exception(self, msg='', *args, **kwargs):
             import linecache
             exception_msg = []
             ei = sys.exc_info()
@@ -780,7 +783,7 @@ class IO(object):
             msg and self.error(msg)
             print_stack('\n'.join(exception_msg))
     else:
-        def exception(self, msg, *args, **kwargs):
+        def exception(self, msg='', *args, **kwargs):
             ei = sys.exc_info()
             traceback_e = traceback.TracebackException(type(ei[1]), ei[1], ei[2], limit=None)
             pre_stach = traceback.extract_stack()[self.track_limit:-2]
......
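
Together with the ROOT_IO.default_confirm assignment in _cmd.py and the new IO_DEFAULT_CONFIRM key in const.py, confirm() can now short-circuit before ever reading stdin. A small sketch (illustrative only; IO as defined in _stdio.py, prompt text hypothetical):

io = IO(1)
io.default_confirm = True   # what ROOT_IO receives when the env store holds IO_DEFAULT_CONFIRM=1
# Prints the '[y/n]' prompt and immediately returns True instead of blocking on input:
assert io.confirm('Are you sure to destroy the "demo" cluster and rebuild it?')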
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.

from __future__ import absolute_import, division, print_function

import re

__all__ = ("Moment", "Time", "Capacity", "CapacityMB", "StringList", "Dict", "List", "StringOrKvList", "Double", "Boolean", "Integer", "String")


class Null(object):

    def __init__(self):
        pass


class ConfigItemType(object):

    TYPE_STR = None
    NULL = Null()

    def __init__(self, s):
        try:
            self._origin = s
            self._value = 0
            self.value = self.NULL
            self._format()
            if self.value == self.NULL:
                self.value = self._origin
        except:
            raise Exception("'%s' is not %s" % (self._origin, self._type_str))

    @property
    def _type_str(self):
        if self.TYPE_STR is None:
            self.TYPE_STR = str(self.__class__.__name__).split('.')[-1]
        return self.TYPE_STR

    def _format(self):
        raise NotImplementedError

    def __str__(self):
        return str(self._origin)

    def __hash__(self):
        return self._origin.__hash__()

    @property
    def __cmp_value__(self):
        return self._value

    def __eq__(self, value):
        if value is None:
            return False
        return self.__cmp_value__ == value.__cmp_value__

    def __gt__(self, value):
        if value is None:
            return True
        return self.__cmp_value__ > value.__cmp_value__

    def __ge__(self, value):
        if value is None:
            return True
        return self.__eq__(value) or self.__gt__(value)

    def __lt__(self, value):
        if value is None:
            return False
        return self.__cmp_value__ < value.__cmp_value__

    def __le__(self, value):
        if value is None:
            return False
        return self.__eq__(value) or self.__lt__(value)


class Moment(ConfigItemType):

    def _format(self):
        if self._origin:
            if self._origin.upper() == 'DISABLE':
                self._value = 0
            else:
                r = re.match('^(\d{1,2}):(\d{1,2})$', self._origin)
                h, m = r.groups()
                h, m = int(h), int(m)
                if 0 <= h <= 23 and 0 <= m <= 60:
                    self._value = h * 60 + m
                else:
                    raise Exception('Invalid Value')
        else:
            self._value = 0


class Time(ConfigItemType):

    UNITS = {
        'ns': 0.000000001,
        'us': 0.000001,
        'ms': 0.001,
        's': 1,
        'm': 60,
        'h': 3600,
        'd': 86400
    }

    def _format(self):
        if self._origin:
            self._origin = str(self._origin).strip()
            if self._origin.isdigit():
                n = self._origin
                unit = self.UNITS['s']
            else:
                r = re.match('^(\d+)(\w+)$', self._origin.lower())
                n, u = r.groups()
                unit = self.UNITS.get(u.lower())
            if unit:
                self._value = int(n) * unit
            else:
                raise Exception('Invalid Value')
        else:
            self._value = 0


class Capacity(ConfigItemType):

    UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, 'P': 1 << 50}

    def _format(self):
        if self._origin:
            self._origin = str(self._origin).strip()
            if self._origin.isdigit():
                n = self._origin
                unit = self.UNITS['M']
            else:
                r = re.match('^(\d+)(\w)B?$', self._origin.upper())
                n, u = r.groups()
                unit = self.UNITS.get(u.upper())
            if unit:
                self._value = int(n) * unit
            else:
                raise Exception('Invalid Value')
        else:
            self._value = 0


class CapacityMB(Capacity):

    def _format(self):
        super(CapacityMB, self)._format()
        if isinstance(self._origin, str) and self._origin.isdigit():
            self.value = self._origin + 'M'
        if not self._origin:
            self.value = '0M'


class StringList(ConfigItemType):

    def _format(self):
        if self._origin:
            self._origin = str(self._origin).strip()
            self._value = self._origin.split(';')
        else:
            self._value = []


class Dict(ConfigItemType):

    def _format(self):
        if self._origin:
            if not isinstance(self._origin, dict):
                raise Exception("Invalid Value")
            self._value = self._origin
        else:
            self._value = self.value = {}


class List(ConfigItemType):

    def _format(self):
        if self._origin:
            if not isinstance(self._origin, list):
                raise Exception("Invalid value: {} is not a list.".format(self._origin))
            self._value = self._origin
        else:
            self._value = self.value = []


class StringOrKvList(ConfigItemType):

    def _format(self):
        if self._origin:
            if not isinstance(self._origin, list):
                raise Exception("Invalid value: {} is not a list.".format(self._origin))
            for item in self._origin:
                if not item:
                    continue
                if not isinstance(item, (str, dict)):
                    raise Exception("Invalid value: {} should be string or key-value format.".format(item))
                if isinstance(item, dict):
                    if len(item.keys()) != 1:
                        raise Exception("Invalid value: {} should be single key-value format".format(item))
            self._value = self._origin
        else:
            self._value = self.value = []


class Double(ConfigItemType):

    def _format(self):
        self.value = self._value = float(self._origin) if self._origin else 0


class Boolean(ConfigItemType):

    def _format(self):
        if isinstance(self._origin, bool):
            self._value = self._origin
        else:
            _origin = str(self._origin).lower()
            if _origin == 'true':
                self._value = True
            elif _origin == 'false':
                self._value = False
            elif _origin.isdigit():
                self._value = bool(self._origin)
            else:
                raise Exception('%s is not Boolean' % _origin)
        self.value = self._value


class Integer(ConfigItemType):

    def _format(self):
        if self._origin is None:
            self._value = 0
            self._origin = 0
        else:
            _origin = str(self._origin)
            try:
                self.value = self._value = int(_origin)
            except:
                raise Exception('%s is not Integer' % _origin)


class String(ConfigItemType):

    def _format(self):
        self.value = self._value = str(self._origin) if self._origin else ''
\ No newline at end of file
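
The parsing and comparison semantics above are easiest to see by example; a minimal sketch, assuming _types.py is importable (for instance, run from the repository root):

from _types import Capacity, CapacityMB, Time

print(Capacity('2G') > Capacity('512M'))   # True: comparisons use the normalized byte count
print(str(Capacity('20')))                 # '20': digit-only input is treated as megabytes internally
print(CapacityMB('20').value)              # '20M': CapacityMB appends the implied unit on output
print(CapacityMB('').value)                # '0M': empty input normalizes to zero megabytes
print(Time('90s')._value)                  # 90: times normalize to seconds (internal field)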
@@ -2284,13 +2284,16 @@ class ObdHome(object):
             self._call_stdio('stop_loading', 'succeed')
         return False

-    def redeploy_cluster(self, name, search_repo=True):
+    def redeploy_cluster(self, name, search_repo=True, need_confirm=False):
         self._call_stdio('verbose', 'Get Deploy by name')
         deploy = self.deploy_manager.get_deploy_config(name)
         self.set_deploy(deploy)
         if not deploy:
             self._call_stdio('error', 'No such deploy: %s.' % name)
             return False
+        if need_confirm and not self._call_stdio('confirm', 'Are you sure to destroy the "%s" cluster and rebuild it?' % name):
+            return False
+
         deploy_info = deploy.deploy_info
         self._call_stdio('verbose', 'Get deploy configuration')
@@ -2630,9 +2633,9 @@ class ObdHome(object):
         route = []
         use_images = []
-        upgrade_route_plugins = self.search_py_script_plugin([current_repository], 'upgrade_route', no_found_act='warn')
-        if current_repository in upgrade_route_plugins:
-            ret = self.call_plugin(upgrade_route_plugins[current_repository], current_repository , current_repository=current_repository, dest_repository=dest_repository)
+        upgrade_route_plugins = self.search_py_script_plugin([dest_repository], 'upgrade_route', no_found_act='warn')
+        if dest_repository in upgrade_route_plugins:
+            ret = self.call_plugin(upgrade_route_plugins[dest_repository], current_repository , current_repository=current_repository, dest_repository=dest_repository)
             route = ret.get_return('route')
         if not route:
             return False
@@ -2742,9 +2745,6 @@ class ObdHome(object):
         if not install_plugins:
             return False

-        if not self.install_repositories_to_servers(deploy_config, upgrade_repositories[1:], install_plugins, ssh_clients, self.options):
-            return False
-
         script_query_timeout = getattr(self.options, 'script_query_timeout', '')
         n = len(upgrade_repositories)
         while upgrade_ctx['index'] < n:
@@ -4006,7 +4006,7 @@ class ObdHome(object):
         cluster_config = deploy_config.components[component_name]
         if not cluster_config.servers:
-            self._call_stdio('error', '%s server list is empty' % allow_components[0])
+            self._call_stdio('error', '%s server list is empty' % allow_components)
             return False
         self._call_stdio('start_loading', 'Get local repositories and plugins')
         # Get the repository
@@ -4015,8 +4015,9 @@ class ObdHome(object):
         self._call_stdio('stop_loading', 'succeed')
         target_repository = None
         for repository in repositories:
-            if repository.name == allow_components[0]:
+            if repository.name == component_name:
                 target_repository = repository
+                break
         if gather_type in ['gather_plan_monitor']:
             setattr(opts, 'connect_cluster', True)
         obdiag_path = getattr(opts, 'obdiag_dir', None)
......
@@ -181,7 +181,7 @@ grafana:
     - prometheus
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -181,7 +181,7 @@ grafana:
     - prometheus
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -223,7 +223,7 @@ grafana:
     - prometheus
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -28,8 +28,6 @@ oceanbase-ce:
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
     enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
     max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
-    skip_proxy_sys_private_check: true
-    enable_strict_kernel_release: false
     # root_password: # root user password
     # In this example , support multiple ob process in single node, so different process use different ports.
     # If deploy ob cluster in multiple nodes, the port and path setting can be same.
......
@@ -210,7 +210,7 @@ grafana:
     - prometheus
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -10,7 +10,7 @@ grafana:
     - 192.168.1.5
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -77,7 +77,7 @@ grafana:
     - prometheus
   global:
     home_path: /root/grafana
-    login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
+    login_password: xxxxxxxxx # Grafana login password.
     # data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
     # logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
     # plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
......
@@ -23,8 +23,10 @@ oceanbase-ce:
     # please set memory limit to a suitable value which is matching resource.
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
-    datafile_size: 20G # Size of the data file.
-    log_disk_size: 15G # The size of disk space used by the clog files.
+    datafile_size: 2G # Size of the data file.
+    datafile_next: 2G # the auto extend step. Please enter a capacity, such as 2G
+    datafile_maxsize: 20G # the auto extend max size. Please enter a capacity, such as 20G
+    log_disk_size: 13G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
@@ -23,8 +23,10 @@ oceanbase-ce:
     # please set memory limit to a suitable value which is matching resource.
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
-    datafile_size: 20G # Size of the data file.
-    log_disk_size: 15G # The size of disk space used by the clog files.
+    datafile_size: 2G # Size of the data file.
+    datafile_next: 2G # the auto extend step. Please enter a capacity, such as 2G
+    datafile_maxsize: 20G # the auto extend max size. Please enter a capacity, such as 20G
+    log_disk_size: 13G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
@@ -20,8 +20,10 @@ oceanbase-ce:
     # please set memory limit to a suitable value which is matching resource.
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
-    datafile_size: 20G # Size of the data file.
-    log_disk_size: 15G # The size of disk space used by the clog files.
+    datafile_size: 2G # Size of the data file.
+    datafile_next: 2G # the auto extend step. Please enter a capacity, such as 2G
+    datafile_maxsize: 20G # the auto extend max size. Please enter a capacity, such as 20G
+    log_disk_size: 13G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
@@ -27,8 +27,10 @@ oceanbase-ce:
     # please set memory limit to a suitable value which is matching resource.
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
-    datafile_size: 20G # Size of the data file.
-    log_disk_size: 15G # The size of disk space used by the clog files.
+    datafile_size: 2G # Size of the data file.
+    datafile_next: 2G # the auto extend step. Please enter a capacity, such as 2G
+    datafile_maxsize: 20G # the auto extend max size. Please enter a capacity, such as 20G
+    log_disk_size: 13G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
@@ -27,8 +27,10 @@ oceanbase-ce:
     # please set memory limit to a suitable value which is matching resource.
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
-    datafile_size: 20G # Size of the data file.
-    log_disk_size: 15G # The size of disk space used by the clog files.
+    datafile_size: 2G # Size of the data file.
+    datafile_next: 2G # the auto extend step. Please enter a capacity, such as 2G
+    datafile_maxsize: 20G # the auto extend max size. Please enter a capacity, such as 20G
+    log_disk_size: 13G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
......
@@ -22,10 +22,11 @@ test:
       value: 'true'
       optimizer: tenant
     - name: memory_chunk_cache_size
-      value: '0'
-      value_type: STRING
+      value: '0M'
+      value_type: CAPACITY_MB
     - name: syslog_io_bandwidth_limit
       value: 30m
+      value_type: CAPACITY_MB
     - name: enable_async_syslog
       value: 'true'
     - name: large_query_worker_percentage
@@ -55,6 +56,7 @@ test:
       value: 1m
     - name: cache_wash_threshold
       value: 10G
+      value_type: CAPACITY_MB
     - name: plan_cache_evict_interval
       value: 30s
     - name: bf_cache_miss_count_threshold
......
@@ -25,10 +25,11 @@ build:
       value: 'true'
       optimizer: tenant
     - name: memory_chunk_cache_size
-      value: '0'
-      value_type: STRING
+      value: '0M'
+      value_type: CAPACITY_MB
     - name: syslog_io_bandwidth_limit
       value: 30m
+      value_type: CAPACITY_MB
     - name: enable_async_syslog
       value: 'true'
     - name: large_query_worker_percentage
@@ -58,6 +59,7 @@ build:
       value: 1m
     - name: cache_wash_threshold
       value: 10G
+      value_type: CAPACITY_MB
     - name: plan_cache_evict_interval
       value: 30s
     - name: bf_cache_miss_count_threshold
......
@@ -26,9 +26,11 @@ test:
       value: false
       value_type: BOOL
     - name: memory_chunk_cache_size
-      value: 0
+      value: 0M
+      value_type: CAPACITY_MB
     - name: cache_wash_threshold
       value: 30g
+      value_type: CAPACITY_MB
     - name: ob_enable_batched_multi_statement
       value: true
       optimizer: tenant
@@ -47,6 +49,7 @@ test:
       value: 4
     - name: syslog_io_bandwidth_limit
       value: 30m
+      value_type: CAPACITY_MB
     - name: enable_async_syslog
       value: true
     - name: large_query_worker_percentage
......
test:
  system_config:
    - name: enable_sql_audit
      value: 'false'
    - name: sleep
      value: 3
      optimizer: sleep
    - name: syslog_level
      value: 'ERROR'
    - name: enable_perf_event
      value: false
      value_type: BOOL
    - name: _enable_newsort
      value: 'false'
    - name: _enable_adaptive_compaction
      value: 'false'
      optimizer: tenant
    - name: enable_record_trace_log
      value: 'false'
\ No newline at end of file
build:
  variables:
    - name: ob_query_timeout
      value: 36000000000
    - name: ob_trx_timeout
      value: 36000000000
  system_config:
    - name: enable_sql_audit
      value: 'false'
    - name: sleep
      value: 5
      optimizer: sleep
    - name: syslog_level
      value: 'ERROR'
    - name: enable_perf_event
      value: false
      value_type: BOOL
    - name: enable_record_trace_log
      value: 'false'
    - name: _enable_defensive_check
      value: false
    - name: default_auto_increment_mode
      value: 'NOORDER'
      optimizer: tenant
    - name: _rowsets_enabled
      value: false
      optimizer: tenant
    - name: freeze_trigger_percentage
      value: 40
      optimizer: tenant
\ No newline at end of file
@@ -22,10 +22,11 @@ test:
       value: 'true'
       optimizer: tenant
     - name: memory_chunk_cache_size
-      value: '0'
-      value_type: STRING
+      value: '0M'
+      value_type: CAPACITY_MB
     - name: syslog_io_bandwidth_limit
       value: 30m
+      value_type: CAPACITY_MB
     - name: enable_async_syslog
       value: 'true'
     - name: large_query_worker_percentage
@@ -55,6 +56,7 @@ test:
       value: 1m
     - name: cache_wash_threshold
       value: 10G
+      value_type: CAPACITY_MB
     - name: plan_cache_evict_interval
       value: 30s
     - name: bf_cache_miss_count_threshold
......
@@ -25,10 +25,11 @@ build:
       value: 'true'
       optimizer: tenant
     - name: memory_chunk_cache_size
-      value: '0'
-      value_type: STRING
+      value: '0M'
+      value_type: CAPACITY_MB
     - name: syslog_io_bandwidth_limit
       value: 30m
+      value_type: CAPACITY_MB
     - name: enable_async_syslog
       value: 'true'
     - name: large_query_worker_percentage
@@ -58,6 +59,7 @@ build:
       value: 1m
     - name: cache_wash_threshold
       value: 10G
+      value_type: CAPACITY_MB
     - name: plan_cache_evict_interval
       value: 30s
     - name: bf_cache_miss_count_threshold
......
@@ -26,9 +26,10 @@ test:
       value: false
       value_type: BOOL
     - name: memory_chunk_cache_size
-      value: 0
+      value: 0M
     - name: cache_wash_threshold
       value: 30g
+      value_type: CAPACITY_MB
     - name: ob_enable_batched_multi_statement
       value: true
       optimizer: tenant
@@ -47,6 +48,7 @@ test:
       value: 4
     - name: syslog_io_bandwidth_limit
       value: 30m
+      value_type: CAPACITY_MB
     - name: enable_async_syslog
       value: true
     - name: large_query_worker_percentage
......
test:
  system_config:
    - name: enable_sql_audit
      value: 'false'
    - name: sleep
      value: 3
      optimizer: sleep
    - name: syslog_level
      value: 'ERROR'
    - name: enable_perf_event
      value: false
      value_type: BOOL
    - name: _enable_newsort
      value: 'false'
    - name: _enable_adaptive_compaction
      value: 'false'
      optimizer: tenant
    - name: enable_record_trace_log
      value: 'false'
\ No newline at end of file
build:
  variables:
    - name: ob_query_timeout
      value: 36000000000
    - name: ob_trx_timeout
      value: 36000000000
  system_config:
    - name: enable_sql_audit
      value: 'false'
    - name: sleep
      value: 5
      optimizer: sleep
    - name: syslog_level
      value: 'ERROR'
    - name: enable_perf_event
      value: false
      value_type: BOOL
    - name: enable_record_trace_log
      value: 'false'
    - name: _enable_defensive_check
      value: false
    - name: default_auto_increment_mode
      value: 'NOORDER'
      optimizer: tenant
    - name: _rowsets_enabled
      value: false
      optimizer: tenant
    - name: freeze_trigger_percentage
      value: 40
      optimizer: tenant
\ No newline at end of file
@@ -23,6 +23,7 @@ from __future__ import absolute_import, division, print_function
 import re
 import time
 from copy import deepcopy
+from _types import *

 from _stdio import SafeStdio
@@ -32,179 +33,6 @@ VARIABLES = 'variables'
 SYSTEM_CONFIG = 'system_config'

-class OptimizeItem(object):
-
-    class OptimizeItemType(object):
-
-        TYPE_STR = None
-
-        def __init__(self, s):
-            try:
-                self._origin = s
-                self._value = 0
-                self._format()
-            except:
-                raise Exception("'%s' is not %s" % (self._origin, self._type_str))
-
-        @property
-        def _type_str(self):
-            if self.TYPE_STR is None:
-                self.TYPE_STR = str(self.__class__.__name__).split('.')[-1]
-            return self.TYPE_STR
-
-        def _format(self):
-            raise NotImplementedError
-
-        def __str__(self):
-            return str(self._origin)
-
-        def __repr__(self):
-            return self.__str__()
-
-        def __hash__(self):
-            return self._origin.__hash__()
-
-        @property
-        def __cmp_value__(self):
-            return self._value
-
-        def __eq__(self, value):
-            if value is None:
-                return False
-            return self.__cmp_value__ == value.__cmp_value__
-
-        def __gt__(self, value):
-            if value is None:
-                return True
-            return self.__cmp_value__ > value.__cmp_value__
-
-        def __ge__(self, value):
-            if value is None:
-                return True
-            return self.__eq__(value) or self.__gt__(value)
-
-        def __lt__(self, value):
-            if value is None:
-                return False
-            return self.__cmp_value__ < value.__cmp_value__
-
-        def __le__(self, value):
-            if value is None:
-                return False
-            return self.__eq__(value) or self.__lt__(value)
-
-    class Moment(OptimizeItemType):
-
-        def _format(self):
-            if self._origin:
-                if self._origin.upper() == 'DISABLE':
-                    self._value = 0
-                else:
-                    r = re.match('^(\d{1,2}):(\d{1,2})$', self._origin)
-                    h, m = r.groups()
-                    h, m = int(h), int(m)
-                    if 0 <= h <= 23 and 0 <= m <= 60:
-                        self._value = h * 60 + m
-                    else:
-                        raise Exception('Invalid Value')
-            else:
-                self._value = 0
-
-    class Time(OptimizeItemType):
-
-        UNITS = {
-            'ns': 0.000000001,
-            'us': 0.000001,
-            'ms': 0.001,
-            's': 1,
-            'm': 60,
-            'h': 3600,
-            'd': 86400
-        }
-
-        def _format(self):
-            if self._origin:
-                self._origin = str(self._origin).strip()
-                if self._origin.isdigit():
-                    n = self._origin
-                    unit = self.UNITS['s']
-                else:
-                    r = re.match('^(\d+)(\w+)$', self._origin.lower())
-                    n, u = r.groups()
-                    unit = self.UNITS.get(u.lower())
-                if unit:
-                    self._value = int(n) * unit
-                else:
-                    raise Exception('Invalid Value')
-            else:
-                self._value = 0
-
-    class Capacity(OptimizeItemType):
-
-        UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, 'P': 1 << 50}
-
-        def _format(self):
-            if self._origin:
-                self._origin = str(self._origin).strip()
-                if self._origin.isdigit():
-                    n = self._origin
-                    unit = self.UNITS['M']
-                else:
-                    r = re.match('^(\d+)(\w)B?$', self._origin.upper())
-                    n, u = r.groups()
-                    unit = self.UNITS.get(u.upper())
-                if unit:
-                    self._value = int(n) * unit
-                else:
-                    raise Exception('Invalid Value')
-            else:
-                self._value = 0
-
-    class StringList(OptimizeItemType):
-
-        def _format(self):
-            if self._origin:
-                self._origin = str(self._origin).strip()
-                self._value = self._origin.split(';')
-            else:
-                self._value = []
-
-    class Double(OptimizeItemType):
-
-        def _format(self):
-            self._value = float(self._origin) if self._origin else 0
-
-    class Boolean(OptimizeItemType):
-
-        def _format(self):
-            if isinstance(self._origin, bool):
-                self._value = self._origin
-            else:
-                _origin = str(self._origin).lower()
-                if _origin == 'true':
-                    self._value = True
-                elif _origin == 'false':
-                    self._value = False
-                elif _origin.isdigit():
-                    self._value = bool(self._origin)
-                else:
-                    raise Exception('%s is not Boolean' % _origin)
-
-    class Integer(OptimizeItemType):
-
-        def _format(self):
-            if self._origin is None:
-                self._value = 0
-                self._origin = 0
-            else:
-                _origin = str(self._origin)
-                try:
-                    self.value = self._value = int(_origin)
-                except:
-                    raise Exception('%s is not Integer' % _origin)
-
-    class String(OptimizeItemType):
-
-        def _format(self):
-            self._value = str(self._origin) if self._origin else ''
-

 class SqlFile(object):

     def __init__(self, path, entrance, sys=False, **kwargs):
@@ -245,14 +73,15 @@ class SqlFile(object):

 class Variable(object):

     TYPES = {
-        'DOUBLE': OptimizeItem.Double,
-        'BOOL': OptimizeItem.Boolean,
-        'INT': OptimizeItem.Integer,
-        'STRING': OptimizeItem.String,
-        'MOMENT': OptimizeItem.Moment,
-        'TIME': OptimizeItem.Time,
-        'CAPACITY': OptimizeItem.Capacity,
-        'STRING_LIST': OptimizeItem.StringList
+        'DOUBLE': Double,
+        'BOOL': Boolean,
+        'INT': Integer,
+        'STRING': String,
+        'MOMENT': Moment,
+        'TIME': Time,
+        'CAPACITY': Capacity,
+        'CAPACITY_MB': CapacityMB,
+        'STRING_LIST': StringList
     }

     def __init__(self, value, entrance, name=None, value_type=None, condition="lambda n, o: n != o",
......
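
The CAPACITY_MB rows added to the optimization profiles above and below resolve through this table to the CapacityMB type from _types. A hypothetical lookup to show the effect:

cast = Variable.TYPES['CAPACITY_MB']   # -> CapacityMB
print(cast('30').value)                # '30M': bare digits are normalized to megabytes
print(cast('10G') > cast('512M'))      # True: ordering compares the underlying byte counts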
@@ -38,7 +38,6 @@ shell_command_map = {
     "cpu_logical_cores": 'cat /proc/cpuinfo | grep "processor" | wc -l',
     "cpu_model_name": 'cat /proc/cpuinfo | grep name | cut -f2 -d: | uniq',
     "cpu_frequency": 'cat /proc/cpuinfo | grep MHz | cut -f2 -d: | uniq',
-    "cpu_flags": 'cat /proc/cpuinfo | grep flags | cut -f2 -d: | uniq',
     "memory_total": 'cat /proc/meminfo | grep MemTotal | cut -f2 -d: | uniq',
     "memory_free": 'cat /proc/meminfo | grep MemFree | cut -f2 -d: | uniq',
     "memory_avaiable": 'cat /proc/meminfo | grep MemAvailable | cut -f2 -d: | uniq',
@@ -121,11 +120,6 @@ class CpuInfo:
     def cpu_frequency(*args, **kwargs):
         return kwargs["bash_result"]

-    @staticmethod
-    @shell_command
-    def cpu_flags(*args, **kwargs):
-        return kwargs["bash_result"]
-

 class MemInfo:
     @staticmethod
@@ -237,7 +231,6 @@ def telemetry_machine_data(data):
     _hosts['cpu']['logicalCores'] = CpuInfo.cpu_logical_cores()
     _hosts['cpu']['modelName'] = CpuInfo.cpu_model_name()
     _hosts['cpu']['frequency'] = CpuInfo.cpu_frequency()
-    _hosts['cpu']['flags'] = CpuInfo.cpu_flags()
     _hosts['memory']['total'] = MemInfo.memory_total()
     _hosts['memory']['free'] = MemInfo.memory_free()
......
...@@ -216,7 +216,7 @@ ...@@ -216,7 +216,7 @@
"targets": [ "targets": [
{ {
"exemplar": true, "exemplar": true,
"expr": "(sum(rate(ob_sysstat{stat_id=\"40003\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40001\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))\n/\n(sum(rate(ob_sysstat{stat_id=\"40002\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40004\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40008\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40000\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))", "expr": "(sum(rate(ob_sysstat{stat_id=\"40003\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40007\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40001\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))\n/\n(sum(rate(ob_sysstat{stat_id=\"40002\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40004\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40008\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40000\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) 
by ($group))",
"interval": "", "interval": "",
"legendFormat": "sql latency {{$group}}", "legendFormat": "sql latency {{$group}}",
"refId": "A" "refId": "A"
......
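The dashboard fix above replaces a duplicated `stat_id=\"40009\"` term with `40007`: the old SQL-latency expression summed the elapsed-time counter for stat 40009 twice and never included 40007, so the numerator over-counted one query class. Because the expression is ten near-identical terms, it is easy to fat-finger; below is a hedged sketch of generating it from (time, count) stat-id pairs instead. The pairing of ids is an assumption read off the hunk, and `SELECTOR` simply reuses the dashboard's label matchers.

```python
# Sketch: build the SQL-latency ratio from (elapsed-time, request-count)
# stat-id pairs so a duplicated id cannot slip in by hand-editing.

SELECTOR = ('ob_cluster_name=~"$obcluster",obzone=~"$obzone",'
            'svr_ip=~"$observer",tenant_name=~"$tenant_name"')

STAT_PAIRS = [(40001, 40000), (40003, 40002), (40005, 40004),
              (40007, 40006), (40009, 40008)]  # (time_stat, count_stat)

def term(stat_id):
    return ('sum(rate(ob_sysstat{stat_id="%d",%s}[$__rate_interval])) '
            'by ($group)' % (stat_id, SELECTOR))

def sql_latency_expr():
    numerator = ' + '.join(term(t) for t, _ in STAT_PAIRS)
    denominator = ' + '.join(term(c) for _, c in STAT_PAIRS)
    return '(%s)\n/\n(%s)' % (numerator, denominator)

if __name__ == '__main__':
    print(sql_latency_expr())
```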
...@@ -37,7 +37,7 @@ def call_plugin(plugin, plugin_context, repositories, *args, **kwargs): ...@@ -37,7 +37,7 @@ def call_plugin(plugin, plugin_context, repositories, *args, **kwargs):
stdio, *args, **kwargs) stdio, *args, **kwargs)
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs): def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
cluster_config = plugin_context.cluster_config cluster_config = plugin_context.cluster_config
clients = plugin_context.clients clients = plugin_context.clients
...@@ -57,7 +57,8 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, ...@@ -57,7 +57,8 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository) apply_param_plugin(cur_repository)
if not call_plugin(stop_plugin, plugin_context, [cur_repository], *args, **kwargs): if not call_plugin(stop_plugin, plugin_context, [cur_repository], *args, **kwargs):
return return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
apply_param_plugin(dest_repository) apply_param_plugin(dest_repository)
if not call_plugin(start_plugin, plugin_context, [dest_repository], *args, **kwargs): if not call_plugin(start_plugin, plugin_context, [dest_repository], *args, **kwargs):
......
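The recurring change in the upgrade plugins threads a new `install_repository_to_servers` callable into `upgrade()` and invokes it after the old component is stopped but before the new parameters are applied and the component restarts, so the target binaries are guaranteed to be on every host when `start` runs. A minimal sketch of that ordering, assuming the plugin-style callables shown in the hunk (the implementations behind them are OBD internals and are not reproduced here):

```python
# Minimal sketch of the new upgrade ordering: stop -> install -> reconfigure -> start.

def upgrade(plugin_context, call_plugin, stop_plugin, start_plugin,
            apply_param_plugin, install_repository_to_servers,
            cur_repository, dest_repository, *args, **kwargs):
    cluster_config = plugin_context.cluster_config
    clients = plugin_context.clients

    apply_param_plugin(cur_repository)        # old defaults while stopping
    if not call_plugin(stop_plugin, plugin_context, [cur_repository], *args, **kwargs):
        return                                # abort: old process still running
    # new in this commit: push the target binaries before any start attempt
    install_repository_to_servers(cluster_config.name, cluster_config,
                                  dest_repository, clients)
    apply_param_plugin(dest_repository)       # new defaults for the restart
    return call_plugin(start_plugin, plugin_context, [dest_repository], *args, **kwargs)
```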
...@@ -37,7 +37,7 @@ def call_plugin(plugin, plugin_context, repositories, *args, **kwargs): ...@@ -37,7 +37,7 @@ def call_plugin(plugin, plugin_context, repositories, *args, **kwargs):
stdio, *args, **kwargs) stdio, *args, **kwargs)
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs): def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
def summit_config(): def summit_config():
generate_global_config = generate_configs['global'] generate_global_config = generate_configs['global']
...@@ -71,6 +71,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, ...@@ -71,6 +71,7 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository) apply_param_plugin(cur_repository)
if not call_plugin(stop_plugin, plugin_context, repositories=[cur_repository], *args, **kwargs): if not call_plugin(stop_plugin, plugin_context, repositories=[cur_repository], *args, **kwargs):
return return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
# clean useless config # clean useless config
clean_files = [ clean_files = [
"conf/config_properties/monagent_basic_auth.yaml", "conf/config_properties/monagent_basic_auth.yaml",
......
...@@ -242,7 +242,7 @@ def start(plugin_context, need_bootstrap=False, *args, **kwargs): ...@@ -242,7 +242,7 @@ def start(plugin_context, need_bootstrap=False, *args, **kwargs):
stdio.start_loading('obproxy program health check') stdio.start_loading('obproxy program health check')
failed = [] failed = []
servers = cluster_config.servers servers = cluster_config.servers
count = 20 count = 300
while servers and count: while servers and count:
count -= 1 count -= 1
tmp_servers = [] tmp_servers = []
......
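Raising `count` from 20 to 300 gives the obproxy health check up to 300 polling rounds before it reports failure, which matters on slow hosts where the proxy needs longer to come up. The loop shape is the usual bounded retry; a sketch with a caller-supplied probe (`is_alive` and the 1-second pause are assumptions, not OBD's exact code):

```python
import time

def health_check(servers, is_alive, max_rounds=300, interval=1):
    """Poll until every server reports alive or the round budget runs out."""
    count = max_rounds
    while servers and count:
        count -= 1
        servers = [s for s in servers if not is_alive(s)]  # drop healthy ones
        if servers and count:
            time.sleep(interval)
    return not servers, servers  # (all healthy?, servers that never came up)
```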
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs): def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
namespace = plugin_context.namespace namespace = plugin_context.namespace
namespaces = plugin_context.namespaces namespaces = plugin_context.namespaces
deploy_name = plugin_context.deploy_name deploy_name = plugin_context.deploy_name
...@@ -53,8 +53,8 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, ...@@ -53,8 +53,8 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository) apply_param_plugin(cur_repository)
if not stop_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs): if not stop_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs):
return return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
apply_param_plugin(dest_repository) apply_param_plugin(dest_repository)
if not start_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, need_bootstrap=True, *args, **kwargs): if not start_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, need_bootstrap=True, *args, **kwargs):
return return
......
...@@ -134,8 +134,9 @@ def connect(plugin_context, target_server=None, *args, **kwargs): ...@@ -134,8 +134,9 @@ def connect(plugin_context, target_server=None, *args, **kwargs):
server_config = cluster_config.get_server_conf(server) server_config = cluster_config.get_server_conf(server)
password = server_config.get('root_password', '') if count % 2 else '' password = server_config.get('root_password', '') if count % 2 else ''
cursor = Cursor(ip=server.ip, port=server_config['mysql_port'], tenant='', password=password if password is not None else '', stdio=stdio) cursor = Cursor(ip=server.ip, port=server_config['mysql_port'], tenant='', password=password if password is not None else '', stdio=stdio)
stdio.stop_loading('succeed') if cursor.execute('select 1', raise_exception=True):
return plugin_context.return_true(connect=cursor.db, cursor=cursor, server=server) stdio.stop_loading('succeed')
return plugin_context.return_true(connect=cursor.db, cursor=cursor, server=server)
except: except:
if count == 0: if count == 0:
stdio.exception('') stdio.exception('')
......
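The connect plugin above no longer returns as soon as a `Cursor` object is constructed; it first runs `select 1` and only reports success when the probe answers, falling back into the retry loop otherwise. A sketch of the pattern, with the hypothetical `make_cursor` standing in for the `Cursor(...)` construction:

```python
# Probe-before-return: constructing a client can succeed while the server
# is not yet serving SQL, so validate with a trivial query first.

def connect_with_probe(make_cursor, attempts=10, probe_sql='select 1'):
    last_exc = None
    for _ in range(attempts):
        try:
            cursor = make_cursor()
            if cursor.execute(probe_sql):  # only now is the link usable
                return cursor
        except Exception as exc:           # not up yet, try again
            last_exc = exc
    raise ConnectionError('server never answered %r' % probe_sql) from last_exc
```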
...@@ -96,12 +96,14 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T ...@@ -96,12 +96,14 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
def summit_config(): def summit_config():
generate_global_config = generate_configs['global'] generate_global_config = generate_configs['global']
for key in generate_global_config: for key in generate_global_config:
stdio.verbose('Update global config %s to %s' % (key, generate_global_config[key]))
cluster_config.update_global_conf(key, generate_global_config[key], False) cluster_config.update_global_conf(key, generate_global_config[key], False)
for server in cluster_config.servers: for server in cluster_config.servers:
if server not in generate_configs: if server not in generate_configs:
continue continue
generate_server_config = generate_configs[server] generate_server_config = generate_configs[server]
for key in generate_server_config: for key in generate_server_config:
stdio.verbose('Update server %s config %s to %s' % (server, key, generate_server_config[key]))
cluster_config.update_server_conf(server, key, generate_server_config[key], False) cluster_config.update_server_conf(server, key, generate_server_config[key], False)
clients = plugin_context.clients clients = plugin_context.clients
...@@ -145,7 +147,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T ...@@ -145,7 +147,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
ip = server.ip ip = server.ip
client = clients[server] client = clients[server]
server_config = cluster_config.get_server_conf_with_default(server) server_config = cluster_config.get_server_conf_with_default(server)
user_server_config = cluster_config.get_original_server_conf_with_global(server) user_server_config = cluster_config.get_original_server_conf_with_global(server, format_conf=True)
if user_server_config.get('devname') is None: if user_server_config.get('devname') is None:
if client.is_localhost(): if client.is_localhost():
......
...@@ -148,7 +148,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor ...@@ -148,7 +148,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
if server_memory_config[server]['system_memory']: if server_memory_config[server]['system_memory']:
memory_limit = server_memory_config[server]['num'] memory_limit = server_memory_config[server]['num']
if not memory_limit: if not memory_limit:
memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total'] memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total'] / 100
factor = 0.7 factor = 0.7
suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor) suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
......
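The small fix above matters: `percentage` is stored as a whole number (80 means 80%), so the old code computed a `memory_limit` one hundred times too large and the `system_memory` check compared against nonsense. For example:

```python
total = 32 * (1 << 30)   # 32 GiB physical memory reported by the host
percentage = 80          # stored as a whole number, meaning 80%

memory_limit_old = percentage * total        # pre-fix: 2560 GiB, nonsense
memory_limit_new = percentage * total / 100  # post-fix: 25.6 GiB
print(memory_limit_old / (1 << 30), memory_limit_new / (1 << 30))  # 2560.0 25.6
```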
...@@ -158,15 +158,15 @@ class ObVersionGraph(object): ...@@ -158,15 +158,15 @@ class ObVersionGraph(object):
res.insert(0, start_node) res.insert(0, start_node)
if res and res[-1].deprecated: if res and res[-1].deprecated:
raise Exception('upgrade destination version:{}{} is deprecated, not support upgrade.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else '')) raise Exception('upgrade destination version:{}{} is deprecated, not support upgrade.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else ''))
return format_route(res) return format_route(res, current_repository)
def format_route(routes): def format_route(routes, repository):
route_res = [] route_res = []
for node in routes: for node in routes:
require_from_binary = getattr(node, 'require_from_binary', False) require_from_binary = getattr(node, 'require_from_binary', False)
if node.when_come_from: if getattr(node, 'when_come_from', False):
require_from_binary = require_from_binary and routes[0].version in node.when_come_from require_from_binary = require_from_binary and (repository.version in node.when_come_from or '%s-%s' % (repository.version, repository.release.split('.')[0]) in node.when_come_from)
route_res.append({ route_res.append({
'version': node.version, 'version': node.version,
'release': None if node.release == VersionNode.RELEASE_NULL else node.release, 'release': None if node.release == VersionNode.RELEASE_NULL else node.release,
...@@ -180,17 +180,13 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, ** ...@@ -180,17 +180,13 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, **
stdio = plugin_context.stdio stdio = plugin_context.stdio
repository_dir = dest_repository.repository_dir repository_dir = dest_repository.repository_dir
if dest_repository.version > Version("4.1.0.0"):
stdio.error('upgrade observer to version {} is not support, please upgrade obd first.'.format(dest_repository.version))
return
if current_repository.version == dest_repository.version: if current_repository.version == dest_repository.version:
return plugin_context.return_true(route=format_route([current_repository, dest_repository])) return plugin_context.return_true(route=format_route([current_repository, dest_repository], current_repository))
upgrade_dep_name = 'etc/oceanbase_upgrade_dep.yml' upgrade_dep_name = 'etc/oceanbase_upgrade_dep.yml'
upgrade_dep_path = os.path.join(repository_dir, upgrade_dep_name) upgrade_dep_path = os.path.join(repository_dir, upgrade_dep_name)
if not os.path.isfile(upgrade_dep_path): if not os.path.isfile(upgrade_dep_path):
stdio.error('%s No such file: %s' % (dest_repository, upgrade_dep_name)) stdio.error('%s No such file: %s. \n No upgrade route available' % (dest_repository, upgrade_dep_name))
return return
version_dep = {} version_dep = {}
......
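`format_route` now takes the repository being upgraded from, and a node's `when_come_from` list is matched against both the bare version and a `version-release_major` string, so `require_from_binary` is decided by the actual source build rather than the first node of the computed route. A sketch of the matching rule (attribute names follow the hunk; the release string format is an assumption):

```python
# Matching rule for when_come_from. `repository` is assumed to expose
# .version (e.g. "4.0.0.0") and .release, whose leading dot-separated
# field is taken as the release major.

def requires_binary(node, repository):
    require = getattr(node, 'require_from_binary', False)
    when_come_from = getattr(node, 'when_come_from', False)
    if when_come_from:
        release_major = repository.release.split('.')[0]
        require = require and (
            repository.version in when_come_from
            or '%s-%s' % (repository.version, release_major) in when_come_from)
    return require
```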
...@@ -119,9 +119,10 @@ def bootstrap(plugin_context, cursor, *args, **kwargs): ...@@ -119,9 +119,10 @@ def bootstrap(plugin_context, cursor, *args, **kwargs):
has_ocp = True has_ocp = True
if has_ocp: if has_ocp:
global_conf_with_default = deepcopy(cluster_config.get_global_conf_with_default()) global_conf_with_default = deepcopy(cluster_config.get_global_conf_with_default())
original_global_conf = cluster_config.get_original_global_conf()
ocp_meta_tenant_prefix = 'ocp_meta_tenant_' ocp_meta_tenant_prefix = 'ocp_meta_tenant_'
for key in global_conf_with_default: for key in global_conf_with_default:
if key.startswith(ocp_meta_tenant_prefix): if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None):
global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key] global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key]
tenant_info = global_conf_with_default["ocp_meta_tenant"] tenant_info = global_conf_with_default["ocp_meta_tenant"]
tenant_info["variables"] = "ob_tcp_invited_nodes='%'" tenant_info["variables"] = "ob_tcp_invited_nodes='%'"
......
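The bootstrap change above consults `get_original_global_conf()` so that an `ocp_meta_tenant_*` key is folded into the `ocp_meta_tenant` spec only when the user actually set it; built-in defaults no longer silently overwrite the tenant definition. A sketch with plain dicts standing in for the cluster-config accessors:

```python
# Only user-supplied ocp_meta_tenant_* keys are merged into the tenant spec.

PREFIX = 'ocp_meta_tenant_'

def merge_meta_tenant(conf_with_default, original_conf):
    tenant = conf_with_default.setdefault('ocp_meta_tenant', {})
    for key, value in conf_with_default.items():
        if key.startswith(PREFIX) and original_conf.get(key) is not None:
            tenant[key.replace(PREFIX, '', 1)] = value
    return tenant

conf = {'ocp_meta_tenant': {}, 'ocp_meta_tenant_memory_size': '2G'}
assert merge_meta_tenant(conf, {}) == {}                 # default only: ignored
assert merge_meta_tenant(conf, {'ocp_meta_tenant_memory_size': '4G'}) == \
    {'memory_size': '2G'}                                # user-set: merged
```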
...@@ -104,12 +104,14 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T ...@@ -104,12 +104,14 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
def summit_config(): def summit_config():
generate_global_config = generate_configs['global'] generate_global_config = generate_configs['global']
for key in generate_global_config: for key in generate_global_config:
stdio.verbose('Update global config %s to %s' % (key, generate_global_config[key]))
cluster_config.update_global_conf(key, generate_global_config[key], False) cluster_config.update_global_conf(key, generate_global_config[key], False)
for server in cluster_config.servers: for server in cluster_config.servers:
if server not in generate_configs: if server not in generate_configs:
continue continue
generate_server_config = generate_configs[server] generate_server_config = generate_configs[server]
for key in generate_server_config: for key in generate_server_config:
stdio.verbose('Update server %s config %s to %s' % (server, key, generate_server_config[key]))
cluster_config.update_server_conf(server, key, generate_server_config[key], False) cluster_config.update_server_conf(server, key, generate_server_config[key], False)
clients = plugin_context.clients clients = plugin_context.clients
...@@ -147,7 +149,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T ...@@ -147,7 +149,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
ip = server.ip ip = server.ip
client = clients[server] client = clients[server]
server_config = cluster_config.get_server_conf_with_default(server) server_config = cluster_config.get_server_conf_with_default(server)
user_server_config = cluster_config.get_original_server_conf_with_global(server) user_server_config = cluster_config.get_original_server_conf_with_global(server, format_conf=True)
if user_server_config.get('devname') is None: if user_server_config.get('devname') is None:
if client.is_localhost(): if client.is_localhost():
...@@ -308,7 +310,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T ...@@ -308,7 +310,7 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
if not datafile_size: if not datafile_size:
datafile_disk_percentage = int(user_server_config.get('datafile_disk_percentage', 0)) datafile_disk_percentage = int(user_server_config.get('datafile_disk_percentage', 0))
if datafile_disk_percentage: if datafile_disk_percentage:
datafile_size = data_dir_mount['total'] * datafile_disk_percentage / 100 datafile_size = data_dir_disk['total'] * datafile_disk_percentage / 100
elif generate_config_mini: elif generate_config_mini:
datafile_size = MINI_DATA_FILE_SIZE datafile_size = MINI_DATA_FILE_SIZE
update_server_conf(server, 'datafile_size', format_size(datafile_size, 0)) update_server_conf(server, 'datafile_size', format_size(datafile_size, 0))
......
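Besides the extra verbose logging and the `format_conf=True` lookup, the hunk above fixes datafile sizing to read total capacity from `data_dir_disk` (the disk stats of the data directory) and keeps the whole-number percentage convention. The rule, sketched (`mini_size` is a hypothetical stand-in for `MINI_DATA_FILE_SIZE`):

```python
def datafile_size_bytes(disk_total, datafile_disk_percentage=0,
                        mini=False, mini_size=4 << 30):
    """A whole-number percentage of the data disk's total capacity wins;
    otherwise fall back to the mini-deployment constant."""
    if datafile_disk_percentage:
        return disk_total * datafile_disk_percentage // 100
    if mini:
        return mini_size
    return 0

assert datafile_size_bytes(1 << 40, 90) == (1 << 40) * 90 // 100
```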
...@@ -256,7 +256,7 @@ ...@@ -256,7 +256,7 @@
name_local: 数据文件大小 name_local: 数据文件大小
require: false require: false
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
default: 0 default: 0
min_value: 0M min_value: 0M
max_value: NULL max_value: NULL
...@@ -277,7 +277,7 @@ ...@@ -277,7 +277,7 @@
name_local: Redo 日志大小 name_local: Redo 日志大小
require: false require: false
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
default: 0 default: 0
min_value: 0M min_value: 0M
max_value: NULL max_value: NULL
...@@ -295,7 +295,7 @@ ...@@ -295,7 +295,7 @@
description_local: 合并时候数据列统计信息的采样率 description_local: 合并时候数据列统计信息的采样率
- name: cache_wash_threshold - name: cache_wash_threshold
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 4GB default: 4GB
min_value: 0B min_value: 0B
max_value: NULL max_value: NULL
...@@ -385,7 +385,7 @@ ...@@ -385,7 +385,7 @@
description_local: 系统可以使用的最小CPU配额,将会预留 description_local: 系统可以使用的最小CPU配额,将会预留
- name: memory_reserved - name: memory_reserved
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 500M default: 500M
min_value: 10M min_value: 10M
max_value: NULL max_value: NULL
...@@ -475,7 +475,7 @@ ...@@ -475,7 +475,7 @@
description_local: 升级模式开关。在升级模式中,会暂停部分系统后台功能。 description_local: 升级模式开关。在升级模式中,会暂停部分系统后台功能。
- name: multiblock_read_size - name: multiblock_read_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 128K default: 128K
min_value: 0K min_value: 0K
max_value: 2M max_value: 2M
...@@ -495,7 +495,7 @@ ...@@ -495,7 +495,7 @@
description_local: 因磁盘满等原因导致某个节点数据迁入失败时,暂停迁入时长 description_local: 因磁盘满等原因导致某个节点数据迁入失败时,暂停迁入时长
- name: tablet_size - name: tablet_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 128M default: 128M
min_value: NULL min_value: NULL
max_value: NULL max_value: NULL
...@@ -594,7 +594,7 @@ ...@@ -594,7 +594,7 @@
description_local: 数据块缓存在缓存系统中的优先级 description_local: 数据块缓存在缓存系统中的优先级
- name: syslog_io_bandwidth_limit - name: syslog_io_bandwidth_limit
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 30MB default: 30MB
min_value: NULL min_value: NULL
max_value: NULL max_value: NULL
...@@ -656,7 +656,7 @@ ...@@ -656,7 +656,7 @@
description_local: 系统日志自动回收复用时,最多保留多少个。值0表示不自动清理。 description_local: 系统日志自动回收复用时,最多保留多少个。值0表示不自动清理。
- name: px_task_size - name: px_task_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 2M default: 2M
min_value: 2M min_value: 2M
max_value: NULL max_value: NULL
...@@ -1017,7 +1017,7 @@ ...@@ -1017,7 +1017,7 @@
description_local: 控制租户CPU调度中每次预留多少比例的空闲token数给租户 description_local: 控制租户CPU调度中每次预留多少比例的空闲token数给租户
- name: stack_size - name: stack_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 1M default: 1M
min_value: 512K min_value: 512K
max_value: 20M max_value: 20M
...@@ -1039,7 +1039,7 @@ ...@@ -1039,7 +1039,7 @@
name_local: 最大运行内存 name_local: 最大运行内存
require: false require: false
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
default: 0 default: 0
min_value: NULL min_value: NULL
max_value: NULL max_value: NULL
...@@ -1051,7 +1051,7 @@ ...@@ -1051,7 +1051,7 @@
- name: system_memory - name: system_memory
name_local: 集群系统内存 name_local: 集群系统内存
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
default: 30G default: 30G
min_value: 0M min_value: 0M
max_value: NULL max_value: NULL
...@@ -1180,7 +1180,7 @@ ...@@ -1180,7 +1180,7 @@
description_local: OB内置本地磁盘RAID特性。暂勿使用 description_local: OB内置本地磁盘RAID特性。暂勿使用
- name: rootservice_memory_limit - name: rootservice_memory_limit
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 2G default: 2G
min_value: 2G min_value: 2G
max_value: NULL max_value: NULL
...@@ -1190,7 +1190,7 @@ ...@@ -1190,7 +1190,7 @@
description_local: RootService最大内存限制 description_local: RootService最大内存限制
- name: plan_cache_low_watermark - name: plan_cache_low_watermark
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 1500M default: 1500M
min_value: NULL min_value: NULL
max_value: NULL max_value: NULL
...@@ -1252,7 +1252,7 @@ ...@@ -1252,7 +1252,7 @@
description_local: 控制内存大页的行为,"true"表示在操作系统开启内存大页并且有空闲大页时,数据库总是申请内存大页,否则申请普通内存页, "false"表示数据库不使用大页, "only"表示数据库总是分配大页 description_local: 控制内存大页的行为,"true"表示在操作系统开启内存大页并且有空闲大页时,数据库总是申请内存大页,否则申请普通内存页, "false"表示数据库不使用大页, "only"表示数据库总是分配大页
- name: dtl_buffer_size - name: dtl_buffer_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 64K default: 64K
min_value: 4K min_value: 4K
max_value: 2M max_value: 2M
...@@ -1522,7 +1522,7 @@ ...@@ -1522,7 +1522,7 @@
description_local: MySQL模式下,建表时使用的默认压缩算法 description_local: MySQL模式下,建表时使用的默认压缩算法
- name: memory_chunk_cache_size - name: memory_chunk_cache_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 0M default: 0M
min_value: 0M min_value: 0M
max_value: NULL max_value: NULL
...@@ -1699,7 +1699,7 @@ ...@@ -1699,7 +1699,7 @@
description_local: 系统内部执行 schema 多版本记录回收任务的时间间隔。 description_local: 系统内部执行 schema 多版本记录回收任务的时间间隔。
- name: backup_data_file_size - name: backup_data_file_size
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 4G default: 4G
min_value: 512M min_value: 512M
max_value: 4G max_value: 4G
...@@ -1863,7 +1863,7 @@ ...@@ -1863,7 +1863,7 @@
name_local: OCP express元数据库租户内存 name_local: OCP express元数据库租户内存
essential: true essential: true
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 2G default: 2G
need_redeploy: true need_redeploy: true
description_en: The tenant memory size for ocp meta db description_en: The tenant memory size for ocp meta db
...@@ -1872,7 +1872,7 @@ ...@@ -1872,7 +1872,7 @@
name_local: OCP express元数据库租户日志磁盘大小 name_local: OCP express元数据库租户日志磁盘大小
essential: true essential: true
require: false require: false
type: CAPACITY type: CAPACITY_MB
default: 6656M default: 6656M
need_redeploy: true need_redeploy: true
description_en: The tenant log disk size for ocp meta db description_en: The tenant log disk size for ocp meta db
......
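Every capacity-typed observer parameter above moves from `CAPACITY` to `CAPACITY_MB`. Judging by the name, the practical difference is the default unit: a bare number such as `1024` is taken as megabytes rather than bytes, matching how observer itself reads these values — that interpretation is an assumption, since the type implementation is not part of this diff. A sketch of a capacity parser with a default unit:

```python
import re

UNITS = {'B': 1, 'K': 1 << 10, 'M': 1 << 20, 'G': 1 << 30, 'T': 1 << 40}

def parse_capacity(value, default_unit='M'):
    """Parse '4G', '500M', '30MB', or a bare '1024' into bytes; bare
    numbers fall back to megabytes (the assumed CAPACITY_MB behavior)."""
    match = re.match(r'^\s*(\d+(?:\.\d+)?)\s*([BKMGT])?B?\s*$', str(value), re.I)
    if not match:
        raise ValueError('invalid capacity: %r' % (value,))
    number, unit = match.groups()
    return int(float(number) * UNITS[(unit or default_unit).upper()])

assert parse_capacity('4G') == 4 << 30
assert parse_capacity(1024) == 1024 << 20   # bare numbers default to MB
```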
...@@ -176,7 +176,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor ...@@ -176,7 +176,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
if server_memory_config[server]['system_memory']: if server_memory_config[server]['system_memory']:
memory_limit = server_memory_config[server]['num'] memory_limit = server_memory_config[server]['num']
if not memory_limit: if not memory_limit:
server_memory_config[server]['num'] = memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total'] server_memory_config[server]['num'] = memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total'] / 100
factor = 0.75 factor = 0.75
suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor) suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {}) suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {})
...@@ -586,9 +586,10 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor ...@@ -586,9 +586,10 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
has_ocp = True has_ocp = True
if has_ocp and need_bootstrap: if has_ocp and need_bootstrap:
global_conf_with_default = copy.deepcopy(cluster_config.get_global_conf_with_default()) global_conf_with_default = copy.deepcopy(cluster_config.get_global_conf_with_default())
original_global_conf = cluster_config.get_original_global_conf()
ocp_meta_tenant_prefix = 'ocp_meta_tenant_' ocp_meta_tenant_prefix = 'ocp_meta_tenant_'
for key in global_conf_with_default: for key in global_conf_with_default:
if key.startswith(ocp_meta_tenant_prefix): if key.startswith(ocp_meta_tenant_prefix) and original_global_conf.get(key, None):
global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key] global_conf_with_default['ocp_meta_tenant'][key.replace(ocp_meta_tenant_prefix, '', 1)] = global_conf_with_default[key]
meta_db_memory_size = parse_size(global_conf_with_default['ocp_meta_tenant'].get('memory_size')) meta_db_memory_size = parse_size(global_conf_with_default['ocp_meta_tenant'].get('memory_size'))
servers_sys_memory = {} servers_sys_memory = {}
......
...@@ -390,23 +390,39 @@ class Upgrader(object): ...@@ -390,23 +390,39 @@ class Upgrader(object):
time.sleep(3) time.sleep(3)
# major freeze # major freeze
# 1. check merge status # 1. wait all tenant global_broadcast_scn = last_scn, record tenant_id, global_broadcast_scn
pre_global_broadcast_scn = 0 pre_tenant_scn_dict = {}
while True: tenant_ids = []
merge_status = self.execute_sql("select max(global_broadcast_scn) as global_broadcast_scn, max(global_broadcast_scn > last_scn) as is_merging from CDB_OB_MAJOR_COMPACTION") for tenant_info in self.execute_sql("select tenant_id from CDB_OB_MAJOR_COMPACTION", one=False):
if merge_status['is_merging'] == 0: tenant_ids.append(tenant_info['tenant_id'])
pre_global_broadcast_scn = merge_status['global_broadcast_scn'] while tenant_ids:
break pre_tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn, last_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
time.sleep(3) tenant_ids = []
for pre_tenant_scn in pre_tenant_scn_list:
if pre_tenant_scn['global_broadcast_scn'] > pre_tenant_scn['last_scn']:
tenant_ids.append(pre_tenant_scn['tenant_id'])
continue
pre_tenant_scn_dict[pre_tenant_scn['tenant_id']] = pre_tenant_scn['global_broadcast_scn']
time.sleep(1)
# 2. begin merge # 2. begin merge
self.execute_sql("alter system major freeze tenant = all", error=False) self.execute_sql("alter system major freeze tenant = all", error=False)
# 3. wait merge start # 3. wait merge start
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn <= %s", [pre_global_broadcast_scn]): tenant_ids = pre_tenant_scn_dict.keys()
while tenant_ids:
tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
tenant_ids = []
for tenant_scn in tenant_scn_list:
if pre_tenant_scn_dict[tenant_scn['tenant_id']] >= tenant_scn['global_broadcast_scn']:
tenant_ids.append(tenant_scn['tenant_id'])
continue
time.sleep(3) time.sleep(3)
# 4.wait merge finsh
# 4. wait merge finish
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn > last_scn"): while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn > last_scn"):
time.sleep(3) time.sleep(3)
self.stdio.stop_loading('succeed') self.stdio.stop_loading('succeed')
return True return True
......
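The major-freeze logic above is rewritten from a single cluster-wide `max(global_broadcast_scn)` probe to per-tenant bookkeeping: first wait until every tenant has finished its previous merge (`global_broadcast_scn == last_scn`) while recording each tenant's pre-freeze SCN, then issue the freeze, then wait per tenant for the broadcast SCN to advance past the recorded value, and finally wait for the merge itself to complete. The whole protocol, condensed into a sketch (`execute_sql(sql, one=False)` returning a list of dict rows is the assumed interface):

```python
import time

def wait_major_freeze(execute_sql):
    # 1. wait until no tenant is mid-merge, recording each pre-freeze SCN
    rows = execute_sql("select tenant_id from CDB_OB_MAJOR_COMPACTION", one=False)
    pending = [row['tenant_id'] for row in rows]
    pre_scn = {}
    while pending:
        rows = execute_sql(
            "select tenant_id, global_broadcast_scn, last_scn "
            "from CDB_OB_MAJOR_COMPACTION where tenant_id in (%s)"
            % ",".join(str(t) for t in pending), one=False)
        pending = []
        for row in rows:
            if row['global_broadcast_scn'] > row['last_scn']:
                pending.append(row['tenant_id'])          # still merging
            else:
                pre_scn[row['tenant_id']] = row['global_broadcast_scn']
        if pending:
            time.sleep(1)
    # 2. begin merge
    execute_sql("alter system major freeze tenant = all")
    # 3. wait until every tenant's broadcast SCN passes its recorded value
    pending = list(pre_scn)
    while pending:
        rows = execute_sql(
            "select tenant_id, global_broadcast_scn "
            "from CDB_OB_MAJOR_COMPACTION where tenant_id in (%s)"
            % ",".join(str(t) for t in pending), one=False)
        pending = [row['tenant_id'] for row in rows
                   if pre_scn[row['tenant_id']] >= row['global_broadcast_scn']]
        if pending:
            time.sleep(3)
    # 4. wait for the merge itself to finish
    while execute_sql("select * from CDB_OB_MAJOR_COMPACTION "
                      "where global_broadcast_scn > last_scn", one=False):
        time.sleep(3)
```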
...@@ -158,15 +158,15 @@ class ObVersionGraph(object): ...@@ -158,15 +158,15 @@ class ObVersionGraph(object):
res.insert(0, start_node) res.insert(0, start_node)
if len(res) > 0 and res[-1].deprecated: if len(res) > 0 and res[-1].deprecated:
raise Exception('upgrade destination version:{}{} is deprecated, not support upgrade.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else '')) raise Exception('upgrade destination version:{}{} is deprecated, not support upgrade.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else ''))
return format_route(res) return format_route(res, current_repository)
def format_route(routes): def format_route(routes, repository):
route_res = [] route_res = []
for i, node in enumerate(routes): for i, node in enumerate(routes):
require_from_binary = getattr(node, 'require_from_binary', False) require_from_binary = getattr(node, 'require_from_binary', False)
if getattr(node, 'when_come_from', False): if getattr(node, 'when_come_from', False):
require_from_binary = require_from_binary and routes[0].version in node.when_come_from require_from_binary = require_from_binary and (repository.version in node.when_come_from or '%s-%s' % (repository.version, repository.release.split('.')[0]) in node.when_come_from)
route_res.append({ route_res.append({
'version': node.version, 'version': node.version,
...@@ -194,17 +194,17 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, ** ...@@ -194,17 +194,17 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, **
stdio = plugin_context.stdio stdio = plugin_context.stdio
repository_dir = dest_repository.repository_dir repository_dir = dest_repository.repository_dir
if dest_repository.version >= Version("4.2"): if dest_repository.version >= Version("4.3"):
stdio.error('upgrade observer to version {} is not supported, please upgrade obd first.'.format(dest_repository.version)) stdio.error('upgrade observer to version {} is not supported, please upgrade obd first.'.format(dest_repository.version))
return return
if current_repository.version == dest_repository.version: if current_repository.version == dest_repository.version:
return plugin_context.return_true(route=format_route([current_repository, dest_repository])) return plugin_context.return_true(route=format_route([current_repository, dest_repository], current_repository))
upgrade_dep_name = 'etc/oceanbase_upgrade_dep.yml' upgrade_dep_name = 'etc/oceanbase_upgrade_dep.yml'
upgrade_dep_path = os.path.join(repository_dir, upgrade_dep_name) upgrade_dep_path = os.path.join(repository_dir, upgrade_dep_name)
if not os.path.isfile(upgrade_dep_path): if not os.path.isfile(upgrade_dep_path):
stdio.error('%s No such file: %s' % (dest_repository, upgrade_dep_name)) stdio.error('%s No such file: %s. \n No upgrade route available' % (dest_repository, upgrade_dep_name))
return return
version_dep = {} version_dep = {}
......
...@@ -374,20 +374,36 @@ class Upgrader(object): ...@@ -374,20 +374,36 @@ class Upgrader(object):
time.sleep(3) time.sleep(3)
# major freeze # major freeze
# 1. check merge status # 1. wait all tenant global_broadcast_scn = last_scn, record tenant_id, global_broadcast_scn
pre_global_broadcast_scn = 0 pre_tenant_scn_dict = {}
while True: tenant_ids = []
merge_status = self.execute_sql("select max(global_broadcast_scn) as global_broadcast_scn, max(global_broadcast_scn > last_scn) as is_merging from CDB_OB_MAJOR_COMPACTION") for tenant_info in self.execute_sql("select tenant_id from CDB_OB_MAJOR_COMPACTION", one=False):
if merge_status['is_merging'] == 0: tenant_ids.append(tenant_info['tenant_id'])
pre_global_broadcast_scn = merge_status['global_broadcast_scn'] while tenant_ids:
break pre_tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn, last_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
time.sleep(3) tenant_ids = []
for pre_tenant_scn in pre_tenant_scn_list:
if pre_tenant_scn['global_broadcast_scn'] > pre_tenant_scn['last_scn']:
tenant_ids.append(pre_tenant_scn['tenant_id'])
continue
pre_tenant_scn_dict[pre_tenant_scn['tenant_id']] = pre_tenant_scn['global_broadcast_scn']
time.sleep(1)
# 2. begin merge # 2. begin merge
self.execute_sql("alter system major freeze tenant = all", error=False) self.execute_sql("alter system major freeze tenant = all", error=False)
# 3. wait merge start # 3. wait merge start
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn <= %s", [pre_global_broadcast_scn]): tenant_ids = pre_tenant_scn_dict.keys()
while tenant_ids:
tenant_scn_list = self.execute_sql("select tenant_id, global_broadcast_scn from CDB_OB_MAJOR_COMPACTION where tenant_id in ({})".format(",".join([str(x) for x in tenant_ids])), one=False)
tenant_ids = []
for tenant_scn in tenant_scn_list:
if pre_tenant_scn_dict[tenant_scn['tenant_id']] >= tenant_scn['global_broadcast_scn']:
tenant_ids.append(tenant_scn['tenant_id'])
continue
time.sleep(3) time.sleep(3)
# 4.wait merge finsh
# 4. wait merge finish
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn > last_scn"): while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn > last_scn"):
time.sleep(3) time.sleep(3)
......
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import json
import time
import requests
from copy import deepcopy
from _errno import EC_OBSERVER_FAIL_TO_START, EC_OBSERVER_FAIL_TO_START_WITH_ERR, EC_OBSERVER_FAILED_TO_REGISTER, EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS
from collections import OrderedDict
def config_url(ocp_config_server, appname, cid):
cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname)
proxy_cfg_url = '%s&Action=GetObProxyConfig&ObRegionGroup=%s' % (ocp_config_server, appname)
# Command that clears the URL content for the cluster
cleanup_config_url_content = '%s&Action=DeleteObRootServiceInfoByClusterName&ClusterName=%s' % (ocp_config_server, appname)
# Command that registers the cluster information to the Config URL
register_to_config_url = '%s&Action=ObRootServiceRegister&ObCluster=%s&ObClusterId=%s' % (ocp_config_server, appname, cid)
return cfg_url, cleanup_config_url_content, register_to_config_url
def init_config_server(ocp_config_server, appname, cid, force_delete, stdio):
def post(url):
stdio.verbose('post %s' % url)
response = requests.post(url)
if response.status_code != 200:
raise Exception('%s status code %s' % (url, response.status_code))
return json.loads(response.text)['Code']
cfg_url, cleanup_config_url_content, register_to_config_url = config_url(ocp_config_server, appname, cid)
ret = post(register_to_config_url)
if ret != 200:
if not force_delete:
raise Exception('%s may have been registered in %s' % (appname, ocp_config_server))
ret = post(cleanup_config_url_content)
if ret != 200 :
raise Exception('failed to clean up the config url content, return code %s' % ret)
if post(register_to_config_url) != 200:
return False
return cfg_url
class EnvVariables(object):
def __init__(self, environments, client):
self.environments = environments
self.client = client
self.env_done = {}
def __enter__(self):
for env_key, env_value in self.environments.items():
self.env_done[env_key] = self.client.get_env(env_key)
self.client.add_env(env_key, env_value, True)
def __exit__(self, *args, **kwargs):
for env_key, env_value in self.env_done.items():
if env_value is not None:
self.client.add_env(env_key, env_value, True)
else:
self.client.del_env(env_key)
def start(plugin_context, *args, **kwargs):
cluster_config = plugin_context.cluster_config
options = plugin_context.options
clients = plugin_context.clients
stdio = plugin_context.stdio
clusters_cmd = {}
need_bootstrap = True
root_servers = {}
global_config = cluster_config.get_global_conf()
appname = global_config['appname'] if 'appname' in global_config else None
cluster_id = global_config['cluster_id'] if 'cluster_id' in global_config else None
obconfig_url = global_config['obconfig_url'] if 'obconfig_url' in global_config else None
cfg_url = ''
if obconfig_url:
if not appname or not cluster_id:
stdio.error('need appname and cluster_id')
return
try:
cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio)
if not cfg_url:
stdio.error(EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS.format(appname, obconfig_url))
return
except:
stdio.exception(EC_OBSERVER_FAILED_TO_REGISTER.format())
return
stdio.start_loading('Start observer')
for server in cluster_config.original_servers:
config = cluster_config.get_server_conf(server)
zone = config['zone']
if zone not in root_servers:
root_servers[zone] = '%s:%s:%s' % (server.ip, config['rpc_port'], config['mysql_port'])
rs_list_opt = '-r \'%s\'' % ';'.join([root_servers[zone] for zone in root_servers])
for server in cluster_config.servers:
client = clients[server]
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
if not server_config.get('data_dir'):
server_config['data_dir'] = '%s/store' % home_path
if client.execute_command('ls %s/clog/tenant_1/' % server_config['data_dir']).stdout.strip():
need_bootstrap = False
remote_pid_path = '%s/run/observer.pid' % home_path
remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip()
if remote_pid:
if client.execute_command('ls /proc/%s' % remote_pid):
continue
stdio.verbose('%s start command construction' % server)
if getattr(options, 'without_parameter', False) and client.execute_command('ls %s/etc/observer.config.bin' % home_path):
use_parameter = False
else:
use_parameter = True
cmd = []
if use_parameter:
not_opt_str = OrderedDict({
'mysql_port': '-p',
'rpc_port': '-P',
'zone': '-z',
'nodaemon': '-N',
'appname': '-n',
'cluster_id': '-c',
'data_dir': '-d',
'syslog_level': '-l',
'ipv6': '-6',
'mode': '-m',
'scn': '-f'
})
not_cmd_opt = [
'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc', 'production_mode',
'ocp_meta_tenant', 'ocp_meta_username', 'ocp_meta_password', 'ocp_meta_db', 'ocp_agent_monitor_password'
]
get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
opt_str = []
for key in server_config:
if key not in not_cmd_opt and key not in not_opt_str and not key.startswith('ocp_meta_tenant_'):
value = get_value(key)
opt_str.append('%s=%s' % (key, value))
if cfg_url:
opt_str.append('obconfig_url=\'%s\'' % cfg_url)
else:
cmd.append(rs_list_opt)
for key in not_opt_str:
if key in server_config:
value = get_value(key)
cmd.append('%s %s' % (not_opt_str[key], value))
cmd.append('-I %s' % server.ip)
cmd.append('-o %s' % ','.join(opt_str))
else:
cmd.append('-p %s' % server_config['mysql_port'])
clusters_cmd[server] = 'cd %s; %s/bin/observer %s' % (home_path, home_path, ' '.join(cmd))
for server in clusters_cmd:
environments = deepcopy(cluster_config.get_environments())
client = clients[server]
server_config = cluster_config.get_server_conf(server)
stdio.verbose('starting %s observer', server)
if 'LD_LIBRARY_PATH' not in environments:
environments['LD_LIBRARY_PATH'] = '%s/lib:' % server_config['home_path']
with EnvVariables(environments, client):
ret = client.execute_command(clusters_cmd[server])
if not ret:
stdio.stop_loading('fail')
stdio.error(EC_OBSERVER_FAIL_TO_START_WITH_ERR.format(server=server, stderr=ret.stderr))
return
stdio.stop_loading('succeed')
stdio.start_loading('observer program health check')
time.sleep(3)
failed = []
for server in cluster_config.servers:
client = clients[server]
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
remote_pid_path = '%s/run/observer.pid' % home_path
stdio.verbose('%s program health check' % server)
remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip()
if remote_pid and client.execute_command('ls /proc/%s' % remote_pid):
stdio.verbose('%s observer[pid: %s] started', server, remote_pid)
else:
failed.append(EC_OBSERVER_FAIL_TO_START.format(server=server))
if failed:
stdio.stop_loading('fail')
for msg in failed:
stdio.warn(msg)
return plugin_context.return_false()
else:
stdio.stop_loading('succeed')
return plugin_context.return_true(need_bootstrap=need_bootstrap)
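The `EnvVariables` context manager in the file above snapshots each variable's previous remote value on entry, exports the override for the duration of the start command, and on exit restores the old value or deletes the key if there was none. A usage sketch against a stub client (it assumes the `EnvVariables` class above is importable; `StubClient` only mimics the `get_env`/`add_env`/`del_env` surface of the real remote client):

```python
class StubClient:
    def __init__(self):
        self.env = {}
    def get_env(self, key):
        return self.env.get(key)
    def add_env(self, key, value, _global=False):
        self.env[key] = value
    def del_env(self, key):
        self.env.pop(key, None)

client = StubClient()
client.add_env('LD_LIBRARY_PATH', '/usr/lib')

with EnvVariables({'LD_LIBRARY_PATH': '/home/admin/oceanbase/lib:'}, client):
    assert client.get_env('LD_LIBRARY_PATH') == '/home/admin/oceanbase/lib:'

# the pre-existing value is restored on exit
assert client.get_env('LD_LIBRARY_PATH') == '/usr/lib'
```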
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
name_local: 进程内存 name_local: 进程内存
require: true require: true
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
min_value: 512M min_value: 512M
need_restart: true need_restart: true
description_en: the memory size of ocp express server. Please enter a capacity, such as 2G description_en: the memory size of ocp express server. Please enter a capacity, such as 2G
......
...@@ -202,14 +202,15 @@ def prepare_parameters(cluster_config, stdio): ...@@ -202,14 +202,15 @@ def prepare_parameters(cluster_config, stdio):
if value is not None: if value is not None:
depend_info[key] = value depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp) ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers: for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info: connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
zone = ob_server_conf['zone'] zone = ob_server_conf['zone']
if zone not in ob_zones: if zone not in ob_zones:
ob_zones[zone] = ob_server ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values() root_servers = ob_zones.values()
break break
for comp in ['obproxy', 'obproxy-ce']: for comp in ['obproxy', 'obproxy-ce']:
...@@ -266,7 +267,12 @@ def prepare_parameters(cluster_config, stdio): ...@@ -266,7 +267,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config) missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys: if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer: if 'jdbc_url' in missed_keys and depend_observer:
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db']) if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer: if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'],
depend_info.get('ocp_meta_tenant', {}).get("tenant_name")) depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
...@@ -333,26 +339,37 @@ def start(plugin_context, start_env=None, *args, **kwargs): ...@@ -333,26 +339,37 @@ def start(plugin_context, start_env=None, *args, **kwargs):
else: else:
use_parameter = True use_parameter = True
# check meta db connect before start # check meta db connect before start
matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url) if jdbc_url:
if matched: matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url)
if not matched:
stdio.error("Invalid jdbc url: %s" % jdbc_url)
return
ip = matched.group(1) ip = matched.group(1)
sql_port = matched.group(2)[1:] sql_port = matched.group(2)[1:]
database = matched.group(3) database = matched.group(3)
connected = False connect_infos = [[ip, sql_port]]
retries = 300 else:
while not connected and retries: connect_infos = server_config.get('connect_infos', '')
database = server_config.get('ocp_meta_db', '')
connected = False
retries = 300
while not connected and retries:
for connect_info in connect_infos:
retries -= 1 retries -= 1
server_ip = connect_info[0]
server_port = connect_info[-1]
try: try:
Cursor(ip=ip, port=sql_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio)
jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database)
connected = True connected = True
break
except: except:
time.sleep(1) time.sleep(1)
if not connected: if not connected:
success = False success = False
stdio.error("{}: failed to connect meta db".format(server)) stdio.error("{}: failed to connect meta db".format(server))
continue continue
else:
stdio.verbose('unmatched jdbc url, skip meta db connection check')
if server_config.get('encrypt_password', False): if server_config.get('encrypt_password', False):
private_key, public_key = get_key(client, os.path.join(home_path, 'conf'), stdio) private_key, public_key = get_key(client, os.path.join(home_path, 'conf'), stdio)
public_key_str = get_plain_public_key(public_key) public_key_str = get_plain_public_key(public_key)
......
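When no `jdbc_url` is configured, the ocp-express start above now cycles through every `(ip, mysql_port)` pair collected from the depended observers, spending one retry per attempt, and rebuilds `jdbc_url` from whichever endpoint accepts a connection. The fallback, sketched (the hypothetical `try_connect` stands in for the `Cursor(...)` construction and is assumed to raise on failure):

```python
import time

def pick_meta_endpoint(connect_infos, try_connect, database,
                       retries=300, delay=1):
    """Round-robin over candidate (ip, port) pairs until one accepts a
    connection or the retry budget runs out."""
    if not connect_infos:
        return None
    while retries > 0:
        for ip, port in connect_infos:
            if retries <= 0:
                break
            retries -= 1
            try:
                try_connect(ip, port)
                return 'jdbc:oceanbase://%s:%s/%s' % (ip, port, database)
            except Exception:
                time.sleep(delay)
    return None
```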
...@@ -145,14 +145,14 @@ def prepare_parameters(cluster_config, stdio): ...@@ -145,14 +145,14 @@ def prepare_parameters(cluster_config, stdio):
if value is not None: if value is not None:
depend_info[key] = value depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp) ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers: for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info: connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
zone = ob_server_conf['zone'] zone = ob_server_conf['zone']
if zone not in ob_zones: if zone not in ob_zones:
ob_zones[zone] = ob_server ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values() root_servers = ob_zones.values()
break break
for comp in ['obproxy', 'obproxy-ce']: for comp in ['obproxy', 'obproxy-ce']:
...@@ -209,7 +209,12 @@ def prepare_parameters(cluster_config, stdio): ...@@ -209,7 +209,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config) missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys: if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer: if 'jdbc_url' in missed_keys and depend_observer:
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db']) if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer: if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], depend_info.get('ocp_meta_tenant', {}).get("tenant_name")) server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
depends_key_maps = { depends_key_maps = {
......
...@@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function ...@@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function
from tool import ConfigUtil from tool import ConfigUtil
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs): def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, *args, **kwargs):
namespace = plugin_context.namespace namespace = plugin_context.namespace
namespaces = plugin_context.namespaces namespaces = plugin_context.namespaces
deploy_name = plugin_context.deploy_name deploy_name = plugin_context.deploy_name
...@@ -51,8 +51,8 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, ...@@ -51,8 +51,8 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
apply_param_plugin(cur_repository) apply_param_plugin(cur_repository)
if not stop_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs): if not stop_plugin(namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmds, options, stdio, *args, **kwargs):
return return
install_repository_to_servers(cluster_config.name, cluster_config, dest_repository, clients)
apply_param_plugin(dest_repository) apply_param_plugin(dest_repository)
warns = {} warns = {}
not_support = ['system_password'] not_support = ['system_password']
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
name_local: 进程内存 name_local: 进程内存
require: true require: true
essential: true essential: true
type: CAPACITY type: CAPACITY_MB
min_value: 512M min_value: 512M
need_restart: true need_restart: true
description_en: the memory size of ocp express server. Please enter a capacity, such as 2G description_en: the memory size of ocp express server. Please enter a capacity, such as 2G
......
...@@ -201,14 +201,15 @@ def prepare_parameters(cluster_config, stdio): ...@@ -201,14 +201,15 @@ def prepare_parameters(cluster_config, stdio):
if value is not None: if value is not None:
depend_info[key] = value depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp) ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers: for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info: connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
zone = ob_server_conf['zone'] zone = ob_server_conf['zone']
if zone not in ob_zones: if zone not in ob_zones:
ob_zones[zone] = ob_server ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values() root_servers = ob_zones.values()
break break
for comp in ['obproxy', 'obproxy-ce']: for comp in ['obproxy', 'obproxy-ce']:
...@@ -265,7 +266,12 @@ def prepare_parameters(cluster_config, stdio): ...@@ -265,7 +266,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config) missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys: if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer: if 'jdbc_url' in missed_keys and depend_observer:
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db']) if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer: if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'],
depend_info.get('ocp_meta_tenant', {}).get("tenant_name")) depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
...@@ -333,26 +339,37 @@ def start(plugin_context, start_env=None, *args, **kwargs): ...@@ -333,26 +339,37 @@ def start(plugin_context, start_env=None, *args, **kwargs):
else: else:
use_parameter = True use_parameter = True
# check meta db connect before start # check meta db connect before start
matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url) if jdbc_url:
if matched: matched = re.match(r"^jdbc:\S+://(\S+?)(|:\d+)/(\S+)", jdbc_url)
if not matched:
stdio.error("Invalid jdbc url: %s" % jdbc_url)
return
ip = matched.group(1) ip = matched.group(1)
sql_port = matched.group(2)[1:] sql_port = matched.group(2)[1:]
database = matched.group(3) database = matched.group(3)
connected = False connect_infos = [[ip, sql_port]]
retries = 300 else:
while not connected and retries: connect_infos = server_config.get('connect_infos', '')
database = server_config.get('ocp_meta_db', '')
connected = False
retries = 300
while not connected and retries:
for connect_info in connect_infos:
retries -= 1 retries -= 1
server_ip = connect_info[0]
server_port = connect_info[-1]
try: try:
Cursor(ip=ip, port=sql_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio) Cursor(ip=server_ip, port=server_port, user=jdbc_username, password=jdbc_password, database=database, stdio=stdio)
jdbc_url = 'jdbc:oceanbase://{}:{}/{}'.format(server_ip, server_port, database)
connected = True connected = True
break
except: except:
time.sleep(1) time.sleep(1)
if not connected: if not connected:
success = False success = False
stdio.error("{}: failed to connect meta db".format(server)) stdio.error("{}: failed to connect meta db".format(server))
continue continue
else:
stdio.verbose('unmatched jdbc url, skip meta db connection check')
if server_config.get('encrypt_password', False): if server_config.get('encrypt_password', False):
private_key, public_key = get_key(client, os.path.join(home_path, 'conf'), stdio) private_key, public_key = get_key(client, os.path.join(home_path, 'conf'), stdio)
public_key_str = get_plain_public_key(public_key) public_key_str = get_plain_public_key(public_key)
......
...@@ -146,14 +146,14 @@ def prepare_parameters(cluster_config, stdio): ...@@ -146,14 +146,14 @@ def prepare_parameters(cluster_config, stdio):
if value is not None: if value is not None:
depend_info[key] = value depend_info[key] = value
ob_servers = cluster_config.get_depend_servers(comp) ob_servers = cluster_config.get_depend_servers(comp)
connect_infos = []
for ob_server in ob_servers: for ob_server in ob_servers:
ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server) ob_servers_conf[ob_server] = ob_server_conf = cluster_config.get_depend_config(comp, ob_server)
if 'server_ip' not in depend_info: connect_infos.append([ob_server.ip, ob_server_conf['mysql_port']])
depend_info['server_ip'] = ob_server.ip
depend_info['mysql_port'] = ob_server_conf['mysql_port']
zone = ob_server_conf['zone'] zone = ob_server_conf['zone']
if zone not in ob_zones: if zone not in ob_zones:
ob_zones[zone] = ob_server ob_zones[zone] = ob_server
depend_info['connect_infos'] = connect_infos
root_servers = ob_zones.values() root_servers = ob_zones.values()
break break
for comp in ['obproxy', 'obproxy-ce']: for comp in ['obproxy', 'obproxy-ce']:
...@@ -210,7 +210,12 @@ def prepare_parameters(cluster_config, stdio): ...@@ -210,7 +210,12 @@ def prepare_parameters(cluster_config, stdio):
missed_keys = get_missing_required_parameters(original_server_config) missed_keys = get_missing_required_parameters(original_server_config)
if missed_keys: if missed_keys:
if 'jdbc_url' in missed_keys and depend_observer: if 'jdbc_url' in missed_keys and depend_observer:
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db']) if depend_info.get('server_ip'):
server_config['jdbc_url'] = 'jdbc:oceanbase://{}:{}/{}'.format(depend_info['server_ip'], depend_info['mysql_port'], depend_info['ocp_meta_db'])
else:
server_config['connect_infos'] = depend_info.get('connect_infos')
server_config['ocp_meta_db'] = depend_info.get('ocp_meta_db')
server_config['jdbc_url'] = ''
if 'jdbc_username' in missed_keys and depend_observer: if 'jdbc_username' in missed_keys and depend_observer:
server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], depend_info.get('ocp_meta_tenant', {}).get("tenant_name")) server_config['jdbc_username'] = "{}@{}".format(depend_info['ocp_meta_username'], depend_info.get('ocp_meta_tenant', {}).get("tenant_name"))
depends_key_maps = { depends_key_maps = {
......
@@ -14,4 +14,5 @@ pycryptodome==3.10.1
 inspect2==0.1.2
 six==1.16.0
 pyinstaller==3.6
-bcrypt==3.1.7
\ No newline at end of file
+bcrypt==3.1.7
+zstandard==0.14.1
\ No newline at end of file
@@ -12,4 +12,5 @@ inspect2==0.1.2
 six==1.16.0
 pyinstaller>=4.3
 bcrypt==4.0.0
-configparser>=5.2.0
\ No newline at end of file
+configparser>=5.2.0
+zstandard==0.21.0
\ No newline at end of file
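Both requirements files now pin zstandard (0.14.1 on the Python 2 toolchain, 0.21.0 on Python 3). The diff does not show the call sites, so the following only demonstrates the library's basic one-shot API that such a dependency is typically used for; the function name and payload are illustrative:

```python
import zstandard


def zstd_roundtrip(data):
    """Compress and decompress a payload with default settings."""
    compressed = zstandard.ZstdCompressor().compress(data)
    # compress() embeds the content size, so one-shot decompress() works.
    return zstandard.ZstdDecompressor().decompress(compressed)


assert zstd_roundtrip(b'obd' * 1000) == b'obd' * 1000
```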
@@ -26,7 +26,7 @@ MINIMAL_CONFIG = '''
 '''
 PKG_ESTIMATED_SIZE = defaultdict(lambda:0)
-PKG_ESTIMATED_SIZE.update({"oceanbase-ce":314142720, "obproxy-ce":45424640, "obagent": 25124864})
+PKG_ESTIMATED_SIZE.update({"oceanbase-ce": 347142720, "oceanbase": 358142928, "obproxy-ce": 45424640, "obproxy": 56428687, "obagent": 76124864, "ocp-express": 95924680})
 OCEANBASE_CE = 'oceanbase-ce'
......
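Because `PKG_ESTIMATED_SIZE` is a `defaultdict(lambda: 0)`, any component missing from the updated table still yields 0 instead of raising `KeyError`, so size estimation degrades gracefully for unknown packages. A quick demonstration:

```python
from collections import defaultdict

PKG_ESTIMATED_SIZE = defaultdict(lambda: 0)
PKG_ESTIMATED_SIZE.update({"oceanbase-ce": 347142720, "obproxy-ce": 45424640})

print(PKG_ESTIMATED_SIZE["oceanbase-ce"])        # 347142720
print(PKG_ESTIMATED_SIZE["some-new-component"])  # 0 -- default, no KeyError
```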
@@ -12,4 +12,5 @@ Start the dev server,
 ```bash
 $ yarn start
+$ yarn dev
 ```
@@ -32,5 +32,5 @@ export default defineConfig({
     `!function(modules){function __webpack_require__(moduleId){if(installedModules[moduleId])return installedModules[moduleId].exports;var module=installedModules[moduleId]={exports:{},id:moduleId,loaded:!1};return modules[moduleId].call(module.exports,module,module.exports,__webpack_require__),module.loaded=!0,module.exports}var installedModules={};return __webpack_require__.m=modules,__webpack_require__.c=installedModules,__webpack_require__.p="",__webpack_require__(0)}([function(module,exports){"use strict";!function(){if(!window.Tracert){for(var Tracert={_isInit:!0,_readyToRun:[],_guid:function(){return"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g,function(c){var r=16*Math.random()|0,v="x"===c?r:3&r|8;return v.toString(16)})},get:function(key){if("pageId"===key){if(window._tracert_loader_cfg=window._tracert_loader_cfg||{},window._tracert_loader_cfg.pageId)return window._tracert_loader_cfg.pageId;var metaa=document.querySelectorAll("meta[name=data-aspm]"),spma=metaa&&metaa[0].getAttribute("content"),spmb=document.body&&document.body.getAttribute("data-aspm"),pageId=spma&&spmb?spma+"."+spmb+"_"+Tracert._guid()+"_"+Date.now():"-_"+Tracert._guid()+"_"+Date.now();return window._tracert_loader_cfg.pageId=pageId,pageId}return this[key]},call:function(){var argsList,args=arguments;try{argsList=[].slice.call(args,0)}catch(ex){var argsLen=args.length;argsList=[];for(var i=0;i<argsLen;i++)argsList.push(args[i])}Tracert.addToRun(function(){Tracert.call.apply(Tracert,argsList)})},addToRun:function(_fn){var fn=_fn;"function"==typeof fn&&(fn._logTimer=new Date-0,Tracert._readyToRun.push(fn))}},fnlist=["config","logPv","info","err","click","expo","pageName","pageState","time","timeEnd","parse","checkExpo","stringify","report"],i=0;i<fnlist.length;i++){var fn=fnlist[i];!function(fn){Tracert[fn]=function(){var argsList,args=arguments;try{argsList=[].slice.call(args,0)}catch(ex){var argsLen=args.length;argsList=[];for(var i=0;i<argsLen;i++)argsList.push(args[i])}argsList.unshift(fn),Tracert.addToRun(function(){Tracert.call.apply(Tracert,argsList)})}}(fn)}window.Tracert=Tracert}}()}]);`,
     'https://gw.alipayobjects.com/as/g/component/tracert/4.4.9/index.js',
   ],
-  plugins: ['./config/plugin.ts'],
+  plugins: ['./config/plugin.ts']
 });
@@ -28,7 +28,7 @@
     "antd": "5.0.7",
     "copy-to-clipboard": "3.3.3",
     "cross-env": "7.0.3",
-    "i18next": "22.4.15",
+    "i18next": "^23.2.11",
     "lottie-web": "5.10.2",
     "moment": "2.29.4",
     "number-precision": "1.6.0",
@@ -51,5 +51,6 @@
   },
   "gitHooks": {
     "pre-commit": "lint-staged"
-  }
+  },
+  "repository": "git@gitlab.alibaba-inc.com:oceanbase/ob-deploy.git"
 }
@@ -285,5 +285,6 @@
   "OBD.pages.components.NodeConfig.OnlyOneNodeCanBe": "Only one node can be selected or entered",
   "OBD.pages.components.NodeConfig.NodeConfigurationPreviousStep": "Node Configuration-Previous Step",
   "OBD.pages.components.NodeConfig.AreYouSureYouWant": "Are you sure to delete the configuration of this Zone?",
-  "OBD.pages.components.ClusterConfig.PortObproxyOfExporterIs": "Used for Prometheus to pull OBProxy monitoring data."
+  "OBD.pages.components.ClusterConfig.PortObproxyOfExporterIs": "Used for Prometheus to pull OBProxy monitoring data.",
+  "OBD.pages.components.InstallConfig.OptionalComponents": "Optional components"
 }
@@ -285,5 +285,6 @@
   "OBD.pages.components.NodeConfig.NodeConfigurationPreviousStep": "节点配置-上一步",
   "OBD.pages.components.NodeConfig.AreYouSureYouWant": "确定删除该条 Zone 的相关配置吗?",
   "OBD.pages.components.ClusterConfig.PortObproxyOfExporterIs": "OBProxy 的 Exporter 端口,用于 Prometheus 拉取 OBProxy 监控数据。",
-  "OBD.pages.components.InstallProcess.Deploying": "部署中..."
+  "OBD.pages.components.InstallProcess.Deploying": "部署中...",
+  "OBD.pages.components.InstallConfig.OptionalComponents": "可选组件"
 }
@@ -6,6 +6,7 @@ import { getErrorInfo } from '@/utils';
 export default () => {
   const initAppName = 'myoceanbase';
+  const [selectedConfig, setSelectedConfig] = useState(['obproxy', 'ocp-express', 'obagent']); // ocp-express always requires obagent
   const [currentStep, setCurrentStep] = useState<number>(0);
   const [configData, setConfigData] = useState<any>({});
   const [currentType, setCurrentType] = useState('all');
@@ -41,13 +42,13 @@ export default () => {
   });
   return {
+    selectedConfig,
+    setSelectedConfig,
     initAppName,
     currentStep,
     setCurrentStep,
     configData,
     setConfigData,
-    currentType,
-    setCurrentType,
     checkOK,
     setCheckOK,
     installStatus,
......
@@ -37,13 +37,14 @@ export default function DeleteDeployModal({
   setOBVersionValue,
 }: Props) {
   const {
+    selectedConfig,
+    setSelectedConfig,
     setConfigData,
     setIsDraft,
     setClusterMore,
     setComponentsMore,
     componentsVersionInfo,
     setComponentsVersionInfo,
-    setCurrentType,
     getInfoByName,
     setLowVersion,
     setErrorVisible,
@@ -95,14 +96,21 @@ export default function DeleteDeployModal({
     if (nameSuccess) {
       const { config } = nameData;
       const { components = {} } = config;
+      const newSelectedConfig: string[] = [];
+      Object.keys(components).forEach((key) => {
+        if (selectedConfig.includes(key) && components[key]) {
+          newSelectedConfig.push(key);
+        } else if (key === 'ocpexpress' && components[key]) {
+          // todo: unify the key naming with ocpexpress
+          newSelectedConfig.push('ocp-express');
+        }
+      });
+      setSelectedConfig(newSelectedConfig);
       setConfigData(config || {});
       setLowVersion(checkLowVersion(components?.oceanbase?.version));
       setClusterMore(!!components?.oceanbase?.parameters?.length);
       setComponentsMore(!!components?.obproxy?.parameters?.length);
       setIsDraft(true);
-      setCurrentType(
-        components?.oceanbase && !components?.obproxy ? 'ob' : 'all',
-      );
       const newSelectedVersionInfo = componentsVersionInfo?.[
         oceanbaseComponent
......
@@ -47,10 +47,10 @@ interface FormValues extends API.Components {
 export default function NodeConfig() {
   const {
+    selectedConfig,
     setCurrentStep,
     configData,
     setConfigData,
-    currentType,
     lowVersion,
     handleQuitProgress,
     nameIndex,
@@ -139,17 +139,19 @@ export default function NodeConfig() {
   const setData = (dataSource: FormValues) => {
     let newComponents: API.Components = {};
-    if (currentType === 'all') {
+    if (selectedConfig.includes('obproxy')) {
       newComponents.obproxy = {
         ...(components.obproxy || {}),
         ...dataSource.obproxy,
       };
-      if (!lowVersion) {
-        newComponents.ocpexpress = {
-          ...(components.ocpexpress || {}),
-          ...dataSource?.ocpexpress,
-        };
-      }
+    }
+    if (selectedConfig.includes('ocp-express') && !lowVersion) {
+      newComponents.ocpexpress = {
+        ...(components.ocpexpress || {}),
+        ...dataSource?.ocpexpress,
+      };
+    }
+    if (selectedConfig.includes('obagent')) {
       newComponents.obagent = {
         ...(components.obagent || {}),
         servers: allOBServer,
@@ -827,7 +829,8 @@
         }}
       />
     </ProCard>
-    {currentType === 'all' ? (
+    {selectedConfig.includes('ocp-express') ||
+    selectedConfig.includes('obproxy') ? (
       <ProCard
         className={styles.pageCard}
         title={intl.formatMessage({
@@ -837,7 +840,7 @@
         bodyStyle={{ paddingBottom: '0' }}
       >
         <Space size={16}>
-          {!lowVersion ? (
+          {selectedConfig.includes('ocp-express') && !lowVersion ? (
             <ProFormSelect
               mode="tags"
               name={['ocpexpress', 'servers']}
@@ -884,36 +887,39 @@
               options={formatOptions(allOBServer)}
             />
           ) : null}
-          <ProFormSelect
-            mode="tags"
-            name={['obproxy', 'servers']}
-            label={intl.formatMessage({
-              id: 'OBD.pages.components.NodeConfig.ObproxyNodes',
-              defaultMessage: 'OBProxy 节点',
-            })}
-            fieldProps={{ style: { width: 504 }, maxTagCount: 3 }}
-            placeholder={intl.formatMessage({
-              id: 'OBD.pages.components.NodeConfig.PleaseSelect',
-              defaultMessage: '请选择',
-            })}
-            rules={[
-              {
-                required: true,
-                message: intl.formatMessage({
-                  id: 'OBD.pages.components.NodeConfig.SelectOrEnterObproxyNodes',
-                  defaultMessage: '请选择或输入 OBProxy 节点',
-                }),
-              },
-              {
-                validator: (_: any, value: string[]) =>
-                  serversValidator(_, value, 'OBProxy'),
-              },
-            ]}
-            options={formatOptions(allOBServer)}
-          />
+          {selectedConfig.includes('obproxy') && (
+            <ProFormSelect
+              mode="tags"
+              name={['obproxy', 'servers']}
+              label={intl.formatMessage({
+                id: 'OBD.pages.components.NodeConfig.ObproxyNodes',
+                defaultMessage: 'OBProxy 节点',
+              })}
+              fieldProps={{ style: { width: 504 }, maxTagCount: 3 }}
+              placeholder={intl.formatMessage({
+                id: 'OBD.pages.components.NodeConfig.PleaseSelect',
+                defaultMessage: '请选择',
+              })}
+              rules={[
+                {
+                  required: true,
+                  message: intl.formatMessage({
+                    id: 'OBD.pages.components.NodeConfig.SelectOrEnterObproxyNodes',
+                    defaultMessage: '请选择或输入 OBProxy 节点',
+                  }),
+                },
+                {
+                  validator: (_: any, value: string[]) =>
+                    serversValidator(_, value, 'OBProxy'),
+                },
+              ]}
+              options={formatOptions(allOBServer)}
+            />
+          )}
         </Space>
       </ProCard>
     ) : null}
+    {/* The field name in the design mockup seems to be wrong */}
     <ProCard
       className={styles.pageCard}
       title={intl.formatMessage({
......
@@ -149,6 +149,7 @@ export default function IndexPage() {
   useEffect(() => {
     let token = '';
+    fetchDeploymentInfo({ task_status: 'INSTALLING' }).then(
       ({ success, data }: API.OBResponse) => {
         if (success && data?.items?.length) {
......