diff --git a/_cmd.py b/_cmd.py index 07c2d02fa2fe7f77b20d01898fa7a7673e44247f..ae249e52232664e1b75b2ca90cabf3590cf6c283 100644 --- a/_cmd.py +++ b/_cmd.py @@ -25,15 +25,17 @@ import os import sys import time import logging +import textwrap from logging import handlers from uuid import uuid1 as uuid -from optparse import OptionParser, OptionGroup, BadOptionError, Option +from optparse import OptionParser, OptionGroup, BadOptionError, Option, IndentedHelpFormatter from core import ObdHome from _stdio import IO from log import Logger from tool import DirectoryUtil, FileUtil, COMMAND_ENV from _errno import DOC_LINK_MSG, LockError +from _environ import ENV_DEV_MODE ROOT_IO = IO(1) @@ -49,7 +51,32 @@ FORBIDDEN_VARS = (CONST_OBD_HOME, CONST_OBD_INSTALL_PRE) OBD_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), '.obd') COMMAND_ENV.load(os.path.join(OBD_HOME_PATH, '.obd_environ'), ROOT_IO) -DEV_MODE = "OBD_DEV_MODE" + + +class OptionHelpFormatter(IndentedHelpFormatter): + + def format_option(self, option): + result = [] + opts = self.option_strings[option] + opt_width = self.help_position - self.current_indent - 2 + if len(opts) > opt_width: + opts = "%*s%s\n" % (self.current_indent, "", opts) + indent_first = self.help_position + else: # start help on same line as opts + opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) + indent_first = 0 + result.append(opts) + if option.help: + help_text = self.expand_default(option) + help_lines = help_text.split('\n') + if len(help_lines) == 1: + help_lines = textwrap.wrap(help_text, self.help_width) + result.append("%*s%s\n" % (indent_first, "", help_lines[0])) + result.extend(["%*s%s\n" % (self.help_position, "", line) + for line in help_lines[1:]]) + elif opts[-1] != "\n": + result.append("\n") + return "".join(result) class AllowUndefinedOptionParser(OptionParser): @@ -66,12 +93,15 @@ class AllowUndefinedOptionParser(OptionParser): add_help_option=True, prog=None, epilog=None, - allow_undefine=True): + allow_undefine=True, + undefine_warn=True + ): OptionParser.__init__( self, usage, option_list, option_class, version, conflict_handler, description, formatter, add_help_option, prog, epilog ) self.allow_undefine = allow_undefine + self.undefine_warn = undefine_warn def warn(self, msg, file=None): if self.IS_TTY: @@ -88,7 +118,7 @@ class AllowUndefinedOptionParser(OptionParser): key = e.opt_str value = value[len(key)+1:] setattr(values, key.strip('-').replace('-', '_'), value if value != '' else True) - return self.warn(e) + self.undefine_warn and self.warn(e) else: raise e @@ -101,7 +131,7 @@ class AllowUndefinedOptionParser(OptionParser): key = e.opt_str value = value[len(key)+1:] setattr(values, key.strip('-').replace('-', '_'), value if value != '' else True) - return self.warn(e) + self.undefine_warn and self.warn(e) else: raise e @@ -148,7 +178,7 @@ class BaseCommand(object): self.parser.exit(1) def _mk_usage(self): - return self.parser.format_help() + return self.parser.format_help(OptionHelpFormatter()) class ObdCommand(BaseCommand): @@ -163,7 +193,7 @@ class ObdCommand(BaseCommand): version_fobj.seek(0) version = version_fobj.read() if VERSION != version: - for part in ['plugins', 'config_parser', 'mirror/remote']: + for part in ['plugins', 'config_parser', 'optimize', 'mirror/remote']: obd_part_dir = os.path.join(self.OBD_PATH, part) if DirectoryUtil.mkdir(self.OBD_PATH): root_part_path = os.path.join(self.OBD_INSTALL_PRE, 'usr/obd/', part) @@ -177,14 +207,11 @@ class ObdCommand(BaseCommand): @property def 
dev_mode(self): - return COMMAND_ENV.get(DEV_MODE) == "1" + return COMMAND_ENV.get(ENV_DEV_MODE) == "1" def parse_command(self): - self.parser.allow_undefine = self.dev_mode - return super(ObdCommand, self).parse_command() - - def parse_command(self): - self.parser.allow_undefine = self.dev_mode + if self.parser.allow_undefine != True: + self.parser.allow_undefine = self.dev_mode return super(ObdCommand, self).parse_command() def do_command(self): @@ -196,11 +223,7 @@ class ObdCommand(BaseCommand): log_dir = os.path.join(self.OBD_PATH, 'log') DirectoryUtil.mkdir(log_dir) log_path = os.path.join(log_dir, 'obd') - logger = Logger('obd') - handler = handlers.TimedRotatingFileHandler(log_path, when='midnight', interval=1, backupCount=30) - handler.setFormatter(logging.Formatter("[%%(asctime)s.%%(msecs)03d] [%s] [%%(levelname)s] %%(message)s" % trace_id, "%Y-%m-%d %H:%M:%S")) - logger.addHandler(handler) - ROOT_IO.trace_logger = logger + ROOT_IO.init_trace_logger(log_path, 'obd', trace_id) obd = ObdHome(self.OBD_PATH, self.dev_mode, ROOT_IO) ROOT_IO.track_limit += 1 ROOT_IO.verbose('cmd: %s' % self.cmds) @@ -294,7 +317,7 @@ class DevModeEnableCommand(HiddenObdCommand): super(DevModeEnableCommand, self).__init__('enable', 'Enable Dev Mode') def _do_command(self, obd): - if COMMAND_ENV.set(DEV_MODE, "1", save=True, stdio=obd.stdio): + if COMMAND_ENV.set(ENV_DEV_MODE, "1", save=True, stdio=obd.stdio): obd.stdio.print("Dev Mode: ON") return True return False @@ -306,7 +329,7 @@ class DevModeDisableCommand(HiddenObdCommand): super(DevModeDisableCommand, self).__init__('disable', 'Disable Dev Mode') def _do_command(self, obd): - if COMMAND_ENV.set(DEV_MODE, "0", save=True, stdio=obd.stdio): + if COMMAND_ENV.set(ENV_DEV_MODE, "0", save=True, stdio=obd.stdio): obd.stdio.print("Dev Mode: OFF") return True return False @@ -434,6 +457,11 @@ class MirrorListCommand(ObdCommand): def __init__(self): super(MirrorListCommand, self).__init__('list', 'List mirrors.') + def init(self, cmd, args): + super(MirrorListCommand, self).init(cmd, args) + self.parser.set_usage('%s [section name] [options]\n\nExample: %s local' % (self.prev_cmd, self.prev_cmd)) + return self + def show_pkg(self, name, pkgs): ROOT_IO.print_list( pkgs, @@ -469,6 +497,7 @@ class MirrorListCommand(ObdCommand): lambda x: [x.section_name, x.mirror_type.value, x.enabled, time.strftime("%Y-%m-%d %H:%M", time.localtime(x.repo_age))], title='Mirror Repository List' ) + ROOT_IO.print("Use `obd mirror list
` for more details") return True @@ -588,6 +617,25 @@ class ClusterCheckForOCPChange(ClusterMirrorCommand): return self._show_help() +class DemoCommand(ClusterMirrorCommand): + + def __init__(self): + super(DemoCommand, self).__init__('demo', 'Quickly start') + self.parser.add_option('-c', '--components', type='string', help="List the components. Multiple components are separated with commas. [oceanbase-ce,obproxy-ce,obagent,prometheus,grafana]\nExample: \nstart oceanbase-ce: obd demo -c oceanbase-ce\n" + + "start -c oceanbase-ce V3.2.3: obd demo -c oceanbase-ce --oceanbase-ce.version=3.2.3\n" + + "start oceanbase-ce and obproxy-ce: obd demo -c oceanbase-ce,obproxy-ce", default='oceanbase-ce,obproxy-ce,obagent,prometheus,grafana') + self.parser.allow_undefine = True + self.parser.undefine_warn = False + + def _do_command(self, obd): + setattr(self.opts, 'mini', True) + setattr(self.opts, 'force', True) + setattr(self.opts, 'clean', True) + setattr(self.opts, 'force', True) + setattr(self.opts, 'force_delete', True) + return obd.demo(self.opts) + + class ClusterAutoDeployCommand(ClusterMirrorCommand): def __init__(self): @@ -931,6 +979,7 @@ class MySQLTestCommand(TestMirrorCommand): self.parser.add_option('--log-pattern', type='string', help='The pattern for collected servers log ', default='*.log') self.parser.add_option('--cluster-mode', type='string', help="The mode of mysqltest") self.parser.add_option('--disable-reboot', action='store_true', help='Never reboot during test.', default=False) + self.parser.add_option('--fast-reboot', action='store_true', help='Reboot using snapshots.', default=False) def _do_command(self, obd): if self.cmds: @@ -955,14 +1004,16 @@ class SysBenchCommand(TestMirrorCommand): self.parser.add_option('--sysbench-script-dir', type='string', help='The directory of the sysbench lua script file. [/usr/sysbench/share/sysbench]', default='/usr/sysbench/share/sysbench') self.parser.add_option('--table-size', type='int', help='Number of data initialized per table. [20000]', default=20000) self.parser.add_option('--tables', type='int', help='Number of initialization tables. [30]', default=30) - self.parser.add_option('--threads', type='int', help='Number of threads to use. [32]', default=16) + self.parser.add_option('--threads', type='int', help='Number of threads to use. [16]', default=16) self.parser.add_option('--time', type='int', help='Limit for total execution time in seconds. [60]', default=60) self.parser.add_option('--interval', type='int', help='Periodically report intermediate statistics with a specified time interval in seconds. 0 disables intermediate reports. [10]', default=10) self.parser.add_option('--events', type='int', help='Limit for total number of events.') self.parser.add_option('--rand-type', type='string', help='Random numbers distribution {uniform,gaussian,special,pareto}.') self.parser.add_option('--percentile', type='int', help='Percentile to calculate in latency statistics. Available values are 1-100. 0 means to disable percentile calculations.') - self.parser.add_option('--skip-trx', dest='{on/off}', type='string', help='Open or close a transaction in a read-only test. ') + self.parser.add_option('--skip-trx', type='string', help='Open or close a transaction in a read-only test. 
{on/off}') self.parser.add_option('-O', '--optimization', type='int', help='optimization level {0/1}', default=1) + self.parser.add_option('-S', '--skip-cluster-status-check', action='store_true', help='Skip cluster status check', default=False) + self.parser.add_option('--mysql-ignore-errors', type='string', help='List of errors to ignore, or "all". ', default='1062') def _do_command(self, obd): if self.cmds: @@ -993,6 +1044,7 @@ class TPCHCommand(TestMirrorCommand): self.parser.add_option('--dss-config', type='string', help='Directory for dists.dss. [/usr/tpc-h-tools/tpc-h-tools]', default='/usr/tpc-h-tools/tpc-h-tools/') self.parser.add_option('-O', '--optimization', type='int', help='Optimization level {0/1}. [1]', default=1) self.parser.add_option('--test-only', action='store_true', help='Only testing SQLs are executed. No initialization is executed.') + self.parser.add_option('-S', '--skip-cluster-status-check', action='store_true', help='Skip cluster status check', default=False) def _do_command(self, obd): if self.cmds: @@ -1001,6 +1053,41 @@ return self._show_help() +class TPCDSCommand(TestMirrorCommand): + + def __init__(self): + super(TPCDSCommand, self).__init__('tpcds', 'Run a TPC-DS test for a deployment.') + self.parser.add_option('--component', type='string', help='Components for a test.') + self.parser.add_option('--test-server', type='string', help='The server for a test. By default, the first root server in the component is the test server.') + self.parser.add_option('--user', type='string', help='Username for a test.') + self.parser.add_option('--password', type='string', help='Password for a test.') + self.parser.add_option('-t', '--tenant', type='string', help='Tenant for a test. [test]', default='test') + self.parser.add_option('--mode', type='string', help='Tenant compatibility mode. {mysql,oracle} [mysql]', default='mysql') + self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test') + self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient') + self.parser.add_option('--tool-dir', type='string', help='tpc-ds tool dir. [/usr/tpc-ds-tools]') + self.parser.add_option('--dsdgen-bin', type='string', help='dsdgen bin path. [$TOOL_DIR/bin/dsdgen]') + self.parser.add_option('--idx-file', type='string', help='tpcds.idx file path. [$TOOL_DIR/bin/tpcds.idx]') + self.parser.add_option('--dsqgen-bin', type='string', help='dsqgen bin path. [$TOOL_DIR/bin/dsqgen]') + self.parser.add_option('--query-templates-dir', type='string', help='Query templates dir. [$TOOL_DIR/query_templates]') + self.parser.add_option('-s', '--scale', type='int', help='Set Scale Factor (SF) to <SCALE>. [1] ', default=1) + self.parser.add_option('--disable-generate', '--dg', action='store_true', help='Do not generate test data.') + self.parser.add_option('-p', '--generate-parallel', help='Generate data parallel number. [0]', default=0) + self.parser.add_option('--tmp-dir', type='string', help='The temporary directory for executing TPC-DS. 
[./tmp]', default='./tmp') + self.parser.add_option('--ddl-path', type='string', help='Directory for DDL files.') + self.parser.add_option('--sql-path', type='string', help='Directory for SQL files.') + self.parser.add_option('--create-foreign-key', '--fk', action='store_true', help='create foreign key.') + self.parser.add_option('--foreign-key-file', '--fk-file', action='store_true', help='SQL file for creating foreign key.') + self.parser.add_option('--remote-dir', type='string', help='Directory for the data file on target observers. Make sure that you have read and write access to the directory when you start observer.') + self.parser.add_option('--test-only', action='store_true', help='Only testing SQLs are executed. No initialization is executed.') + + def _do_command(self, obd): + if self.cmds: + return obd.tpcds(self.cmds[0], self.opts) + else: + return self._show_help() + + class TPCCCommand(TestMirrorCommand): def __init__(self): @@ -1024,6 +1111,7 @@ class TPCCCommand(TestMirrorCommand): self.parser.add_option('--run-mins', type='int', help='To run for specified minutes.[10]', default=10) self.parser.add_option('--test-only', action='store_true', help='Only testing SQLs are executed. No initialization is executed.') self.parser.add_option('-O', '--optimization', type='int', help='Optimization level {0/1/2}. [1] 0 - No optimization. 1 - Optimize some of the parameters which do not need to restart servers. 2 - Optimize all the parameters and maybe RESTART SERVERS for better performance.', default=1) + self.parser.add_option('-S', '--skip-cluster-status-check', action='store_true', help='Skip cluster status check', default=False) def _do_command(self, obd): if self.cmds: @@ -1040,6 +1128,7 @@ class TestMajorCommand(MajorCommand): self.register_command(SysBenchCommand()) self.register_command(TPCHCommand()) self.register_command(TPCCCommand()) + # self.register_command(TPCDSCommand()) class DbConnectCommand(HiddenObdCommand): @@ -1054,8 +1143,7 @@ class DbConnectCommand(HiddenObdCommand): self.parser.add_option('-c', '--component', type='string', help='The component used by database connection.') self.parser.add_option('-s', '--server', type='string', help='The server used by database connection. The first server in the configuration will be used by default') - self.parser.add_option('-u', '--user', type='string', help='The username used by d' - 'atabase connection. [root]', default='root') + self.parser.add_option('-u', '--user', type='string', help='The username used by database connection. [root]', default='root') self.parser.add_option('-p', '--password', type='string', help='The password used by database connection.') self.parser.add_option('-t', '--tenant', type='string', help='The tenant used by database connection. [sys]', default='sys') self.parser.add_option('-D', '--database', type='string', help='The database name used by database connection.') @@ -1068,6 +1156,30 @@ class DbConnectCommand(HiddenObdCommand): return self._show_help() +class DoobaCommand(HiddenObdCommand): + + def init(self, cmd, args): + super(DoobaCommand, self).init(cmd, args) + self.parser.set_usage('%s [options]' % self.prev_cmd) + return self + + def __init__(self): + super(DoobaCommand, self).__init__('dooba', 'A curses powerful tool for OceanBase admin, more than a monitor') + self.parser.add_option('-c', '--component', type='string', help='The component used by database connection.') + self.parser.add_option('-s', '--server', type='string', + help='The server used by database connection. 
The first server in the configuration will be used by default') + self.parser.add_option('-u', '--user', type='string', help='The username used by database connection. [root]', + default='root') + self.parser.add_option('-p', '--password', type='string', help='The password used by database connection.') + self.parser.add_option('--dooba-bin', type='string', help='Dooba bin path.') + + def _do_command(self, obd): + if self.cmds: + return obd.dooba(self.cmds[0], self.opts) + else: + return self._show_help() + + class CommandsCommand(HiddenObdCommand): def init(self, cmd, args): @@ -1093,6 +1205,7 @@ class ToolCommand(HiddenMajorCommand): super(ToolCommand, self).__init__('tool', 'Tools') self.register_command(DbConnectCommand()) self.register_command(CommandsCommand()) + self.register_command(DoobaCommand()) class BenchMajorCommand(MajorCommand): @@ -1121,6 +1234,7 @@ class MainCommand(MajorCommand): def __init__(self): super(MainCommand, self).__init__('obd', '') self.register_command(DevModeMajorCommand()) + self.register_command(DemoCommand()) self.register_command(MirrorMajorCommand()) self.register_command(ClusterMajorCommand()) self.register_command(RepositoryMajorCommand()) diff --git a/_deploy.py b/_deploy.py index 37f2430193e2056e961ada92c024a5b4a7a3b9c2..cb32fa37dbefaf6e88cc0a2d89f9f81a9c039c60 100644 --- a/_deploy.py +++ b/_deploy.py @@ -22,8 +22,10 @@ from __future__ import absolute_import, division, print_function import os import re +import sys import pickle import getpass +import hashlib from copy import deepcopy from enum import Enum @@ -33,12 +35,12 @@ from tool import ConfigUtil, FileUtil, YamlLoader, OrderedDict, COMMAND_ENV from _manager import Manager from _repository import Repository from _stdio import SafeStdio +from _environ import ENV_BASE_DIR yaml = YamlLoader() DEFAULT_CONFIG_PARSER_MANAGER = None ENV = 'env' -BASE_DIR_KEY = "OBD_DEPLOY_BASE_DIR" class ParserError(Exception): @@ -383,6 +385,30 @@ class ClusterConfig(object): self._depends = {} self.parser = parser self._has_package_pattern = None + self._object_hash = None + + if sys.version_info.major == 2: + def __hash__(self): + if self._object_hash is None: + m_sum = hashlib.md5() + m_sum.update(str(self.package_hash).encode('utf-8')) + m_sum.update(str(self.get_global_conf()).encode('utf-8')) + for server in self.servers: + m_sum.update(str(self.get_server_conf(server)).encode('utf-8')) + m_sum.update(str(self.depends).encode('utf-8')) + self._object_hash = int(''.join(['%03d' % ord(v) for v in m_sum.digest()])) + return self._object_hash + else: + def __hash__(self): + if self._object_hash is None: + m_sum = hashlib.md5() + m_sum.update(str(self.package_hash).encode('utf-8')) + m_sum.update(str(self.get_global_conf()).encode('utf-8')) + for server in self.servers: + m_sum.update(str(self.get_server_conf(server)).encode('utf-8')) + m_sum.update(str(self.depends).encode('utf-8')) + self._object_hash = (int(''.join(['%03d' % v for v in m_sum.digest()]))) + return self._object_hash def __eq__(self, other): if not isinstance(other, self.__class__): @@ -446,6 +472,9 @@ class ClusterConfig(object): raise Exception('Circular Dependency: %s and %s' % (self.name, name)) self._depends[name] = cluster_conf + def add_depend_component(self, depend_component_name): + return self._deploy_config.add_depend_for_component(self.name, depend_component_name, save=False) + def del_depend(self, name, component_name): if component_name in self._depends: del self._depends[component_name] @@ -468,6 +497,8 @@ class 
ClusterConfig(object): return False if server not in self._server_conf: return False + if self._temp_conf and key in self._temp_conf: + value = self._temp_conf[key].param_type(value).value if not self._deploy_config.update_component_server_conf(self.name, server, key, value, save): return False self._server_conf[server][key] = value @@ -478,6 +509,8 @@ class ClusterConfig(object): def update_global_conf(self, key, value, save=True): if self._deploy_config is None: return False + if self._temp_conf and key in self._temp_conf: + value = self._temp_conf[key].param_type(value).value if not self._deploy_config.update_component_global_conf(self.name, key, value, save): return False self._update_global_conf(key, value) @@ -488,7 +521,8 @@ class ClusterConfig(object): def _update_global_conf(self, key, value): self._original_global_conf[key] = value - self._global_conf[key] = value + if self._global_conf: + self._global_conf[key] = value def update_rsync_list(self, rsync_list, save=True): if self._deploy_config is None: @@ -558,6 +592,13 @@ class ClusterConfig(object): self._global_conf = None self._clear_cache_server() + def _apply_temp_conf(self, conf): + if self._temp_conf: + for key in conf: + if key in self._temp_conf: + conf[key] = self._temp_conf[key].param_type(conf[key]).value + return conf + def get_temp_conf_item(self, key): if self._temp_conf: return self._temp_conf.get(key) @@ -613,7 +654,9 @@ class ClusterConfig(object): if self._global_conf is None: self._global_conf = deepcopy(self._default_conf) self._global_conf.update(self._get_include_config('config', {})) - self._global_conf.update(self._original_global_conf) + if self._original_global_conf: + self._global_conf.update(self._original_global_conf) + self._global_conf = self._apply_temp_conf(self._global_conf) return self._global_conf def _add_base_dir(self, path): @@ -622,7 +665,7 @@ class ClusterConfig(object): path = os.path.join(self._base_dir, path) else: raise Exception("`{}` need to use absolute paths. 
If you want to use relative paths, please enable developer mode " - "and set environment variables {}".format(RsyncConfig.RSYNC, BASE_DIR_KEY)) + "and set environment variables {}".format(RsyncConfig.RSYNC, ENV_BASE_DIR)) return path @property @@ -717,9 +760,9 @@ class ClusterConfig(object): if server not in self._server_conf: return None if self._cache_server[server] is None: - conf = deepcopy(self._inner_config.get(server.name, {})) + conf = self._apply_temp_conf(deepcopy(self._inner_config.get(server.name, {}))) conf.update(self.get_global_conf()) - conf.update(self._server_conf[server]) + conf.update(self._apply_temp_conf(self._server_conf[server])) self._cache_server[server] = conf return self._cache_server[server] @@ -788,7 +831,7 @@ class DeployConfig(SafeStdio): self.stdio = stdio self._ignore_include_error = False if self.config_parser_manager is None: - raise ParserError('ConfigParaserManager Not Set') + raise ParserError('ConfigParserManager Not Set') self._load() @property @@ -853,32 +896,35 @@ class DeployConfig(SafeStdio): return False def _load(self): - with open(self.yaml_path, 'rb') as f: - depends = {} - self._src_data = self.yaml_loader.load(f) - for key in self._src_data: - if key == 'user': - self.set_user_conf(UserConfig( - ConfigUtil.get_value_from_dict(self._src_data[key], 'username'), - ConfigUtil.get_value_from_dict(self._src_data[key], 'password'), - ConfigUtil.get_value_from_dict(self._src_data[key], 'key_file'), - ConfigUtil.get_value_from_dict(self._src_data[key], 'port', 0, int), - ConfigUtil.get_value_from_dict(self._src_data[key], 'timeout', 0, int), - )) - elif key == 'unuse_lib_repository': - self.unuse_lib_repository = self._src_data['unuse_lib_repository'] - elif key == 'auto_create_tenant': - self.auto_create_tenant = self._src_data['auto_create_tenant'] - elif issubclass(type(self._src_data[key]), dict): - self._add_component(key, self._src_data[key]) - depends[key] = self._src_data[key].get('depends', []) - for comp in depends: - conf = self.components[comp] - for name in depends[comp]: - if name == comp: - continue - if name in self.components: - conf.add_depend(name, self.components[name]) + try: + with open(self.yaml_path, 'rb') as f: + depends = {} + self._src_data = self.yaml_loader.load(f) + for key in self._src_data: + if key == 'user': + self.set_user_conf(UserConfig( + ConfigUtil.get_value_from_dict(self._src_data[key], 'username'), + ConfigUtil.get_value_from_dict(self._src_data[key], 'password'), + ConfigUtil.get_value_from_dict(self._src_data[key], 'key_file'), + ConfigUtil.get_value_from_dict(self._src_data[key], 'port', 0, int), + ConfigUtil.get_value_from_dict(self._src_data[key], 'timeout', 0, int), + )) + elif key == 'unuse_lib_repository': + self.unuse_lib_repository = self._src_data['unuse_lib_repository'] + elif key == 'auto_create_tenant': + self.auto_create_tenant = self._src_data['auto_create_tenant'] + elif issubclass(type(self._src_data[key]), dict): + self._add_component(key, self._src_data[key]) + depends[key] = self._src_data[key].get('depends', []) + for comp in depends: + conf = self.components[comp] + for name in depends[comp]: + if name == comp: + continue + if name in self.components: + conf.add_depend(name, self.components[name]) + except: + pass if not self.user: self.set_user_conf(UserConfig()) @@ -889,7 +935,7 @@ class DeployConfig(SafeStdio): def load_include_file(self, path): if not os.path.isabs(path): raise Exception("`{}` need to use absolute path. 
If you want to use relative paths, please enable developer mode " - "and set environment variables {}".format('include', BASE_DIR_KEY)) + "and set environment variables {}".format('include', ENV_BASE_DIR)) if os.path.isfile(path): with open(path, 'rb') as f: return self.yaml_loader.load(f) @@ -909,7 +955,7 @@ if parser: inner_config = parser.extract_inner_config(cluster_config, src_data) self.inner_config.update_component_config(component_name, inner_config) - + def _dump_inner_config(self): if self.inner_config: self._separate_config() diff --git a/_errno.py b/_errno.py index c1eb871e1bfd6ba907a8dd9ee25ad719a425ec44..cf22afedb711470073ac63da04a03f5957fb9b34 100644 --- a/_errno.py +++ b/_errno.py @@ -50,7 +50,7 @@ class InitDirFailedErrorMessage(object): DOC_LINK = '' -DOC_LINK_MSG = 'See {}'.format(DOC_LINK if DOC_LINK else "https://open.oceanbase.com/docs/obd-cn/V1.4.0/10000000000436999 .") +DOC_LINK_MSG = 'See {}'.format(DOC_LINK if DOC_LINK else "https://www.oceanbase.com/product/ob-deployer/error-codes .") EC_CONFIG_CONFLICT_PORT = OBDErrorCode(1000, 'Configuration conflict {server1}:{port} port is used for {server2}\'s {key}') EC_CONFLICT_PORT = OBDErrorCode(1001, '{server}:{port} port is already used') @@ -76,4 +76,4 @@ EC_OBAGENT_RELOAD_FAILED = OBDErrorCode(4000, 'Fail to reload {server}') EC_OBAGENT_SEND_CONFIG_FAILED = OBDErrorCode(4001, 'Fail to send config file to {server}') # WARN CODE -WC_ULIMIT_CHECK = OBDErrorCode(1007, '({server}) The recommended number of {key} is {need} (Current value: %s)') \ No newline at end of file +WC_ULIMIT_CHECK = OBDErrorCode(1007, '({server}) The recommended number of {key} is {need} (Current value: {now})') \ No newline at end of file diff --git a/_plugin.py b/_plugin.py index 3d8c12266ecad59709021a0f94369523579063a4..7d714a21e556d5cb955ff0f0792eacbcb604c8da 100644 --- a/_plugin.py +++ b/_plugin.py @@ -30,7 +30,7 @@ from copy import deepcopy from _manager import Manager from _rpm import Version from ssh import ConcurrentExecutor -from tool import ConfigUtil, DynamicLoading, YamlLoader +from tool import ConfigUtil, DynamicLoading, YamlLoader, FileUtil yaml = YamlLoader() @@ -38,9 +38,11 @@ yaml = YamlLoader() class PluginType(Enum): + # plugin type = plugin loader class name START = 'StartPlugin' PARAM = 'ParamPlugin' INSTALL = 'InstallPlugin' + SNAP_CONFIG = 'SnapConfigPlugin' PY_SCRIPT = 'PyScriptPlugin' @@ -125,7 +127,7 @@ class PluginContext(object): self.options = options self.dev_mode = dev_mode self.stdio = stdio - self.concurrent_exector = ConcurrentExecutor(32) + self.concurrent_executor = ConcurrentExecutor(32) self._return = PluginReturn() def get_return(self): @@ -265,18 +267,28 @@ class PyScriptPlugin(ScriptPlugin): # def init(self, components, ssh_clients, cluster_config, cmd, options, stdio, *arg, **kwargs): # pass +class Null(object): + + def __init__(self): + pass + class ParamPlugin(Plugin): + class ConfigItemType(object): TYPE_STR = None + NULL = Null() def __init__(self, s): try: self._origin = s self._value = 0 + self.value = self.NULL self._format() + if self.value == self.NULL: + self.value = self._origin except: raise Exception("'%s' is not %s" % (self._origin, self._type_str)) @@ -401,10 +413,48 @@ else: self._value = [] + class Dict(ConfigItemType): + + def _format(self): + if self._origin: + if not isinstance(self._origin, dict): + raise Exception("Invalid Value") + self._value = self._origin + else: + self._value = self.value = {} + + class List(ConfigItemType): + + def _format(self): 
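+            # missing or empty values fall back to []; anything that is not a Python list is rejected below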
+ if self._origin: + if not isinstance(self._origin, list): + raise Exception("Invalid value: {} is not a list.".format(self._origin)) + self._value = self._origin + else: + self._value = self.value = [] + + class StringOrKvList(ConfigItemType): + + def _format(self): + if self._origin: + if not isinstance(self._origin, list): + raise Exception("Invalid value: {} is not a list.".format(self._origin)) + for item in self._origin: + if not item: + continue + if not isinstance(item, (str, dict)): + raise Exception("Invalid value: {} should be string or key-value format.".format(item)) + if isinstance(item, dict): + if len(item.keys()) != 1: + raise Exception("Invalid value: {} should be single key-value format".format(item)) + self._value = self._origin + else: + self._value = self.value = [] + class Double(ConfigItemType): def _format(self): - self._value = float(self._origin) if self._origin else 0 + self.value = self._value = float(self._origin) if self._origin else 0 class Boolean(ConfigItemType): @@ -413,10 +463,15 @@ class ParamPlugin(Plugin): self._value = self._origin else: _origin = str(self._origin).lower() - if _origin.isdigit() or _origin in ['true', 'false']: + if _origin == 'true': + self._value = True + elif _origin == 'false': + self._value = False + elif _origin.isdigit(): self._value = bool(self._origin) else: - raise Exception('%s is not Boolean') + raise Exception('%s is not Boolean' % _origin) + self.value = self._value class Integer(ConfigItemType): @@ -426,15 +481,15 @@ class ParamPlugin(Plugin): self._origin = 0 else: _origin = str(self._origin) - if _origin.isdigit(): - self._value = int(_origin) - else: - raise Exception('%s is not Integer') + try: + self.value = self._value = int(_origin) + except: + raise Exception('%s is not Integer' % _origin) class String(ConfigItemType): def _format(self): - self._value = str(self._origin) if self._origin else '' + self.value = self._value = str(self._origin) if self._origin else '' class ConfigItem(object): @@ -519,29 +574,35 @@ class ParamPlugin(Plugin): 'MOMENT': ParamPlugin.Moment, 'TIME': ParamPlugin.Time, 'CAPACITY': ParamPlugin.Capacity, - 'STRING_LIST': ParamPlugin.StringList + 'STRING_LIST': ParamPlugin.StringList, + 'DICT': ParamPlugin.Dict, + 'LIST': ParamPlugin.List, + 'PARAM_LIST': ParamPlugin.StringOrKvList } self._src_data = {} with open(self.def_param_yaml_path, 'rb') as f: configs = yaml.load(f) for conf in configs: - param_type = ConfigUtil.get_value_from_dict(conf, 'type', 'STRING').upper() - if param_type in TYPES: - param_type = TYPES[param_type] - else: - param_type = ParamPlugin.String - - self._src_data[conf['name']] = ParamPlugin.ConfigItem( - name=conf['name'], - param_type=param_type, - default=ConfigUtil.get_value_from_dict(conf, 'default', None), - min_value=ConfigUtil.get_value_from_dict(conf, 'min_value', None), - max_value=ConfigUtil.get_value_from_dict(conf, 'max_value', None), - modify_limit=ConfigUtil.get_value_from_dict(conf, 'modify_limit', None), - require=ConfigUtil.get_value_from_dict(conf, 'require', False), - need_restart=ConfigUtil.get_value_from_dict(conf, 'need_restart', False), - need_redeploy=ConfigUtil.get_value_from_dict(conf, 'need_redeploy', False) - ) + try: + param_type = ConfigUtil.get_value_from_dict(conf, 'type', 'STRING').upper() + if param_type in TYPES: + param_type = TYPES[param_type] + else: + param_type = ParamPlugin.String + + self._src_data[conf['name']] = ParamPlugin.ConfigItem( + name=conf['name'], + param_type=param_type, + 
default=ConfigUtil.get_value_from_dict(conf, 'default', None), + min_value=ConfigUtil.get_value_from_dict(conf, 'min_value', None), + max_value=ConfigUtil.get_value_from_dict(conf, 'max_value', None), + modify_limit=ConfigUtil.get_value_from_dict(conf, 'modify_limit', None), + require=ConfigUtil.get_value_from_dict(conf, 'require', False), + need_restart=ConfigUtil.get_value_from_dict(conf, 'need_restart', False), + need_redeploy=ConfigUtil.get_value_from_dict(conf, 'need_redeploy', False) + ) + except: + pass except: pass return self._src_data @@ -590,6 +651,40 @@ class ParamPlugin(Plugin): return self._params_default +class SnapConfigPlugin(Plugin): + + PLUGIN_TYPE = PluginType.SNAP_CONFIG + CONFIG_YAML = 'snap_config.yaml' + FLAG_FILE = CONFIG_YAML + _KEYCRE = re.compile(r"\$(\w+)") + + def __init__(self, component_name, plugin_path, version, dev_mode): + super(SnapConfigPlugin, self).__init__(component_name, plugin_path, version, dev_mode) + self.config_path = os.path.join(self.plugin_path, self.CONFIG_YAML) + self._config = None + self._file_hash = None + + def __hash__(self): + if self._file_hash is None: + self._file_hash = int(''.join(['%03d' % (ord(v) if isinstance(v, str) else v) for v in FileUtil.checksum(self.config_path)])) + return self._file_hash + + @property + def config(self): + if self._config is None: + with open(self.config_path, 'rb') as f: + self._config = yaml.load(f) + return self._config + + @property + def backup(self): + return self.config.get('backup', []) + + @property + def clean(self): + return self.config.get('clean', []) + + class InstallPlugin(Plugin): class FileItemType(Enum): @@ -621,6 +716,7 @@ class InstallPlugin(Plugin): self.file_map_path = os.path.join(self.plugin_path, self.FILES_MAP_YAML) self._file_map = {} self._file_map_data = None + self._check_value = None @classmethod def var_replace(cls, string, var): @@ -644,6 +740,12 @@ class InstallPlugin(Plugin): return ''.join(done) + @property + def check_value(self): + if self._check_value is None: + self._check_value = os.path.getmtime(self.file_map_path) + return self._check_value + @property def file_map_data(self): if self._file_map_data is None: diff --git a/_repository.py b/_repository.py index 9088c34ed003d07f0988b514d5f6c4a6b1cef5f0..ec8b68cc2f5896b5266024c1e3d20e9a2bb7f7bf 100644 --- a/_repository.py +++ b/_repository.py @@ -25,13 +25,14 @@ import sys import time import hashlib from glob import glob +from multiprocessing import cpu_count +from multiprocessing.pool import Pool from _rpm import Package, PackageInfo, Version from _arch import getBaseArch from tool import DirectoryUtil, FileUtil, YamlLoader from _manager import Manager from _plugin import InstallPlugin -from ssh import LocalClient class LocalPackage(Package): @@ -122,15 +123,7 @@ class LocalPackage(Package): filelinktos.append(os.readlink(target_path)) filemodes.append(-24065) else: - ret = LocalClient().execute_command('md5sum {}'.format(target_path)) - if ret: - m_value = ret.stdout.strip().split(' ')[0].encode('utf-8') - else: - m = hashlib.md5() - with open(target_path, 'rb') as f: - m.update(f.read()) - m_value = m.hexdigest().encode(sys.getdefaultencoding()) - # raise Exception('Failed to get md5sum for {}, error: {}'.format(target_path, ret.stderr)) + m_value = FileUtil.checksum(target_path) m_sum.update(m_value) filemd5s.append(m_value) filelinktos.append('') @@ -149,6 +142,73 @@ class LocalPackage(Package): return self.RpmObject(self.headers, self.files) +class ExtractFileInfo(object): + + def __init__(self, src_path, 
target_path, mode): + self.src_path = src_path + self.target_path = target_path + self.mode = mode + + +class ParallerExtractWorker(object): + + def __init__(self, pkg, files, stdio=None): + self.pkg = pkg + self.files = files + self.stdio = stdio + + @staticmethod + def extract(worker): + with worker.pkg.open() as rpm: + for info in worker.files: + if os.path.exists(info.target_path): + continue + fd = rpm.extractfile(info.src_path) + with FileUtil.open(info.target_path, 'wb', stdio=worker.stdio) as f: + FileUtil.copy_fileobj(fd, f) + if info.mode != 0o744: + os.chmod(info.target_path, info.mode) + + +class ParallerExtractor(object): + + MAX_PARALLER = cpu_count() + + def __init__(self, pkg, files, stdio=None): + self.pkg = pkg + self.files = files + self.stdio = stdio + + def extract(self): + workers = [] + file_num = len(self.files) + paraler = min(self.MAX_PARALLER, file_num) + size = min(100, file_num / paraler) + size = max(10, size) + index = 0 + while index < file_num: + p_index = index + size + workers.append(ParallerExtractWorker( + self.pkg, + self.files[index:p_index], + stdio=self.stdio + )) + index = p_index + + pool = Pool(processes=paraler) + try: + results = pool.map(ParallerExtractWorker.extract, workers) + for r in results: + if not r: + return False + except KeyboardInterrupt: + if pool: + pool.close() + pool = None + finally: + pool and pool.close() + + class Repository(PackageInfo): _DATA_FILE = '.data' @@ -251,7 +311,7 @@ class Repository(PackageInfo): self.stdio and getattr(self.stdio, 'print', '%s is a shadow repository' % self) return False hash_path = os.path.join(self.repository_dir, '.hash') - if self.hash == pkg.md5 and self.file_check(plugin): + if self.hash == pkg.md5 and self.file_check(plugin) and self.install_time > plugin.check_value: return True self.clear() try: @@ -291,6 +351,8 @@ class Repository(PackageInfo): if path.startswith(n_dir): need_files[path] = os.path.join(need_dirs[n_dir], path[len(n_dir):]) break + + need_extract_files = [] for src_path in need_files: if src_path not in files: raise Exception('%s not found in packge' % src_path) @@ -299,17 +361,17 @@ class Repository(PackageInfo): return idx = files[src_path] if filemd5s[idx]: - fd = rpm.extractfile(src_path) - self.stdio and getattr(self.stdio, 'verbose', print)('extract %s to %s' % (src_path, target_path)) - with FileUtil.open(target_path, 'wb', stdio=self.stdio) as f: - FileUtil.copy_fileobj(fd, f) - mode = filemodes[idx] & 0x1ff - if mode != 0o744: - os.chmod(target_path, mode) + need_extract_files.append(ExtractFileInfo( + src_path, + target_path, + filemodes[idx] & 0x1ff + )) elif filelinktos[idx]: links[target_path] = filelinktos[idx] else: raise Exception('%s is directory' % src_path) + + ParallerExtractor(pkg, need_extract_files, stdio=self.stdio).extract() for link in links: self.stdio and getattr(self.stdio, 'verbose', print)('link %s to %s' % (links[link], link)) diff --git a/_stdio.py b/_stdio.py index 949607d24d2515c05e2e8994539d81c8efacf5c7..67ac4c3408a4e7ba7937a61175bf53afbffe90b9 100644 --- a/_stdio.py +++ b/_stdio.py @@ -23,9 +23,13 @@ from __future__ import absolute_import, division, print_function import os import signal import sys +import fcntl import traceback import inspect2 import six +import logging +from copy import deepcopy +from logging import handlers from enum import Enum from halo import Halo, cursor @@ -35,6 +39,8 @@ from progressbar import AdaptiveETA, Bar, SimpleProgress, ETA, FileTransferSpeed from types import MethodType from inspect2 import Parameter 
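+# Logger backs the lazily-created trace logger (see IO.trace_logger below)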
+from log import Logger + if sys.version_info.major == 3: raw_input = input @@ -55,6 +61,87 @@ class BufferIO(object): return s + +class SysStdin(object): + + NONBLOCK = False + STATS = None + FD = None + + @classmethod + def fileno(cls): + if cls.FD is None: + cls.FD = sys.stdin.fileno() + return cls.FD + + @classmethod + def stats(cls): + if cls.STATS is None: + cls.STATS = fcntl.fcntl(cls.fileno(), fcntl.F_GETFL) + return cls.STATS + + @classmethod + def nonblock(cls): + if cls.NONBLOCK is False: + fcntl.fcntl(cls.fileno(), fcntl.F_SETFL, cls.stats() | os.O_NONBLOCK) + cls.NONBLOCK = True + + @classmethod + def block(cls): + if cls.NONBLOCK: + fcntl.fcntl(cls.fileno(), fcntl.F_SETFL, cls.stats()) + cls.NONBLOCK = False + + @classmethod + def readline(cls, blocked=False): + if blocked: + cls.block() + else: + cls.nonblock() + return cls._readline() + + @classmethod + def read(cls, blocked=False): + return ''.join(cls.readlines(blocked=blocked)) + + @classmethod + def readlines(cls, blocked=False): + if blocked: + cls.block() + else: + cls.nonblock() + return cls._readlines() + + @classmethod + def _readline(cls): + if cls.NONBLOCK: + try: + for line in sys.stdin: + return line + except IOError: + return '' + finally: + cls.block() + else: + return sys.stdin.readline() + + @classmethod + def _readlines(cls): + if cls.NONBLOCK: + lines = [] + try: + for line in sys.stdin: + lines.append(line) + except IOError: + pass + finally: + cls.block() + return lines + else: + return sys.stdin.readlines() + + + + class FormtatText(object): @staticmethod @@ -234,11 +321,11 @@ class IO(object): WARNING_PREV = FormtatText.warning('[WARN]') ERROR_PREV = FormtatText.error('[ERROR]') IS_TTY = sys.stdin.isatty() + INPUT = SysStdin def __init__(self, level, msg_lv=MsgLevel.DEBUG, - trace_logger=None, use_cache=False, track_limit=0, root_io=None, ): self.level = level self.msg_lv = msg_lv - self.trace_logger = trace_logger + self.log_path = None + self.trace_id = None + self.log_name = 'default' + self._trace_logger = None self._log_cache = [] if use_cache else None self._root_io = root_io self.track_limit = track_limit @@ -257,6 +348,34 @@ self._cur_out_obj = self._out_obj self._before_critical = None + def init_trace_logger(self, log_path, log_name=None, trace_id=None): + if self._trace_logger is None: + self.log_path = log_path + if trace_id: + self.trace_id = trace_id + if log_name: + self.log_name = log_name + + def __getstate__(self): + state = {} + for key in self.__dict__: + state[key] = self.__dict__[key] + for key in ['_trace_logger', 'sync_obj', '_out_obj', '_cur_out_obj', '_before_critical']: + state[key] = None + return state + + @property + def trace_logger(self): + if self.log_path and self._trace_logger is None: + self._trace_logger = Logger(self.log_name) + handler = handlers.TimedRotatingFileHandler(self.log_path, when='midnight', interval=1, backupCount=30) + if self.trace_id: + handler.setFormatter(logging.Formatter("[%%(asctime)s.%%(msecs)03d] [%s] [%%(levelname)s] %%(message)s" % self.trace_id, "%Y-%m-%d %H:%M:%S")) + else: + handler.setFormatter(logging.Formatter("[%%(asctime)s.%%(msecs)03d] [%%(levelname)s] %%(message)s", "%Y-%m-%d %H:%M:%S")) + self._trace_logger.addHandler(handler) + return self._trace_logger + @property def log_cache(self): if self._root_io: @@ -417,13 +536,17 @@ msg_lv = self.msg_lv key = "%s-%s" % (pid, msg_lv) if key not in self.sub_ios: - self.sub_ios[key] = 
self.__class__( + sub_io = self.__class__( self.level + 1, msg_lv=msg_lv, - trace_logger=self.trace_logger, track_limit=self.track_limit, root_io=self._root_io if self._root_io else self ) + sub_io.log_name = self.log_name + sub_io.log_path = self.log_path + sub_io.trace_id = self.trace_id + sub_io._trace_logger = self.trace_logger + self.sub_ios[key] = sub_io return self.sub_ios[key] def print_list(self, ary, field_names=None, exp=lambda x: x if isinstance(x, (list, tuple)) else [x], show_index=False, start=0, **kwargs): @@ -445,11 +568,18 @@ class IO(object): table.add_row(row) self.print(table) + def read(self, msg='', blocked=False): + if msg: + self._print(MsgLevel.INFO, msg) + return self.INPUT.read(blocked) + def confirm(self, msg): + msg = '%s [y/n]: ' % msg + self.print(msg, end='') if self.IS_TTY: while True: try: - ans = raw_input('%s [y/n]: ' % msg) + ans = raw_input() if ans == 'y': return True if ans == 'n': @@ -595,6 +725,8 @@ class StdIO(object): self._warn_func = getattr(self.io, "warn", print) def __getattr__(self, item): + if item.startswith('__'): + return super(StdIO, self).__getattribute__(item) if self.io is None: return FAKE_RETURN if item not in self._attrs: diff --git a/core.py b/core.py index 0c220697ed2e95758518ade2db780dd1813caf4a..d25f9a354e49b30cc5e2b6bde7febc61dc7bb0a1 100644 --- a/core.py +++ b/core.py @@ -22,33 +22,25 @@ from __future__ import absolute_import, division, print_function import re import os -import sys import time -import fcntl from optparse import Values +from copy import deepcopy import tempfile from subprocess import call as subprocess_call -from prettytable import PrettyTable -from halo import Halo from ssh import SshClient, SshConfig -from tool import ConfigUtil, FileUtil, DirectoryUtil, YamlLoader, timeout, COMMAND_ENV +from tool import ConfigUtil, FileUtil, DirectoryUtil, YamlLoader, timeout, COMMAND_ENV, OrderedDict from _stdio import MsgLevel from _rpm import Version from _mirror import MirrorRepositoryManager, PackageInfo from _plugin import PluginManager, PluginType, InstallPlugin -from _repository import RepositoryManager, LocalPackage -from _deploy import DeployManager, DeployStatus, DeployConfig, DeployConfigStatus, BASE_DIR_KEY, InnerConfigKeywords -from _lock import LockManager +from _deploy import DeployManager, DeployStatus, DeployConfig, DeployConfigStatus, Deploy from _repository import RepositoryManager, LocalPackage, Repository -from _deploy import ( - DeployManager, DeployStatus, - DeployConfig, DeployConfigStatus, - ParserError, Deploy -) from _errno import EC_SOME_SERVER_STOPED from _lock import LockManager +from _optimize import OptimizeManager +from _environ import ENV_REPO_INSTALL_MODE, ENV_BASE_DIR class ObdHome(object): @@ -65,8 +57,10 @@ class ObdHome(object): self._deploy_manager = None self._plugin_manager = None self._lock_manager = None + self._optimize_manager = None self.stdio = None self._stdio_func = None + self.ssh_clients = {} self.set_stdio(stdio) self.lock_manager.global_sh_lock() @@ -100,6 +94,12 @@ class ObdHome(object): self._lock_manager = LockManager(self.home_path, self.stdio) return self._lock_manager + @property + def optimize_manager(self): + if not self._optimize_manager: + self._optimize_manager = OptimizeManager(self.home_path, stdio=self.stdio) + return self._optimize_manager + def _obd_update_lock(self): self.lock_manager.global_ex_lock() @@ -112,7 +112,7 @@ class ObdHome(object): self._stdio_func = {} if not self.stdio: return - for func in ['start_loading', 'stop_loading', 'print', 
'confirm', 'verbose', 'warn', 'exception', 'error', 'critical', 'print_list']: + for func in ['start_loading', 'stop_loading', 'print', 'confirm', 'verbose', 'warn', 'exception', 'error', 'critical', 'print_list', 'read']: self._stdio_func[func] = getattr(self.stdio, func, _print) def _call_stdio(self, func, msg, *arg, **kwarg): @@ -140,16 +140,23 @@ return errors def get_clients(self, deploy_config, repositories): - ssh_clients = {} - self._call_stdio('start_loading', 'Open ssh connection') + servers = set() + user_config = deploy_config.user + if user_config not in self.ssh_clients: + self.ssh_clients[user_config] = {} + ssh_clients = self.ssh_clients[user_config] + for repository in repositories: cluster_config = deploy_config.components[repository.name] - # ssh check - self.ssh_clients_connect(ssh_clients, cluster_config.servers, deploy_config.user) - self._call_stdio('stop_loading', 'succeed') + for server in cluster_config.servers: + if server not in ssh_clients: + servers.add(server) + if servers: + self.ssh_clients_connect(servers, ssh_clients, user_config) return ssh_clients - def ssh_clients_connect(self, ssh_clients, servers, user_config): + def ssh_clients_connect(self, servers, ssh_clients, user_config): + self._call_stdio('start_loading', 'Open ssh connection') for server in servers: if server not in ssh_clients: ssh_clients[server] = SshClient( @@ -164,6 +171,8 @@ self.stdio ) ssh_clients[server].connect() + self._call_stdio('stop_loading', 'succeed') + return ssh_clients def search_plugin(self, repository, plugin_type, no_found_exit=True): self._call_stdio('verbose', 'Search %s plugin for %s' % (plugin_type.name.lower(), repository.name)) @@ -273,9 +282,13 @@ not self._call_stdio('confirm', 'Found a higher version\n%s\nDo you want to use it?' % pkg) ) or update_if_need is False ): - repositories.append(repository) - self._call_stdio('verbose', 'Use repository %s' % repository) - self._call_stdio('print', '%s-%s already installed.' % (repository.name, repository.version)) + if pkg and repository.release == pkg.release: + pkgs.append(pkg) + self._call_stdio('verbose', '%s is the same as %s, use package %s' % (pkg, repository, pkg)) + else: + repositories.append(repository) + self._call_stdio('verbose', 'Use repository %s' % repository) + self._call_stdio('print', '%s-%s already installed.' % (repository.name, repository.version)) continue if config.version and pkg.version != config.version: self._call_stdio('warn', 'No such package %s-%s-%s. Use similar package %s-%s-%s.' 
% (component, config.version, config.release, pkg.name, pkg.version, pkg.release)) @@ -357,12 +370,22 @@ class ObdHome(object): if deploy_config.components[component_name].servers != deploy.deploy_config.components[component_name].servers: return True return False + if not self.stdio: + raise IOError("IO Not Found") + self._call_stdio('verbose', 'Get Deploy by name') deploy = self.deploy_manager.get_deploy_config(name) param_plugins = {} repositories, pkgs = [], [] is_deployed = deploy and deploy.deploy_info.status not in [DeployStatus.STATUS_CONFIGURED, DeployStatus.STATUS_DESTROYED] is_started = deploy and deploy.deploy_info.status in [DeployStatus.STATUS_RUNNING, DeployStatus.STATUS_STOPPED] + user_input = self._call_stdio('read', '') + if not user_input and not self.stdio.IS_TTY: + time.sleep(0.1) + user_input = self._call_stdio('read', '') + if not user_input: + self._call_stdio('error', 'Input is empty') + return False initial_config = '' if deploy: try: @@ -371,17 +394,23 @@ class ObdHome(object): path = deploy.deploy_config.yaml_path else: path = Deploy.get_temp_deploy_yaml_path(deploy.config_dir) - self._call_stdio('verbose', 'Load %s' % path) - with open(path, 'r') as f: - initial_config = f.read() + if user_input: + initial_config = user_input + else: + self._call_stdio('verbose', 'Load %s' % path) + with open(path, 'r') as f: + initial_config = f.read() except: self._call_stdio('exception', '') msg = 'Save deploy "%s" configuration' % name else: - if not self.stdio: - return False - if not self._call_stdio('confirm', 'No such deploy: %s. Create?' % name): - return False + if user_input: + initial_config = user_input + else: + if not self.stdio: + return False + if not initial_config and not self._call_stdio('confirm', 'No such deploy: %s. Create?' % name): + return False msg = 'Create deploy "%s" configuration' % name if is_deployed: repositories = self.load_local_repositories(deploy.deploy_info) @@ -405,10 +434,11 @@ class ObdHome(object): self.lock_manager.set_try_times(-1) config_status = DeployConfigStatus.UNCHNAGE while True: - tf.seek(0) - self._call_stdio('verbose', '%s %s' % (EDITOR, tf.name)) - subprocess_call([EDITOR, tf.name]) - self._call_stdio('verbose', 'Load %s' % tf.name) + if not user_input: + tf.seek(0) + self._call_stdio('verbose', '%s %s' % (EDITOR, tf.name)) + subprocess_call([EDITOR, tf.name]) + self._call_stdio('verbose', 'Load %s' % tf.name) try: deploy_config = DeployConfig( tf.name, yaml_loader=YamlLoader(self.stdio), @@ -416,8 +446,10 @@ class ObdHome(object): inner_config=deploy.deploy_config.inner_config if deploy else None ) deploy_config.allow_include_error() + if not deploy_config.get_base_dir(): + deploy_config.set_base_dir('/', save=False) except Exception as e: - if confirm(e): + if not user_input and confirm(e): continue break @@ -432,6 +464,8 @@ class ObdHome(object): elif is_deployed: if deploy_config.components.keys() != deploy.deploy_config.components.keys() or is_server_list_change(deploy_config): if not self._call_stdio('confirm', 'Modifications to the deployment architecture take effect after you redeploy the architecture. Are you sure that you want to start a redeployment? '): + if user_input: + return False continue config_status = DeployConfigStatus.NEED_REDEPLOY @@ -449,6 +483,8 @@ class ObdHome(object): break if comp_attr_changed: if not self._call_stdio('confirm', 'Modifications to the version, release or hash of the component take effect after you redeploy the cluster. Are you sure that you want to start a redeployment? 
'): + if user_input: + return False continue config_status = DeployConfigStatus.NEED_REDEPLOY @@ -462,6 +498,8 @@ class ObdHome(object): break if rsync_conf_changed: if not self._call_stdio('confirm', 'Modifications to the rsync config of a deployed cluster take effect after you redeploy the cluster. Are you sure that you want to start a redeployment? '): + if user_input: + return False continue config_status = DeployConfigStatus.NEED_REDEPLOY @@ -522,6 +560,8 @@ class ObdHome(object): errors.append('[%s] %s: %s' % (component_name, server, str(e))) if errors: self._call_stdio('print', '\n'.join(errors)) + if user_input: + return False if self._call_stdio('confirm', 'Modifications take effect after a redeployment. Are you sure that you want to start a redeployment?'): config_status = DeployConfigStatus.NEED_REDEPLOY elif self._call_stdio('confirm', 'Continue to edit?'): @@ -561,7 +601,7 @@ class ObdHome(object): ret = True if deploy: if deploy.deploy_info.status == DeployStatus.STATUS_RUNNING or ( - config_status == DeployConfigStatus.NEED_REDEPLOY and is_deployed + config_status == DeployConfigStatus.NEED_REDEPLOY and is_deployed ): msg += deploy.effect_tip() except Exception as e: @@ -871,12 +911,14 @@ class ObdHome(object): # Check whether the components have the parameter plugins and apply the plugins self.search_param_plugin_and_apply(repositories, deploy_config) - # Parameter check - errors = self.deploy_param_check(repositories, deploy_config) - if errors: - self._call_stdio('stop_loading', 'fail') - self._call_stdio('error', '\n'.join(errors)) - return False + if not getattr(opt, 'skip_param_check', False): + # Parameter check + errors = self.deploy_param_check(repositories, deploy_config) + if errors: + self._call_stdio('stop_loading', 'fail') + self._call_stdio('error', '\n'.join(errors)) + return False + self._call_stdio('stop_loading', 'succeed') # Get the client @@ -885,11 +927,12 @@ class ObdHome(object): gen_config_plugins = self.search_py_script_plugin(repositories, 'generate_config') component_num = len(repositories) + auto_depend = getattr(opt, 'auto_depend', False) for repository in repositories: cluster_config = deploy_config.components[repository.name] self._call_stdio('verbose', 'Call %s for %s' % (gen_config_plugins[repository], repository)) - ret = gen_config_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], opt, self.stdio, deploy_config) + ret = gen_config_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], opt, self.stdio, deploy_config, auto_depend=auto_depend) if ret: component_num -= 1 @@ -984,6 +1027,26 @@ class ObdHome(object): return component_num == 0 + def sort_repository_by_depend(self, repositories, deploy_config): + sorted_repositories = [] + sorted_componets = {} + while repositories: + temp_repositories = [] + for repository in repositories: + cluster_config = deploy_config.components.get(repository.name) + for componet_name in cluster_config.depends: + if componet_name not in sorted_componets: + temp_repositories.append(repository) + break + else: + sorted_componets[repository.name] = 1 + sorted_repositories.append(repository) + if len(temp_repositories) == len(repositories): + sorted_repositories += temp_repositories + break + repositories = temp_repositories + return sorted_repositories + def change_deploy_config_style(self, name, options=Values()): self._call_stdio('verbose', 'Get Deploy by name') deploy = self.deploy_manager.get_deploy_config(name) @@ -1016,6 +1079,20 @@ class 
ObdHome(object): else: components = deploy_config.components.keys() + self._call_stdio('start_loading', 'Load param plugin') + + # Get the repository + if deploy_info.status not in [DeployStatus.STATUS_CONFIGURED, DeployStatus.STATUS_DESTROYED]: + repositories = self.load_local_repositories(deploy_info) + else: + repositories = [] + for component_name in components: + repositories.append(self.repository_manager.get_repository_allow_shadow(component_name, '100000.0')) + + # Check whether the components have the parameter plugins and apply the plugins + self.search_param_plugin_and_apply(repositories, deploy_config) + self._call_stdio('stop_loading', 'succeed') + self._call_stdio('start_loading', 'Change style') try: parsers = {} @@ -1035,6 +1112,72 @@ self._call_stdio('stop_loading', 'fail') return False + def demo(self, opt=Values()): + name = 'demo' + self._call_stdio('verbose', 'Get Deploy by name') + deploy = self.deploy_manager.get_deploy_config(name) + if deploy: + self._call_stdio('verbose', 'Get deploy info') + deploy_info = deploy.deploy_info + self._call_stdio('verbose', 'judge deploy status') + if deploy_info.status == DeployStatus.STATUS_DEPLOYED: + if not self.destroy_cluster(name): + return False + elif deploy_info.status not in [DeployStatus.STATUS_CONFIGURED, DeployStatus.STATUS_DESTROYED]: + self._call_stdio('error', 'Deploy "%s" is %s. You cannot deploy a %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value)) + return False + + components = set() + for component_name in getattr(opt, 'components', '').split(','): + if component_name: + components.add(component_name) + if not components: + self._call_stdio('error', 'Use `-c/--components` to specify the components to be deployed') + return + global_key = 'global' + home_path_key = 'home_path' + global_config = {home_path_key: os.getenv('HOME')} + opt_config = {} + for key in opt.__dict__: + tmp = key.split('.', 1) + if len(tmp) == 1: + if key == home_path_key: + global_config[key] = opt.__dict__[key] + else: + component_name = tmp[0] + if component_name not in components: + component_name = component_name.replace('_', '-') + if component_name not in opt_config: + opt_config[component_name] = {global_key: {}} + if tmp[1] in ['version', 'tag', 'package_hash', 'release']: + _config = opt_config[component_name] + else: + _config = opt_config[component_name][global_key] + _config[tmp[1]] = opt.__dict__[key] + + configs = OrderedDict() + for component_name in components: + configs[component_name] = { + 'servers': ['127.0.0.1'], + global_key: deepcopy(global_config) + } + configs[component_name][global_key][home_path_key] = os.path.join(configs[component_name][global_key][home_path_key], component_name) + if component_name in opt_config: + configs[component_name][global_key].update(opt_config[component_name][global_key]) + del opt_config[component_name][global_key] + configs[component_name].update(opt_config[component_name]) + + with tempfile.NamedTemporaryFile(suffix=".yaml", mode='w') as tf: + yaml_loader = YamlLoader(self.stdio) + yaml_loader.dump(configs, tf) + setattr(opt, 'config', tf.name) + setattr(opt, 'skip_param_check', True) + setattr(opt, 'auto_depend', True) + if not self.genconfig(name, opt): + return False + setattr(opt, 'config', '') + return self.deploy_cluster(name, opt) and self.start_cluster(name, [], opt) + def deploy_cluster(self, name, opt=Values()): self._call_stdio('verbose', 'Get Deploy by name') deploy = self.deploy_manager.get_deploy_config(name) @@ -1080,13 
+1223,21 @@ class ObdHome(object): self._call_stdio('error', '%s\'s servers list is empty.' % component_name) return False - if self.dev_mode: - base_dir = COMMAND_ENV.get(BASE_DIR_KEY, '') + install_mode = COMMAND_ENV.get(ENV_REPO_INSTALL_MODE) + if not install_mode: + install_mode = 'cp' if self.dev_mode else 'ln' + + if install_mode == 'cp': deploy_config.enable_cp_install_mode(save=False) - else: - base_dir = '' + elif install_mode == 'ln': deploy_config.enable_ln_install_mode(save=False) - deploy_config.set_base_dir(base_dir, save=False) + else: + self._call_stdio('error', 'Invalid repository install mode: {}'.format(install_mode)) + return False + + if self.dev_mode: + base_dir = COMMAND_ENV.get(ENV_BASE_DIR, '') + deploy_config.set_base_dir(base_dir, save=False) # Check the best suitable mirror for the components and installation plugins. Install locally repositories, install_plugins = self.search_components_from_mirrors_and_install(deploy_config) @@ -1341,9 +1492,13 @@ class ObdHome(object): self._call_stdio('print', 'Deploy "%s" is running' % name) return True + repositories = self.sort_repository_by_depend(repositories, deploy_config) + strict_check = getattr(options, 'strict_check', False) success = True + repository_dir_map = {} for repository in repositories: + repository_dir_map[repository.name] = repository.repository_dir if repository.name not in components: continue if repository not in start_check_plugins: @@ -1359,6 +1514,8 @@ class ObdHome(object): return False component_num = len(components) + display_repositories = [] + connect_ret = {} for repository in repositories: if repository.name not in components: continue @@ -1373,7 +1530,7 @@ class ObdHome(object): update_deploy_status = update_deploy_status and start_all self._call_stdio('verbose', 'Call %s for %s' % (start_plugins[repository], repository)) - ret = start_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, cmd, options, self.stdio, self.home_path, repository.repository_dir) + ret = start_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, cmd, options, self.stdio, self.home_path, repository.repository_dir, repository_dir_map=repository_dir_map, deploy_name=deploy.name) if ret: need_bootstrap = ret.get_return('need_bootstrap') else: @@ -1385,15 +1542,18 @@ class ObdHome(object): if ret: db = ret.get_return('connect') cursor = ret.get_return('cursor') + connect_ret[repository] = ret.kwargs else: break if need_bootstrap and start_all: - self._call_stdio('print', 'Initialize cluster') + self._call_stdio('start_loading', 'Initialize cluster') self._call_stdio('verbose', 'Call %s for %s' % (bootstrap_plugins[repository], repository)) if not bootstrap_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, cmd, options, self.stdio, cursor): + self._call_stdio('stop_loading', 'fail') self._call_stdio('error', 'Cluster init failed') break + self._call_stdio('stop_loading', 'succeed') if repository in create_tenant_plugins: create_tenant_options = Values({"variables": "ob_tcp_invited_nodes='%'"}) self._call_stdio('verbose', 'Call %s for %s' % (bootstrap_plugins[repository], repository)) @@ -1402,9 +1562,12 @@ class ObdHome(object): if not start_all: component_num -= 1 continue - + display_repositories.append(repository) + + for repository in display_repositories: + cluster_config = deploy_config.components[repository.name] self._call_stdio('verbose', 'Call %s for %s' % (display_plugins[repository], repository)) - if 
display_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, cmd, options, self.stdio, cursor): + if display_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, cmd, options, self.stdio, **connect_ret[repository]): component_num -= 1 if component_num == 0: @@ -1597,7 +1760,7 @@ class ObdHome(object): self._call_stdio('verbose', 'Call %s for %s' % (reload_plugins[repository], repository)) if not reload_plugins[repository]( deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, - cursor=cursor, new_cluster_config=new_cluster_config, repository_dir=repository.repository_dir): + cursor=cursor, new_cluster_config=new_cluster_config, repository_dir=repository.repository_dir, deploy_name=deploy.name): continue component_num -= 1 if component_num == 0: @@ -1627,6 +1790,7 @@ class ObdHome(object): self._call_stdio('start_loading', 'Get local repositories and plugins') # Get the repository repositories = self.load_local_repositories(deploy_info) + repositories = self.sort_repository_by_depend(repositories, deploy_config) # Check whether the components have the parameter plugins and apply the plugins self.search_param_plugin_and_apply(repositories, deploy_config) @@ -1856,6 +2020,9 @@ class ObdHome(object): cluster_configs = {} component_num = len(components) repositories = self.sort_repositories_by_depends(deploy_config, repositories) + repository_dir_map = {} + for repository in repositories: + repository_dir_map[repository.name] = repository.repository_dir for repository in repositories: if repository.name not in components: continue @@ -1883,7 +2050,9 @@ class ObdHome(object): display_plugin=display_plugins[repository], repository=repository, new_cluster_config=new_cluster_config, - new_clients=new_ssh_clients + new_clients=new_ssh_clients, + repository_dir_map=repository_dir_map, + deploy_name=deploy.name, ): component_num -= 1 done_repositories.append(repository) @@ -1926,6 +2095,8 @@ class ObdHome(object): new_clients=new_ssh_clients, rollback=True, bootstrap_plugin=bootstrap_plugins[repository], + repository_dir_map=repository_dir_map, + deploy_name=deploy.name ): deploy_config.update_component(cluster_config) @@ -2146,7 +2317,7 @@ class ObdHome(object): if need_restart and deploy_info.status == DeployStatus.STATUS_RUNNING: self._call_stdio('verbose', 'Call %s for %s' % (start_plugins[current_repository], repository)) setattr(options, 'without_parameter', True) - if not start_plugins[current_repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, self.home_path, repository.repository_dir) and getattr(options, 'force', False) is False: + if not start_plugins[current_repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, self.home_path, repository.repository_dir, deploy_name=deploy.name) and getattr(options, 'force', False) is False: self.install_repositories_to_servers(deploy_config, [current_repository, ], install_plugins, ssh_clients, options) return False @@ -2467,6 +2638,171 @@ class ObdHome(object): self._call_stdio('error', 'Repository(%s) existed' % tag_repository.repository_dir) return True + def _test_optimize_init(self, opts, test_name, deploy_config, cluster_config): + optimize_config_path = getattr(opts, 'optimize_config', None) + if optimize_config_path: + self._call_stdio('verbose', 'load optimize config {}'.format(optimize_config_path)) + self.optimize_manager.load_config(optimize_config_path, 
stdio=self.stdio) + else: + for component, cluster_config in deploy_config.components.items(): + self.optimize_manager.register_component(component, cluster_config.version) + self._call_stdio('verbose', 'load default optimize config for {}'.format(test_name)) + self.optimize_manager.load_default_config(test_name=test_name, stdio=self.stdio) + self._call_stdio('verbose', 'Get optimize config') + optimize_config = self.optimize_manager.optimize_config + check_options_plugin = self.plugin_manager.get_best_py_script_plugin('check_options', 'optimize', '0.1') + self._call_stdio('verbose', 'Call check options plugin for optimize') + return check_options_plugin(deploy_config.components.keys(), [], cluster_config, [], opts, self.stdio, optimize_config=optimize_config) + + @staticmethod + def _get_first_db_and_cursor_from_connect(connect_ret): + dbs = connect_ret.get_return('connect') + cursors = connect_ret.get_return('cursor') + if not dbs or not cursors: + return None, None + if isinstance(dbs, dict) and isinstance(cursors, dict): + tmp_server = list(dbs.keys())[0] + db = dbs[tmp_server] + cursor = cursors[tmp_server] + return db, cursor + else: + return dbs, cursors + + def _test_optimize_operation(self, deploy, optimize_envs, connect_context, stage=None, opts=None, operation='optimize'): + """ + + :param deploy: + :param stage: optimize stage + :param optimize_envs: envs for optimize plugin + :param connect_context: { + "": { + "db": db, + "cursor": cursor, + "connect_kwargs": { + "component": , + "target_server": "server1" # kwargs for connect plugin + } + } + } + :param operation: "optimize" or "recover" + :return: + """ + if operation == 'optimize': + self._call_stdio('verbose', 'Optimize for stage {}'.format(stage)) + elif operation == 'recover': + self._call_stdio('verbose', 'Recover the optimizes') + else: + raise Exception("Invalid optimize operation!") + deploy_config = deploy.deploy_config + ob_cursor = None + odp_cursor = None + cluster_config = None + for component in connect_context.keys(): + self._call_stdio('verbose', 'get cursor for component {}'.format(component)) + connect_context[component] = connect_context.get(component, {}) + cursor = connect_context[component].get('cursor') + db = connect_context[component].get('db') + if not cursor or not db: + self._call_stdio('verbose', 'cursor not found for component {}, try to connect'.format(component)) + connect_kwargs = connect_context[component].get('connect_kwargs', {}) + ret = self._get_connect(deploy, **connect_kwargs) + db, cursor = self._get_first_db_and_cursor_from_connect(ret) + connect_context[component]['db'] = db + cursor = connect_context[component]['cursor'] = cursor + if component in ['oceanbase', 'oceanbase-ce']: + ob_cursor = cursor + elif component in ['obproxy', 'obproxy-ce']: + odp_cursor = cursor + cluster_config = deploy_config.components[component] + operation_plugin = self.plugin_manager.get_best_py_script_plugin(operation, 'optimize', '0.1') + optimize_config = self.optimize_manager.optimize_config + kwargs = dict(optimize_config=optimize_config, stage=stage, ob_cursor=ob_cursor, odp_cursor=odp_cursor, optimize_envs=optimize_envs) + self._call_stdio('verbose', 'Call {} plugin.'.format(operation)) + ret = operation_plugin(deploy_config.components.keys(), [], cluster_config, [], opts, self.stdio, **kwargs) + if ret: + restart_components = ret.get_return('restart_components') + else: + return False + if restart_components: + self._call_stdio('verbose', 'Components {} need 
restart.'.format(','.join(restart_components))) + for component in restart_components: + self._call_stdio('verbose', 'close cursor for {}'.format(component)) + connect_context[component]['cursor'].close() + connect_context[component]['db'].close() + ret = self._restart_cluster_for_optimize(deploy.name, restart_components) + if not ret: + return False + if operation == 'optimize': + for component, connect_item in connect_context.items(): + connect_kwargs = connect_item['connect_kwargs'] + self._call_stdio('verbose', 'reconnect {} by kwargs {}'.format(component, connect_kwargs)) + if connect_kwargs['component_name'] in restart_components: + ret = self._get_connect(deploy, **connect_kwargs) + if not ret: + return False + db, cursor = self._get_first_db_and_cursor_from_connect(ret) + connect_context[component]['db'] = db + connect_context[component]['cursor'] = cursor + for component in restart_components: + self._call_stdio('verbose', '{}: major freeze for component ready'.format(component)) + self._call_stdio('start_loading', 'Waiting for {} ready'.format(component)) + cursor = connect_context[component]['cursor'] + if not self._major_freeze(deploy_config, component, cursor=cursor, tenant=optimize_envs.get('tenant')): + self._call_stdio('stop_loading', 'fail') + return False + self._call_stdio('stop_loading', 'succeed') + return True + + def _major_freeze(self, deploy_config, component, **kwargs): + cluster_config = deploy_config.components[component] + major_freeze_plugin = self.plugin_manager.get_best_py_script_plugin('major_freeze', component, cluster_config.version) + if not major_freeze_plugin: + self._call_stdio('verbose', 'no major freeze plugin for component {}, skip.'.format(component)) + return True + return major_freeze_plugin(deploy_config.components.keys(), [], cluster_config, [], {}, self.stdio, **kwargs) + + def _restart_cluster_for_optimize(self, deploy_name, components): + self._call_stdio('start_loading', 'Restart cluster') + if getattr(self.stdio, 'sub_io'): + stdio = self.stdio.sub_io(msg_lv=MsgLevel.ERROR) + else: + stdio = None + obd = ObdHome(self.home_path, self.dev_mode, stdio=stdio) + obd.lock_manager.set_try_times(-1) + option = Values({'components': ','.join(components), 'without_parameter': True}) + if obd.stop_cluster(name=deploy_name, options=option) and \ + obd.start_cluster(name=deploy_name, options=option) and obd.display_cluster(name=deploy_name): + self._call_stdio('stop_loading', 'succeed') + return True + else: + self._call_stdio('stop_loading', 'fail') + return False + + def _get_connect(self, deploy, component_name, **kwargs): + deploy_config = deploy.deploy_config + cluster_config = deploy_config.components[component_name] + connect_plugin = self.plugin_manager.get_best_py_script_plugin('connect', component_name, cluster_config.version) + ret = connect_plugin(deploy_config.components.keys(), [], cluster_config, [], {}, self.stdio, **kwargs) + if not ret or not ret.get_return('connect'): + return None + return ret + + def create_mysqltest_snap(self, deploy, ssh_clients, repositories, create_snap_plugin, start_plugins, stop_plugins, options, snap_configs, env={}): + deploy_config = deploy.deploy_config + for repository in repositories: + if repository in snap_configs: + cluster_config = deploy_config.components[repository.name] + self._call_stdio('verbose', 'Call %s for %s' % (stop_plugins[repository], repository)) + if not stop_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio): + return False + 
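The reconnect bookkeeping above follows the connect_context layout documented in the _test_optimize_operation docstring: each component keeps its connect_kwargs so the connection can be rebuilt after a restart. A minimal standalone sketch of that pattern, assuming a connect callable that returns a (db, cursor) pair; the names here are illustrative, not the real plugin API:

from typing import Any, Callable, Dict, Set

def refresh_connections(connect_context: Dict[str, Dict[str, Any]],
                        restarted_components: Set[str],
                        connect: Callable[..., Any]) -> bool:
    """Re-open db/cursor for components whose servers were just restarted."""
    for component, item in connect_context.items():
        if item['connect_kwargs']['component_name'] not in restarted_components:
            continue  # existing connection is still valid
        ret = connect(**item['connect_kwargs'])  # stand-in for the connect plugin
        if not ret:
            return False  # mirror the early-exit behavior above
        item['db'], item['cursor'] = ret  # assumed (db, cursor) pair
    return True
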
self._call_stdio('verbose', 'Call %s for %s' % (create_snap_plugin, repository)) + if not create_snap_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env=env, snap_config=snap_configs[repository]): + return False + self._call_stdio('verbose', 'Call %s for %s' % (start_plugins[repository], repository)) + if not start_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, self.home_path, repository.repository_dir, deploy_name=deploy.name): + return False + return True + def mysqltest(self, name, opts): self._call_stdio('verbose', 'Get Deploy by name') deploy = self.deploy_manager.get_deploy_config(name) @@ -2474,9 +2810,15 @@ class ObdHome(object): self._call_stdio('error', 'No such deploy: %s.' % name) return False + fast_reboot = getattr(opts, 'fast_reboot', False) deploy_info = deploy.deploy_info self._call_stdio('verbose', 'Check deploy status') - if deploy_info.status != DeployStatus.STATUS_RUNNING: + if fast_reboot: + setattr(opts, 'without_parameter', True) + status = [DeployStatus.STATUS_DEPLOYED, DeployStatus.STATUS_RUNNING] + else: + status = [DeployStatus.STATUS_RUNNING] + if deploy_info.status not in status: self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value)) return False self._call_stdio('verbose', 'Get deploy configuration') @@ -2518,16 +2860,27 @@ class ObdHome(object): # Get the repository # repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]}) repositories = self.load_local_repositories(deploy_info) + target_repository = None + ob_repository = None for repository in repositories: if repository.name == opts.component: - break - else: + target_repository = repository + if repository.name in ['oceanbase', 'oceanbase-ce']: + ob_repository = repository + + if not target_repository: self._call_stdio('error', 'Can not find the component for mysqltest, use `--component` to select component') return False + if not ob_repository: + self._call_stdio('error', 'Deploy {} must contain the component oceanbase or oceanbase-ce.'.format(deploy.name)) + return False # Check whether the components have the parameter plugins and apply the plugins self.search_param_plugin_and_apply(repositories, deploy_config) self._call_stdio('stop_loading', 'succeed') + if deploy_info.status == DeployStatus.STATUS_DEPLOYED and not self._start_cluster(deploy, repositories): + return False + # Get the client ssh_clients = self.get_clients(deploy_config, repositories) @@ -2544,29 +2897,39 @@ class ObdHome(object): self._call_stdio('print', '%s %s is stopped' % (server, repository.name)) return False - connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository] + connect_plugin = self.search_py_script_plugin(repositories, 'connect')[target_repository] ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, target_server=opts.test_server, sys_root=False) if not ret or not ret.get_return('connect'): return False db = ret.get_return('connect') cursor = ret.get_return('cursor') - - mysqltest_init_plugin = self.plugin_manager.get_best_py_script_plugin('init', 'mysqltest', repository.version) - mysqltest_check_opt_plugin = self.plugin_manager.get_best_py_script_plugin('check_opt', 'mysqltest', repository.version) - mysqltest_check_test_plugin = self.plugin_manager.get_best_py_script_plugin('check_test', 'mysqltest', repository.version) - mysqltest_run_test_plugin = 
self.plugin_manager.get_best_py_script_plugin('run_test', 'mysqltest', repository.version) - mysqltest_collect_log_plugin = self.plugin_manager.get_best_py_script_plugin('collect_log', 'mysqltest', repository.version) - env = opts.__dict__ env['cursor'] = cursor env['host'] = opts.test_server.ip env['port'] = db.port - self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_check_opt_plugin, repository)) + + mysqltest_init_plugin = self.plugin_manager.get_best_py_script_plugin('init', 'mysqltest', ob_repository.version) + mysqltest_check_opt_plugin = self.plugin_manager.get_best_py_script_plugin('check_opt', 'mysqltest', ob_repository.version) + mysqltest_check_test_plugin = self.plugin_manager.get_best_py_script_plugin('check_test', 'mysqltest', ob_repository.version) + mysqltest_run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'mysqltest', ob_repository.version) + mysqltest_collect_log_plugin = self.plugin_manager.get_best_py_script_plugin('collect_log', 'mysqltest', ob_repository.version) + + start_plugins = self.search_py_script_plugin(repositories, 'start') + stop_plugins = self.search_py_script_plugin(repositories, 'stop') + # display_plugin = self.search_py_script_plugin(repositories, 'display')[repository] + + if fast_reboot: + create_snap_plugin = self.plugin_manager.get_best_py_script_plugin('create_snap', 'general', '0.1') + load_snap_plugin = self.plugin_manager.get_best_py_script_plugin('load_snap', 'general', '0.1') + snap_check_plugin = self.plugin_manager.get_best_py_script_plugin('snap_check', 'general', '0.1') + snap_configs = self.search_plugins(repositories, PluginType.SNAP_CONFIG, no_found_exit=False) + + self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_check_opt_plugin, target_repository)) ret = mysqltest_check_opt_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env) if not ret: return False if not env['init_only']: - self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_check_test_plugin, repository)) + self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_check_test_plugin, target_repository)) ret = mysqltest_check_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env) if not ret: self._call_stdio('error', 'Failed to get test set') @@ -2575,23 +2938,61 @@ class ObdHome(object): self._call_stdio('error', 'Test set is empty') return False + use_snap = False if env['need_init'] or env['init_only']: - self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, repository)) + self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, target_repository)) if not mysqltest_init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env): self._call_stdio('error', 'Failed to init for mysqltest') return False + if fast_reboot: + if not self.create_mysqltest_snap(deploy, ssh_clients, repositories, create_snap_plugin, start_plugins, stop_plugins, opts, snap_configs, env): + return False + connect_plugin = self.search_py_script_plugin(repositories, 'connect')[target_repository] + ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, target_server=opts.test_server, sys_root=False) + if not ret or not ret.get_return('connect'): + return False + db = ret.get_return('connect') + cursor = ret.get_return('cursor') + env['cursor'] = cursor + env['host'] = opts.test_server.ip + env['port'] = db.port + self._call_stdio('start_loading', 'Check init') + 
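The fast_reboot path being wired up here replaces a full redeploy with a stop / load-snapshot / start cycle for every snapshot-capable component. A compact sketch of that control flow, with stop, load_snap, and start as hypothetical stand-ins for the stop/load_snap/start plugins invoked above:

def snap_reboot(repositories, snap_configs, stop, load_snap, start):
    """Restore snapshot-capable components instead of redeploying them."""
    for repository in repositories:
        if repository not in snap_configs:
            continue  # no snapshot support for this component
        if not stop(repository):
            return False
        if not load_snap(repository, snap_configs[repository]):
            return False
        if not start(repository):
            return False
    return True
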
env['load_snap'] = True + self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, target_repository)) + mysqltest_init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env) + env['load_snap'] = False + self._call_stdio('stop_loading', 'succeed') + use_snap = True + if env['init_only']: return True + if fast_reboot and use_snap is False: + self._call_stdio('start_loading', 'Check init') + env['load_snap'] = True + mysqltest_init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env) + env['load_snap'] = False + self._call_stdio('stop_loading', 'succeed') + snap_num = 0 + for repository in repositories: + if repository in snap_configs: + cluster_config = deploy_config.components[repository.name] + self._call_stdio('verbose', 'Call %s for %s' % (snap_check_plugin, repository)) + if not snap_check_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env=env, snap_config=snap_configs[repository]): + break + snap_num += 1 + use_snap = len(snap_configs) == snap_num + env['load_snap'] = use_snap + self._call_stdio('verbose', 'test set: {}'.format(env['test_set'])) self._call_stdio('verbose', 'total: {}'.format(len(env['test_set']))) reboot_success = True while True: - self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_run_test_plugin, repository)) + self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_run_test_plugin, target_repository)) ret = mysqltest_run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env) if not ret: break - self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_collect_log_plugin, repository)) + self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_collect_log_plugin, target_repository)) mysqltest_collect_log_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env) if ret.get_return('finished'): @@ -2609,19 +3010,36 @@ class ObdHome(object): while reboot_retries and not reboot_success: reboot_retries -= 1 with timeout(reboot_timeout): - self._call_stdio('start_loading', 'Reboot') - obd = ObdHome(self.home_path, self.dev_mode, stdio=stdio) - obd.lock_manager.set_try_times(-1) - if obd.redeploy_cluster( - name, - opt=Values({'force_kill': True, 'force': True, 'force_delete': True}), search_repo=False): - self._call_stdio('stop_loading', 'succeed') + if use_snap: + self._call_stdio('start_loading', 'Snap Reboot') + for repository in repositories: + if repository in snap_configs: + cluster_config = deploy_config.components[repository.name] + self._call_stdio('verbose', 'Call %s for %s' % (stop_plugins[repository], repository)) + if not stop_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, stdio): + self._call_stdio('stop_loading', 'fail') + continue + self._call_stdio('verbose', 'Call %s for %s' % (load_snap_plugin, repository)) + if not load_snap_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, stdio, env=env, snap_config=snap_configs[repository]): + self._call_stdio('stop_loading', 'fail') + continue + if not start_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, stdio, self.home_path, repository.repository_dir, deploy_name=deploy.name): + self._call_stdio('stop_loading', 'fail') + continue else: - self._call_stdio('stop_loading', 'fail') - continue - obd.lock_manager.set_try_times(6000) - obd = None - connect_plugin = 
self.search_py_script_plugin(repositories, 'connect')[repository] + self._call_stdio('start_loading', 'Reboot') + obd = ObdHome(self.home_path, self.dev_mode, stdio=stdio) + obd.lock_manager.set_try_times(-1) + if not obd.redeploy_cluster( + name, + opt=Values({'force_kill': True, 'force': True, 'force_delete': True}), search_repo=False): + self._call_stdio('stop_loading', 'fail') + continue + obd.lock_manager.set_try_times(6000) + obd = None + + self._call_stdio('stop_loading', 'succeed') + connect_plugin = self.search_py_script_plugin(repositories, 'connect')[target_repository] ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, target_server=opts.test_server, sys_root=False) if not ret or not ret.get_return('connect'): @@ -2630,9 +3048,25 @@ class ObdHome(object): db = ret.get_return('connect') cursor = ret.get_return('cursor') env['cursor'] = cursor - self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, repository)) + + self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, target_repository)) if mysqltest_init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env): + if fast_reboot and use_snap is False: + if not self.create_mysqltest_snap(deploy, ssh_clients, repositories, create_snap_plugin, start_plugins, stop_plugins, opts, snap_configs, env): + return False + use_snap = True + connect_plugin = self.search_py_script_plugin(repositories, 'connect')[target_repository] + ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, + self.stdio, target_server=opts.test_server, sys_root=False) + if not ret or not ret.get_return('connect'): + self._call_stdio('error', 'Failed to connect server') + continue + db = ret.get_return('connect') + cursor = ret.get_return('cursor') + env['cursor'] = cursor + self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, target_repository)) + mysqltest_init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env) reboot_success = True else: self._call_stdio('error', 'Failed to prepare for mysqltest') @@ -2723,69 +3157,90 @@ class ObdHome(object): ssh_clients = self.get_clients(deploy_config, repositories) # Check the status for the deployed cluster - component_status = {} - cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status) - if cluster_status is False or cluster_status == 0: - if self.stdio: - self._call_stdio('error', EC_SOME_SERVER_STOPED) - for repository in component_status: - cluster_status = component_status[repository] - for server in cluster_status: - if cluster_status[server] == 0: - self._call_stdio('print', '%s %s is stopped' % (server, repository.name)) - return False + if not getattr(opts, 'skip_cluster_status_check', False): + component_status = {} + cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status) + if cluster_status is False or cluster_status == 0: + if self.stdio: + self._call_stdio('error', EC_SOME_SERVER_STOPED) + for repository in component_status: + cluster_status = component_status[repository] + for server in cluster_status: + if cluster_status[server] == 0: + self._call_stdio('print', '%s %s is stopped' % (server, repository.name)) + return False ob_repository = None repository = None + env = {'sys_root': False} + odp_db = None + odp_cursor = None + ob_component = None + connect_context = {} for tmp_repository in 
repositories: if tmp_repository.name in ["oceanbase", "oceanbase-ce"]: ob_repository = tmp_repository + ob_component = tmp_repository.name if tmp_repository.name == opts.component: repository = tmp_repository - + if tmp_repository.name in ['obproxy', 'obproxy-ce']: + odp_component = tmp_repository.name + allow_components = ['oceanbase', 'oceanbase-ce'] + for component_name in deploy_config.components: + if component_name in allow_components: + config = deploy_config.components[component_name] + env['user'] = 'root' + env['password'] = config.get_global_conf().get('root_password', '') + env['target_server'] = opts.test_server + break + connect_kwargs = dict(component_name=odp_component, target_server=opts.test_server) + ret = self._get_connect(deploy, **connect_kwargs) + if not ret or not ret.get_return('connect'): + return False + odp_db, odp_cursor = self._get_first_db_and_cursor_from_connect(ret) + connect_context[tmp_repository.name] = {'connect_kwargs': connect_kwargs, 'db': odp_db, + 'cursor': odp_cursor} + if not ob_repository: + self._call_stdio('error', 'Deploy {} must contain the component oceanbase or oceanbase-ce.'.format(deploy.name)) + return False plugin_version = ob_repository.version if ob_repository else repository.version - env = {'sys_root': False} - db = None - cursor = None - odp_db = None - odp_cursor = None - ob_optimization = True - - connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository] - - if repository.name in ['obproxy', 'obproxy-ce']: - ob_optimization = False - allow_components = ['oceanbase', 'oceanbase-ce'] - for component_name in deploy_config.components: - if component_name in allow_components: - config = deploy_config.components[component_name] - env['user'] = 'root' - env['password'] = config.get_global_conf().get('root_password', '') - ob_optimization = True - break - ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, target_server=opts.test_server) - if not ret or not ret.get_return('connect'): - return False - odp_db = ret.get_return('connect') - odp_cursor = ret.get_return('cursor') - - ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, target_server=opts.test_server, **env) + connect_kwargs = dict(component_name=repository.name, **env) + ret = self._get_connect(deploy=deploy, **connect_kwargs) if not ret or not ret.get_return('connect'): return False - db = ret.get_return('connect') - cursor = ret.get_return('cursor') + db, cursor = self._get_first_db_and_cursor_from_connect(ret) + connect_context[ob_component] = {'connect_kwargs': connect_kwargs, 'db': db, 'cursor': cursor} + pre_test_plugin = self.plugin_manager.get_best_py_script_plugin('pre_test', 'sysbench', plugin_version) run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'sysbench', plugin_version) setattr(opts, 'host', opts.test_server.ip) setattr(opts, 'port', db.port) - setattr(opts, 'ob_optimization', ob_optimization) - self._call_stdio('verbose', 'Call %s for %s' % (run_test_plugin, repository)) - if run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, db, cursor, odp_db, odp_cursor): - return True - return False + optimization = getattr(opts, 'optimization', 0) + + self._call_stdio('verbose', 'Call %s for %s' % (pre_test_plugin, repository)) + ret = pre_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, cursor=cursor) + if not ret: + return 
False + kwargs = ret.kwargs + optimization_init = False + try: + if optimization: + if not self._test_optimize_init(opts=opts, test_name='sysbench', deploy_config=deploy_config, cluster_config=cluster_config): + return False + optimization_init = True + if not self._test_optimize_operation(deploy=deploy, stage='test', opts=opts, connect_context=connect_context, optimize_envs=kwargs): + return False + self._call_stdio('verbose', 'Call %s for %s' % (run_test_plugin, repository)) + if run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio): + return True + + return False + finally: + if optimization and optimization_init: + self._test_optimize_operation(deploy=deploy, connect_context=connect_context, optimize_envs=kwargs, operation='recover') def tpch(self, name, opts): self._call_stdio('verbose', 'Get Deploy by name') @@ -2842,25 +3297,28 @@ class ObdHome(object): # Get the client ssh_clients = self.get_clients(deploy_config, repositories) - # Check the status for the deployed cluster - component_status = {} - cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status) - if cluster_status is False or cluster_status == 0: - if self.stdio: - self._call_stdio('error', EC_SOME_SERVER_STOPED) - for repository in component_status: - cluster_status = component_status[repository] - for server in cluster_status: - if cluster_status[server] == 0: - self._call_stdio('print', '%s %s is stopped' % (server, repository.name)) - return False + if not getattr(opts, 'skip_cluster_status_check', False): + # Check the status for the deployed cluster + component_status = {} + cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status) + if cluster_status is False or cluster_status == 0: + if self.stdio: + self._call_stdio('error', EC_SOME_SERVER_STOPED) + for repository in component_status: + cluster_status = component_status[repository] + for server in cluster_status: + if cluster_status[server] == 0: + self._call_stdio('print', '%s %s is stopped' % (server, repository.name)) + return False - connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository] - ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, target_server=opts.test_server) + connect_context = {} + connect_kwargs = dict(component_name=repository.name, target_server=opts.test_server) + ret = self._get_connect(deploy=deploy, **connect_kwargs) if not ret or not ret.get_return('connect'): return False db = ret.get_return('connect') cursor = ret.get_return('cursor') + connect_context[repository.name] = {'connect_kwargs': connect_kwargs, 'db': db, 'cursor': cursor} pre_test_plugin = self.plugin_manager.get_best_py_script_plugin('pre_test', 'tpch', repository.version) run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'tpch', repository.version) @@ -2868,13 +3326,31 @@ class ObdHome(object): setattr(opts, 'host', opts.test_server.ip) setattr(opts, 'port', db.port) + optimization = getattr(opts, 'optimization', 0) self._call_stdio('verbose', 'Call %s for %s' % (pre_test_plugin, repository)) - if pre_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio): + ret = pre_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, cursor=cursor) + if not ret: + return False + kwargs = ret.kwargs + optimization_init = False + try: + if optimization: + if not 
self._test_optimize_init(opts=opts, test_name='tpch', deploy_config=deploy_config, cluster_config=cluster_config): + return False + optimization_init = True + if not self._test_optimize_operation(deploy=deploy, stage='test', opts=opts, connect_context=connect_context, optimize_envs=kwargs): + return False self._call_stdio('verbose', 'Call %s for %s' % (run_test_plugin, repository)) - if run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, db, cursor): + if run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, db, cursor, **kwargs): return True - return False + return False + except Exception as e: + self._call_stdio('error', e) + return False + finally: + if optimization and optimization_init: + self._test_optimize_operation(deploy=deploy, connect_context=connect_context, optimize_envs=kwargs, operation='recover') def update_obd(self, version, install_prefix='/'): self._obd_update_lock() @@ -2925,7 +3401,7 @@ class ObdHome(object): if opts.component not in deploy_config.components: self._call_stdio('error', 'Can not find the component for tpcds, use `--component` to select component') return False - + for component_name in db_components: if component_name in deploy_config.components: db_component = component_name @@ -3030,7 +3506,6 @@ class ObdHome(object): self._call_stdio('start_loading', 'Get local repositories and plugins') # Get the repository - # repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]}) repositories = self.load_local_repositories(deploy_info) # Check whether the components have the parameter plugins and apply the plugins @@ -3041,143 +3516,90 @@ class ObdHome(object): ssh_clients = self.get_clients(deploy_config, repositories) # Check the status for the deployed cluster - component_status = {} - cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status) - if cluster_status is False or cluster_status == 0: - if self.stdio: - self._call_stdio('error', EC_SOME_SERVER_STOPED) - for repository in component_status: - cluster_status = component_status[repository] - for server in cluster_status: - if cluster_status[server] == 0: - self._call_stdio('print', '%s %s is stopped' % (server, repository.name)) - return False + if not getattr(opts, 'skip_cluster_status_check', False): + component_status = {} + cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status) + if cluster_status is False or cluster_status == 0: + if self.stdio: + self._call_stdio('error', EC_SOME_SERVER_STOPED) + for repository in component_status: + cluster_status = component_status[repository] + for server in cluster_status: + if cluster_status[server] == 0: + self._call_stdio('print', '%s %s is stopped' % (server, repository.name)) + return False ob_repository = None repository = None + env = {} + odp_cursor = None + ob_component = None + odp_component = None + connect_context = {} for tmp_repository in repositories: if tmp_repository.name in ["oceanbase", "oceanbase-ce"]: ob_repository = tmp_repository + ob_component = tmp_repository.name if tmp_repository.name == opts.component: repository = tmp_repository - + if tmp_repository.name in ['obproxy', 'obproxy-ce']: + odp_component = tmp_repository.name + allow_components = ['oceanbase', 'oceanbase-ce'] + for component in deploy_info.components: + if component in allow_components: + config = deploy_config.components[component] + 
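The sysbench, tpch, and tpcc paths all share one optimize lifecycle: initialize the optimize config, apply the stage optimizations, run the test, and always recover in a finally block. A minimal sketch of that contract, where the callables stand in for _test_optimize_init, _test_optimize_operation, and the run_test plugin:

def run_with_optimize(init, optimize, recover, run, stage='test'):
    """Apply stage optimizations and guarantee recovery afterwards."""
    inited = False
    try:
        if not init():            # cf. _test_optimize_init(...)
            return False
        inited = True
        if not optimize(stage):   # cf. _test_optimize_operation(stage=stage, ...)
            return False
        return run()              # cf. the run_test plugin call
    finally:
        if inited:
            recover()             # cf. _test_optimize_operation(operation='recover')
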
env['user'] = 'root' + env['password'] = config.get_global_conf().get('root_password', '') + env['target_server'] = opts.test_server + break + connect_kwargs = dict(component_name=odp_component, target_server=opts.test_server) + ret = self._get_connect(deploy, **connect_kwargs) + if not ret or not ret.get_return('connect'): + return False + odp_db, odp_cursor = self._get_first_db_and_cursor_from_connect(ret) + connect_context[odp_component] = {'connect_kwargs': connect_kwargs, 'db': odp_db, 'cursor': odp_cursor} + if not ob_repository: + self._call_stdio('error', 'Deploy {} must contain the component oceanbase or oceanbase-ce.'.format(deploy.name)) + return False plugin_version = ob_repository.version if ob_repository else repository.version - - env = {'sys_root': False} - odp_db = None - odp_cursor = None - ob_optimization = True - ob_component = None - odp_component = None - # ob_cluster_config = None - - connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository] - - if repository.name in ['obproxy', 'obproxy-ce']: - odp_component = repository.name - ob_optimization = False - allow_components = ['oceanbase', 'oceanbase-ce'] - for component in deploy_info.components: - if component in allow_components: - ob_component = component - config = deploy_config.components[component] - env['user'] = 'root' - env['password'] = config.get_global_conf().get('root_password', '') - ob_optimization = True - break - ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, - target_server=opts.test_server) - if not ret or not ret.get_return('connect'): - return False - odp_db = ret.get_return('connect') - odp_cursor = ret.get_return('cursor') - # ob_cluster_config = deploy_config.components[ob_component] - else: - ob_component = opts.component - # ob_cluster_config = cluster_config - - ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, - target_server=opts.test_server, **env) + connect_kwargs = dict(component_name=repository.name, **env) + ret = self._get_connect(deploy=deploy, **connect_kwargs) if not ret or not ret.get_return('connect'): return False - db = ret.get_return('connect') - cursor = ret.get_return('cursor') + db, cursor = self._get_first_db_and_cursor_from_connect(ret) + connect_context[ob_component] = {'connect_kwargs': connect_kwargs, 'db': db, 'cursor': cursor} + pre_test_plugin = self.plugin_manager.get_best_py_script_plugin('pre_test', 'tpcc', plugin_version) - optimize_plugin = self.plugin_manager.get_best_py_script_plugin('optimize', 'tpcc', plugin_version) build_plugin = self.plugin_manager.get_best_py_script_plugin('build', 'tpcc', plugin_version) run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'tpcc', plugin_version) - recover_plugin = self.plugin_manager.get_best_py_script_plugin('recover', 'tpcc', plugin_version) setattr(opts, 'host', opts.test_server.ip) setattr(opts, 'port', db.port) - setattr(opts, 'ob_optimization', ob_optimization) kwargs = {} - optimized = False optimization = getattr(opts, 'optimization', 0) test_only = getattr(opts, 'test_only', False) - components = [] - if getattr(self.stdio, 'sub_io'): - stdio = self.stdio.sub_io() - else: - stdio = None - obd = None + optimization_inited = False try: self._call_stdio('verbose', 'Call %s for %s' % (pre_test_plugin, repository)) ret = pre_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, - cursor, odp_cursor, **kwargs) + 
cursor=cursor, odp_cursor=odp_cursor, **kwargs) if not ret: return False else: kwargs.update(ret.kwargs) if optimization: - optimized = True - kwargs['optimization_step'] = 'build' - self._call_stdio('verbose', 'Call %s for %s' % (optimize_plugin, repository)) - ret = optimize_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, cursor, - odp_cursor, **kwargs) - if not ret: + if not self._test_optimize_init(opts=opts, test_name='tpcc', deploy_config=deploy_config, cluster_config=cluster_config): + return False + optimization_inited = True + if not self._test_optimize_operation(deploy=deploy, stage='build', opts=opts, connect_context=connect_context, optimize_envs=kwargs): return False - else: - kwargs.update(ret.kwargs) - if kwargs.get('odp_need_reboot') and odp_component: - components.append(odp_component) - if kwargs.get('obs_need_reboot') and ob_component: - components.append(ob_component) - if components: - db.close() - cursor.close() - if odp_db: - odp_db.close() - if odp_cursor: - odp_cursor.close() - self._call_stdio('start_loading', 'Restart cluster') - obd = ObdHome(self.home_path, self.dev_mode, stdio=stdio) - obd.lock_manager.set_try_times(-1) - option = Values({'components': ','.join(components), 'without_parameter': True}) - if obd.stop_cluster(name=name, options=option) and obd.start_cluster(name=name, options=option) and obd.display_cluster(name=name): - self._call_stdio('stop_loading', 'succeed') - else: - self._call_stdio('stop_loading', 'fail') - return False - if repository.name in ['obproxy', 'obproxy-ce']: - ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, - self.stdio, - target_server=opts.test_server) - if not ret or not ret.get_return('connect'): - return False - odp_db = ret.get_return('connect') - odp_cursor = ret.get_return('cursor') - ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, - self.stdio, - target_server=opts.test_server, **env) - if not ret or not ret.get_return('connect'): - return False - db = ret.get_return('connect') - cursor = ret.get_return('cursor') if not test_only: self._call_stdio('verbose', 'Call %s for %s' % (build_plugin, repository)) + cursor = connect_context[ob_component]['cursor'] + if odp_component: + odp_cursor = connect_context[odp_component]['cursor'] ret = build_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, cursor, odp_cursor, **kwargs) if not ret: @@ -3185,17 +3607,12 @@ class ObdHome(object): else: kwargs.update(ret.kwargs) if optimization: - kwargs['optimization_step'] = 'test' - self._call_stdio('verbose', 'Call %s for %s' % (optimize_plugin, repository)) - ret = optimize_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, cursor, - odp_cursor, **kwargs) + ret = self._test_optimize_operation(deploy=deploy, stage='test', opts=opts, connect_context=connect_context, optimize_envs=kwargs) if not ret: return False - else: - kwargs.update(ret.kwargs) self._call_stdio('verbose', 'Call %s for %s' % (run_test_plugin, repository)) - ret = run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, cursor, - odp_cursor, **kwargs) + cursor = connect_context[ob_component]['cursor'] + ret = run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, cursor, **kwargs) if not ret: return False else: @@ -3205,22 +3622,8 @@ class ObdHome(object): 
self._call_stdio('error', e) return False finally: - if optimization and optimized: - self._call_stdio('verbose', 'Call %s for %s' % (recover_plugin, repository)) - if not recover_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, - cursor, odp_cursor, **kwargs): - return False - if components and obd: - self._call_stdio('start_loading', 'Restart cluster') - option = Values({'components': ','.join(components), 'without_parameter': True}) - if obd.stop_cluster(name=name, options=option) and obd.start_cluster(name=name, options=option): - self._call_stdio('stop_loading', 'succeed') - else: - self._call_stdio('stop_loading', 'fail') - if db: - db.close() - if odp_db: - odp_db.close() + if optimization and optimization_inited: + self._test_optimize_operation(deploy=deploy, connect_context=connect_context, optimize_envs=kwargs, operation='recover') def db_connect(self, name, opts): self._call_stdio('verbose', 'Get Deploy by name') @@ -3247,7 +3650,7 @@ class ObdHome(object): self._call_stdio('error', '%s not support. %s is allowed' % (opts.component, allow_components)) return False if opts.component not in deploy_config.components: - self._call_stdio('error', 'Can not find the component for tpch, use `--component` to select component') + self._call_stdio('error', 'Can not find the component for db connect, use `--component` to select component') return False cluster_config = deploy_config.components[opts.component] @@ -3266,7 +3669,8 @@ class ObdHome(object): return False self._call_stdio('start_loading', 'Get local repositories and plugins') # Get the repository - repositories = self.load_local_repositories(deploy_info) + repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]}) + # Check whether the components have the parameter plugins and apply the plugins self.search_param_plugin_and_apply(repositories, deploy_config) self._call_stdio('stop_loading', 'succeed') @@ -3322,3 +3726,70 @@ class ObdHome(object): results = context.get('results', []) self._call_stdio("print_list", results, ["Component", "Server", cmd_name.title()], title=cmd_name.title()) return not context.get('failed') + + def dooba(self, name, opts): + self._call_stdio('verbose', 'Get Deploy by name') + deploy = self.deploy_manager.get_deploy_config(name, read_only=True) + if not deploy: + self._call_stdio('error', 'No such deploy: %s.' % name) + return False + + self._call_stdio('verbose', 'Get deploy configuration') + deploy_config = deploy.deploy_config + deploy_info = deploy.deploy_info + + if deploy_info.status in (DeployStatus.STATUS_DESTROYED, DeployStatus.STATUS_CONFIGURED): + self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value)) + return False + + allow_components = ['obproxy', 'obproxy-ce', 'oceanbase', 'oceanbase-ce'] + if opts.component is None: + for component_name in allow_components: + if component_name in deploy_config.components: + opts.component = component_name + break + elif opts.component not in allow_components: + self._call_stdio('error', '%s not support. 
%s is allowed' % (opts.component, allow_components)) + return False + if opts.component not in deploy_config.components: + self._call_stdio('error', + 'Can not find the component for dooba, use `--component` to select component') + return False + + for component in deploy_config.components: + if component in ['oceanbase', 'oceanbase-ce']: + break + else: + self._call_stdio('error', 'Dooba must contain the component oceanbase or oceanbase-ce.') + return False + + cluster_config = deploy_config.components[opts.component] + if not cluster_config.servers: + self._call_stdio('error', '%s server list is empty' % opts.component) + return False + if opts.server is None: + opts.server = cluster_config.servers[0] + else: + for server in cluster_config.servers: + if server.name == opts.server: + opts.server = server + break + else: + self._call_stdio('error', '%s is not a server in %s' % (opts.server, opts.component)) + return False + self._call_stdio('start_loading', 'Get local repositories and plugins') + # Get the repository + repositories = self.load_local_repositories(deploy_info) + plugin_version = None + for repository in repositories: + if repository.name in ['oceanbase', 'oceanbase-ce']: + plugin_version = repository.version + break + # Check whether the components have the parameter plugins and apply the plugins + self.search_param_plugin_and_apply(repositories, deploy_config) + self._call_stdio('stop_loading', 'succeed') + + sync_config_plugin = self.plugin_manager.get_best_py_script_plugin('sync_cluster_config', 'general', '0.1') + sync_config_plugin(deploy_config.components.keys(), [], cluster_config, [], opts, self.stdio) + dooba_plugin = self.plugin_manager.get_best_py_script_plugin('run', 'dooba', plugin_version) + return dooba_plugin(deploy_config.components.keys(), [], cluster_config, [], opts, self.stdio) \ No newline at end of file diff --git a/plugins-requirements.txt b/plugins-requirements.txt index 25e3f9a0bd98d5876e14c5dd4a0bcf38b49bd651..c513edf07513eb9916a655835e57b28f8bf6ff38 100644 --- a/plugins-requirements.txt +++ b/plugins-requirements.txt @@ -1,2 +1,3 @@ MySQL-python==1.2.5 -pycryptodome==3.10.1 \ No newline at end of file +pycryptodome==3.10.1 +bcrypt==3.1.7 \ No newline at end of file diff --git a/plugins-requirements3.txt b/plugins-requirements3.txt index b680690ff291b996af2e68a84ddef79f981b792f..c1c93f3067f5917c73c123aef5092d8df2e825f6 100644 --- a/plugins-requirements3.txt +++ b/plugins-requirements3.txt @@ -1,2 +1,4 @@ PyMySQL==1.0.2 -pycryptodome==3.10.1 \ No newline at end of file +pycryptodome==3.10.1 +bcrypt==4.0.0 +configparser>=5.2.0 \ No newline at end of file diff --git a/plugins/commands/0.1/check_opt.py b/plugins/commands/0.1/check_opt.py index d95d51bd330af28198c70de8875cd5e4f0b18c09..87e63ed75597588c19bb8a7e2395e9c978a54041 100644 --- a/plugins/commands/0.1/check_opt.py +++ b/plugins/commands/0.1/check_opt.py @@ -91,10 +91,10 @@ def check_opt(plugin_context, name, context, *args, **kwargs): if not clients: stdio.error("{} server list is empty".format(','.join(components))) return - if servers is None: if interactive: - servers = [None, ] + servers = cluster_config.servers[:1] + stdio.verbose("Server {} will be used according to the order in the deploy configuration yaml.".format(servers[0])) else: servers = list(clients.keys()) stdio.verbose("Server {} will be used because {} is a non-interactive command".format(", ".join([str(s) for s in servers]), name)) diff --git a/plugins/mysqltest/3.1.0/check_opt.py b/plugins/mysqltest/3.1.0/check_opt.py index 
77b7df9df7d0ae6a0e39dc165b16dcc69a1a74cf..cd33d15468db8250a5aec39803b0246445ef33cc 100644 --- a/plugins/mysqltest/3.1.0/check_opt.py +++ b/plugins/mysqltest/3.1.0/check_opt.py @@ -87,7 +87,9 @@ def check_opt(plugin_context, opt, *args, **kwargs): ob_component = intersection[0] global_config = cluster_config.get_depend_config(ob_component) else: + ob_component = opt["component"] global_config = cluster_config.get_global_conf() + opt['is_business'] = 1 if ob_component == 'oceanbase' else 0 cursor = opt['cursor'] opt['_enable_static_typing_engine'] = None if '_enable_static_typing_engine' in global_config: diff --git a/plugins/mysqltest/3.1.0/init.py b/plugins/mysqltest/3.1.0/init.py index 8aac225b648ed1dfe4d47e5d2a53ea6cf5e20f28..081f7ea1dafb41ca64c883bd231c528a0ee32fd3 100644 --- a/plugins/mysqltest/3.1.0/init.py +++ b/plugins/mysqltest/3.1.0/init.py @@ -22,8 +22,11 @@ from __future__ import absolute_import, division, print_function import re import os -from ssh import LocalClient +import time +import hashlib +from ssh import LocalClient +from tool import FileUtil from _errno import EC_MYSQLTEST_FAILE_NOT_FOUND, EC_MYSQLTEST_PARSE_CMD_FAILED @@ -58,21 +61,23 @@ def get_memory_limit(cursor, client): return 0 -def get_root_server(cursor): - try: - cursor.execute('select * from oceanbase.__all_server where status = \'active\' and with_rootserver=1') - return cursor.fetchone() - except: - pass - return None - - def init(plugin_context, env, *args, **kwargs): + def get_root_server(cursor): + while True: + try: + cursor.execute('select * from oceanbase.__all_server where status = \'active\' and with_rootserver=1') + return cursor.fetchone() + except: + if load_snap: + time.sleep(0.1) + continue + return None + def exec_sql(cmd): ret = re.match('(.*\.sql)(?:\|([^\|]*))?(?:\|([^\|]*))?', cmd) if not ret: stdio.error(EC_MYSQLTEST_PARSE_CMD_FAILED.format(path=cmd)) - return False + return None cmd = ret.groups() sql_file_path1 = os.path.join(init_sql_dir, cmd[0]) sql_file_path2 = os.path.join(plugin_init_sql_dir, cmd[0]) @@ -82,19 +87,27 @@ def init(plugin_context, env, *args, **kwargs): sql_file_path = sql_file_path2 else: stdio.error(EC_MYSQLTEST_FAILE_NOT_FOUND.format(file=cmd[0], path='[%s, %s]' % (init_sql_dir, plugin_init_sql_dir))) - return False - exec_sql_cmd = exec_sql_temp % (cmd[1] if cmd[1] else 'root', cmd[2] if cmd[2] else 'oceanbase', sql_file_path) - ret = LocalClient.execute_command(exec_sql_cmd, stdio=stdio) - if ret: - return True - stdio.error('Failed to Excute %s: %s' % (sql_file_path, ret.stderr.strip())) - return False + return None + if load_snap: + exec_sql_cmd = exec_sql_connect % (cmd[1] if cmd[1] else 'root') + else: + exec_sql_cmd = exec_sql_execute % (cmd[1] if cmd[1] else 'root', cmd[2] if cmd[2] else 'oceanbase', sql_file_path) + + while True: + ret = LocalClient.execute_command(exec_sql_cmd, stdio=stdio) + if ret: + return sql_file_path + if load_snap: + time.sleep(0.1) + continue + stdio.error('Failed to execute %s: %s' % (sql_file_path, ret.stderr.strip())) + return None cluster_config = plugin_context.cluster_config stdio = plugin_context.stdio + load_snap = env.get('load_snap', False) cursor = env['cursor'] obclient_bin = env['obclient_bin'] - mysqltest_bin = env['mysqltest_bin'] server = env['test_server'] root_server = get_root_server(cursor) if root_server: @@ -105,7 +118,8 @@ def init(plugin_context, env, *args, **kwargs): return plugin_context.return_false() init_sql_dir = env['init_sql_dir'] plugin_init_sql_dir = os.path.join(os.path.split(__file__)[0],
'init_sql') - exec_sql_temp = obclient_bin + ' --prompt "OceanBase(\\u@\d)>" -h ' + host + ' -P ' + str(port) + ' -u%s -D%s -c < %s' + exec_sql_execute = obclient_bin + ' --prompt "OceanBase(\\u@\d)>" -h ' + host + ' -P ' + str(port) + ' -u%s -D%s -c < %s' + exec_sql_connect = obclient_bin + ' --prompt "OceanBase(\\u@\d)>" -h ' + host + ' -P ' + str(port) + ' -u%s -e "select 1 from DUAL"' if 'init_sql_files' in env and env['init_sql_files']: init_sql = env['init_sql_files'].split(',') @@ -116,15 +130,21 @@ def init(plugin_context, env, *args, **kwargs): client = plugin_context.clients[server] memory_limit = get_memory_limit(cursor, client) is_mini = memory_limit and parse_size(memory_limit) < (16<<30) - if is_mini: - init_sql = [exec_mini_init, exec_init_user] + if env['is_business']: + init_sql = [exec_mini_init if is_mini else exec_init, exec_init_user_for_oracle, exec_init_user] else: - init_sql = [exec_init, exec_init_user] + init_sql = [exec_mini_init if is_mini else exec_init, exec_init_user] + m_sum = hashlib.md5() if not load_snap else None stdio.start_loading('Execute initialize sql') for sql in init_sql: - if not exec_sql(sql): + sql_file_path = exec_sql(sql) + if not sql_file_path: stdio.stop_loading('fail') return plugin_context.return_false() + m_sum and m_sum.update(FileUtil.checksum(sql_file_path)) stdio.stop_loading('succeed') + + if m_sum: + env['init_file_md5'] = m_sum.hexdigest() return plugin_context.return_true() diff --git a/plugins/mysqltest/3.1.0/init_sql/init_user.sql b/plugins/mysqltest/3.1.0/init_sql/init_user.sql index 6afca72f7d2f064f2baf5158a0a6c7561efa6db8..8e289dce035b2ba7ee612c911417fe5c0327bc43 100644 --- a/plugins/mysqltest/3.1.0/init_sql/init_user.sql +++ b/plugins/mysqltest/3.1.0/init_sql/init_user.sql @@ -2,5 +2,3 @@ use oceanbase; create user 'admin' IDENTIFIED BY 'admin'; grant all on *.* to 'admin' WITH GRANT OPTION; create database obproxy; - -alter system set _enable_split_partition = true; diff --git a/plugins/mysqltest/3.1.0/run_test.py b/plugins/mysqltest/3.1.0/run_test.py index d86e2bb9de340d270a0d3668eb3856cd01eda609..6a2777c99fd5961ce779386f9f22896ac35bb266 100644 --- a/plugins/mysqltest/3.1.0/run_test.py +++ b/plugins/mysqltest/3.1.0/run_test.py @@ -215,6 +215,7 @@ def run_test(plugin_context, env, *args, **kwargs): continue run_test_cases.append(test) if test in reboot_cases: + stdio.print('Reboot cluster because case "{}" is in the reboot cases list.'.format(test)) need_reboot = True if need_reboot: need_reboot = False @@ -324,6 +325,11 @@ def run_test(plugin_context, env, *args, **kwargs): opt['slave_cmp'] = 0 opt['result_file'] = result_file + if not opt['is_business']: + ce_result_file = re.sub(r'\.result$', '.ce.result', opt['result_file']) + if os.path.exists(ce_result_file): + opt['result_file'] = ce_result_file + if 'my_host' in opt or 'oracle_host' in opt: # compare mode pass @@ -335,13 +341,21 @@ def run_test(plugin_context, env, *args, **kwargs): stdio.verbose('query engine result: {}'.format(result.stdout)) if not result: stdio.error('engine failed, exit code %s. 
diff --git a/plugins/obagent/0.1/display.py b/plugins/obagent/0.1/display.py
index 377e784496e9087951335c63abea52df5e91b2ce..1d973e8fcb93790bb677e7689c206ac18ed86391 100644
--- a/plugins/obagent/0.1/display.py
+++ b/plugins/obagent/0.1/display.py
@@ -19,6 +19,7 @@
 
 from __future__ import absolute_import, division, print_function
 
+import socket
 
 def display(plugin_context, cursor, *args, **kwargs):
     stdio = plugin_context.stdio
@@ -34,9 +35,12 @@ def display(plugin_context, cursor, *args, **kwargs):
         else:
             auth = '--user %s:%s' % (config['http_basic_auth_user'], config['http_basic_auth_password'])
         cmd = '''curl %s -H "Content-Type:application/json" -L "http://%s:%s/metrics/stat"''' % (auth, server.ip, config['server_port'])
-
+        ip = server.ip
+        if ip == '127.0.0.1':
+            hostname = socket.gethostname()
+            ip = socket.gethostbyname(hostname)
         result.append({
-            'ip': server.ip,
+            'ip': ip,
             'status': 'active' if client.execute_command(cmd) else 'inactive',
             'server_port': config['server_port'],
             'pprof_port': config['pprof_port']
diff --git a/plugins/obagent/0.1/generate_config.py b/plugins/obagent/0.1/generate_config.py
index fdab8711c7cde9cd849657367ce6e1fc77f678ab..61c1bbf3ec1e699e6364309693dd195af767ced8 100644
--- a/plugins/obagent/0.1/generate_config.py
+++ b/plugins/obagent/0.1/generate_config.py
@@ -21,7 +21,7 @@
 from __future__ import absolute_import, division, print_function
 
 
-def generate_config(plugin_context, deploy_config, *args, **kwargs):
+def generate_config(plugin_context, deploy_config, auto_depend=False, *args, **kwargs):
     cluster_config = plugin_context.cluster_config
     clients = plugin_context.clients
     stdio = plugin_context.stdio
@@ -60,6 +60,11 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
                 cluster_config.update_server_conf(server, 'ob_monitor_status', 'inactive', False)
     else:
         cluster_config.update_global_conf('ob_monitor_status', 'inactive', False)
+        if auto_depend:
+            for depend in depends:
+                if cluster_config.add_depend_component(depend):
+                    cluster_config.update_global_conf('ob_monitor_status', 'active', False)
+                    break
 
     stdio.stop_loading('succeed')
     plugin_context.return_true()
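generate_config above gains an auto_depend switch: when obagent is deployed without an explicit depends entry, the plugin tries to register the dependency itself and only marks ob_monitor_status active if that succeeds. The decision flow, reduced to a sketch with a stub in place of OBD's real cluster config class:

```python
class StubClusterConfig(object):
    # Stand-in for the real cluster config: add_depend_component succeeds
    # only when the named component is part of the same deploy.
    def __init__(self, deployed):
        self.deployed = set(deployed)

    def add_depend_component(self, name):
        return name in self.deployed

def pick_monitor_status(cluster_config, depends, auto_depend):
    # Without a usable dependency the agent has nothing to monitor.
    if auto_depend:
        for depend in depends:
            if cluster_config.add_depend_component(depend):
                return 'active'
    return 'inactive'

print(pick_monitor_status(StubClusterConfig(['oceanbase-ce']), ['oceanbase', 'oceanbase-ce'], True))  # active
```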
diff --git a/plugins/obagent/0.1/init.py b/plugins/obagent/0.1/init.py
index 1cc5fb6ae26ecfa90d905c578993735e1e0a0e83..14456ec404118d2e1dd9b2f70aedf8e25ceea407 100644
--- a/plugins/obagent/0.1/init.py
+++ b/plugins/obagent/0.1/init.py
@@ -50,7 +50,17 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
             if need_clean:
                 client.execute_command("pkill -9 -u `whoami` -f '^%s/bin/monagent -c conf/monagent.yaml'" % home_path)
-            ret = client.execute_command('rm -fr %s' % home_path)
+
+            if client.execute_command('bash -c \'if [[ "$(ls -d {0} 2>/dev/null)" != "" && ! -O {0} ]]; then exit 0; else exit 1; fi\''.format(home_path)):
+                owner = client.execute_command("ls -ld %s | awk '{print $3}'" % home_path).stdout.strip()
+                global_ret = False
+                err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner)
+                stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg))
+                continue
+            need_clean = True
+
+            if need_clean:
+                client.execute_command("pkill -9 -u `whoami` -f '^%s/bin/monagent -c conf/monagent.yaml'" % home_path)
+                ret = client.execute_command('rm -fr %s' % home_path, timeout=-1)
             if not ret:
                 global_ret = False
                 stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=ret.stderr))
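The guard added to init.py above runs a small bash predicate on the remote host before any rm -fr: `ls -d <path>` proves the home path exists and `! -O` proves the current user does not own it, in which case the init aborts and reports the actual owner instead of deleting another user's directory. The same predicate written locally in Python, for clarity (a sketch, not the remote-execution form the plugin uses):

```python
import os

def unsafe_to_clean(path):
    # True when the path exists but is NOT owned by the effective user,
    # the case where the plugin refuses to clean and prints the owner.
    return os.path.exists(path) and os.stat(path).st_uid != os.geteuid()

if unsafe_to_clean('/tmp'):
    print('refusing to clean: current user is not the owner')
```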
diff --git a/plugins/obagent/0.1/parameter.yaml b/plugins/obagent/0.1/parameter.yaml
index 99db386f6c74f875688c9cb7ce8609300e700f7e..ddd98066d4c49203843360d6aad87e81fd10479f 100644
--- a/plugins/obagent/0.1/parameter.yaml
+++ b/plugins/obagent/0.1/parameter.yaml
@@ -151,7 +151,7 @@
 - name: monitor_password
   require: false
   type: STRING
-  default: NULL
+  default: ''
   min_value: NULL
   max_value: NULL
   need_restart: false
@@ -237,4 +237,18 @@
   max_value: NULL
   need_restart: false
   description_en: whether to disable the basic authentication for the debug interface. True is to disable. False is to enable.
-  description_local: 是否禁用 debug 接口的basic auth 认证,true 表示禁用,false 表示不禁用
\ No newline at end of file
+  description_local: 是否禁用 debug 接口的basic auth 认证,true 表示禁用,false 表示不禁用
+- name: target_sync_configs
+  require: false
+  type: PARAM_LIST
+  need_restart: true
+  description_en:
+  description_local: '''将地址同步至指定远端目录
+    target_sync_configs:
+      - host: 192.168.1.1
+        target_dir: /data/prometheus/targets
+        user: user1
+        port: 22
+        # password: *****
+        key_file: xxxxx
+    '''
\ No newline at end of file
diff --git a/plugins/obagent/0.1/reload.py b/plugins/obagent/0.1/reload.py
index 4d6605191cd9f92ee794278198e7cb6c7db49e0c..0034d3c056dad732dc0ba2e389b7408a6d1ee2f5 100644
--- a/plugins/obagent/0.1/reload.py
+++ b/plugins/obagent/0.1/reload.py
@@ -35,8 +35,7 @@ def reload(plugin_context, repository_dir, new_cluster_config, *args, **kwargs):
     clients = plugin_context.clients
     servers = cluster_config.servers
     yaml = YamlLoader(stdio)
-
-    config_map = {
+    config_map = {
         "monitor_password": "root_password",
         "sql_port": "mysql_port",
         "rpc_port": "rpc_port",
@@ -62,13 +61,13 @@ def reload(plugin_context, repository_dir, new_cluster_config, *args, **kwargs):
             with open(path) as f:
                 data = yaml.load(f)['configs']
                 for config in data:
-                    key = config.get('value')
-                    if key and isinstance(key, dict):
-                        key = list(key.keys())[0]
-                        config_kv[key] = key
-
+                    value = config.get('value')
+                    key = config.get('key')
+                    if key and value and isinstance(value, dict):
+                        value = list(value.keys())[0]
+                        config_kv[value] = key
     global_ret = True
-    stdio.start_load('Reload obagent')
+    stdio.start_loading('Reload obagent')
     for server in servers:
         change_conf = deepcopy(global_change_conf)
         client = clients[server]
@@ -111,8 +110,8 @@ def reload(plugin_context, repository_dir, new_cluster_config, *args, **kwargs):
                 stdio.error(EC_OBAGENT_RELOAD_FAILED.format(server=server))
 
     if global_ret:
-        stdio.stop_load('succeed')
+        stdio.stop_loading('succeed')
        return plugin_context.return_true()
     else:
-        stdio.stop_load('fail')
+        stdio.stop_loading('fail')
         return
diff --git a/plugins/obagent/0.1/restart.py b/plugins/obagent/0.1/restart.py
index c2ef6258f9395ccba9a92cf5bb85ca25cf7009a5..79cbbe52730cc83a2e349f6f85f21556dbbf879d 100644
--- a/plugins/obagent/0.1/restart.py
+++ b/plugins/obagent/0.1/restart.py
@@ -26,7 +26,7 @@ import os
 
 class Restart(object):
 
-    def __init__(self, plugin_context, local_home_path, start_plugin, reload_plugin, stop_plugin, connect_plugin, display_plugin, repository, new_cluster_config=None, new_clients=None):
+    def __init__(self, plugin_context, local_home_path, start_plugin, reload_plugin, stop_plugin, connect_plugin, display_plugin, repository, new_cluster_config=None, new_clients=None, deploy_name=None):
         self.local_home_path = local_home_path
         self.plugin_context = plugin_context
         self.components = plugin_context.components
@@ -42,6 +42,7 @@ class Restart(object):
         self.new_clients = new_clients
         self.new_cluster_config = new_cluster_config
         self.sub_io = self.stdio.sub_io()
+        self.deploy_name = deploy_name
 
     def dir_read_check(self, client, path):
         if not client.execute_command('cd %s' % path):
@@ -70,7 +71,7 @@ class Restart(object):
 
         cluster_config = self.new_cluster_config if self.new_cluster_config else self.cluster_config
         self.stdio.verbose('Call %s for %s' % (self.start_plugin, self.repository))
-        if not self.start_plugin(self.components, clients, cluster_config, self.plugin_context.cmd, self.plugin_context.options, self.sub_io, local_home_path=self.local_home_path, repository_dir=self.repository.repository_dir):
+        if not self.start_plugin(self.components, clients, cluster_config, self.plugin_context.cmd, self.plugin_context.options, self.sub_io, local_home_path=self.local_home_path, repository_dir=self.repository.repository_dir, deploy_name=self.deploy_name):
             self.rollback()
             self.stdio.stop_loading('stop_loading', 'fail')
             return False
@@ -87,8 +88,11 @@ class Restart(object):
             new_client.execute_command('sudo chown -R %s: %s' % (client.config.username, home_path))
 
 
-def restart(plugin_context, local_home_path, start_plugin, reload_plugin, stop_plugin, connect_plugin, display_plugin, repository, new_cluster_config=None, new_clients=None, rollback=False, *args, **kwargs):
-    task = Restart(plugin_context, local_home_path, start_plugin, reload_plugin, stop_plugin, connect_plugin, display_plugin, repository, new_cluster_config, new_clients)
+def restart(plugin_context, local_home_path, start_plugin, reload_plugin, stop_plugin, connect_plugin, display_plugin, repository, new_cluster_config=None, new_clients=None, rollback=False, deploy_name=None, *args, **kwargs):
+    task = Restart(plugin_context=plugin_context, local_home_path=local_home_path, start_plugin=start_plugin,
+                   reload_plugin=reload_plugin, stop_plugin=stop_plugin, connect_plugin=connect_plugin,
+                   display_plugin=display_plugin, repository=repository, new_cluster_config=new_cluster_config,
+                   new_clients=new_clients, deploy_name=deploy_name)
     call = task.rollback if rollback else task.restart
     if call():
         plugin_context.return_true()
diff --git a/plugins/obagent/0.1/start.py b/plugins/obagent/0.1/start.py
index 66c445c402ae5b110355e6b6c335c9c7519401b1..ede6e5f65d958eea24f3496981c72245f8f00c1e 100644
--- a/plugins/obagent/0.1/start.py
+++ b/plugins/obagent/0.1/start.py
@@ -33,6 +33,7 @@ from copy import deepcopy
 from Crypto import Random
 from Crypto.Cipher import AES
 
+from ssh import SshClient, SshConfig
 from tool import YamlLoader
 from _errno import *
 
@@ -136,7 +137,7 @@ def generate_aes_b64_key():
     return base64.b64encode(key.encode('utf-8'))
 
 
-def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
+def start(plugin_context, local_home_path, repository_dir, deploy_name=None, *args, **kwargs):
     global stdio
     cluster_config = plugin_context.cluster_config
     clients = plugin_context.clients
@@ -183,7 +184,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
         if key and isinstance(key, dict):
             key = list(key.keys())[0]
             need_encrypted.append(key)
-
+    targets = []
     for server in cluster_config.servers:
         client = clients[server]
         server_config = deepcopy(cluster_config.get_server_conf(server))
@@ -192,7 +193,8 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
         home_path = server_config['home_path']
         remote_pid_path = '%s/run/obagent-%s-%s.pid' % (home_path, server.ip, server_config["server_port"])
         pid_path[server] = remote_pid_path
-
+        server_port = int(server_config['server_port'])
+        targets.append('{}:{}'.format(server.ip, server_port))
         remote_pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip()
         if remote_pid and client.execute_command('ls /proc/%s' % remote_pid):
             continue
@@ -260,8 +262,8 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
                 'compress': True if server_config.get('log_compress', True) else False
             },
             'server': {
-                'address': '0.0.0.0:%d' % int(server_config.get('server_port', 8088)),
-                'adminAddress': '0.0.0.0:%d' % int(server_config.get('pprof_port', 8089)),
+                'address': '0.0.0.0:%d' % server_port,
+                'adminAddress': '0.0.0.0:%d' % int(server_config['pprof_port']),
                 'runDir': 'run'
             },
             'cryptoMethod': server_config['crypto_method'] if server_config.get('crypto_method').lower() in ['aes', 'plain'] else 'plain',
@@ -305,5 +307,43 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
             stdio.warn(msg)
         plugin_context.return_false()
     else:
+        global_config = cluster_config.get_global_conf()
+        target_sync_configs = global_config.get('target_sync_configs', [])
+        stdio.verbose('start to sync target config')
+        data = [{'targets': targets}]
+        default_ssh_config = None
+        for client in clients.values():
+            default_ssh_config = client.config
+            break
+        for target_sync_config in target_sync_configs:
+            host = None
+            target_dir = None
+            try:
+                host = target_sync_config.get('host')
+                target_dir = target_sync_config.get('target_dir')
+                if not host or not target_dir:
+                    continue
+                ssh_config_keys = ['username', 'password', 'port', 'key_file', 'timeout']
+                auth_keys = ['username', 'password', 'key_file']
+                for key in auth_keys:
+                    if key in target_sync_config:
+                        config = SshConfig(host)
+                        break
+                else:
+                    config = deepcopy(default_ssh_config)
+                for key in ssh_config_keys:
+                    if key in target_sync_config:
+                        setattr(config, key, target_sync_config[key])
+                with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
+                    yaml.dump(data, f)
+                    f.flush()
+                    file_name = '{}.yaml'.format(deploy_name or hash(cluster_config))
+                    file_path = os.path.join(target_dir, file_name)
+                    remote_client = SshClient(config)
+                    remote_client.connect()
+                    remote_client.put_file(f.name, file_path)
+            except:
+                stdio.warn('failed to sync target to {}:{}'.format(host, target_dir))
+                stdio.exception('')
         stdio.stop_loading('succeed')
         plugin_context.return_true(need_bootstrap=False)
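start.py above collects every obagent ip:server_port pair into targets and pushes a <deploy_name>.yaml file into each configured target_dir. The document it writes is a single-element list with one targets group, which is the shape a Prometheus file_sd_configs watcher consumes. A sketch of the generated payload (assuming a PyYAML-style emitter, as the plugin's YamlLoader suggests):

```python
import yaml

targets = ['192.168.1.10:8088', '192.168.1.11:8088']  # hypothetical agent endpoints
data = [{'targets': targets}]
print(yaml.safe_dump(data, default_flow_style=False))
# - targets:
#   - 192.168.1.10:8088
#   - 192.168.1.11:8088
```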
diff --git a/plugins/obagent/1.1.0/parameter.yaml b/plugins/obagent/1.1.0/parameter.yaml
index d7abc418a11f5bae31097032ee374b81d84be8f8..bf8897784331e81b4a9605f1955954b7a20b1c5c 100644
--- a/plugins/obagent/1.1.0/parameter.yaml
+++ b/plugins/obagent/1.1.0/parameter.yaml
@@ -151,7 +151,7 @@
 - name: monitor_password
   require: false
   type: STRING
-  default: NULL
+  default: ''
   min_value: NULL
   max_value: NULL
   need_restart: false
@@ -264,4 +264,10 @@
   max_value: NULL
   need_restart: false
   description_en: Working directory for OceanBase Database, needed when log alarm is enabled.
-  description_local: OceanBase 安装路径, 当日志报警开启时需要
\ No newline at end of file
+  description_local: OceanBase 安装路径, 当日志报警开启时需要
+- name: target_sync_configs
+  require: false
+  type: LIST
+  need_restart: true
+  description_en:
+  description_local: 将地址同步至指定远端目录
\ No newline at end of file
diff --git a/plugins/obagent/1.1.0/start.py b/plugins/obagent/1.1.0/start.py
index 8eb9966db01e951e83a051342eee100aa7b5d52c..d2fbef3735e6afb607b6e5039e8d8203c56354f4 100644
--- a/plugins/obagent/1.1.0/start.py
+++ b/plugins/obagent/1.1.0/start.py
@@ -33,6 +33,7 @@ from copy import deepcopy
 from Crypto import Random
 from Crypto.Cipher import AES
 
+from ssh import SshClient, SshConfig
 from tool import YamlLoader
 from _errno import *
 
@@ -136,7 +137,7 @@ def generate_aes_b64_key():
     return base64.b64encode(key.encode('utf-8'))
 
 
-def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
+def start(plugin_context, local_home_path, repository_dir, deploy_name=None, *args, **kwargs):
     global stdio
     cluster_config = plugin_context.cluster_config
     clients = plugin_context.clients
@@ -184,7 +185,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
         if key and isinstance(key, dict):
             key = list(key.keys())[0]
             need_encrypted.append(key)
-
+    targets = []
     for server in cluster_config.servers:
         client = clients[server]
         server_config = deepcopy(cluster_config.get_server_conf(server))
@@ -193,7 +194,8 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
         home_path = server_config['home_path']
         remote_pid_path = '%s/run/obagent-%s-%s.pid' % (home_path, server.ip, server_config["server_port"])
         pid_path[server] = remote_pid_path
-
+        server_port = int(server_config['server_port'])
+        targets.append('{}:{}'.format(server.ip, server_port))
         remote_pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip()
         if remote_pid and client.execute_command('ls /proc/%s' % remote_pid):
             continue
@@ -255,8 +257,8 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
                 'compress': True if server_config.get('log_compress', True) else False
             },
             'server': {
-                'address': '0.0.0.0:%d' % int(server_config.get('server_port', 8088)),
-                'adminAddress': '0.0.0.0:%d' % int(server_config.get('pprof_port', 8089)),
+                'address': '0.0.0.0:%d' % server_port,
+                'adminAddress': '0.0.0.0:%d' % int(server_config['pprof_port']),
                 'runDir': 'run'
             },
             'cryptoMethod': server_config['crypto_method'] if server_config.get('crypto_method').lower() in ['aes', 'plain'] else 'plain',
@@ -300,5 +302,43 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
             stdio.warn(msg)
         plugin_context.return_false()
     else:
+        global_config = cluster_config.get_global_conf()
+        target_sync_configs = global_config.get('target_sync_configs', [])
+        stdio.verbose('start to sync target config')
+        data = [{'targets': targets}]
+        default_ssh_config = None
+        for client in clients.values():
+            default_ssh_config = client.config
+            break
+        for target_sync_config in target_sync_configs:
+            host = None
+            target_dir = None
+            try:
+                host = target_sync_config.get('host')
+                target_dir = target_sync_config.get('target_dir')
+                if not host or not target_dir:
+                    continue
+                ssh_config_keys = ['username', 'password', 'port', 'key_file', 'timeout']
+                auth_keys = ['username', 'password', 'key_file']
+                for key in auth_keys:
+                    if key in target_sync_config:
+                        config = SshConfig(host)
+                        break
+                else:
+                    config = deepcopy(default_ssh_config)
+                for key in ssh_config_keys:
+                    if key in target_sync_config:
+                        setattr(config, key, target_sync_config[key])
+                with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
+                    yaml.dump(data, f)
+                    f.flush()
+                    file_name = '{}.yaml'.format(deploy_name or hash(cluster_config))
+                    file_path = os.path.join(target_dir, file_name)
+                    remote_client = SshClient(config)
+                    remote_client.connect(stdio=stdio)
+                    remote_client.put_file(f.name, file_path, stdio=stdio)
+            except:
+                stdio.warn('failed to sync target to {}:{}'.format(host, target_dir))
+                stdio.exception('')
         stdio.stop_loading('succeed')
         plugin_context.return_true(need_bootstrap=False)
diff --git a/plugins/obproxy/3.1.0/display.py b/plugins/obproxy/3.1.0/display.py
index d1a0d2efa4e2b35f8f652c3f52bd7788c4474e6e..e6fb09679ba19d8307dc46c98e08535062972336 100644
--- a/plugins/obproxy/3.1.0/display.py
+++ b/plugins/obproxy/3.1.0/display.py
@@ -44,4 +44,25 @@ def display(plugin_context, cursor, *args, **kwargs):
             result.append(data)
     stdio.print_list(result, ['ip', 'port', 'prometheus_port', 'status'], lambda x: [x['ip'], x['listen_port'], x['prometheus_listen_port'], x['status']], title='obproxy')
+
+    server = servers[0]
+    with_observer = False
+    server_config = cluster_config.get_server_conf(server)
+    cmd = ''
+    for comp in ['oceanbase', 'oceanbase-ce']:
+        if comp in cluster_config.depends:
+            ob_config = cluster_config.get_depend_config(comp)
+            if not ob_config:
+                continue
+            password = ob_config.get('root_password', '')
+            with_observer = True
+            cmd = 'obclient -h%s -P%s -uroot %s-Doceanbase' % (server.ip, server_config['listen_port'], '-p%s ' % password if password else '')
+            break
+
+    if not with_observer:
+        password = server_config.get('obproxy_sys_password', '')
+        cmd = 'obclient -h%s -P%s -uroot@proxysys %s-Doceanbase' % (server.ip, server_config['listen_port'], '-p%s ' % password if password else '')
+
+    stdio.print(cmd)
     plugin_context.return_true()
diff --git a/plugins/obproxy/3.1.0/generate_config.py b/plugins/obproxy/3.1.0/generate_config.py
index b944f8e9cb93e1d2e9951249304939cf5e681289..73bc2c5e07b3a21eac07f77de3a145d4d6d21b1e 100644
--- a/plugins/obproxy/3.1.0/generate_config.py
+++ b/plugins/obproxy/3.1.0/generate_config.py
@@ -21,7 +21,7 @@
 from __future__ import absolute_import, division, print_function
 
 
-def generate_config(plugin_context, deploy_config, *args, **kwargs):
+def generate_config(plugin_context, deploy_config, auto_depend=False, *args, **kwargs):
     cluster_config = plugin_context.cluster_config
     clients = plugin_context.clients
     stdio = plugin_context.stdio
@@ -44,12 +44,25 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
         cluster_config.update_global_conf('skip_proxy_sys_private_check', True, False)
     if 'enable_strict_kernel_release' not in global_config:
         cluster_config.update_global_conf('enable_strict_kernel_release', False, False)
+
+    if getattr(plugin_context.options, 'mini', False):
+        if 'proxy_mem_limited' not in global_config:
+            cluster_config.update_global_conf('proxy_mem_limited', '200M', False)
+
+    ob_comps = ['oceanbase', 'oceanbase-ce']
     ob_cluster_config = None
-    for comp in ['oceanbase', 'oceanbase-ce']:
+    for comp in ob_comps:
+        if comp in cluster_config.depends:
+            stdio.stop_loading('succeed')
+            return plugin_context.return_true()
         if comp in deploy_config.components:
             ob_cluster_config = deploy_config.components[comp]
-            break
+
+    if auto_depend:
+        for depend in ['oceanbase', 'oceanbase-ce']:
+            if cluster_config.add_depend_component(depend):
+                stdio.stop_loading('succeed')
+                return plugin_context.return_true()
 
     if ob_cluster_config:
         root_servers = {}
diff --git a/plugins/obproxy/3.1.0/init.py b/plugins/obproxy/3.1.0/init.py
index bcae5a67fb647a73ff0f5ceae3294bb4eda2aa9b..1ea7ee954aa5793bd40677b0eeb30f03861fa6ab 100644
--- a/plugins/obproxy/3.1.0/init.py
+++ b/plugins/obproxy/3.1.0/init.py
@@ -51,14 +51,25 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
             if need_clean:
                 client.execute_command("pkill -9 -u `whoami` -f '^bash {home_path}/obproxyd.sh {home_path} {ip} {port} daemon$'".format(home_path=home_path, ip=server.ip, port=server_config.get('listen_port')))
                 client.execute_command("pkill -9 -u `whoami` -f '^%s/bin/obproxy --listen_port %s'" % (home_path, server_config.get('listen_port')))
-            ret = client.execute_command('rm -fr %s' % home_path)
+
+            if client.execute_command('bash -c \'if [[ "$(ls -d {0} 2>/dev/null)" != "" && ! -O {0} ]]; then exit 0; else exit 1; fi\''.format(home_path)):
+                owner = client.execute_command("ls -ld %s | awk '{print $3}'" % home_path).stdout.strip()
+                global_ret = False
+                err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner)
+                stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg))
+                continue
+            need_clean = True
+
+            if need_clean:
+                client.execute_command("pkill -9 -u `whoami` -f '^bash {home_path}/obproxyd.sh {home_path} {ip} {port} daemon$'".format(home_path=home_path, ip=server.ip, port=server_config.get('listen_port')))
+                client.execute_command("pkill -9 -u `whoami` -f '^%s/bin/obproxy --listen_port %s'" % (home_path, server_config.get('listen_port')))
+                ret = client.execute_command('rm -fr %s' % home_path, timeout=-1)
             if not ret:
                 global_ret = False
                 stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=ret.stderr))
                 continue
             if not client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib}'" % home_path):
                 global_ret = False
-                stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path)))
+                stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=home_path)))
 
     if global_ret:
         stdio.stop_loading('succeed')
diff --git a/plugins/obproxy/3.1.0/parameter.yaml b/plugins/obproxy/3.1.0/parameter.yaml
index a436c193a60b4a2c86ff2a06a8f6e43b0655aca1..0f40a1bad97b103265000315eeb3a0973b6f945c 100644
--- a/plugins/obproxy/3.1.0/parameter.yaml
+++ b/plugins/obproxy/3.1.0/parameter.yaml
@@ -372,22 +372,16 @@
 - name: local_bound_ip
   type: STRING
   default: 0.0.0.0
-  max_value: ''
-  min_value: ''
   need_restart: true
   description_en: local bound ip(any)
 - name: obproxy_config_server_url
   type: STRING
   default: ''
-  max_value: ''
-  min_value: ''
   need_restart: true
   description_en: url of config info(rs list and so on)
 - name: proxy_service_mode
   type: STRING
   default: ''
-  max_value: ''
-  min_value: ''
   need_restart: true
   description_en: "proxy deploy and service mode: 1.client(default); 2.server"
 - name: proxy_id
@@ -400,8 +394,6 @@
 - name: app_name
   type: STRING
   default: undefined
-  max_value: ''
-  min_value: ''
   need_restart: true
   description_en: current application name which proxy works for, need defined, only modified when restart
 - name: enable_metadb_used
@@ -414,8 +406,6 @@
 - name: rootservice_cluster_name
   type: STRING
   default: undefined
-  max_value: ''
-  min_value: ''
   need_restart: true
   description_en: default cluster name for rootservice_list
 - name: prometheus_cost_ms_unit
@@ -435,21 +425,15 @@
 - name: obproxy_sys_password
   type: STRING
   default: ''
-  max_value: ''
-  min_value: ''
   need_restart: false
   description_en: password pf obproxy sys user
 - name: observer_sys_password
   type: STRING
   default: ''
-  max_value: ''
-  min_value: ''
   need_restart: false
   description_en: password of observer proxyro user
 - name: observer_root_password
   type: STRING
   default: ''
-  max_value: ''
-  min_value: ''
   need_restart: false
   description_en: password of observer root user
\ No newline at end of file
diff --git a/plugins/obproxy/3.1.0/restart.py b/plugins/obproxy/3.1.0/restart.py
index c53b771f9240357f2ef7571fb68a8aa724dd8e8e..0a646e6f4307235a20d35fb9388c8fbf112c9fc6 100644
--- a/plugins/obproxy/3.1.0/restart.py
+++ b/plugins/obproxy/3.1.0/restart.py
@@ -55,18 +55,17 @@ class Restart(object):
         # self.cursors = None
         # self.dbs = None
 
-    def connect(self):
-        if self.cursors is None:
-            self.stdio.verbose('Call %s for %s' % (self.connect_plugin, self.repository))
-            self.sub_io.start_loading('Connect to obproxy')
-            ret = self.connect_plugin(self.components, self.clients, self.cluster_config, self.plugin_context.cmd, self.plugin_context.options, self.sub_io)
-            if not ret:
-                self.sub_io.stop_loading('fail')
-                return False
-            self.sub_io.stop_loading('succeed')
-            # self.close()
-            self.cursors = ret.get_return('cursor')
-            self.dbs = ret.get_return('connect')
+    def connect(self, cluster_config):
+        self.stdio.verbose('Call %s for %s' % (self.connect_plugin, self.repository))
+        self.sub_io.start_loading('Connect to obproxy')
+        ret = self.connect_plugin(self.components, self.clients, cluster_config, self.plugin_context.cmd, self.plugin_context.options, self.sub_io)
+        if not ret:
+            self.sub_io.stop_loading('fail')
+            return False
+        self.sub_io.stop_loading('succeed')
+        # self.close()
+        self.cursors = ret.get_return('cursor')
+        self.dbs = ret.get_return('connect')
         return True
 
     def dir_read_check(self, client, path):
@@ -77,6 +76,12 @@ class Restart(object):
 
     def restart(self):
         clients = self.clients
+        if self.new_cluster_config:
+            if not self.connect(self.cluster_config):
+                return False
+            self.stdio.verbose('Call %s for %s' % (self.reload_plugin, self.repository))
+            self.reload_plugin(self.components, self.clients, self.cluster_config, [], {}, self.sub_io, cursor=self.cursors, new_cluster_config=self.new_cluster_config, repository_dir=self.repository.repository_dir)
+
         self.stdio.verbose('Call %s for %s' % (self.stop_plugin, self.repository))
         if not self.stop_plugin(self.components, clients, self.cluster_config, self.plugin_context.cmd, self.plugin_context.options, self.sub_io):
             self.stdio.stop_loading('stop_loading', 'fail')
@@ -103,16 +108,12 @@ class Restart(object):
             self.stdio.stop_loading('stop_loading', 'fail')
             return False
 
-        if self.connect():
+        if self.connect(cluster_config):
             if self.bootstrap_plugin:
                 self.stdio.verbose('Call %s for %s' % (self.bootstrap_plugin, self.repository))
                 self.bootstrap_plugin(self.components, clients, cluster_config, self.plugin_context.cmd, self.plugin_context.options, self.sub_io, cursor=self.cursors)
             self.stdio.verbose('Call %s for %s' % (self.display_plugin, self.repository))
             ret = self.display_plugin(self.components, clients, cluster_config, self.plugin_context.cmd, self.plugin_context.options, self.sub_io, cursor=self.cursors)
-            if self.new_cluster_config:
-                self.stdio.verbose('Call %s for %s' % (self.reload_plugin, self.repository))
-                self.reload_plugin(self.components, self.clients, self.cluster_config, [], {}, self.sub_io,
-                                   cursor=self.cursors, new_cluster_config=self.new_cluster_config, repository_dir=self.repository.repository_dir)
             return ret
         return False
diff --git a/plugins/oceanbase/3.1.0/display.py b/plugins/oceanbase/3.1.0/display.py
index 8a60acba0c7e6292f183cf553f6e11390dde97da..3d7cc24ef083edb4a4991caeb48912daa29c4280 100644
--- a/plugins/oceanbase/3.1.0/display.py
+++ b/plugins/oceanbase/3.1.0/display.py
@@ -26,6 +26,7 @@ import time
 def display(plugin_context, cursor, *args, **kwargs):
     stdio = plugin_context.stdio
     stdio.start_loading('Wait for observer init')
+    cluster_config = plugin_context.cluster_config
     try:
         while True:
             try:
@@ -34,6 +35,9 @@ def display(plugin_context, cursor, *args, **kwargs):
                 if servers:
                     stdio.print_list(servers, ['ip', 'version', 'port', 'zone', 'status'], lambda x: [x['svr_ip'], x['build_version'].split('_')[0], x['inner_port'], x['zone'], x['status']], title='observer')
+                    password = cluster_config.get_global_conf().get('root_password', '')
+                    cmd = 'obclient -h%s -P%s -uroot %s-Doceanbase' % (servers[0]['svr_ip'], servers[0]['inner_port'], '-p%s ' % password if password else '')
+                    stdio.print(cmd)
                     stdio.stop_loading('succeed')
                     return plugin_context.return_true()
             except Exception as e:
diff --git a/plugins/oceanbase/3.1.0/file_map.yaml b/plugins/oceanbase/3.1.0/file_map.yaml
index 8b49892553da9731018b4054bbd36793bd8910f4..6296b3a3f876cc0d79903550dcb0a1b85886df91 100644
--- a/plugins/oceanbase/3.1.0/file_map.yaml
+++ b/plugins/oceanbase/3.1.0/file_map.yaml
@@ -2,6 +2,9 @@
   target_path: bin/observer
   type: bin
   mode: 755
+- src_path: ./home/admin/oceanbase/bin
+  target_path: bin
+  type: dir
 - src_path: ./home/admin/oceanbase/etc
   target_path: etc
   type: dir
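Both display plugins above now finish by printing a ready-to-paste obclient command, appending the -p flag only when a root password is actually set. The string assembly is easy to verify in isolation (a sketch with made-up connection values):

```python
def obclient_cmd(host, port, password=''):
    # Mirrors the printed hint: the -p flag is omitted entirely when empty.
    return 'obclient -h%s -P%s -uroot %s-Doceanbase' % (host, port, '-p%s ' % password if password else '')

print(obclient_cmd('127.0.0.1', 2881))            # obclient -h127.0.0.1 -P2881 -uroot -Doceanbase
print(obclient_cmd('127.0.0.1', 2881, 'secret'))  # obclient -h127.0.0.1 -P2881 -uroot -psecret -Doceanbase
```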
diff --git a/plugins/oceanbase/3.1.0/generate_config.py b/plugins/oceanbase/3.1.0/generate_config.py
index 048f6455ec69817a8fb37b22bd1303292884232b..d3c29a5b051a9e0c91b5ea07209dcb327deb7e3d 100644
--- a/plugins/oceanbase/3.1.0/generate_config.py
+++ b/plugins/oceanbase/3.1.0/generate_config.py
@@ -89,6 +89,33 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
         cluster_config.update_global_conf('appname', default_appname, False)
 
     MIN_MEMORY = 8 << 30
+    MIN_CPU_COUNT = 16
+    clog_disk_utilization_threshold_max = 95
+    clog_disk_usage_limit_percentage_max = 98
+    global_config = cluster_config.get_original_global_conf()
+
+    if getattr(plugin_context.options, 'mini', False):
+        if not global_config.get('memory_limit_percentage') and not global_config.get('memory_limit'):
+            cluster_config.update_global_conf('memory_limit', format_size(MIN_MEMORY, 0), False)
+        if not global_config.get('datafile_size') and not global_config.get('datafile_disk_percentage'):
+            cluster_config.update_global_conf('datafile_size', '20G', False)
+        if not global_config.get('clog_disk_utilization_threshold'):
+            cluster_config.update_global_conf('clog_disk_utilization_threshold', clog_disk_utilization_threshold_max, False)
+        if not global_config.get('clog_disk_usage_limit_percentage'):
+            cluster_config.update_global_conf('clog_disk_usage_limit_percentage', clog_disk_usage_limit_percentage_max, False)
+
+    max_syslog_file_count_default = 4
+    if global_config.get('syslog_level') is None:
+        cluster_config.update_global_conf('syslog_level', 'INFO', False)
+    if global_config.get('enable_syslog_recycle') is None:
+        cluster_config.update_global_conf('enable_syslog_recycle', True, False)
+    if global_config.get('enable_syslog_wf') is None:
+        cluster_config.update_global_conf('enable_syslog_wf', True, False)
+    if global_config.get('max_syslog_file_count') is None:
+        cluster_config.update_global_conf('max_syslog_file_count', max_syslog_file_count_default, False)
+    if global_config.get('cluster_id') is None:
+        cluster_config.update_global_conf('cluster_id', 1, False)
+
     for server in cluster_config.servers:
         ip = server.ip
         client = clients[server]
@@ -112,18 +139,6 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
                     cluster_config.update_server_conf(server, 'devname', interface)
                     break
 
-        max_syslog_file_count_default = 4
-        if user_server_config.get('syslog_level') is None:
-            cluster_config.update_server_conf(server, 'syslog_level', 'INFO', False)
-        if user_server_config.get('enable_syslog_recycle') is None:
-            cluster_config.update_server_conf(server, 'enable_syslog_recycle', True, False)
-        if user_server_config.get('enable_syslog_wf') is None:
-            cluster_config.update_server_conf(server, 'enable_syslog_wf', True, False)
-        if user_server_config.get('max_syslog_file_count') is None:
-            cluster_config.update_server_conf(server, 'max_syslog_file_count', max_syslog_file_count_default, False)
-        if server_config.get('cluster_id') is None:
-            cluster_config.update_server_conf(server, 'cluster_id', 1, False)
-
         dirs = {"home_path": server_config['home_path']}
         dirs["data_dir"] = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store')
         dirs["redo_dir"] = server_config['redo_dir'] if server_config.get('redo_dir') else dirs["data_dir"]
@@ -162,10 +177,10 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
         else:
             try:
                 memory_limit = parse_size(server_config.get('memory_limit'))
+                auto_set_memory = True
             except:
                 stdio.error('memory_limit must be an integer')
                 return
-            auto_set_memory = True
 
         auto_set_system_memory = False
         if not user_server_config.get('system_memory'):
@@ -177,12 +192,14 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
             ret = client.execute_command("grep -e 'processor\s*:' /proc/cpuinfo | wc -l")
             if ret and ret.stdout.strip().isdigit():
                 cpu_num = int(ret.stdout)
-                server_config['cpu_count'] = max(16, int(cpu_num - 2))
+                server_config['cpu_count'] = max(MIN_CPU_COUNT, int(cpu_num - 2))
             else:
-                server_config['cpu_count'] = 16
+                server_config['cpu_count'] = MIN_CPU_COUNT
+            cluster_config.update_server_conf(server, 'cpu_count', server_config['cpu_count'], False)
+        elif server_config['cpu_count'] < MIN_CPU_COUNT:
+            cluster_config.update_server_conf(server, 'cpu_count', MIN_CPU_COUNT, False)
+            stdio.warn('(%s): automatically adjust the cpu_count to %s' % (server, MIN_CPU_COUNT))
 
-        cluster_config.update_server_conf(server, 'cpu_count', max(16, server_config['cpu_count']), False)
-
         # disk
         if not server_config.get('datafile_size') and not user_server_config.get('datafile_disk_percentage'):
             disk = {'/': 0}
@@ -224,7 +241,6 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
                 clog_dir_disk = disk[clog_dir_mount]
 
                 if clog_dir_mount == data_dir_mount:
-                    clog_disk_utilization_threshold_max = 95
                     disk_free = data_dir_disk['avail']
                     real_disk_total = data_dir_disk['total']
                     if mounts[dirs['home_path']] == data_dir_mount:
@@ -274,7 +290,7 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
                     datafile_size = parse_size(datafile_size_format)
                     clog_disk_utilization_threshold = max(80, int(100.0 * (disk_used + datafile_size + padding_size + clog_disk_size * 0.8) / real_disk_total))
                     clog_disk_utilization_threshold = min(clog_disk_utilization_threshold, clog_disk_utilization_threshold_max)
-                    clog_disk_usage_limit_percentage = min(int(clog_disk_utilization_threshold / 80.0 * 95), 98)
+                    clog_disk_usage_limit_percentage = min(int(clog_disk_utilization_threshold / 80.0 * 95), clog_disk_usage_limit_percentage_max)
 
                     cluster_config.update_server_conf(server, 'datafile_size', datafile_size_format, False)
                     cluster_config.update_server_conf(server, 'clog_disk_utilization_threshold', clog_disk_utilization_threshold, False)
diff --git a/plugins/oceanbase/3.1.0/init.py b/plugins/oceanbase/3.1.0/init.py
index 68826d72fb1ed6be500a5eaa6720e28bcb3487f9..e89aea7d42881c8ec0274790be805565c261f990 100644
--- a/plugins/oceanbase/3.1.0/init.py
+++ b/plugins/oceanbase/3.1.0/init.py
@@ -37,7 +37,7 @@ def critical(*arg, **kwargs):
 
 def init_dir(server, client, key, path, link_path=None):
     if force:
-        ret = client.execute_command('rm -fr %s' % path)
+        ret = client.execute_command('rm -fr %s' % path, timeout=-1)
         if not ret:
             critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s path' % key, msg=ret.stderr))
             return False
@@ -118,7 +118,17 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
         if need_clean:
             client.execute_command(
                 "pkill -9 -u `whoami` -f '^%s/bin/observer -p %s'" % (home_path, server_config['mysql_port']))
-            ret = client.execute_command('rm -fr %s/*' % home_path)
+
+        if client.execute_command('bash -c \'if [[ "$(ls -d {0} 2>/dev/null)" != "" && ! -O {0} ]]; then exit 0; else exit 1; fi\''.format(home_path)):
+            owner = client.execute_command("ls -ld %s | awk '{print $3}'" % home_path).stdout.strip()
+            err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner)
+            critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg))
+            continue
+        need_clean = True
+
+        if need_clean:
+            client.execute_command(
+                "pkill -9 -u `whoami` -f '^%s/bin/observer -p %s'" % (home_path, server_config['mysql_port']))
+            ret = client.execute_command('rm -fr %s/*' % home_path, timeout=-1)
             if not ret:
                 critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=ret.stderr))
                 continue
@@ -134,7 +144,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
         if ret:
             data_path = server_config['data_dir']
             if need_clean:
-                ret = client.execute_command('rm -fr %s/*' % data_path)
+                ret = client.execute_command('rm -fr %s/*' % data_path, timeout=-1)
                 if not ret:
                     critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=data_path)))
                     continue
@@ -154,7 +164,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
                 # init_dir(server, client, key, server_config['%s_dir' % key], os.path.join(data_path, key))
                 log_dir = server_config['%s_dir' % key]
                 if force:
-                    ret = client.execute_command('rm -fr %s/*' % log_dir)
+                    ret = client.execute_command('rm -fr %s/*' % log_dir, timeout=-1)
                     if not ret:
                         critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='%s dir' % key, msg=InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=log_dir)))
                         continue
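The oceanbase generate_config changes above gate small-footprint defaults behind --mini, writing memory_limit as format_size(8 << 30, 0) and a fixed 20G datafile_size. For reference, the format_size helper (the same algorithm this patch adds to plugins/tpch/3.1.0/pre_test.py) renders byte counts as terse unit strings:

```python
def format_size(size, precision=1):
    # Same algorithm as the helper added in plugins/tpch/3.1.0/pre_test.py,
    # with the local variable renamed to avoid shadowing the builtin.
    units = ['B', 'K', 'M', 'G']
    units_num = len(units) - 1
    idx = 0
    if precision:
        div = 1024.0
        fmt = '%.' + str(precision) + 'f%s'
        limit = 1024
    else:
        div = 1024
        limit = 1024
        fmt = '%d%s'
    while idx < units_num and size >= limit:
        size /= div
        idx += 1
    return fmt % (size, units[idx])

print(format_size(8 << 30, 0))  # '8G', the memory_limit written in mini mode
```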
diff --git a/plugins/oceanbase/3.1.0/restart.py b/plugins/oceanbase/3.1.0/restart.py
index 4de814d2d7b7757e851c9d084a5629e37777755c..22a57759f376074bef896a032bc4936a9bc9268e 100644
--- a/plugins/oceanbase/3.1.0/restart.py
+++ b/plugins/oceanbase/3.1.0/restart.py
@@ -96,7 +96,7 @@ class Restart(object):
     def wait(self):
         if not self.connect():
             return False
-        self.stdio.verbose('server cneck')
+        self.stdio.verbose('server check')
         self.broken_sql("select * from oceanbase.__all_server where status != 'active' or stop_time > 0 or start_service_time = 0")
         self.broken_sql("select * from oceanbase.__all_virtual_clog_stat where is_in_sync= 0 and is_offline = 0")
         return True
diff --git a/plugins/sysbench/3.1.0/run_test.py b/plugins/sysbench/3.1.0/run_test.py
index e60d74f81efb511c2276bfde57408811e2267dfc..257fed6ee664daba33abb770e77918cebf77aa8f 100644
--- a/plugins/sysbench/3.1.0/run_test.py
+++ b/plugins/sysbench/3.1.0/run_test.py
@@ -34,35 +34,6 @@ from ssh import LocalClient
 stdio = None
 
 
-def parse_size(size):
-    _bytes = 0
-    if not isinstance(size, str) or size.isdigit():
-        _bytes = int(size)
-    else:
-        units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
-        match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
-        _bytes = int(match.group(1)) * units[match.group(2)]
-    return _bytes
-
-
-def format_size(size, precision=1):
-    units = ['B', 'K', 'M', 'G']
-    units_num = len(units) - 1
-    idx = 0
-    if precision:
-        div = 1024.0
-        format = '%.' + str(precision) + 'f%s'
-        limit = 1024
-    else:
-        div = 1024
-        limit = 1024
-        format = '%d%s'
-    while idx < units_num and size >= limit:
-        size /= div
-        idx += 1
-    return format % (size, units[idx])
-
-
 def exec_cmd(cmd):
     stdio.verbose('execute: %s' % cmd)
     process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
@@ -74,31 +45,20 @@ def exec_cmd(cmd):
     return process.returncode == 0
 
 
-def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwargs):
+def run_test(plugin_context, *args, **kwargs):
     def get_option(key, default=''):
+        if key in opt_keys:
+            opt_keys.remove(key)
         value = getattr(options, key, default)
         if value is None:
             value = default
         return value
 
-    def execute(cursor, query, args=None):
-        msg = query % tuple(args) if args is not None else query
-        stdio.verbose('execute sql: %s' % msg)
-        # stdio.verbose("query: %s. args: %s" % (query, args))
-        try:
-            cursor.execute(query, args)
-            return cursor.fetchone()
-        except:
-            msg = 'execute sql exception: %s' % msg
-            stdio.exception(msg)
-            raise Exception(msg)
-
     global stdio
-    cluster_config = plugin_context.cluster_config
     stdio = plugin_context.stdio
     options = plugin_context.options
-
-    optimization = get_option('optimization') > 0
-    ob_optimization = get_option('ob_optimization')
+    opt_keys = list(vars(options).keys())
+    for used_key in ['component', 'test_server', 'skip_cluster_status_check', 'obclient_bin', 'optimization']:
+        opt_keys.remove(used_key)
 
     host = get_option('host', '127.0.0.1')
     port = get_option('port', 2881)
@@ -116,157 +76,10 @@ def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwarg
     skip_trx = get_option('skip_trx', '').lower()
     percentile = get_option('percentile', None)
     script_name = get_option('script_name', 'oltp_point_select.lua')
-    obclient_bin = get_option('obclient_bin', 'obclient')
     sysbench_bin = get_option('sysbench_bin', 'sysbench')
     sysbench_script_dir = get_option('sysbench_script_dir', '/usr/sysbench/share/sysbench')
 
-    if tenant_name == 'sys':
-        stdio.error('DO NOT use sys tenant for testing.')
-        return
-
-    ret = LocalClient.execute_command('%s --help' % obclient_bin, stdio=stdio)
-    if not ret:
-        stdio.error('%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % (ret.stderr, obclient_bin))
-        return
-    ret = LocalClient.execute_command('%s --help' % sysbench_bin, stdio=stdio)
-    if not ret:
-        stdio.error('%s\n%s is not an executable file. Please use `--sysbench-bin` to set.\nYou may not have ob-sysbench installed' % (ret.stderr, sysbench_bin))
-        return
-
-    if not script_name.endswith('.lua'):
-        script_name += '.lua'
-    script_path = os.path.join(sysbench_script_dir, script_name)
-    if not os.path.exists(script_path):
-        stdio.error('No such file %s. Please use `--sysbench-script-dir` to set sysbench scrpit dir.\nYou may not have ob-sysbench installed' % script_path)
-        return
-
-    sql = "select * from oceanbase.gv$tenant where tenant_name = %s"
-    max_cpu = 2
-    tenant_meta = None
     try:
-        stdio.verbose('execute sql: %s' % (sql % tenant_name))
-        cursor.execute(sql, [tenant_name])
-        tenant_meta = cursor.fetchone()
-        if not tenant_meta:
-            stdio.error('Tenant %s not exists. Use `obd cluster tenant create` to create tenant.' % tenant_name)
-            return
-        sql = "select * from oceanbase.__all_resource_pool where tenant_id = %d" % tenant_meta['tenant_id']
-        pool = execute(cursor, sql)
-        sql = "select * from oceanbase.__all_unit_config where unit_config_id = %d" % pool['unit_config_id']
-        max_cpu = execute(cursor, sql)['max_cpu']
-    except:
-        stdio.exception('')
-        return
-
-    exec_sql_cmd = "%s -h%s -P%s -u%s@%s %s -A -e" % (obclient_bin, host, port, user, tenant_name, ("-p'%s'" % password) if password else '')
-    ret = LocalClient.execute_command('%s "%s"' % (exec_sql_cmd, 'create database if not exists %s;' % mysql_db), stdio=stdio)
-    if not ret:
-        stdio.error(ret.stderr)
-        return
-
-    sql = ''
-    odp_configs_done = []
-    system_configs_done = []
-    tenant_variables_done = []
-    odp_configs = [
-        # [配置名, 新值, 旧值, 替换条件: lambda n, o: n != o]
-        # ['enable_compression_protocol', False, False, lambda n, o: n != o],
-        ['proxy_mem_limited', format_size(min(max(threads * (8 << 10), 2 << 30), 4 << 30), 0), 0, lambda n, o: parse_size(n) > parse_size(o)],
-        ['enable_prometheus', False, False, lambda n, o: n != o],
-        ['enable_metadb_used', False, False, lambda n, o: n != o],
-        ['enable_standby', False, False, lambda n, o: n != o],
-        ['enable_strict_stat_time', False, False, lambda n, o: n != o],
-        ['use_local_dbconfig', True, True, lambda n, o: n != o],
-    ]
-    system_configs = [
-        # [配置名, 新值, 旧值, 替换条件: lambda n, o: n != o, 是否是租户级]
-        ['enable_auto_leader_switch', False, False, lambda n, o: n != o, False],
-        ['enable_one_phase_commit', False, False, lambda n, o: n != o, False],
-        ['weak_read_version_refresh_interval', '5s', '5s', lambda n, o: n != o, False],
-        ['syslog_level', 'PERF', 'PERF', lambda n, o: n != o, False],
-        ['max_syslog_file_count', 100, 100, lambda n, o: n != o, False],
-        ['enable_syslog_recycle', True, True, lambda n, o: n != o, False],
-        ['trace_log_slow_query_watermark', '10s', '10s', lambda n, o: n != o, False],
-        ['large_query_threshold', '1s', '1s', lambda n, o: n != o, False],
-        ['clog_sync_time_warn_threshold', '200ms', '200ms', lambda n, o: n != o, False],
-        ['syslog_io_bandwidth_limit', '10M', '10M', lambda n, o: n != o, False],
-        ['enable_sql_audit', False, False, lambda n, o: n != o, False],
-        ['sleep', 1],
-        ['enable_perf_event', False, False, lambda n, o: n != o, False],
-        ['clog_max_unconfirmed_log_count', 5000, 5000, lambda n, o: n != o, False],
-        ['autoinc_cache_refresh_interval', '86400s', '86400s', lambda n, o: n != o, False],
-        ['enable_early_lock_release', False, False, lambda n, o: n != o, True],
-        ['default_compress_func', 'lz4_1.0', 'lz4_1.0', lambda n, o: n != o, False],
-        ['_clog_aggregation_buffer_amount', 4, 4, lambda n, o: n != o, False],
-        ['_flush_clog_aggregation_buffer_timeout', '1ms', '1ms', lambda n, o: n != o, False],
-    ]
-
-    try:
-        if odp_cursor and optimization:
-            for config in odp_configs:
-                sql = 'show proxyconfig like "%s"' % config[0]
-                ret = execute(odp_cursor, sql)
-                if ret:
-                    config[2] = ret['value']
-                    if config[3](config[1], config[2]):
-                        sql = 'alter proxyconfig set %s=%%s' % config[0]
-                        odp_configs_done.append(config)
-                        execute(odp_cursor, sql, [config[1]])
-
-        tenant_q = ' tenant="%s"' % tenant_name
-        server_num = len(cluster_config.servers)
-        if optimization and ob_optimization:
-            for config in system_configs:
-                if config[0] == 'sleep':
-                    sleep(config[1])
-                    system_configs_done.append(config)
-                    continue
-                sql = 'show parameters like "%s"' % config[0]
-                if config[4]:
-                    sql += tenant_q
-                ret = execute(cursor, sql)
-                if ret:
-                    config[2] = ret['value']
-                    if config[3](config[1], config[2]):
-                        sql = 'alter system set %s=%%s' % config[0]
-                        if config[4]:
-                            sql += tenant_q
-                        system_configs_done.append(config)
-                        execute(cursor, sql, [config[1]])
-
-        sql = "select count(1) server_num from oceanbase.__all_server where status = 'active'"
-        ret = execute(cursor, sql)
-        if ret:
-            server_num = ret.get("server_num", server_num)
-
-        parallel_max_servers = int(max_cpu * 10)
-        parallel_servers_target = int(max_cpu * server_num * 8)
-
-        tenant_variables = [
-            # [变量名, 新值, 旧值, 替换条件: lambda n, o: n != o]
-            ['ob_timestamp_service', 1, 1, lambda n, o: n != o],
-            ['autocommit', 1, 1, lambda n, o: n != o],
-            ['ob_query_timeout', 36000000000, 36000000000, lambda n, o: n != o],
-            ['ob_trx_timeout', 36000000000, 36000000000, lambda n, o: n != o],
-            ['max_allowed_packet', 67108864, 67108864, lambda n, o: n != o],
-            ['ob_sql_work_area_percentage', 100, 100, lambda n, o: n != o],
-            ['parallel_max_servers', parallel_max_servers, parallel_max_servers, lambda n, o: n != o],
-            ['parallel_servers_target', parallel_servers_target, parallel_servers_target, lambda n, o: n != o]
-        ]
-        select_sql_t = "select value from oceanbase.__all_virtual_sys_variable where tenant_id = %d and name = '%%s'" % tenant_meta['tenant_id']
-        update_sql_t = "ALTER TENANT %s SET VARIABLES %%s = %%%%s" % tenant_name
-
-        for config in tenant_variables:
-            sql = select_sql_t % config[0]
-            ret = execute(cursor, sql)
-            if ret:
-                value = ret['value']
-                config[2] = int(value) if isinstance(value, str) and value.isdigit() else value
-                if config[3](config[1], config[2]):
-                    sql = update_sql_t % config[0]
-                    tenant_variables_done.append(config)
-                    execute(cursor, sql, [config[1]])
-
         sysbench_cmd = "cd %s; %s %s --mysql-host=%s --mysql-port=%s --mysql-user=%s@%s --mysql-db=%s" % (sysbench_script_dir, sysbench_bin, script_name, host, port, user, tenant_name, mysql_db)
 
         if password:
@@ -289,35 +102,11 @@ def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwarg
             sysbench_cmd += ' --skip_trx=%s' % skip_trx
         if percentile:
             sysbench_cmd += ' --percentile=%s' % percentile
-
+        for opt_key in opt_keys:
+            sysbench_cmd += ' --%s=%s' % (opt_key.replace('_', '-'), getattr(options, opt_key))
         if exec_cmd('%s cleanup' % sysbench_cmd) and exec_cmd('%s prepare' % sysbench_cmd) and exec_cmd('%s --db-ps-mode=disable run' % sysbench_cmd):
             return plugin_context.return_true()
     except KeyboardInterrupt:
         pass
     except:
-        stdio.exception('')
-    finally:
-        try:
-            if optimization:
-                for config in tenant_variables_done[::-1]:
-                    if config[3](config[1], config[2]):
-                        sql = update_sql_t % config[0]
-                        execute(cursor, sql, [config[2]])
-
-                for config in system_configs_done[::-1]:
-                    if config[0] == 'sleep':
-                        sleep(config[1])
-                        continue
-                    if config[3](config[1], config[2]):
-                        sql = 'alter system set %s=%%s' % config[0]
-                        if config[4]:
-                            sql += tenant_q
-                        execute(cursor, sql, [config[2]])
-
-                if odp_cursor:
-                    for config in odp_configs_done[::-1]:
-                        if config[3](config[1], config[2]):
-                            sql = 'alter proxyconfig set %s=%%s' % config[0]
-                            execute(odp_cursor, sql, [config[2]])
-        except:
-            pass
+        stdio.exception('')
\ No newline at end of file
diff --git a/plugins/tpcc/3.1.0/build.py b/plugins/tpcc/3.1.0/build.py
index c443ce673181e3ac4c51c5e6f40222dc7e741e4d..85443477f0407eeb372f17d6cc187896ac262c35 100644
--- a/plugins/tpcc/3.1.0/build.py
+++ b/plugins/tpcc/3.1.0/build.py
@@ -175,34 +175,11 @@ def build(plugin_context, cursor, odp_cursor, *args, **kwargs):
     # load data
     stdio.verbose('Start to load data.')
     cmd = '{java_bin} -cp {cp} -Dprop={prop} LoadData'.format(java_bin=java_bin, cp=bmsql_classpath, prop=bmsql_prop_path)
-    stdio.start_progressbar('Load data ', warehouses, widget_type='simple_progress')
     try:
         stdio.verbose('local execute: %s' % cmd)
-        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-        while p.poll() is None:
-            count = get_table_rows('bmsql_warehouse')
-            if count:
-                stdio.update_progressbar(min(count, warehouses - 1))
-            time.sleep(10)
-        code = p.returncode
-        output = p.stdout.read().decode()
-        verbose_msg = 'exited code %s' % code
-        verbose_msg += ', output:\n%s' % output
+        subprocess.call(cmd, shell=True, stderr=subprocess.STDOUT)
     except:
-        output = ''
-        code = 255
-        verbose_msg = 'unknown error'
-        stdio.exception('')
-    stdio.verbose(verbose_msg)
-    if code != 0:
-        stdio.interrupt_progressbar()
-        stdio.error(EC_TPCC_LOAD_DATA_FAILED)
-        return
-    if re.match(r'.*Worker \d+: ERROR: .*', output, re.S):
-        stdio.interrupt_progressbar()
-        stdio.error(EC_TPCC_LOAD_DATA_FAILED)
-        return
-    stdio.finish_progressbar()
+        stdio.exception('failed to load data')
 
     # create index
     stdio.start_loading('create index')
diff --git a/plugins/tpcc/3.1.0/pre_test.py b/plugins/tpcc/3.1.0/pre_test.py
index fb5f1adffd8fd88385fb0eab9f5be4d0f65dee0e..cee1c0c642ebcaca5d454796b3cdd43d0bf1d6e7 100644
--- a/plugins/tpcc/3.1.0/pre_test.py
+++ b/plugins/tpcc/3.1.0/pre_test.py
@@ -174,6 +174,7 @@ def pre_test(plugin_context, cursor, odp_cursor, *args, **kwargs):
             cpu_count = int(serv.get('cpu_total', 0) + 2)
             min_cpu = cpu_count if min_cpu is None else min(cpu_count, min_cpu)
             cpu_total += cpu_count
+        server_num = len(all_services)
     except Exception as e:
         stdio.exception(e)
         stdio.error('Fail to get server status')
@@ -278,5 +279,15 @@ def pre_test(plugin_context, cursor, odp_cursor, *args, **kwargs):
         warehouses=warehouses,
         cpu_total=cpu_total,
         max_memory=max_memory,
-        max_cpu=max_cpu
+        max_cpu=max_cpu,
+        tenant_id=tenant_meta['tenant_id'],
+        tenant=tenant_name,
+        tmp_dir=tmp_dir,
+        server_num=server_num,
+        obclient_bin=obclient_bin,
+        host=host,
+        port=port,
+        user=user,
+        password=password,
+        database=db_name
     )
diff --git a/plugins/tpch/3.1.0/pre_test.py b/plugins/tpch/3.1.0/pre_test.py
index b42bfa811649f94fdcf82855b72caa9cd9a4f261..f6707b41725d4d318935a01b90a1bea52f9e26e4 100644
--- a/plugins/tpch/3.1.0/pre_test.py
+++ b/plugins/tpch/3.1.0/pre_test.py
@@ -32,7 +32,25 @@ from ssh import LocalClient
 from tool import DirectoryUtil
 
 
-def pre_test(plugin_context, *args, **kwargs):
+def format_size(size, precision=1):
+    units = ['B', 'K', 'M', 'G']
+    units_num = len(units) - 1
+    idx = 0
+    if precision:
+        div = 1024.0
+        format = '%.' + str(precision) + 'f%s'
+        limit = 1024
+    else:
+        div = 1024
+        limit = 1024
+        format = '%d%s'
+    while idx < units_num and size >= limit:
+        size /= div
+        idx += 1
+    return format % (size, units[idx])
+
+
+def pre_test(plugin_context, cursor, *args, **kwargs):
     def get_option(key, default=''):
         value = getattr(options, key, default)
         if not value:
@@ -50,6 +68,18 @@ def pre_test(plugin_context, *args, **kwargs):
         stdio.verbose('get %s_path: %s' % (key, path))
         return path if path else default
 
+    def execute(cursor, query, args=None):
+        msg = query % tuple(args) if args is not None else query
+        stdio.verbose('execute sql: %s' % msg)
+        stdio.verbose("query: %s. args: %s" % (query, args))
+        try:
+            cursor.execute(query, args)
+            return cursor.fetchone()
+        except:
+            msg = 'execute sql exception: %s' % msg
+            stdio.exception(msg)
+            raise Exception(msg)
+
     def local_execute_command(command, env=None, timeout=None):
         return LocalClient.execute_command(command, env, timeout, stdio)
 
@@ -65,6 +95,12 @@ def pre_test(plugin_context, *args, **kwargs):
     disable_transfer = get_option('disable_transfer', False)
     remote_tbl_dir = get_option('remote_tbl_dir')
     tenant_name = get_option('tenant', 'test')
+    host = get_option('host', '127.0.0.1')
+    port = get_option('port', 2881)
+    mysql_db = get_option('database', 'test')
+    user = get_option('user', 'root')
+    password = get_option('password', '')
+
     if tenant_name == 'sys':
         stdio.error('DO NOT use sys tenant for testing.')
         return
@@ -91,8 +127,35 @@ def pre_test(plugin_context, *args, **kwargs):
         stdio.verbose('set tmp_dir: %s' % tmp_dir)
         setattr(options, 'tmp_dir', tmp_dir)
 
+    sql = "select * from oceanbase.gv$tenant where tenant_name = %s"
+    try:
+        stdio.verbose('execute sql: %s' % (sql % tenant_name))
+        cursor.execute(sql, [tenant_name])
+        tenant_meta = cursor.fetchone()
+        if not tenant_meta:
+            stdio.error('Tenant %s not exists. Use `obd cluster tenant create` to create tenant.' % tenant_name)
+            return
+        sql = "select * from oceanbase.__all_resource_pool where tenant_id = %d" % tenant_meta['tenant_id']
+        pool = execute(cursor, sql)
+        sql = "select * from oceanbase.__all_unit_config where unit_config_id = %d" % pool['unit_config_id']
+        tenant_unit = execute(cursor, sql)
+        max_cpu = tenant_unit['max_cpu']
+        min_memory = tenant_unit['min_memory']
+        unit_count = pool['unit_count']
+    except:
+        stdio.error('fail to get tenant info')
+        return
+
+    server_num = len(cluster_config.servers)
+    sql = "select count(1) server_num from oceanbase.__all_server where status = 'active'"
+    ret = execute(cursor, sql)
+    if ret:
+        server_num = ret.get("server_num", server_num)
+
     if get_option('test_only'):
-        return plugin_context.return_true()
+        return plugin_context.return_true(
+            max_cpu=max_cpu, min_memory=min_memory, unit_count=unit_count, server_num=server_num, tenant=tenant_name,
+            tenant_id=tenant_meta['tenant_id'], format_size=format_size
+        )
 
     if not remote_tbl_dir:
         stdio.error('Please use --remote-tbl-dir to set a dir for remote tbl files')
@@ -144,6 +207,11 @@ def pre_test(plugin_context, *args, **kwargs):
     stdio.stop_loading('succeed')
     stdio.verbose('set tbl_path: %s' % tbl_path)
     setattr(options, 'tbl_path', tbl_path)
-    return plugin_context.return_true()
+
+    return plugin_context.return_true(
+        obclient_bin=obclient_bin, host=host, port=port, user=user, password=password, database=mysql_db,
+        max_cpu=max_cpu, min_memory=min_memory, unit_count=unit_count, server_num=server_num, tenant=tenant_name,
+        tenant_id=tenant_meta['tenant_id'], format_size=format_size
+    )
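pre_test.py above resolves the test tenant in three hops: oceanbase.gv$tenant yields tenant_id, __all_resource_pool maps that to a unit_config_id, and __all_unit_config supplies max_cpu and min_memory, which later size the load. The lookup chain condensed into one function (a sketch assuming a DB-API cursor that returns dict rows, as the plugin's cursor does):

```python
def tenant_unit_info(cursor, tenant_name):
    # gv$tenant -> __all_resource_pool -> __all_unit_config, as in pre_test.py.
    cursor.execute("select * from oceanbase.gv$tenant where tenant_name = %s", [tenant_name])
    tenant = cursor.fetchone()
    if not tenant:
        return None  # caller should ask the user to create the tenant first
    cursor.execute("select * from oceanbase.__all_resource_pool where tenant_id = %d" % tenant['tenant_id'])
    pool = cursor.fetchone()
    cursor.execute("select * from oceanbase.__all_unit_config where unit_config_id = %d" % pool['unit_config_id'])
    unit = cursor.fetchone()
    return {'tenant_id': tenant['tenant_id'], 'max_cpu': unit['max_cpu'],
            'min_memory': unit['min_memory'], 'unit_count': pool['unit_count']}
```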
+        sql = "select * from oceanbase.__all_resource_pool where tenant_id = %d" % tenant_meta['tenant_id']
+        pool = execute(cursor, sql)
+        sql = "select * from oceanbase.__all_unit_config where unit_config_id = %d" % pool['unit_config_id']
+        tenant_unit = execute(cursor, sql)
+        max_cpu = tenant_unit['max_cpu']
+        min_memory = tenant_unit['min_memory']
+        unit_count = pool['unit_count']
+    except:
+        stdio.error('fail to get tenant info')
+        return
+    server_num = len(cluster_config.servers)
+    sql = "select count(1) server_num from oceanbase.__all_server where status = 'active'"
+    ret = execute(cursor, sql)
+    if ret:
+        server_num = ret.get("server_num", server_num)
+
     if get_option('test_only'):
-        return plugin_context.return_true()
+        return plugin_context.return_true(
+            max_cpu=max_cpu, min_memory=min_memory, unit_count=unit_count, server_num=server_num, tenant=tenant_name,
+            tenant_id=tenant_meta['tenant_id'], format_size=format_size
+        )
 
     if not remote_tbl_dir:
         stdio.error('Please use --remote-tbl-dir to set a dir for remote tbl files')
@@ -144,6 +207,11 @@ def pre_test(plugin_context, *args, **kwargs):
     stdio.stop_loading('succeed')
     stdio.verbose('set tbl_path: %s' % tbl_path)
     setattr(options, 'tbl_path', tbl_path)
-    return plugin_context.return_true()
+
+    return plugin_context.return_true(
+        obclient_bin=obclient_bin, host=host, port=port, user=user, password=password, database=mysql_db,
+        max_cpu=max_cpu, min_memory=min_memory, unit_count=unit_count, server_num=server_num, tenant=tenant_name,
+        tenant_id=tenant_meta['tenant_id'], format_size=format_size
+    )
diff --git a/plugins/tpch/3.1.0/run_test.py b/plugins/tpch/3.1.0/run_test.py
index e8cb0939951eb0a95c1d742e86158cb6c3dbe4d3..1a33681adfdc41443857820f573ec37cd1c403b4 100644
--- a/plugins/tpch/3.1.0/run_test.py
+++ b/plugins/tpch/3.1.0/run_test.py
@@ -45,24 +45,6 @@ def parse_size(size):
     return _bytes
 
 
-def format_size(size, precision=1):
-    units = ['B', 'K', 'M', 'G']
-    units_num = len(units) - 1
-    idx = 0
-    if precision:
-        div = 1024.0
-        format = '%.' + str(precision) + 'f%s'
-        limit = 1024
-    else:
-        div = 1024
-        limit = 1024
-        format = '%d%s'
-    while idx < units_num and size >= limit:
-        size /= div
-        idx += 1
-    return format % (size, units[idx])
-
-
 def exec_cmd(cmd):
     stdio.verbose('execute: %s' % cmd)
     process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
@@ -101,7 +83,6 @@ def run_test(plugin_context, db, cursor, *args, **kwargs):
     clients = plugin_context.clients
     options = plugin_context.options
 
-    optimization = get_option('optimization') > 0
     not_test_only = not get_option('test_only')
     host = get_option('host', '127.0.0.1')
@@ -118,30 +99,10 @@ def run_test(plugin_context, db, cursor, *args, **kwargs):
     sql_path = sorted(sql_path, key=lambda x: (len(x), x))
 
-    sql = "select * from oceanbase.gv$tenant where tenant_name = %s"
-    max_cpu = 2
+    max_cpu = kwargs.get('max_cpu', 2)
+    tenant_id = kwargs.get('tenant_id')
+    unit_count = kwargs.get('unit_count', 0)
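+    # These tenant facts are produced by pre_test and handed over through the
+    # plugin context, so run_test no longer queries oceanbase internal tables.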
     cpu_total = 0
-    min_memory = 0
-    unit_count = 0
-    tenant_meta = None
-    tenant_unit = None
-    try:
-        stdio.verbose('execute sql: %s' % (sql % tenant_name))
-        cursor.execute(sql, [tenant_name])
-        tenant_meta = cursor.fetchone()
-        if not tenant_meta:
-            stdio.error('Tenant %s not exists. Use `obd cluster tenant create` to create tenant.' % tenant_name)
-            return
-        sql = "select * from oceanbase.__all_resource_pool where tenant_id = %d" % tenant_meta['tenant_id']
-        pool = execute(cursor, sql)
-        sql = "select * from oceanbase.__all_unit_config where unit_config_id = %d" % pool['unit_config_id']
-        tenant_unit = execute(cursor, sql)
-        max_cpu = tenant_unit['max_cpu']
-        min_memory = tenant_unit['min_memory']
-        unit_count = pool['unit_count']
-    except:
-        stdio.error('fail to get tenant info')
-        return
 
     if not_test_only:
         sql_cmd_prefix = '%s -h%s -P%s -u%s@%s %s -A' % (obclient_bin, host, port, user, tenant_name, ("-p'%s'" % password) if password else '')
@@ -158,7 +119,6 @@ def run_test(plugin_context, db, cursor, *args, **kwargs):
             stdio.error(ret.stderr)
             return
 
-
     for server in cluster_config.servers:
         client = clients[server]
         ret = client.execute_command("grep -e 'processor\s*:' /proc/cpuinfo | wc -l")
@@ -167,96 +127,17 @@ def run_test(plugin_context, db, cursor, *args, **kwargs):
         else:
             server_config = cluster_config.get_server_conf(server)
            cpu_total += int(server_config.get('cpu_count', 0))
-
-    sql = ''
-    system_configs_done = []
-    tenant_variables_done = []
-    try:
-        cache_wash_threshold = format_size(int(min_memory * 0.2), 0)
-        system_configs = [
-            # [config name, new value, old value, replace condition: lambda n, o: n != o, tenant-level?]
-            ['syslog_level', 'PERF', 'PERF', lambda n, o: n != o, False],
-            ['max_syslog_file_count', 100, 100, lambda n, o: n != o, False],
-            ['enable_syslog_recycle', True, True, lambda n, o: n != o, False],
-            ['enable_merge_by_turn', False, False, lambda n, o: n != o, False],
-            ['trace_log_slow_query_watermark', '100s', '100s', lambda n, o: n != o, False],
-            ['max_kept_major_version_number', 1, 1, lambda n, o: n != o, False],
-            ['enable_sql_operator_dump', True, True, lambda n, o: n != o, False],
-            ['_hash_area_size', '3g', '3g', lambda n, o: n != o, False],
-            ['memstore_limit_percentage', 50, 50, lambda n, o: n != o, False],
-            ['enable_rebalance', False, False, lambda n, o: n != o, False],
-            ['memory_chunk_cache_size', '1g', '1g', lambda n, o: n != o, False],
-            ['minor_freeze_times', 5, 5, lambda n, o: n != o, False],
-            ['merge_thread_count', 20, 20, lambda n, o: n != o, False],
-            ['cache_wash_threshold', cache_wash_threshold, cache_wash_threshold, lambda n, o: n != o, False],
-            ['ob_enable_batched_multi_statement', True, True, lambda n, o: n != o, False],
-        ]
-
-        tenant_q = ' tenant="%s"' % tenant_name
-        server_num = len(cluster_config.servers)
-        if optimization:
-            for config in system_configs:
-                if config[0] == 'sleep':
-                    time.sleep(config[1])
-                    system_configs_done.append(config)
-                    continue
-                sql = 'show parameters like "%s"' % config[0]
-                if config[4]:
-                    sql += tenant_q
-                ret = execute(cursor, sql)
-                if ret:
-                    config[2] = ret['value']
-                if config[3](config[1], config[2]):
-                    sql = 'alter system set %s=%%s' % config[0]
-                    if config[4]:
-                        sql += tenant_q
-                    system_configs_done.append(config)
-                    execute(cursor, sql, [config[1]])
-
-            sql = "select count(1) server_num from oceanbase.__all_server where status = 'active'"
-            ret = execute(cursor, sql)
-            if ret:
-                server_num = ret.get("server_num", server_num)
-
-            parallel_max_servers = min(int(max_cpu * 10), 1800)
-            parallel_servers_target = int(max_cpu * server_num * 8)
-            tenant_variables = [
-                # [variable name, new value, old value, replace condition: lambda n, o: n != o]
-                ['ob_sql_work_area_percentage', 80, 80, lambda n, o: n != o],
-                ['optimizer_use_sql_plan_baselines', True, True, lambda n, o: n != o],
-                ['optimizer_capture_sql_plan_baselines', True, True, lambda n, o: n != o],
-                ['ob_query_timeout', 36000000000, 36000000000, lambda n, o: n != o],
-                ['ob_trx_timeout', 36000000000, 36000000000, lambda n, o: n != o],
-                ['max_allowed_packet', 67108864, 67108864, lambda n, o: n != o],
-                ['secure_file_priv', "", "", lambda n, o: n != o],
-                ['parallel_max_servers', parallel_max_servers, parallel_max_servers, lambda n, o: n != o],
-                ['parallel_servers_target', parallel_servers_target, parallel_servers_target, lambda n, o: n != o]
-            ]
-            select_sql_t = "select value from oceanbase.__all_virtual_sys_variable where tenant_id = %d and name = '%%s'" % tenant_meta['tenant_id']
-            update_sql_t = "ALTER TENANT %s SET VARIABLES %%s = %%%%s" % tenant_name
-
-            for config in tenant_variables:
-                sql = select_sql_t % config[0]
-                ret = execute(cursor, sql)
-                if ret:
-                    value = ret['value']
-                    config[2] = int(value) if isinstance(value, str) and value.isdigit() else value
-                if config[3](config[1], config[2]):
-                    sql = update_sql_t % config[0]
-                    tenant_variables_done.append(config)
-                    execute(cursor, sql, [config[1]])
-        else:
-            sql = "select value from oceanbase.__all_virtual_sys_variable where tenant_id = %d and name = 'secure_file_priv'" % tenant_meta['tenant_id']
-            ret = execute(cursor, sql)['value']
-            if ret is None:
-                stdio.error('Access denied. Please set `secure_file_priv` to "".')
-                return
-            if ret:
-                for path in tbl_path:
-                    if not path.startswith(ret):
-                        stdio.error('Access denied. Please set `secure_file_priv` to "".')
-                        return
+    sql = "select value from oceanbase.__all_virtual_sys_variable where tenant_id = %d and name = 'secure_file_priv'" % tenant_id
+    ret = execute(cursor, sql)['value']
+    if ret is None:
+        stdio.error('Access denied. Please set `secure_file_priv` to "".')
+        return
+    if ret:
+        for path in tbl_path:
+            if not path.startswith(ret):
+                stdio.error('Access denied. Please set `secure_file_priv` to "".')
+                return
 
     parallel_num = int(max_cpu * unit_count)
 
@@ -331,7 +212,7 @@ def run_test(plugin_context, db, cursor, *args, **kwargs):
         for path in sql_path:
             _, fn = os.path.split(path)
             log_path = os.path.join(tmp_dir, '%s.log' % fn)
-            ret = local_execute_command('source %s | %s -c > %s' % (path, sql_cmd_prefix, log_path))
+            ret = local_execute_command('echo source %s | %s -c > %s' % (path, sql_cmd_prefix, log_path))
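+            # The fixed command pipes `source <file>` into obclient (e.g.
+            # `echo source q1.sql | obclient -h127.0.0.1 -P2881 -uroot@test -A -c > q1.sql.log`,
+            # with illustrative values); previously the shell itself tried to run `source`.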
             if not ret:
                 raise Exception(ret.stderr)
         stdio.stop_loading('succeed')
@@ -350,28 +231,9 @@ def run_test(plugin_context, db, cursor, *args, **kwargs):
             if not ret:
                 raise Exception(ret.stderr)
         stdio.print('Total Cost: %.1fs' % total_cost)
-
+        return plugin_context.return_true()
     except KeyboardInterrupt:
         stdio.stop_loading('fail')
     except Exception as e:
         stdio.stop_loading('fail')
         stdio.exception(str(e))
-    finally:
-        try:
-            if optimization:
-                for config in tenant_variables_done[::-1]:
-                    if config[3](config[1], config[2]):
-                        sql = update_sql_t % config[0]
-                        execute(cursor, sql, [config[2]])
-
-                for config in system_configs_done[::-1]:
-                    if config[0] == 'sleep':
-                        time.sleep(config[1])
-                        continue
-                    if config[3](config[1], config[2]):
-                        sql = 'alter system set %s=%%s' % config[0]
-                        if config[4]:
-                            sql += tenant_q
-                        execute(cursor, sql, [config[2]])
-        except:
-            pass
diff --git a/requirements.txt b/requirements.txt
index efac4b6190c383f0d242a290cf47d692f54a6431..6a7a217cc9f896ebe27854293a6cbe8653f32825 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 requests==2.24.0
 rpmfile==1.0.8
-paramiko==2.7.2
+paramiko==2.10.1
 backports.lzma==0.0.14
 MySQL-python==1.2.5
 ruamel.yaml.clib==0.2.2
@@ -13,4 +13,5 @@ halo==0.0.30
 pycryptodome==3.10.1
 inspect2==0.1.2
 six==1.16.0
-pyinstaller==3.6
\ No newline at end of file
+pyinstaller==3.6
+bcrypt==3.1.7
\ No newline at end of file
diff --git a/requirements3.txt b/requirements3.txt
index 6c11b4fdb164c6a3d0eabbfe1d7ece4b30ea3243..f22b7e7250d6a8ac11e0af5f71b7e489151306dd 100644
--- a/requirements3.txt
+++ b/requirements3.txt
@@ -1,5 +1,5 @@
 rpmfile==1.0.8
-paramiko==2.7.2
+paramiko==2.10.1
 requests==2.25.1
 PyMySQL==1.0.2
 ruamel.yaml==0.17.4
@@ -11,3 +11,5 @@ pycryptodome==3.10.1
 inspect2==0.1.2
 six==1.16.0
 pyinstaller>=4.3
+bcrypt==4.0.0
+configparser>=5.2.0
\ No newline at end of file
diff --git a/ssh.py b/ssh.py
index bfc5d44e7f3b0632018713b5ffc05d8899ce6320..b6028e9a73af31a4f5e0b8a650ca0dec847ae474 100644
--- a/ssh.py
+++ b/ssh.py
@@ -23,6 +23,7 @@ from __future__ import absolute_import, division, print_function
 import enum
 import getpass
 import os
+import tempfile
 import warnings
 from glob import glob
@@ -39,8 +40,9 @@ from multiprocessing.queues import Empty
 from multiprocessing import Queue, Process
 from multiprocessing.pool import ThreadPool
 
-from tool import COMMAND_ENV, DirectoryUtil
+from tool import COMMAND_ENV, DirectoryUtil, FileUtil
 from _stdio import SafeStdio
+from _environ import ENV_DISABLE_RSYNC
 
 __all__ = ("SshClient", "SshConfig", "LocalClient", "ConcurrentExecutor")
@@ -104,6 +106,9 @@ class ConcurrentExecutor(object):
             self.futures.append(ret)
         return ret
 
+    def size(self):
+        return len(self.futures)
+
     @staticmethod
     def execute(future):
         client = SshClient(future.client.config, future.stdio)
@@ -160,10 +165,23 @@ class LocalClient(SafeStdio):
         if os.path.exists(os.path.dirname(local_dir)) and not glob(local_dir):
             stdio.verbose("%s is empty" % local_dir)
             return True
-        if LocalClient.execute_command('mkdir -p %s && cp -fr %s %s' % (remote_dir, local_dir, remote_dir), stdio=stdio):
+        if LocalClient.execute_command('mkdir -p %s && cp -frL %s %s' % (remote_dir, local_dir, remote_dir), stdio=stdio):
             return True
         return False
 
+    @staticmethod
+    def write_file(content, file_path, mode='w', stdio=None):
+        stdio.verbose('write {} to {}'.format(content, file_path))
+        try:
+            with FileUtil.open(file_path, mode, stdio=stdio) as f:
+                f.write(content)
+                f.flush()
+            return True
+        except:
+            stdio.exception('')
+            return False
+
+    @staticmethod
     def get_file(local_path, remote_path, stdio=None):
         return LocalClient.put_file(remote_path, local_path, stdio=stdio)
@@ -231,7 +249,7 @@ class SshClient(SafeStdio):
             stdio.verbose('%s@%s delete env %s' % (self.config.username, self.config.host, key))
             del self.env[key]
             self._update_env()
-
+
     def __str__(self):
         return '%s@%s:%d' % (self.config.username, self.config.host, self.config.port)
@@ -336,12 +354,12 @@ class SshClient(SafeStdio):
         verbose_msg = '%s execute: %s ' % (self.config, command)
         stdio.verbose(verbose_msg, end='')
-        command = '%s %s;echo -e "\n$?\c"' % (self.env_str, command.strip(';'))
+        command = '%s %s;echo -e "\n$?\c"' % (self.env_str, command.strip(';').lstrip('\n'))
         return self._execute_command(command, retry=3, timeout=timeout, stdio=stdio)
 
     @property
     def disable_rsync(self):
-        return COMMAND_ENV.get("OBD_DISABLE_RSYNC") == "1"
+        return COMMAND_ENV.get(ENV_DISABLE_RSYNC) == "1"
 
     @property
     def remote_transporter(self):
@@ -367,6 +385,22 @@ class SshClient(SafeStdio):
             return False
         return self._put_file(local_path, remote_path, stdio=stdio)
 
+    def write_file(self, content, file_path, mode='w', stdio=None):
+        if self._is_local():
+            return LocalClient.write_file(content, file_path, mode, stdio)
+        return self._write_file(content, file_path, mode, stdio)
+
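+    # A remote write is staged in a local NamedTemporaryFile and then shipped
+    # with put_file, so it reuses the rsync/sftp transporter selection below.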
+    def _write_file(self, content, file_path, mode='w', stdio=None):
+        stdio.verbose('write {} to {}: {}'.format(content, self, file_path))
+        try:
+            with tempfile.NamedTemporaryFile(mode=mode) as f:
+                f.write(content)
+                f.flush()
+                return self.put_file(f.name, file_path, stdio=stdio)
+        except:
+            stdio.exception('')
+            return False
+
     @property
     def _put_file(self):
         if self.remote_transporter == RemoteTransporter.RSYNC:
diff --git a/tool.py b/tool.py
index 80a47f068565c390fa49efd9b0e158512054b289..15f9941fb2e4a44db7110d978ecc34ed3f3e33bb 100644
--- a/tool.py
+++ b/tool.py
@@ -31,6 +31,8 @@ import signal
 import shutil
 import re
 import json
+import hashlib
+from io import BytesIO
 
 from ruamel.yaml import YAML, YAMLContextManager, representer
@@ -297,6 +299,25 @@ class FileUtil(object):
 
     COPY_BUFSIZE = 1024 * 1024 if _WINDOWS else 64 * 1024
 
+    @staticmethod
+    def checksum(target_path, stdio=None):
+        from ssh import LocalClient
+        if not os.path.isfile(target_path):
+            info = 'No such file: ' + target_path
+            if stdio:
+                getattr(stdio, 'error', print)(info)
+                return False
+            else:
+                raise IOError(info)
+        ret = LocalClient.execute_command('md5sum {}'.format(target_path), stdio=stdio)
+        if ret:
+            return ret.stdout.strip().split(' ')[0].encode('utf-8')
+        else:
+            m = hashlib.md5()
+            with open(target_path, 'rb') as f:
+                m.update(f.read())
+            return m.hexdigest().encode(sys.getdefaultencoding())
+
     @staticmethod
     def copy_fileobj(fsrc, fdst):
         fsrc_read = fsrc.read
@@ -465,6 +486,18 @@ class YamlLoader(YAML):
             self.stdio.exception('Parsing error:\n%s' % e)
             raise e
 
+    def loads(self, yaml_content):
+        try:
+            stream = BytesIO()
+            yaml_content = str(yaml_content).encode()
+            stream.write(yaml_content)
+            stream.seek(0)
+            return self.load(stream)
+        except Exception as e:
+            if getattr(self.stdio, 'exception', False):
+                self.stdio.exception('Parsing error:\n%s' % e)
+            raise e
+
     def dump(self, data, stream=None, transform=None):
         try:
             return super(YamlLoader, self).dump(data, stream=stream, transform=transform)
@@ -473,6 +506,20 @@ class YamlLoader(YAML):
             self.stdio.exception('dump error:\n%s' % e)
             raise e
 
+    def dumps(self, data, transform=None):
+        try:
+            stream = BytesIO()
+            self.dump(data, stream=stream, transform=transform)
+            stream.seek(0)
+            content = stream.read()
+            if sys.version_info.major == 2:
+                return content
+            return content.decode()
+        except Exception as e:
+            if getattr(self.stdio, 'exception', False):
+                self.stdio.exception('dumps error:\n%s' % e)
+            raise e
+
 
 _KEYCRE = re.compile(r"\$(\w+)")