提交 2ac5f5e8 编写于 作者: F frf12

v1.5.0

上级 984d25e8
...@@ -32,19 +32,28 @@ from optparse import OptionParser, OptionGroup, BadOptionError, Option ...@@ -32,19 +32,28 @@ from optparse import OptionParser, OptionGroup, BadOptionError, Option
from core import ObdHome from core import ObdHome
from _stdio import IO from _stdio import IO
from log import Logger from log import Logger
from tool import DirectoryUtil, FileUtil, COMMAND_ENV
from _errno import DOC_LINK_MSG, LockError from _errno import DOC_LINK_MSG, LockError
from tool import DirectoryUtil, FileUtil
ROOT_IO = IO(1) ROOT_IO = IO(1)
VERSION = u'<VERSION>' VERSION = '<VERSION>'
REVISION = '<CID>' REVISION = '<CID>'
BUILD_BRANCH = '<B_BRANCH>' BUILD_BRANCH = '<B_BRANCH>'
BUILD_TIME = '<B_TIME>' BUILD_TIME = '<B_TIME>'
DEBUG = True if '<DEBUG>' else False DEBUG = True if '<DEBUG>' else False
CONST_OBD_HOME = "OBD_HOME"
CONST_OBD_INSTALL_PRE = "OBD_INSTALL_PRE"
FORBIDDEN_VARS = (CONST_OBD_HOME, CONST_OBD_INSTALL_PRE)
OBD_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), '.obd')
COMMAND_ENV.load(os.path.join(OBD_HOME_PATH, '.obd_environ'), ROOT_IO)
DEV_MODE = "OBD_DEV_MODE"
class AllowUndefinedOptionParser(OptionParser): class AllowUndefinedOptionParser(OptionParser):
IS_TTY = sys.stdin.isatty()
def __init__(self, def __init__(self,
usage=None, usage=None,
...@@ -65,7 +74,10 @@ class AllowUndefinedOptionParser(OptionParser): ...@@ -65,7 +74,10 @@ class AllowUndefinedOptionParser(OptionParser):
self.allow_undefine = allow_undefine self.allow_undefine = allow_undefine
def warn(self, msg, file=None): def warn(self, msg, file=None):
print ('warn: %s' % msg) if self.IS_TTY:
print("%s %s" % (IO.WARNING_PREV, msg))
else:
print('warn: %s' % msg)
def _process_long_opt(self, rargs, values): def _process_long_opt(self, rargs, values):
try: try:
...@@ -88,7 +100,7 @@ class AllowUndefinedOptionParser(OptionParser): ...@@ -88,7 +100,7 @@ class AllowUndefinedOptionParser(OptionParser):
if self.allow_undefine: if self.allow_undefine:
key = e.opt_str key = e.opt_str
value = value[len(key)+1:] value = value[len(key)+1:]
setattr(values, key.strip('-').replace('-', '_'), value if value != '' else True) setattr(values, key.strip('-').replace('-', '_'), value if value != '' else True)
return self.warn(e) return self.warn(e)
else: else:
raise e raise e
...@@ -141,9 +153,8 @@ class BaseCommand(object): ...@@ -141,9 +153,8 @@ class BaseCommand(object):
class ObdCommand(BaseCommand): class ObdCommand(BaseCommand):
OBD_PATH = os.path.join(os.environ.get('OBD_HOME', os.getenv('HOME')), '.obd') OBD_PATH = OBD_HOME_PATH
OBD_INSTALL_PRE = os.environ.get('OBD_INSTALL_PRE', '/') OBD_INSTALL_PRE = os.environ.get(CONST_OBD_INSTALL_PRE, '/')
OBD_DEV_MODE_FILE = '.dev_mode'
def init_home(self): def init_home(self):
version_path = os.path.join(self.OBD_PATH, 'version') version_path = os.path.join(self.OBD_PATH, 'version')
...@@ -164,13 +175,13 @@ class ObdCommand(BaseCommand): ...@@ -164,13 +175,13 @@ class ObdCommand(BaseCommand):
version_fobj.flush() version_fobj.flush()
version_fobj.close() version_fobj.close()
@property
def dev_mode_path(self):
return os.path.join(self.OBD_PATH, self.OBD_DEV_MODE_FILE)
@property @property
def dev_mode(self): def dev_mode(self):
return os.path.exists(self.dev_mode_path) return COMMAND_ENV.get(DEV_MODE) == "1"
def parse_command(self):
self.parser.allow_undefine = self.dev_mode
return super(ObdCommand, self).parse_command()
def parse_command(self): def parse_command(self):
self.parser.allow_undefine = self.dev_mode self.parser.allow_undefine = self.dev_mode
...@@ -203,6 +214,8 @@ class ObdCommand(BaseCommand): ...@@ -203,6 +214,8 @@ class ObdCommand(BaseCommand):
ROOT_IO.exception('Another app is currently holding the obd lock.') ROOT_IO.exception('Another app is currently holding the obd lock.')
except SystemExit: except SystemExit:
pass pass
except KeyboardInterrupt:
ROOT_IO.exception('Keyboard Interrupt')
except: except:
e = sys.exc_info()[1] e = sys.exc_info()[1]
ROOT_IO.exception('Running Error: %s' % e) ROOT_IO.exception('Running Error: %s' % e)
...@@ -281,8 +294,7 @@ class DevModeEnableCommand(HiddenObdCommand): ...@@ -281,8 +294,7 @@ class DevModeEnableCommand(HiddenObdCommand):
super(DevModeEnableCommand, self).__init__('enable', 'Enable Dev Mode') super(DevModeEnableCommand, self).__init__('enable', 'Enable Dev Mode')
def _do_command(self, obd): def _do_command(self, obd):
from tool import FileUtil if COMMAND_ENV.set(DEV_MODE, "1", save=True, stdio=obd.stdio):
if FileUtil.open(self.dev_mode_path, _type='w', stdio=obd.stdio):
obd.stdio.print("Dev Mode: ON") obd.stdio.print("Dev Mode: ON")
return True return True
return False return False
...@@ -294,8 +306,7 @@ class DevModeDisableCommand(HiddenObdCommand): ...@@ -294,8 +306,7 @@ class DevModeDisableCommand(HiddenObdCommand):
super(DevModeDisableCommand, self).__init__('disable', 'Disable Dev Mode') super(DevModeDisableCommand, self).__init__('disable', 'Disable Dev Mode')
def _do_command(self, obd): def _do_command(self, obd):
from tool import FileUtil if COMMAND_ENV.set(DEV_MODE, "0", save=True, stdio=obd.stdio):
if FileUtil.rm(self.dev_mode_path, stdio=obd.stdio):
obd.stdio.print("Dev Mode: OFF") obd.stdio.print("Dev Mode: OFF")
return True return True
return False return False
...@@ -309,6 +320,78 @@ class DevModeMajorCommand(HiddenMajorCommand): ...@@ -309,6 +320,78 @@ class DevModeMajorCommand(HiddenMajorCommand):
self.register_command(DevModeDisableCommand()) self.register_command(DevModeDisableCommand())
class EnvironmentSetCommand(HiddenObdCommand):
def __init__(self):
super(EnvironmentSetCommand, self).__init__("set", "Set obd environment variable")
def init(self, cmd, args):
super(EnvironmentSetCommand, self).init(cmd, args)
self.parser.set_usage('%s [key] [value]' % self.prev_cmd)
return self
def _do_command(self, obd):
if len(self.cmds) == 2:
key = self.cmds[0]
if key in FORBIDDEN_VARS:
obd.stdio.error("Set the environment variable {} is not allowed.".format(key))
return False
return COMMAND_ENV.set(key, self.cmds[1], save=True, stdio=obd.stdio)
else:
return self._show_help()
class EnvironmentUnsetCommand(HiddenObdCommand):
def __init__(self):
super(EnvironmentUnsetCommand, self).__init__("unset", "Unset obd environment variable")
def init(self, cmd, args):
super(EnvironmentUnsetCommand, self).init(cmd, args)
self.parser.set_usage('%s [key] [value]' % self.prev_cmd)
return self
def _do_command(self, obd):
if len(self.cmds) == 1:
return COMMAND_ENV.delete(self.cmds[0], save=True, stdio=obd.stdio)
else:
return self._show_help()
class EnvironmentShowCommand(HiddenObdCommand):
def __init__(self):
super(EnvironmentShowCommand, self).__init__("show", "Show obd environment variables")
self.parser.add_option('-A', '--all', action="store_true", help="Show all environment variables including system variables")
def _do_command(self, obd):
if self.opts.all:
envs = COMMAND_ENV.copy().items()
else:
envs = COMMAND_ENV.show_env().items()
obd.stdio.print_list(envs, ["Key", "Value"], title="Environ")
return True
class EnvironmentClearCommand(HiddenObdCommand):
def __init__(self):
super(EnvironmentClearCommand, self).__init__("clear", "Clear obd environment variables")
def _do_command(self, obd):
return COMMAND_ENV.clear(stdio=obd.stdio)
class EnvironmentMajorCommand(HiddenMajorCommand):
def __init__(self):
super(EnvironmentMajorCommand, self).__init__('env', 'Environment variables for OBD')
self.register_command(EnvironmentSetCommand())
self.register_command(EnvironmentUnsetCommand())
self.register_command(EnvironmentShowCommand())
self.register_command(EnvironmentClearCommand())
class MirrorCloneCommand(ObdCommand): class MirrorCloneCommand(ObdCommand):
def __init__(self): def __init__(self):
...@@ -382,7 +465,7 @@ class MirrorListCommand(ObdCommand): ...@@ -382,7 +465,7 @@ class MirrorListCommand(ObdCommand):
repos = obd.mirror_manager.get_mirrors(is_enabled=None) repos = obd.mirror_manager.get_mirrors(is_enabled=None)
ROOT_IO.print_list( ROOT_IO.print_list(
repos, repos,
['SectionName', 'Type', 'Enabled','Update Time'], ['SectionName', 'Type', 'Enabled','Update Time'],
lambda x: [x.section_name, x.mirror_type.value, x.enabled, time.strftime("%Y-%m-%d %H:%M", time.localtime(x.repo_age))], lambda x: [x.section_name, x.mirror_type.value, x.enabled, time.strftime("%Y-%m-%d %H:%M", time.localtime(x.repo_age))],
title='Mirror Repository List' title='Mirror Repository List'
) )
...@@ -413,7 +496,7 @@ class MirrorEnableCommand(ObdCommand): ...@@ -413,7 +496,7 @@ class MirrorEnableCommand(ObdCommand):
def __init__(self): def __init__(self):
super(MirrorEnableCommand, self).__init__('enable', 'Enable remote mirror repository.') super(MirrorEnableCommand, self).__init__('enable', 'Enable remote mirror repository.')
def _do_command(self, obd): def _do_command(self, obd):
name = self.cmds[0] name = self.cmds[0]
return obd.mirror_manager.set_remote_mirror_enabled(name, True) return obd.mirror_manager.set_remote_mirror_enabled(name, True)
...@@ -423,7 +506,7 @@ class MirrorDisableCommand(ObdCommand): ...@@ -423,7 +506,7 @@ class MirrorDisableCommand(ObdCommand):
def __init__(self): def __init__(self):
super(MirrorDisableCommand, self).__init__('disable', 'Disable remote mirror repository.') super(MirrorDisableCommand, self).__init__('disable', 'Disable remote mirror repository.')
def _do_command(self, obd): def _do_command(self, obd):
name = self.cmds[0] name = self.cmds[0]
return obd.mirror_manager.set_remote_mirror_enabled(name, False) return obd.mirror_manager.set_remote_mirror_enabled(name, False)
...@@ -451,7 +534,7 @@ class RepositoryListCommand(ObdCommand): ...@@ -451,7 +534,7 @@ class RepositoryListCommand(ObdCommand):
repos, repos,
['name', 'version', 'release', 'arch', 'md5', 'tags'], ['name', 'version', 'release', 'arch', 'md5', 'tags'],
lambda x: [x.name, x.version, x.release, x.arch, x.md5, ', '.join(x.tags)], lambda x: [x.name, x.version, x.release, x.arch, x.md5, ', '.join(x.tags)],
title='%s Local Repository List' % name if name else '' title='%s Local Repository List' % name if name else 'Local Repository List'
) )
def _do_command(self, obd): def _do_command(self, obd):
...@@ -511,6 +594,7 @@ class ClusterAutoDeployCommand(ClusterMirrorCommand): ...@@ -511,6 +594,7 @@ class ClusterAutoDeployCommand(ClusterMirrorCommand):
super(ClusterAutoDeployCommand, self).__init__('autodeploy', 'Deploy a cluster automatically by using a simple configuration file.') super(ClusterAutoDeployCommand, self).__init__('autodeploy', 'Deploy a cluster automatically by using a simple configuration file.')
self.parser.add_option('-c', '--config', type='string', help="Path to the configuration file.") self.parser.add_option('-c', '--config', type='string', help="Path to the configuration file.")
self.parser.add_option('-f', '--force', action='store_true', help="Force autodeploy, overwrite the home_path.") self.parser.add_option('-f', '--force', action='store_true', help="Force autodeploy, overwrite the home_path.")
self.parser.add_option('-C', '--clean', action='store_true', help="Clean the home path if the directory belong to you.", default=False)
self.parser.add_option('-U', '--unuselibrepo', '--ulp', action='store_true', help="Disable OBD from installing the libs mirror automatically.") self.parser.add_option('-U', '--unuselibrepo', '--ulp', action='store_true', help="Disable OBD from installing the libs mirror automatically.")
self.parser.add_option('-A', '--auto-create-tenant', '--act', action='store_true', help="Automatically create a tenant named `test` by using all the available resource of the cluster.") self.parser.add_option('-A', '--auto-create-tenant', '--act', action='store_true', help="Automatically create a tenant named `test` by using all the available resource of the cluster.")
self.parser.add_option('--force-delete', action='store_true', help="Force delete, delete the registered cluster.") self.parser.add_option('--force-delete', action='store_true', help="Force delete, delete the registered cluster.")
...@@ -518,6 +602,8 @@ class ClusterAutoDeployCommand(ClusterMirrorCommand): ...@@ -518,6 +602,8 @@ class ClusterAutoDeployCommand(ClusterMirrorCommand):
def _do_command(self, obd): def _do_command(self, obd):
if self.cmds: if self.cmds:
if getattr(self.opts, 'force', False) or getattr(self.opts, 'clean', False):
setattr(self.opts, 'skip_cluster_status_check', True)
name = self.cmds[0] name = self.cmds[0]
if obd.genconfig(name, self.opts): if obd.genconfig(name, self.opts):
self.opts.config = '' self.opts.config = ''
...@@ -533,12 +619,15 @@ class ClusterDeployCommand(ClusterMirrorCommand): ...@@ -533,12 +619,15 @@ class ClusterDeployCommand(ClusterMirrorCommand):
super(ClusterDeployCommand, self).__init__('deploy', 'Deploy a cluster by using the current deploy configuration or a deploy yaml file.') super(ClusterDeployCommand, self).__init__('deploy', 'Deploy a cluster by using the current deploy configuration or a deploy yaml file.')
self.parser.add_option('-c', '--config', type='string', help="Path to the configuration yaml file.") self.parser.add_option('-c', '--config', type='string', help="Path to the configuration yaml file.")
self.parser.add_option('-f', '--force', action='store_true', help="Force deploy, overwrite the home_path.", default=False) self.parser.add_option('-f', '--force', action='store_true', help="Force deploy, overwrite the home_path.", default=False)
self.parser.add_option('-C', '--clean', action='store_true', help="Clean the home path if the directory belong to you.", default=False)
self.parser.add_option('-U', '--unuselibrepo', '--ulp', action='store_true', help="Disable OBD from installing the libs mirror automatically.") self.parser.add_option('-U', '--unuselibrepo', '--ulp', action='store_true', help="Disable OBD from installing the libs mirror automatically.")
self.parser.add_option('-A', '--auto-create-tenant', '--act', action='store_true', help="Automatically create a tenant named `test` by using all the available resource of the cluster.") self.parser.add_option('-A', '--auto-create-tenant', '--act', action='store_true', help="Automatically create a tenant named `test` by using all the available resource of the cluster.")
# self.parser.add_option('-F', '--fuzzymatch', action='store_true', help="enable fuzzy match when search package") # self.parser.add_option('-F', '--fuzzymatch', action='store_true', help="enable fuzzy match when search package")
def _do_command(self, obd): def _do_command(self, obd):
if self.cmds: if self.cmds:
if getattr(self.opts, 'force', False) or getattr(self.opts, 'clean', False):
setattr(self.opts, 'skip_cluster_status_check', True)
return obd.deploy_cluster(self.cmds[0], self.opts) return obd.deploy_cluster(self.cmds[0], self.opts)
else: else:
return self._show_help() return self._show_help()
...@@ -669,14 +758,14 @@ class ClusterEditConfigCommand(ClusterMirrorCommand): ...@@ -669,14 +758,14 @@ class ClusterEditConfigCommand(ClusterMirrorCommand):
class ClusterChangeRepositoryCommand(ClusterMirrorCommand): class ClusterChangeRepositoryCommand(ClusterMirrorCommand):
def __init__(self): def __init__(self):
super(ClusterChangeRepositoryCommand, self).__init__('change-repo', 'Change repository for a deployed component') super(ClusterChangeRepositoryCommand, self).__init__('reinstall', 'Reinstall a deployed component')
self.parser.add_option('-c', '--component', type='string', help="Component name to change repository.") self.parser.add_option('-c', '--component', type='string', help="Component name to change repository.")
self.parser.add_option('--hash', type='string', help="Repository's hash") self.parser.add_option('--hash', type='string', help="Repository's hash")
self.parser.add_option('-f', '--force', action='store_true', help="force change even start failed.") self.parser.add_option('-f', '--force', action='store_true', help="force change even start failed.")
def _do_command(self, obd): def _do_command(self, obd):
if self.cmds: if self.cmds:
return obd.change_repository(self.cmds[0], self.opts) return obd.reinstall(self.cmds[0], self.opts)
else: else:
return self._show_help() return self._show_help()
...@@ -703,15 +792,18 @@ class ClusterTenantCreateCommand(ClusterMirrorCommand): ...@@ -703,15 +792,18 @@ class ClusterTenantCreateCommand(ClusterMirrorCommand):
def __init__(self): def __init__(self):
super(ClusterTenantCreateCommand, self).__init__('create', 'Create a tenant.') super(ClusterTenantCreateCommand, self).__init__('create', 'Create a tenant.')
self.parser.add_option('-n', '--tenant-name', type='string', help="The tenant name. The default tenant name is [test].", default='test') self.parser.add_option('-t', '-n', '--tenant-name', type='string', help="The tenant name. The default tenant name is [test].", default='test')
self.parser.add_option('--max-cpu', type='float', help="Max CPU unit number.") self.parser.add_option('--max-cpu', type='float', help="Max CPU unit number.")
self.parser.add_option('--min-cpu', type='float', help="Mind CPU unit number.") self.parser.add_option('--min-cpu', type='float', help="Mind CPU unit number.")
self.parser.add_option('--max-memory', type='int', help="Max memory unit size.") self.parser.add_option('--max-memory', type='string', help="Max memory unit size. Not supported after version 4.0, use `--memory-size` instead")
self.parser.add_option('--min-memory', type='int', help="Min memory unit size.") self.parser.add_option('--min-memory', type='string', help="Min memory unit size. Not supported after version 4.0, use `--memory-size` instead")
self.parser.add_option('--max-disk-size', type='int', help="Max disk unit size.") self.parser.add_option('--memory-size', type='string', help="Memory unit size. Supported since version 4.0.")
self.parser.add_option('--max-iops', type='int', help="Max IOPS unit number. [128].", default=128) self.parser.add_option('--max-disk-size', type='string', help="Max disk unit size. Not supported after version 4.0")
self.parser.add_option('--log-disk-size', type='string', help="Log disk unit size.")
self.parser.add_option('--max-iops', type='int', help="Max IOPS unit number.")
self.parser.add_option('--min-iops', type='int', help="Min IOPS unit number.") self.parser.add_option('--min-iops', type='int', help="Min IOPS unit number.")
self.parser.add_option('--max-session-num', type='int', help="Max session unit number. [64].", default=64) self.parser.add_option('--iops-weight', type='int', help="The weight of IOPS. When Max IOPS is greater than Min IOPS, the weight of idle resources available to the current tenant. Supported since version 4.0.")
self.parser.add_option('--max-session-num', type='int', help="Max session unit number. Not supported after version 4.0")
self.parser.add_option('--unit-num', type='int', help="Pool unit number.") self.parser.add_option('--unit-num', type='int', help="Pool unit number.")
self.parser.add_option('-z', '--zone-list', type='string', help="Tenant zone list.") self.parser.add_option('-z', '--zone-list', type='string', help="Tenant zone list.")
self.parser.add_option('--charset', type='string', help="Tenant charset.") self.parser.add_option('--charset', type='string', help="Tenant charset.")
...@@ -734,7 +826,7 @@ class ClusterTenantDropCommand(ClusterMirrorCommand): ...@@ -734,7 +826,7 @@ class ClusterTenantDropCommand(ClusterMirrorCommand):
def __init__(self): def __init__(self):
super(ClusterTenantDropCommand, self).__init__('drop', 'Drop a tenant.') super(ClusterTenantDropCommand, self).__init__('drop', 'Drop a tenant.')
self.parser.add_option('-n', '--tenant-name', type='string', help="Tenant name.") self.parser.add_option('-t', '-n', '--tenant-name', type='string', help="Tenant name.")
def _do_command(self, obd): def _do_command(self, obd):
if self.cmds: if self.cmds:
...@@ -793,23 +885,52 @@ class MySQLTestCommand(TestMirrorCommand): ...@@ -793,23 +885,52 @@ class MySQLTestCommand(TestMirrorCommand):
self.parser.add_option('--mysqltest-bin', type='string', help='Mysqltest bin path. [/u01/obclient/bin/mysqltest]', default='/u01/obclient/bin/mysqltest') self.parser.add_option('--mysqltest-bin', type='string', help='Mysqltest bin path. [/u01/obclient/bin/mysqltest]', default='/u01/obclient/bin/mysqltest')
self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient') self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')
self.parser.add_option('--test-dir', type='string', help='Test case file directory. [./mysql_test/t]', default='./mysql_test/t') self.parser.add_option('--test-dir', type='string', help='Test case file directory. [./mysql_test/t]', default='./mysql_test/t')
self.parser.add_option('--test-file-suffix', type='string', help='Test case file suffix. [.test]', default='.test')
self.parser.add_option('--result-dir', type='string', help='Result case file directory. [./mysql_test/r]', default='./mysql_test/r') self.parser.add_option('--result-dir', type='string', help='Result case file directory. [./mysql_test/r]', default='./mysql_test/r')
self.parser.add_option('--result-file-suffix', type='string', help='Result file suffix. [.result]', default='.result')
self.parser.add_option('--record', action='store_true', help='record mysqltest execution results', default=False) self.parser.add_option('--record', action='store_true', help='record mysqltest execution results', default=False)
self.parser.add_option('--record-dir', type='string', help='The directory of the result file for mysqltest.') self.parser.add_option('--record-dir', type='string', help='The directory of the result file for mysqltest.', default='./record')
self.parser.add_option('--log-dir', type='string', help='The log file directory. [./log]', default='./log') self.parser.add_option('--record-file-suffix', type='string', help='Result file suffix. [.record]', default='.record')
self.parser.add_option('--log-dir', type='string', help='The log file directory.')
self.parser.add_option('--tmp-dir', type='string', help='Temporary directory for mysqltest. [./tmp]', default='./tmp') self.parser.add_option('--tmp-dir', type='string', help='Temporary directory for mysqltest. [./tmp]', default='./tmp')
self.parser.add_option('--var-dir', type='string', help='Var directory to use when run mysqltest. [./var]', default='./var') self.parser.add_option('--var-dir', type='string', help='Var directory to use when run mysqltest. [./var]', default='./var')
self.parser.add_option('--test-set', type='string', help='test list, use `,` interval') self.parser.add_option('--test-set', type='string', help='test list, use `,` interval')
self.parser.add_option('--exclude', type='string', help='exclude list, use `,` interval')
self.parser.add_option('--test-pattern', type='string', help='Pattern for test file.') self.parser.add_option('--test-pattern', type='string', help='Pattern for test file.')
self.parser.add_option('--suite', type='string', help='Suite list. Multiple suites are separated with commas.') self.parser.add_option('--suite', type='string', help='Suite list. Multiple suites are separated with commas.')
self.parser.add_option('--suite-dir', type='string', help='Suite case directory. [./mysql_test/test_suite]', default='./mysql_test/test_suite') self.parser.add_option('--suite-dir', type='string', help='Suite case directory. [./mysql_test/test_suite]', default='./mysql_test/test_suite')
self.parser.add_option('--init-sql-dir', type='string', help='Initiate sql directory. [../]', default='../') self.parser.add_option('--init-sql-dir', type='string', help='Initiate sql directory. [./]', default='./')
self.parser.add_option('--init-sql-files', type='string', help='Initiate sql file list.Multiple files are separated with commas.') self.parser.add_option('--init-sql-files', type='string', help='Initiate sql file list.Multiple files are separated with commas.')
self.parser.add_option('--need-init', action='store_true', help='Execute the init SQL file.', default=False) self.parser.add_option('--need-init', action='store_true', help='Execute the init SQL file.', default=False)
self.parser.add_option('--init-only', action='store_true', help='Exit after executing init SQL.', default=False)
self.parser.add_option('--auto-retry', action='store_true', help='Auto retry when fails.', default=False) self.parser.add_option('--auto-retry', action='store_true', help='Auto retry when fails.', default=False)
self.parser.add_option('--all', action='store_true', help='Run all suite-dir cases.', default=False) self.parser.add_option('--all', action='store_true', help='Run all cases.', default=False)
self.parser.add_option('--psmall', action='store_true', help='Run psmall cases.', default=False) self.parser.add_option('--psmall', action='store_true', help='Run psmall cases.', default=False)
self.parser.add_option('--special-run', action='store_true', help='run mysqltest in special mode.', default=False)
self.parser.add_option('--sp-hint', type='string', help='run test with specified hint', default='')
self.parser.add_option('--sort-result', action='store_true', help='sort query result', default=False)
# self.parser.add_option('--java', action='store_true', help='use java sdk', default=False) # self.parser.add_option('--java', action='store_true', help='use java sdk', default=False)
self.parser.add_option('--slices', type='int', help='How many slices the test set should be')
self.parser.add_option('--slice-idx', type='int', help='The id of slices')
self.parser.add_option('--slb-host', type='string', help='The host of soft load balance.')
self.parser.add_option('--exec-id', type='string', help='The unique execute id.')
self.parser.add_option('--case-filter', type='string', help='The case filter file for mysqltest.')
self.parser.add_option('--psmall-test', type='string', help='The file maintain psmall cases.', default='./mysql_test/psmalltest.py')
self.parser.add_option('--psmall-source', type='string', help='The file maintain psmall source control.', default='./mysql_test/psmallsource.py')
self.parser.add_option('--ps', action='store_true', help='Run in ps mode.', default=False)
self.parser.add_option('--test-tags', type='string', help='The file maintain basic tags.', default='./mysql_test/test_tags.py')
self.parser.add_option('--tags', type='string', help='Run cases by tag.', default='')
self.parser.add_option('--regress-suite-map', type='string', help='The file maintain basic regress suite map', default='./regress_suite_map.py')
self.parser.add_option('--regress_suite', type='string', help='Run cases by regress_suite.', default='')
self.parser.add_option('--reboot-cases', type='string', help='The file maintain reboot cases')
self.parser.add_option('--reboot-timeout', type='int', help='The timeout of observer bootstrap', default=0)
self.parser.add_option('--reboot-retries', type='int', help='How many times to retry when rebooting failed', default=5)
self.parser.add_option('--collect-all', action='store_true', help='Collect servers log.', default=False)
self.parser.add_option('--collect-components', type='string', help='The components which need collect log, multiple components are separated with commas')
self.parser.add_option('--case-timeout', type='int', help='The timeout of mysqltest case')
self.parser.add_option('--log-pattern', type='string', help='The pattern for collected servers log ', default='*.log')
self.parser.add_option('--cluster-mode', type='string', help="The mode of mysqltest")
self.parser.add_option('--disable-reboot', action='store_true', help='Never reboot during test.', default=False)
def _do_command(self, obd): def _do_command(self, obd):
if self.cmds: if self.cmds:
...@@ -826,7 +947,7 @@ class SysBenchCommand(TestMirrorCommand): ...@@ -826,7 +947,7 @@ class SysBenchCommand(TestMirrorCommand):
self.parser.add_option('--test-server', type='string', help='The server for test. By default, the first root server in the component is the test server.') self.parser.add_option('--test-server', type='string', help='The server for test. By default, the first root server in the component is the test server.')
self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root') self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root')
self.parser.add_option('--password', type='string', help='Password for a test.') self.parser.add_option('--password', type='string', help='Password for a test.')
self.parser.add_option('--tenant', type='string', help='Tenant for a test. [test]', default='test') self.parser.add_option('-t', '--tenant', type='string', help='Tenant for a test. [test]', default='test')
self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test') self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test')
self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient') self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')
self.parser.add_option('--sysbench-bin', type='string', help='Sysbench bin path. [sysbench]', default='sysbench') self.parser.add_option('--sysbench-bin', type='string', help='Sysbench bin path. [sysbench]', default='sysbench')
...@@ -858,7 +979,7 @@ class TPCHCommand(TestMirrorCommand): ...@@ -858,7 +979,7 @@ class TPCHCommand(TestMirrorCommand):
self.parser.add_option('--test-server', type='string', help='The server for a test. By default, the first root server in the component is the test server.') self.parser.add_option('--test-server', type='string', help='The server for a test. By default, the first root server in the component is the test server.')
self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root') self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root')
self.parser.add_option('--password', type='string', help='Password for a test.') self.parser.add_option('--password', type='string', help='Password for a test.')
self.parser.add_option('--tenant', type='string', help='Tenant for a test. [test]', default='test') self.parser.add_option('-t', '--tenant', type='string', help='Tenant for a test. [test]', default='test')
self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test') self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test')
self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient') self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')
self.parser.add_option('--dbgen-bin', type='string', help='dbgen bin path. [/usr/tpc-h-tools/tpc-h-tools/bin/dbgen]', default='/usr/tpc-h-tools/tpc-h-tools/bin/dbgen') self.parser.add_option('--dbgen-bin', type='string', help='dbgen bin path. [/usr/tpc-h-tools/tpc-h-tools/bin/dbgen]', default='/usr/tpc-h-tools/tpc-h-tools/bin/dbgen')
...@@ -888,7 +1009,7 @@ class TPCCCommand(TestMirrorCommand): ...@@ -888,7 +1009,7 @@ class TPCCCommand(TestMirrorCommand):
self.parser.add_option('--test-server', type='string', help='The server for a test. By default, the first root server in the component is the test server.') self.parser.add_option('--test-server', type='string', help='The server for a test. By default, the first root server in the component is the test server.')
self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root') self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root')
self.parser.add_option('--password', type='string', help='Password for a test.') self.parser.add_option('--password', type='string', help='Password for a test.')
self.parser.add_option('--tenant', type='string', help='Tenant for a test. [test]', default='test') self.parser.add_option('-t', '--tenant', type='string', help='Tenant for a test. [test]', default='test')
self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test') self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test')
self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient') self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')
self.parser.add_option('--java-bin', type='string', help='Java bin path. [java]', default='java') self.parser.add_option('--java-bin', type='string', help='Java bin path. [java]', default='java')
...@@ -897,10 +1018,10 @@ class TPCCCommand(TestMirrorCommand): ...@@ -897,10 +1018,10 @@ class TPCCCommand(TestMirrorCommand):
self.parser.add_option('--bmsql-jar', type='string', help='BenchmarkSQL jar path.') self.parser.add_option('--bmsql-jar', type='string', help='BenchmarkSQL jar path.')
self.parser.add_option('--bmsql-libs', type='string', help='BenchmarkSQL libs path.') self.parser.add_option('--bmsql-libs', type='string', help='BenchmarkSQL libs path.')
self.parser.add_option('--bmsql-sql-dir', type='string', help='The directory of BenchmarkSQL sql scripts.') self.parser.add_option('--bmsql-sql-dir', type='string', help='The directory of BenchmarkSQL sql scripts.')
self.parser.add_option('--warehouses', type='int', help='The number of warehouses.') self.parser.add_option('--warehouses', type='int', help='The number of warehouses.[10]', default=10)
self.parser.add_option('--load-workers', type='int', help='The number of workers to load data.') self.parser.add_option('--load-workers', type='int', help='The number of workers to load data.')
self.parser.add_option('--terminals', type='int', help='The number of terminals.') self.parser.add_option('--terminals', type='int', help='The number of terminals.')
self.parser.add_option('--run-mins', type='int', help='To run for specified minutes.', default=10) self.parser.add_option('--run-mins', type='int', help='To run for specified minutes.[10]', default=10)
self.parser.add_option('--test-only', action='store_true', help='Only testing SQLs are executed. No initialization is executed.') self.parser.add_option('--test-only', action='store_true', help='Only testing SQLs are executed. No initialization is executed.')
self.parser.add_option('-O', '--optimization', type='int', help='Optimization level {0/1/2}. [1] 0 - No optimization. 1 - Optimize some of the parameters which do not need to restart servers. 2 - Optimize all the parameters and maybe RESTART SERVERS for better performance.', default=1) self.parser.add_option('-O', '--optimization', type='int', help='Optimization level {0/1/2}. [1] 0 - No optimization. 1 - Optimize some of the parameters which do not need to restart servers. 2 - Optimize all the parameters and maybe RESTART SERVERS for better performance.', default=1)
...@@ -909,7 +1030,7 @@ class TPCCCommand(TestMirrorCommand): ...@@ -909,7 +1030,7 @@ class TPCCCommand(TestMirrorCommand):
return obd.tpcc(self.cmds[0], self.opts) return obd.tpcc(self.cmds[0], self.opts)
else: else:
return self._show_help() return self._show_help()
class TestMajorCommand(MajorCommand): class TestMajorCommand(MajorCommand):
...@@ -921,6 +1042,59 @@ class TestMajorCommand(MajorCommand): ...@@ -921,6 +1042,59 @@ class TestMajorCommand(MajorCommand):
self.register_command(TPCCCommand()) self.register_command(TPCCCommand())
class DbConnectCommand(HiddenObdCommand):
    """Hidden `tool db_connect` sub-command: open a database connection to a deployment."""

    def __init__(self):
        super(DbConnectCommand, self).__init__('db_connect', 'Establish a database connection to the deployment.')
        # Connection target; unset options fall back to obd-side defaults.
        self.parser.add_option('-c', '--component', type='string', help='The component used by database connection.')
        self.parser.add_option('-s', '--server', type='string',
                               help='The server used by database connection. The first server in the configuration will be used by default')
        self.parser.add_option('-u', '--user', type='string', help='The username used by database connection. [root]', default='root')
        self.parser.add_option('-p', '--password', type='string', help='The password used by database connection.')
        self.parser.add_option('-t', '--tenant', type='string', help='The tenant used by database connection. [sys]', default='sys')
        self.parser.add_option('-D', '--database', type='string', help='The database name used by database connection.')
        self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')

    def init(self, cmd, args):
        super(DbConnectCommand, self).init(cmd, args)
        self.parser.set_usage('%s <deploy name> [options]' % self.prev_cmd)
        return self

    def _do_command(self, obd):
        # The deploy name is required as the first positional argument.
        if not self.cmds:
            return self._show_help()
        return obd.db_connect(self.cmds[0], self.opts)
class CommandsCommand(HiddenObdCommand):
    """Hidden `tool command` sub-command: run a common tool command against a deployment."""

    def __init__(self):
        super(CommandsCommand, self).__init__('command', 'Common tool commands')
        self.parser.add_option('-c', '--components', type='string', help='The components used by the command. The first component in the configuration will be used by default in interactive commands, and all available components will be used by default in non-interactive commands.')
        self.parser.add_option('-s', '--servers', type='string', help='The servers used by the command. The first server in the configuration will be used by default in interactive commands, and all available servers will be used by default in non-interactive commands.')

    def init(self, cmd, args):
        super(CommandsCommand, self).init(cmd, args)
        self.parser.set_usage('%s <deploy name> <command> [options]' % self.prev_cmd)
        return self

    def _do_command(self, obd):
        # Exactly two positionals are expected: <deploy name> and <command>.
        if len(self.cmds) != 2:
            return self._show_help()
        return obd.commands(self.cmds[0], self.cmds[1], self.opts)
class ToolCommand(HiddenMajorCommand):
    """Hidden `tool` command group bundling auxiliary sub-commands."""

    def __init__(self):
        super(ToolCommand, self).__init__('tool', 'Tools')
        # Register every tool sub-command.
        for sub_command in (DbConnectCommand(), CommandsCommand()):
            self.register_command(sub_command)
class BenchMajorCommand(MajorCommand): class BenchMajorCommand(MajorCommand):
def __init__(self): def __init__(self):
...@@ -952,6 +1126,8 @@ class MainCommand(MajorCommand): ...@@ -952,6 +1126,8 @@ class MainCommand(MajorCommand):
self.register_command(RepositoryMajorCommand()) self.register_command(RepositoryMajorCommand())
self.register_command(TestMajorCommand()) self.register_command(TestMajorCommand())
self.register_command(UpdateCommand()) self.register_command(UpdateCommand())
self.register_command(EnvironmentMajorCommand())
self.register_command(ToolCommand())
self.parser.version = '''OceanBase Deploy: %s self.parser.version = '''OceanBase Deploy: %s
REVISION: %s REVISION: %s
BUILD_BRANCH: %s BUILD_BRANCH: %s
......
...@@ -29,13 +29,16 @@ from enum import Enum ...@@ -29,13 +29,16 @@ from enum import Enum
from ruamel.yaml.comments import CommentedMap from ruamel.yaml.comments import CommentedMap
from tool import ConfigUtil, FileUtil, YamlLoader, OrderedDict from tool import ConfigUtil, FileUtil, YamlLoader, OrderedDict, COMMAND_ENV
from _manager import Manager from _manager import Manager
from _repository import Repository from _repository import Repository
from _stdio import SafeStdio
yaml = YamlLoader() yaml = YamlLoader()
DEFAULT_CONFIG_PARSER_MANAGER = None DEFAULT_CONFIG_PARSER_MANAGER = None
ENV = 'env'
BASE_DIR_KEY = "OBD_DEPLOY_BASE_DIR"
class ParserError(Exception): class ParserError(Exception):
...@@ -96,24 +99,51 @@ class ServerConfigFlyweightFactory(object): ...@@ -96,24 +99,51 @@ class ServerConfigFlyweightFactory(object):
return ServerConfigFlyweightFactory._CACHE[_key] return ServerConfigFlyweightFactory._CACHE[_key]
class RsyncConfig(object):
    # Keys of the rsync-based runtime file distribution section in deploy configs.
    RSYNC = 'runtime_dependencies'  # top-level section name in the user/include yaml
    SOURCE_PATH = 'src_path'  # source path of an entry; resolved against the base dir (see ClusterConfig.get_rsync_list)
    TARGET_PATH = 'target_path'  # destination path of an entry; presumably remote-side — TODO confirm against consumers
class InnerConfigItem(str): class InnerConfigItem(str):
pass pass
class InnerConfigKeywords(object):
    # Global (non-component) keys stored in the deploy's inner config file.
    DEPLOY_INSTALL_MODE = 'deploy_install_mode'  # one of DeployInstallMode.LN / DeployInstallMode.CP
    DEPLOY_BASE_DIR = 'deploy_base_dir'  # base dir used to resolve relative include/rsync paths
class InnerConfig(object): class InnerConfig(object):
keyword_symbol = "$_"
def __init__(self, path, yaml_loader): def __init__(self, path, yaml_loader):
self.path = path self.path = path
self.yaml_loader = yaml_loader self.yaml_loader = yaml_loader
self.config = {} self.config = {}
self._load() self._load()
def is_keyword(self, s):
return s.startswith(self.keyword_symbol)
def to_keyword(self, key):
return "{}{}".format(self.keyword_symbol, key)
    def keyword_to_str(self, _keyword):
        # Strip the (first occurrence of the) keyword marker and downcast any
        # InnerConfigItem back to a plain str.
        return str(_keyword.replace(self.keyword_symbol, '', 1))
def _load(self): def _load(self):
self.config = {} self.config = {}
try: try:
with FileUtil.open(self.path, 'rb') as f: with FileUtil.open(self.path, 'rb') as f:
config = self.yaml_loader.load(f) config = self.yaml_loader.load(f)
for component_name in config: for component_name in config:
if self.is_keyword(component_name):
self.config[InnerConfigItem(component_name)] = config[component_name]
continue
self.config[component_name] = {} self.config[component_name] = {}
c_config = config[component_name] c_config = config[component_name]
for server in c_config: for server in c_config:
...@@ -142,7 +172,14 @@ class InnerConfig(object): ...@@ -142,7 +172,14 @@ class InnerConfig(object):
return self.config.get(component_name, {}) return self.config.get(component_name, {})
def get_server_config(self, component_name, server): def get_server_config(self, component_name, server):
return self.get_component(component_name).get(server, {}) return self.get_component_config(component_name).get(server, {})
def get_global_config(self, key, default=None):
key = self.to_keyword(key)
return self.config.get(key, default)
def update_global_config(self, key, value):
self.config[self.to_keyword(key)] = value
def update_component_config(self, component_name, config): def update_component_config(self, component_name, config):
self.config[component_name] = {} self.config[component_name] = {}
...@@ -165,11 +202,11 @@ class ConfigParser(object): ...@@ -165,11 +202,11 @@ class ConfigParser(object):
@classmethod @classmethod
def _is_inner_item(cls, key): def _is_inner_item(cls, key):
return isinstance(key, InnerConfigItem) and key.startswith(cls.PREFIX) return isinstance(key, InnerConfigItem) and key.startswith(cls.PREFIX)
@classmethod @classmethod
def extract_inner_config(cls, cluster_config, config): def extract_inner_config(cls, cluster_config, config):
return {} return {}
@classmethod @classmethod
def _to_cluster_config(cls, component_name, config): def _to_cluster_config(cls, component_name, config):
raise NotImplementedError raise NotImplementedError
...@@ -177,18 +214,19 @@ class ConfigParser(object): ...@@ -177,18 +214,19 @@ class ConfigParser(object):
@classmethod @classmethod
def to_cluster_config(cls, component_name, config): def to_cluster_config(cls, component_name, config):
cluster_config = cls._to_cluster_config(component_name, config) cluster_config = cls._to_cluster_config(component_name, config)
cluster_config.set_include_file(config.get('include', ''))
cluster_config.parser = cls cluster_config.parser = cls
return cluster_config return cluster_config
@classmethod @classmethod
def _from_cluster_config(cls, conf, cluster_config): def _from_cluster_config(cls, conf, cluster_config):
raise NotImplementedError raise NotImplementedError
@classmethod @classmethod
def from_cluster_config(cls, cluster_config): def from_cluster_config(cls, cluster_config):
if not cls.STYLE: if not cls.STYLE:
raise NotImplementedError('undefined Style ConfigParser') raise NotImplementedError('undefined Style ConfigParser')
conf = CommentedMap() conf = CommentedMap()
conf['style'] = cls.STYLE conf['style'] = cls.STYLE
if cluster_config.origin_package_hash: if cluster_config.origin_package_hash:
...@@ -205,7 +243,7 @@ class ConfigParser(object): ...@@ -205,7 +243,7 @@ class ConfigParser(object):
'inner_config': inner_config, 'inner_config': inner_config,
'config': conf 'config': conf
} }
@classmethod @classmethod
def get_server_src_conf(cls, cluster_config, component_config, server): def get_server_src_conf(cls, cluster_config, component_config, server):
if server.name not in component_config: if server.name not in component_config:
...@@ -246,10 +284,18 @@ class DefaultConfigParser(ConfigParser): ...@@ -246,10 +284,18 @@ class DefaultConfigParser(ConfigParser):
component_name, component_name,
ConfigUtil.get_value_from_dict(conf, 'version', None, str), ConfigUtil.get_value_from_dict(conf, 'version', None, str),
ConfigUtil.get_value_from_dict(conf, 'tag', None, str), ConfigUtil.get_value_from_dict(conf, 'tag', None, str),
ConfigUtil.get_value_from_dict(conf, 'release', None, str),
ConfigUtil.get_value_from_dict(conf, 'package_hash', None, str) ConfigUtil.get_value_from_dict(conf, 'package_hash', None, str)
) )
if 'global' in conf: if 'global' in conf:
cluster_config.set_global_conf(conf['global']) cluster_config.set_global_conf(conf['global'])
if RsyncConfig.RSYNC in conf:
cluster_config.set_rsync_list(conf[RsyncConfig.RSYNC])
if ENV in conf:
cluster_config.set_environments(conf[ENV])
for server in servers: for server in servers:
if server.name in conf: if server.name in conf:
cluster_config.add_server_conf(server, conf[server.name]) cluster_config.add_server_conf(server, conf[server.name])
...@@ -269,7 +315,7 @@ class DefaultConfigParser(ConfigParser): ...@@ -269,7 +315,7 @@ class DefaultConfigParser(ConfigParser):
for server in cluster_config.servers: for server in cluster_config.servers:
inner_config[server.name][key] = global_config[key] inner_config[server.name][key] = global_config[key]
del global_config[key] del global_config[key]
for server in cluster_config.servers: for server in cluster_config.servers:
if server.name not in config: if server.name not in config:
continue continue
...@@ -301,21 +347,32 @@ class DefaultConfigParser(ConfigParser): ...@@ -301,21 +347,32 @@ class DefaultConfigParser(ConfigParser):
class ClusterConfig(object): class ClusterConfig(object):
def __init__(self, servers, name, version, tag, package_hash, parser=None): def __init__(self, servers, name, version, tag, release, package_hash, parser=None):
self.version = version self._version = version
self.origin_version = version self.origin_version = version
self.tag = tag self.tag = tag
self.origin_tag = tag self.origin_tag = tag
self._release = release
self.origin_release = release
self.name = name self.name = name
self.origin_package_hash = package_hash self.origin_package_hash = package_hash
self.package_hash = package_hash self._package_hash = package_hash
self._temp_conf = {} self._temp_conf = {}
self._default_conf = {} self._default_conf = {}
self._global_conf = {} self._global_conf = None
self._server_conf = {} self._server_conf = {}
self._cache_server = {} self._cache_server = {}
self._original_global_conf = {} self._original_global_conf = {}
self._rsync_list = None
self._include_config = None
self._origin_rsync_list = {}
self._include_file = None
self._origin_include_file = None
self._origin_include_config = None
self._environments = None
self._origin_environments = {}
self._inner_config = {} self._inner_config = {}
self._base_dir = ''
servers = list(servers) servers = list(servers)
self.servers = servers self.servers = servers
self._original_servers = servers # 保证顺序 self._original_servers = servers # 保证顺序
...@@ -325,10 +382,12 @@ class ClusterConfig(object): ...@@ -325,10 +382,12 @@ class ClusterConfig(object):
self._deploy_config = None self._deploy_config = None
self._depends = {} self._depends = {}
self.parser = parser self.parser = parser
self._has_package_pattern = None
def __eq__(self, other): def __eq__(self, other):
if not isinstance(other, self.__class__): if not isinstance(other, self.__class__):
return False return False
# todo 检查 rsync include等
return self._global_conf == other._global_conf and self._server_conf == other._server_conf return self._global_conf == other._global_conf and self._server_conf == other._server_conf
def __deepcopy__(self, memo): def __deepcopy__(self, memo):
...@@ -344,8 +403,17 @@ class ClusterConfig(object): ...@@ -344,8 +403,17 @@ class ClusterConfig(object):
def set_deploy_config(self, _deploy_config): def set_deploy_config(self, _deploy_config):
if self._deploy_config is None: if self._deploy_config is None:
self._deploy_config = _deploy_config self._deploy_config = _deploy_config
self.set_base_dir(self._deploy_config.get_base_dir())
return True return True
return False return False
def set_base_dir(self, base_dir):
if self._base_dir != base_dir:
self._base_dir = base_dir
self._rsync_list = None
self._include_config = None
self._global_conf = None
@property @property
def original_servers(self): def original_servers(self):
return self._original_servers return self._original_servers
...@@ -361,6 +429,12 @@ class ClusterConfig(object): ...@@ -361,6 +429,12 @@ class ClusterConfig(object):
def get_inner_config(self): def get_inner_config(self):
return self._inner_config return self._inner_config
    def is_cp_install_mode(self):
        # Delegates to the owning deploy config's install-mode flag ('cp').
        return self._deploy_config.is_cp_install_mode()
    def is_ln_install_mode(self):
        # Delegates to the owning deploy config's install-mode flag ('ln').
        return self._deploy_config.is_ln_install_mode()
def apply_inner_config(self, config): def apply_inner_config(self, config):
self._inner_config = config self._inner_config = config
self._clear_cache_server() self._clear_cache_server()
...@@ -416,6 +490,23 @@ class ClusterConfig(object): ...@@ -416,6 +490,23 @@ class ClusterConfig(object):
self._original_global_conf[key] = value self._original_global_conf[key] = value
self._global_conf[key] = value self._global_conf[key] = value
def update_rsync_list(self, rsync_list, save=True):
if self._deploy_config is None:
return False
if not self._deploy_config.update_component_rsync_list(self.name, rsync_list, save):
return False
self._rsync_list = rsync_list
return True
def update_environments(self, environments, save=True):
if self._deploy_config is None:
return False
if not self._deploy_config.update_component_environments(self.name, environments, save):
return False
self._origin_environments = environments
self._environments = None
return True
def get_unconfigured_require_item(self, server): def get_unconfigured_require_item(self, server):
items = [] items = []
config = self.get_server_conf(server) config = self.get_server_conf(server)
...@@ -464,18 +555,19 @@ class ClusterConfig(object): ...@@ -464,18 +555,19 @@ class ClusterConfig(object):
for key in self._temp_conf: for key in self._temp_conf:
if self._temp_conf[key].require and self._temp_conf[key].default is not None: if self._temp_conf[key].require and self._temp_conf[key].default is not None:
self._default_conf[key] = self._temp_conf[key].default self._default_conf[key] = self._temp_conf[key].default
self.set_global_conf(self._global_conf) # 更新全局配置 self._global_conf = None
self._clear_cache_server()
def get_temp_conf_item(self, key): def get_temp_conf_item(self, key):
if self._temp_conf: if self._temp_conf:
return self._temp_conf.get(key) return self._temp_conf.get(key)
else: else:
return None return None
def check_param(self): def check_param(self):
error = [] error = []
if self._temp_conf: if self._temp_conf:
error += self._check_param(self._global_conf) error += self._check_param(self.get_global_conf())
for server in self._server_conf: for server in self._server_conf:
error += self._check_param(self._server_conf[server]) error += self._check_param(self._server_conf[server])
return not error, set(error) return not error, set(error)
...@@ -493,10 +585,22 @@ class ClusterConfig(object): ...@@ -493,10 +585,22 @@ class ClusterConfig(object):
def set_global_conf(self, conf): def set_global_conf(self, conf):
self._original_global_conf = deepcopy(conf) self._original_global_conf = deepcopy(conf)
self._global_conf = deepcopy(self._default_conf) self._global_conf = None
self._global_conf.update(self._original_global_conf)
self._clear_cache_server() self._clear_cache_server()
    def set_rsync_list(self, configs):
        # Raw rsync entries from the user config; merged with include-file
        # entries lazily in get_rsync_list.
        self._origin_rsync_list = configs
    def set_include_file(self, path):
        # Changing the include file invalidates the cached include config.
        if path != self._origin_include_file:
            self._origin_include_file = path
            self._include_file = None
            self._include_config = None
    def set_environments(self, config):
        # Raw env section from the user config; merged with include-file env
        # lazily in get_environments.
        self._origin_environments = config
        self._environments = None
def add_server_conf(self, server, conf): def add_server_conf(self, server, conf):
if server not in self.servers: if server not in self.servers:
self.servers.append(server) self.servers.append(server)
...@@ -506,14 +610,115 @@ class ClusterConfig(object): ...@@ -506,14 +610,115 @@ class ClusterConfig(object):
self._cache_server[server] = None self._cache_server[server] = None
def get_global_conf(self): def get_global_conf(self):
if self._global_conf is None:
self._global_conf = deepcopy(self._default_conf)
self._global_conf.update(self._get_include_config('config', {}))
self._global_conf.update(self._original_global_conf)
return self._global_conf return self._global_conf
def _add_base_dir(self, path):
if not os.path.isabs(path):
if self._base_dir:
path = os.path.join(self._base_dir, path)
else:
raise Exception("`{}` need to use absolute paths. If you want to use relative paths, please enable developer mode "
"and set environment variables {}".format(RsyncConfig.RSYNC, BASE_DIR_KEY))
return path
@property
def has_package_pattern(self):
if self._has_package_pattern is None:
patterns = (self.origin_package_hash, self.origin_version, self.origin_release, self.origin_tag)
self._has_package_pattern = any([x is not None for x in patterns])
return self._has_package_pattern
    @property
    def version(self):
        # Lazily fall back to the config-derived version (include file or origin).
        if self._version is None:
            self._version = self.config_version
        return self._version
    @version.setter
    def version(self, value):
        # Explicit assignment overrides the config-derived value.
        self._version = value
    @property
    def config_version(self):
        # Without an explicit package pattern the include file may supply the version.
        if not self.has_package_pattern:
            return self._get_include_config('version', None)
        else:
            return self.origin_version
    @property
    def release(self):
        # Lazily fall back to the config-derived release (include file or origin).
        if self._release is None:
            self._release = self.config_release
        return self._release
    @release.setter
    def release(self, value):
        # Explicit assignment overrides the config-derived value.
        self._release = value
    @property
    def config_release(self):
        # Without an explicit package pattern the include file may supply the release.
        if not self.has_package_pattern:
            return self._get_include_config('release', None)
        else:
            return self.origin_release
    @property
    def package_hash(self):
        # Lazily fall back to the config-derived hash (include file or origin).
        if self._package_hash is None:
            self._package_hash = self.config_package_hash
        return self._package_hash
    @package_hash.setter
    def package_hash(self, value):
        # Explicit assignment overrides the config-derived value.
        self._package_hash = value
    @property
    def config_package_hash(self):
        # Without an explicit package pattern the include file may supply the hash.
        if not self.has_package_pattern:
            return self._get_include_config('package_hash', None)
        else:
            return self.origin_package_hash
    def _get_include_config(self, key=None, default=None, not_found_act="ignore"):
        # Lazily load and cache the include-file config, then return a deep copy
        # of the value under *key* (or of the whole config when key is falsy).
        # NOTE(review): *not_found_act* is currently unused — missing-file
        # handling lives in DeployConfig.load_include_file instead.
        if self._include_config is None:
            if self._origin_include_file:
                # Relative include paths are resolved against the base dir.
                if os.path.isabs(self._origin_include_file):
                    include_file = self._origin_include_file
                else:
                    include_file = os.path.join(self._base_dir, self._origin_include_file)
                # Only reload when the resolved path actually changed.
                if include_file != self._include_file:
                    self._include_file = include_file
                    self._origin_include_config = self._deploy_config.load_include_file(self._include_file)
            if self._origin_include_config is None:
                self._origin_include_config = {}
            self._include_config = self._origin_include_config
        # Deep copy so callers cannot mutate the cached config.
        value = self._include_config.get(key, default) if key else self._include_config
        return deepcopy(value)
    def get_rsync_list(self):
        # Merge include-file rsync entries with the user config's, then resolve
        # each entry's src_path against the base dir. The result (with entries
        # mutated in place) is cached, so resolution happens only once.
        if self._rsync_list is None:
            self._rsync_list = self._get_include_config(RsyncConfig.RSYNC, [])
            self._rsync_list += self._origin_rsync_list
            for item in self._rsync_list:
                item[RsyncConfig.SOURCE_PATH] = self._add_base_dir(item[RsyncConfig.SOURCE_PATH])
        return self._rsync_list
def get_environments(self):
if self._environments is None:
self._environments = self._get_include_config(ENV, OrderedDict())
self._environments.update(self._origin_environments)
return self._environments
def get_server_conf(self, server): def get_server_conf(self, server):
if server not in self._server_conf: if server not in self._server_conf:
return None return None
if self._cache_server[server] is None: if self._cache_server[server] is None:
conf = deepcopy(self._inner_config.get(server.name, {})) conf = deepcopy(self._inner_config.get(server.name, {}))
conf.update(self._global_conf) conf.update(self.get_global_conf())
conf.update(self._server_conf[server]) conf.update(self._server_conf[server])
self._cache_server[server] = conf self._cache_server[server] = conf
return self._cache_server[server] return self._cache_server[server]
...@@ -547,6 +752,12 @@ class DeployConfigStatus(Enum): ...@@ -547,6 +752,12 @@ class DeployConfigStatus(Enum):
NEED_REDEPLOY = 'need redeploy' NEED_REDEPLOY = 'need redeploy'
class DeployInstallMode(object):
    # Valid values for the inner-config 'deploy_install_mode' key.
    LN = 'ln'  # presumably link-based install — TODO confirm against install code
    CP = 'cp'  # copy-based install; the default (see DeployConfig.get_deploy_install_mode)
class DeployInfo(object): class DeployInfo(object):
def __init__(self, name, status, components=OrderedDict(), config_status=DeployConfigStatus.UNCHNAGE): def __init__(self, name, status, components=OrderedDict(), config_status=DeployConfigStatus.UNCHNAGE):
...@@ -562,9 +773,9 @@ class DeployInfo(object): ...@@ -562,9 +773,9 @@ class DeployInfo(object):
return '\n'.join(info) return '\n'.join(info)
class DeployConfig(object): class DeployConfig(SafeStdio):
def __init__(self, yaml_path, yaml_loader=yaml, inner_config=None, config_parser_manager=None): def __init__(self, yaml_path, yaml_loader=yaml, inner_config=None, config_parser_manager=None, stdio=None):
self._user = None self._user = None
self.unuse_lib_repository = False self.unuse_lib_repository = False
self.auto_create_tenant = False self.auto_create_tenant = False
...@@ -574,6 +785,8 @@ class DeployConfig(object): ...@@ -574,6 +785,8 @@ class DeployConfig(object):
self.yaml_path = yaml_path self.yaml_path = yaml_path
self.yaml_loader = yaml_loader self.yaml_loader = yaml_loader
self.config_parser_manager = config_parser_manager if config_parser_manager else DEFAULT_CONFIG_PARSER_MANAGER self.config_parser_manager = config_parser_manager if config_parser_manager else DEFAULT_CONFIG_PARSER_MANAGER
self.stdio = stdio
self._ignore_include_error = False
if self.config_parser_manager is None: if self.config_parser_manager is None:
raise ParserError('ConfigParaserManager Not Set') raise ParserError('ConfigParaserManager Not Set')
self._load() self._load()
...@@ -594,9 +807,13 @@ class DeployConfig(object): ...@@ -594,9 +807,13 @@ class DeployConfig(object):
else: else:
def get_inner_config(component_name): def get_inner_config(component_name):
return {} return {}
for component_name in self.components:
self.components[component_name].apply_inner_config(get_inner_config(component_name))
self._inner_config = inner_config self._inner_config = inner_config
base_dir = self.get_base_dir()
for component_name in self.components:
cluster_config = self.components[component_name]
cluster_config.apply_inner_config(get_inner_config(component_name))
cluster_config.set_base_dir(base_dir)
def set_unuse_lib_repository(self, status): def set_unuse_lib_repository(self, status):
if self.unuse_lib_repository != status: if self.unuse_lib_repository != status:
...@@ -624,7 +841,7 @@ class DeployConfig(object): ...@@ -624,7 +841,7 @@ class DeployConfig(object):
del src_data['version'] del src_data['version']
if 'tag' in src_data: if 'tag' in src_data:
del src_data['tag'] del src_data['tag']
self._src_data[component] = src_data self._src_data[component] = src_data
if self._dump(): if self._dump():
cluster_config = self.components[component] cluster_config = self.components[component]
...@@ -665,6 +882,24 @@ class DeployConfig(object): ...@@ -665,6 +882,24 @@ class DeployConfig(object):
if not self.user: if not self.user:
self.set_user_conf(UserConfig()) self.set_user_conf(UserConfig())
    def allow_include_error(self):
        # Downgrade a missing include file from an exception to a warning
        # (consumed by load_include_file).
        self.stdio.verbose("allow include file not exists")
        self._ignore_include_error = True
def load_include_file(self, path):
if not os.path.isabs(path):
raise Exception("`{}` need to use absolute path. If you want to use relative paths, please enable developer mode "
"and set environment variables {}".format('include', BASE_DIR_KEY))
if os.path.isfile(path):
with open(path, 'rb') as f:
return self.yaml_loader.load(f)
else:
if self._ignore_include_error:
self.stdio.warn("include file: {} not found, some configurations may be lost".format(path))
return {}
else:
raise Exception('Not such file: %s' % path)
def _separate_config(self): def _separate_config(self):
if self.inner_config: if self.inner_config:
for component_name in self.components: for component_name in self.components:
...@@ -674,7 +909,7 @@ class DeployConfig(object): ...@@ -674,7 +909,7 @@ class DeployConfig(object):
if parser: if parser:
inner_config = parser.extract_inner_config(cluster_config, src_data) inner_config = parser.extract_inner_config(cluster_config, src_data)
self.inner_config.update_component_config(component_name, inner_config) self.inner_config.update_component_config(component_name, inner_config)
def _dump_inner_config(self): def _dump_inner_config(self):
if self.inner_config: if self.inner_config:
self._separate_config() self._separate_config()
...@@ -695,6 +930,47 @@ class DeployConfig(object): ...@@ -695,6 +930,47 @@ class DeployConfig(object):
def dump(self): def dump(self):
return self._dump() return self._dump()
def _update_global_inner_config(self, key, value, save=True):
if self.inner_config:
self.inner_config.update_global_config(key, value)
return self._dump_inner_config() if save else True
def _get_global_inner_config(self, key, default=None):
if self.inner_config:
return self.inner_config.get_global_config(key, default)
return default
    def set_base_dir(self, path, save=True):
        # Persist the base dir into the inner config, then propagate it to every
        # component's cluster config. An empty path clears the setting.
        if path and not os.path.isabs(path):
            raise Exception('%s is not an absolute path' % path)
        if self._update_global_inner_config(InnerConfigKeywords.DEPLOY_BASE_DIR, path, save=save):
            for component_name in self.components:
                cluster_config = self.components[component_name]
                cluster_config.set_base_dir(path)
            return True
        return False
    def get_base_dir(self):
        # Empty string means "no base dir configured".
        return self._get_global_inner_config(InnerConfigKeywords.DEPLOY_BASE_DIR, '')
    def set_deploy_install_mode(self, mode, save=True):
        # *mode* should be a DeployInstallMode value ('ln' or 'cp').
        return self._update_global_inner_config(InnerConfigKeywords.DEPLOY_INSTALL_MODE, mode, save=save)
    def get_deploy_install_mode(self):
        # 'cp' is the default when the inner config has no explicit mode.
        return self._get_global_inner_config(InnerConfigKeywords.DEPLOY_INSTALL_MODE, DeployInstallMode.CP)
    def enable_ln_install_mode(self, save=True):
        # Convenience wrapper for set_deploy_install_mode('ln').
        return self.set_deploy_install_mode(DeployInstallMode.LN, save=save)
    def enable_cp_install_mode(self, save=True):
        # Convenience wrapper for set_deploy_install_mode('cp').
        return self.set_deploy_install_mode(DeployInstallMode.CP, save=save)
    def is_ln_install_mode(self):
        # True when the configured install mode is 'ln'.
        return self.get_deploy_install_mode() == DeployInstallMode.LN
    def is_cp_install_mode(self):
        # True when the configured install mode is 'cp' (the default).
        return self.get_deploy_install_mode() == DeployInstallMode.CP
def set_user_conf(self, conf): def set_user_conf(self, conf):
self._user = conf self._user = conf
...@@ -855,7 +1131,7 @@ class Deploy(object): ...@@ -855,7 +1131,7 @@ class Deploy(object):
def _load_deploy_config(self, path): def _load_deploy_config(self, path):
yaml_loader = YamlLoader(stdio=self.stdio) yaml_loader = YamlLoader(stdio=self.stdio)
deploy_config = DeployConfig(path, yaml_loader=yaml_loader, config_parser_manager=self.config_parser_manager) deploy_config = DeployConfig(path, yaml_loader=yaml_loader, config_parser_manager=self.config_parser_manager, stdio=self.stdio)
deploy_info = self.deploy_info deploy_info = self.deploy_info
for component_name in deploy_info.components: for component_name in deploy_info.components:
if component_name not in deploy_config.components: if component_name not in deploy_config.components:
...@@ -866,10 +1142,9 @@ class Deploy(object): ...@@ -866,10 +1142,9 @@ class Deploy(object):
cluster_config.version = config['version'] cluster_config.version = config['version']
if 'hash' in config and config['hash']: if 'hash' in config and config['hash']:
cluster_config.package_hash = config['hash'] cluster_config.package_hash = config['hash']
deploy_config.inner_config = InnerConfig(self.get_inner_config_path(self.config_dir), yaml_loader=yaml_loader) deploy_config.inner_config = InnerConfig(self.get_inner_config_path(self.config_dir), yaml_loader=yaml_loader)
return deploy_config return deploy_config
@property @property
def temp_deploy_config(self): def temp_deploy_config(self):
path = self.get_temp_deploy_yaml_path(self.config_dir) path = self.get_temp_deploy_yaml_path(self.config_dir)
...@@ -1027,7 +1302,7 @@ class Deploy(object): ...@@ -1027,7 +1302,7 @@ class Deploy(object):
class ConfigParserManager(Manager): class ConfigParserManager(Manager):
RELATIVE_PATH = 'config_parser/' RELATIVE_PATH = 'config_parser/'
def __init__(self, home_path, stdio=None): def __init__(self, home_path, stdio=None):
super(ConfigParserManager, self).__init__(home_path, stdio) super(ConfigParserManager, self).__init__(home_path, stdio)
self.global_parsers = { self.global_parsers = {
......
...@@ -49,7 +49,8 @@ class InitDirFailedErrorMessage(object): ...@@ -49,7 +49,8 @@ class InitDirFailedErrorMessage(object):
PERMISSION_DENIED = ': {path} permission denied .' PERMISSION_DENIED = ': {path} permission denied .'
DOC_LINK_MSG = 'See https://open.oceanbase.com/docs/obd-cn/V1.4.0/10000000000436999 .' DOC_LINK = '<DOC_LINK>'
DOC_LINK_MSG = 'See {}'.format(DOC_LINK if DOC_LINK else "https://open.oceanbase.com/docs/obd-cn/V1.4.0/10000000000436999 .")
EC_CONFIG_CONFLICT_PORT = OBDErrorCode(1000, 'Configuration conflict {server1}:{port} port is used for {server2}\'s {key}') EC_CONFIG_CONFLICT_PORT = OBDErrorCode(1000, 'Configuration conflict {server1}:{port} port is used for {server2}\'s {key}')
EC_CONFLICT_PORT = OBDErrorCode(1001, '{server}:{port} port is already used') EC_CONFLICT_PORT = OBDErrorCode(1001, '{server}:{port} port is already used')
......
...@@ -64,7 +64,7 @@ class MixLock(object): ...@@ -64,7 +64,7 @@ class MixLock(object):
FileUtil.exclusive_lock_obj(self.lock_obj, stdio=self.stdio) FileUtil.exclusive_lock_obj(self.lock_obj, stdio=self.stdio)
except Exception as e: except Exception as e:
raise LockError(e) raise LockError(e)
def _sh_lock(self): def _sh_lock(self):
if self.lock_obj: if self.lock_obj:
try: try:
...@@ -100,7 +100,7 @@ class MixLock(object): ...@@ -100,7 +100,7 @@ class MixLock(object):
except Exception as e: except Exception as e:
self.stdio and getattr(self.stdio, 'stop_loading', print)('fail') self.stdio and getattr(self.stdio, 'stop_loading', print)('fail')
raise LockError(e) raise LockError(e)
def _lock_escalation(self, try_times): def _lock_escalation(self, try_times):
stdio = self.stdio stdio = self.stdio
while try_times: while try_times:
......
...@@ -22,9 +22,10 @@ from __future__ import absolute_import, division, print_function ...@@ -22,9 +22,10 @@ from __future__ import absolute_import, division, print_function
import os import os
from tool import DirectoryUtil from tool import DirectoryUtil
from _stdio import SafeStdio
class Manager(object): class Manager(SafeStdio):
RELATIVE_PATH = '' RELATIVE_PATH = ''
......
...@@ -40,11 +40,10 @@ except: ...@@ -40,11 +40,10 @@ except:
from _arch import getArchList, getBaseArch from _arch import getArchList, getBaseArch
from _rpm import Package, PackageInfo from _rpm import Package, PackageInfo
from tool import ConfigUtil, FileUtil from tool import ConfigUtil, FileUtil, var_replace
from _manager import Manager from _manager import Manager
_KEYCRE = re.compile(r"\$(\w+)")
_ARCH = getArchList() _ARCH = getArchList()
_RELEASE = None _RELEASE = None
SUP_MAP = { SUP_MAP = {
...@@ -118,7 +117,7 @@ class MirrorRepository(object): ...@@ -118,7 +117,7 @@ class MirrorRepository(object):
self.stdio and getattr(self.stdio, 'verbose', print)('pkg %s is %s, but %s is required' % (key, getattr(pkg, key), pattern[key])) self.stdio and getattr(self.stdio, 'verbose', print)('pkg %s is %s, but %s is required' % (key, getattr(pkg, key), pattern[key]))
return None return None
return pkg return pkg
def get_rpm_pkg_by_info(self, pkg_info): def get_rpm_pkg_by_info(self, pkg_info):
return None return None
...@@ -286,14 +285,20 @@ class RemoteMirrorRepository(MirrorRepository): ...@@ -286,14 +285,20 @@ class RemoteMirrorRepository(MirrorRepository):
if self._db is None: if self._db is None:
fp = FileUtil.unzip(file_path, stdio=self.stdio) fp = FileUtil.unzip(file_path, stdio=self.stdio)
if not fp: if not fp:
FileUtil.rm(file_path, stdio=self.stdio)
return [] return []
self._db = {} self._db = {}
parser = cElementTree.iterparse(fp) try:
for event, elem in parser: parser = cElementTree.iterparse(fp)
if RemoteMirrorRepository.ns_cleanup(elem.tag) == 'package' and elem.attrib.get('type') == 'rpm': for event, elem in parser:
info = RemotePackageInfo(elem) if RemoteMirrorRepository.ns_cleanup(elem.tag) == 'package' and elem.attrib.get('type') == 'rpm':
self._db[info.md5] = info info = RemotePackageInfo(elem)
self._dump_db_cache() self._db[info.md5] = info
self._dump_db_cache()
except:
FileUtil.rm(file_path, stdio=self.stdio)
self.stdio and self.stdio.critical('failed to parse file %s, please retry later.' % file_path)
return []
return self._db return self._db
def _load_db_cache(self, path): def _load_db_cache(self, path):
...@@ -341,29 +346,6 @@ class RemoteMirrorRepository(MirrorRepository): ...@@ -341,29 +346,6 @@ class RemoteMirrorRepository(MirrorRepository):
def get_db_cache_file(mirror_path): def get_db_cache_file(mirror_path):
return os.path.join(mirror_path, RemoteMirrorRepository.DB_CACHE_FILE) return os.path.join(mirror_path, RemoteMirrorRepository.DB_CACHE_FILE)
@staticmethod
def var_replace(string, var):
if not var:
return string
done = []
while string:
m = _KEYCRE.search(string)
if not m:
done.append(string)
break
varname = m.group(1).lower()
replacement = var.get(varname, m.group())
start, end = m.span()
done.append(string[:start])
done.append(str(replacement))
string = string[end:]
return ''.join(done)
def _load_repo_age(self): def _load_repo_age(self):
try: try:
with open(self.get_repo_age_file(self.mirror_path), 'r') as f: with open(self.get_repo_age_file(self.mirror_path), 'r') as f:
...@@ -817,8 +799,8 @@ class MirrorRepositorySection(object): ...@@ -817,8 +799,8 @@ class MirrorRepositorySection(object):
def get_mirror(self, server_vars, stdio=None): def get_mirror(self, server_vars, stdio=None):
meta_data = self.meta_data meta_data = self.meta_data
meta_data['name'] = RemoteMirrorRepository.var_replace(meta_data['name'], server_vars) meta_data['name'] = var_replace(meta_data['name'], server_vars)
meta_data['baseurl'] = RemoteMirrorRepository.var_replace(meta_data['baseurl'], server_vars) meta_data['baseurl'] = var_replace(meta_data['baseurl'], server_vars)
mirror_path = os.path.join(self.remote_path, meta_data['name']) mirror_path = os.path.join(self.remote_path, meta_data['name'])
mirror = RemoteMirrorRepository(mirror_path, meta_data, stdio) mirror = RemoteMirrorRepository(mirror_path, meta_data, stdio)
return mirror return mirror
...@@ -947,9 +929,9 @@ class MirrorRepositoryManager(Manager): ...@@ -947,9 +929,9 @@ class MirrorRepositoryManager(Manager):
def get_mirrors(self, is_enabled=True): def get_mirrors(self, is_enabled=True):
self._lock() self._lock()
mirros = self.get_remote_mirrors(is_enabled=is_enabled) mirrors = self.get_remote_mirrors(is_enabled=is_enabled)
mirros.append(self.local_mirror) mirrors.append(self.local_mirror)
return mirros return mirrors
def get_exact_pkg(self, **pattern): def get_exact_pkg(self, **pattern):
only_info = 'only_info' in pattern and pattern['only_info'] only_info = 'only_info' in pattern and pattern['only_info']
......
...@@ -29,6 +29,7 @@ from copy import deepcopy ...@@ -29,6 +29,7 @@ from copy import deepcopy
from _manager import Manager from _manager import Manager
from _rpm import Version from _rpm import Version
from ssh import ConcurrentExecutor
from tool import ConfigUtil, DynamicLoading, YamlLoader from tool import ConfigUtil, DynamicLoading, YamlLoader
...@@ -124,6 +125,7 @@ class PluginContext(object): ...@@ -124,6 +125,7 @@ class PluginContext(object):
self.options = options self.options = options
self.dev_mode = dev_mode self.dev_mode = dev_mode
self.stdio = stdio self.stdio = stdio
self.concurrent_exector = ConcurrentExecutor(32)
self._return = PluginReturn() self._return = PluginReturn()
def get_return(self): def get_return(self):
...@@ -164,7 +166,8 @@ class ScriptPlugin(Plugin): ...@@ -164,7 +166,8 @@ class ScriptPlugin(Plugin):
def __getattr__(self, key): def __getattr__(self, key):
def new_method(*args, **kwargs): def new_method(*args, **kwargs):
kwargs['stdio'] = self.stdio if "stdio" not in kwargs:
kwargs['stdio'] = self.stdio
return attr(*args, **kwargs) return attr(*args, **kwargs)
attr = getattr(self.client, key) attr = getattr(self.client, key)
if hasattr(attr, '__call__'): if hasattr(attr, '__call__'):
...@@ -595,12 +598,18 @@ class InstallPlugin(Plugin): ...@@ -595,12 +598,18 @@ class InstallPlugin(Plugin):
DIR = 1 DIR = 1
BIN = 2 BIN = 2
class InstallMethod(Enum):
ANY = 0
CP = 1
class FileItem(object): class FileItem(object):
def __init__(self, src_path, target_path, _type): def __init__(self, src_path, target_path, _type, install_method):
self.src_path = src_path self.src_path = src_path
self.target_path = target_path self.target_path = target_path
self.type = _type if _type else InstallPlugin.FileItemType.FILE self.type = _type if _type else InstallPlugin.FileItemType.FILE
self.install_method = install_method or InstallPlugin.InstallMethod.ANY
PLUGIN_TYPE = PluginType.INSTALL PLUGIN_TYPE = PluginType.INSTALL
FILES_MAP_YAML = 'file_map.yaml' FILES_MAP_YAML = 'file_map.yaml'
...@@ -611,6 +620,7 @@ class InstallPlugin(Plugin): ...@@ -611,6 +620,7 @@ class InstallPlugin(Plugin):
super(InstallPlugin, self).__init__(component_name, plugin_path, version, dev_mode) super(InstallPlugin, self).__init__(component_name, plugin_path, version, dev_mode)
self.file_map_path = os.path.join(self.plugin_path, self.FILES_MAP_YAML) self.file_map_path = os.path.join(self.plugin_path, self.FILES_MAP_YAML)
self._file_map = {} self._file_map = {}
self._file_map_data = None
@classmethod @classmethod
def var_replace(cls, string, var): def var_replace(cls, string, var):
...@@ -634,6 +644,13 @@ class InstallPlugin(Plugin): ...@@ -634,6 +644,13 @@ class InstallPlugin(Plugin):
return ''.join(done) return ''.join(done)
@property
def file_map_data(self):
if self._file_map_data is None:
with open(self.file_map_path, 'rb') as f:
self._file_map_data = yaml.load(f)
return self._file_map_data
def file_map(self, package_info): def file_map(self, package_info):
var = { var = {
'name': package_info.name, 'name': package_info.name,
...@@ -646,17 +663,17 @@ class InstallPlugin(Plugin): ...@@ -646,17 +663,17 @@ class InstallPlugin(Plugin):
if not self._file_map.get(key): if not self._file_map.get(key):
try: try:
file_map = {} file_map = {}
with open(self.file_map_path, 'rb') as f: for data in self.file_map_data:
for data in yaml.load(f): k = data['src_path']
k = data['src_path'] if k[0] != '.':
if k[0] != '.': k = '.%s' % os.path.join('/', k)
k = '.%s' % os.path.join('/', k) k = self.var_replace(k, var)
k = self.var_replace(k, var) file_map[k] = InstallPlugin.FileItem(
file_map[k] = InstallPlugin.FileItem( k,
k, ConfigUtil.get_value_from_dict(data, 'target_path', k),
ConfigUtil.get_value_from_dict(data, 'target_path', k), getattr(InstallPlugin.FileItemType, ConfigUtil.get_value_from_dict(data, 'type', 'FILE').upper(), None),
getattr(InstallPlugin.FileItemType, ConfigUtil.get_value_from_dict(data, 'type', 'FILE').upper(), None) getattr(InstallPlugin.InstallMethod, ConfigUtil.get_value_from_dict(data, 'install_method', 'ANY').upper(), None),
) )
self._file_map[key] = file_map self._file_map[key] = file_map
except: except:
pass pass
......
...@@ -31,6 +31,7 @@ from _arch import getBaseArch ...@@ -31,6 +31,7 @@ from _arch import getBaseArch
from tool import DirectoryUtil, FileUtil, YamlLoader from tool import DirectoryUtil, FileUtil, YamlLoader
from _manager import Manager from _manager import Manager
from _plugin import InstallPlugin from _plugin import InstallPlugin
from ssh import LocalClient
class LocalPackage(Package): class LocalPackage(Package):
...@@ -121,10 +122,15 @@ class LocalPackage(Package): ...@@ -121,10 +122,15 @@ class LocalPackage(Package):
filelinktos.append(os.readlink(target_path)) filelinktos.append(os.readlink(target_path))
filemodes.append(-24065) filemodes.append(-24065)
else: else:
m = hashlib.md5() ret = LocalClient().execute_command('md5sum {}'.format(target_path))
with open(target_path, 'rb') as f: if ret:
m.update(f.read()) m_value = ret.stdout.strip().split(' ')[0].encode('utf-8')
m_value = m.hexdigest().encode(sys.getdefaultencoding()) else:
m = hashlib.md5()
with open(target_path, 'rb') as f:
m.update(f.read())
m_value = m.hexdigest().encode(sys.getdefaultencoding())
# raise Exception('Failed to get md5sum for {}, error: {}'.format(target_path, ret.stderr))
m_sum.update(m_value) m_sum.update(m_value)
filemd5s.append(m_value) filemd5s.append(m_value)
filelinktos.append('') filelinktos.append('')
...@@ -158,7 +164,7 @@ class Repository(PackageInfo): ...@@ -158,7 +164,7 @@ class Repository(PackageInfo):
return self.md5 return self.md5
def __str__(self): def __str__(self):
return '%s-%s-%s' % (self.name, self.version, self.hash) return '%s-%s-%s-%s' % (self.name, self.version, self.release, self.hash)
def __hash__(self): def __hash__(self):
return hash(self.repository_dir) return hash(self.repository_dir)
...@@ -380,48 +386,29 @@ class ComponentRepository(object): ...@@ -380,48 +386,29 @@ class ComponentRepository(object):
repositories[repository.hash] = repository repositories[repository.hash] = repository
return repositories return repositories
def get_repository_by_version(self, version, tag=None): def search_repository(self, version=None, tag=None, release=None):
if tag: path_pattern = os.path.join(self.repository_dir, version or '*', tag or '*')
return self.get_repository_by_tag(tag, version)
repository = self.get_repository_by_tag(self.name, version)
if repository:
return repository
path_partten = os.path.join(self.repository_dir, version, tag if tag else '*')
for path in glob(path_partten):
n_repository = Repository(self.name, path, self.stdio)
if n_repository.hash and n_repository > repository:
repository = n_repository
return repository
def get_repository_by_tag(self, tag, version=None):
path_partten = os.path.join(self.repository_dir, version if version else '*', tag)
repository = None repository = None
for path in glob(path_partten): for path in glob(path_pattern):
n_repository = Repository(self.name, path, self.stdio) n_repository = Repository(self.name, path, self.stdio)
if release and release != n_repository.release:
continue
if n_repository.hash and n_repository > repository: if n_repository.hash and n_repository > repository:
repository = n_repository repository = n_repository
return repository return repository
def get_repository(self, version=None, tag=None): def get_repository(self, version=None, tag=None, release=None):
if tag: if version or tag or release:
return self.get_repository_by_tag(tag, version) return self.search_repository(version=version, tag=tag, release=release)
if version: else:
return self.get_repository_by_version(version, tag) return self.search_repository(tag=self.name) or self.search_repository()
version = None
for rep_version in os.listdir(self.repository_dir):
rep_version = Version(rep_version)
if rep_version > version:
version = rep_version
if version:
return self.get_repository_by_version(version, tag)
return None
def get_repositories(self, version=None): def get_repositories(self, version=None):
if not version: if not version:
version = '*' version = '*'
repositories = [] repositories = []
path_partten = os.path.join(self.repository_dir, version, '*') path_pattern = os.path.join(self.repository_dir, version, '*')
for path in glob(path_partten): for path in glob(path_pattern):
repository = Repository(self.name, path, self.stdio) repository = Repository(self.name, path, self.stdio)
if repository.hash: if repository.hash:
repositories.append(repository) repositories.append(repository)
...@@ -436,7 +423,7 @@ class RepositoryManager(Manager): ...@@ -436,7 +423,7 @@ class RepositoryManager(Manager):
def __init__(self, home_path, lock_manager=None, stdio=None): def __init__(self, home_path, lock_manager=None, stdio=None):
super(RepositoryManager, self).__init__(home_path, stdio=stdio) super(RepositoryManager, self).__init__(home_path, stdio=stdio)
self.repositories = {} self.repositories = {}
self.component_repositoies = {} self.component_repositories = {}
self.lock_manager = lock_manager self.lock_manager = lock_manager
def _lock(self, read_only=False): def _lock(self, read_only=False):
...@@ -460,20 +447,20 @@ class RepositoryManager(Manager): ...@@ -460,20 +447,20 @@ class RepositoryManager(Manager):
def get_repositories(self, name, version=None, instance=True): def get_repositories(self, name, version=None, instance=True):
repositories = [] repositories = []
for repository in self.get_component_repositoy(name).get_repositories(version): for repository in self.get_component_repository(name).get_repositories(version):
if instance and repository.is_shadow_repository() is False: if instance and repository.is_shadow_repository() is False:
repositories.append(repository) repositories.append(repository)
return repositories return repositories
def get_repositories_view(self, name=None): def get_repositories_view(self, name=None):
if name: if name:
repositories = self.get_component_repositoy(name).get_repositories() repositories = self.get_component_repository(name).get_repositories()
else: else:
repositories = [] repositories = []
path_partten = os.path.join(self.path, '*') path_pattern = os.path.join(self.path, '*')
for path in glob(path_partten): for path in glob(path_pattern):
_, name = os.path.split(path) _, name = os.path.split(path)
repositories += self.get_component_repositoy(name).get_repositories() repositories += self.get_component_repository(name).get_repositories()
repositories_vo = {} repositories_vo = {}
for repository in repositories: for repository in repositories:
...@@ -487,36 +474,46 @@ class RepositoryManager(Manager): ...@@ -487,36 +474,46 @@ class RepositoryManager(Manager):
repositories_vo[repository] = self._get_repository_vo(repository) repositories_vo[repository] = self._get_repository_vo(repository)
return list(repositories_vo.values()) return list(repositories_vo.values())
def get_component_repositoy(self, name): def get_component_repository(self, name):
if name not in self.component_repositoies: if name not in self.component_repositories:
self._lock(True) self._lock(True)
path = os.path.join(self.path, name) path = os.path.join(self.path, name)
self.component_repositoies[name] = ComponentRepository(name, path, self.stdio) self.component_repositories[name] = ComponentRepository(name, path, self.stdio)
return self.component_repositoies[name] return self.component_repositories[name]
def get_repository_by_version(self, name, version, tag=None, instance=True): def get_repository(self, name, version=None, tag=None, release=None, package_hash=None, instance=True):
if not tag: self.stdio.verbose(
tag = name "Search repository {name} version: {version}, tag: {tag}, release: {release}, package_hash: {package_hash}".format(
path = os.path.join(self.path, name, version, tag) name=name, version=version, tag=tag, release=release, package_hash=package_hash))
if path not in self.repositories: tag = tag or package_hash
component_repositoy = self.get_component_repositoy(name) component_repository = self.get_component_repository(name)
repository = component_repositoy.get_repository(version, tag) if version and tag:
if repository: repository_dir = os.path.join(self.path, name, version, tag)
self.repositories[repository.repository_dir] = repository if repository_dir in self.repositories:
self.repositories[path] = repository repository = self.repositories[repository_dir]
else:
repository = component_repository.get_repository(version=version, tag=tag, release=release)
else:
repository = component_repository.get_repository(version=version, tag=tag, release=release)
if not repository:
return None
else: else:
repository = self.repositories[path] if repository.repository_dir not in self.repositories:
self.repositories[repository.repository_dir] = repository
else:
repository = self.repositories[repository.repository_dir]
if not self._check_repository_pattern(repository, version=version, release=release, hash=package_hash):
return None
self.stdio.verbose("Found repository {}".format(repository))
return self.get_instance_repository_from_shadow(repository) if instance else repository return self.get_instance_repository_from_shadow(repository) if instance else repository
def get_repository(self, name, version=None, tag=None, instance=True): def _check_repository_pattern(self, repository, **kwargs):
if version: for key in ["version", "release", "hash"]:
return self.get_repository_by_version(name, version, tag) current_value = getattr(repository, key)
if kwargs.get(key) is not None and current_value != kwargs[key]:
component_repositoy = self.get_component_repositoy(name) self.stdio.verbose("repository {} is {}, but {} is required".format(key, current_value, kwargs[key]))
repository = component_repositoy.get_repository(version, tag) return False
if repository: return True
self.repositories[repository.repository_dir] = repository
return self.get_instance_repository_from_shadow(repository) if repository and instance else repository
def create_instance_repository(self, name, version, _hash): def create_instance_repository(self, name, version, _hash):
path = os.path.join(self.path, name, version, _hash) path = os.path.join(self.path, name, version, _hash)
...@@ -534,7 +531,7 @@ class RepositoryManager(Manager): ...@@ -534,7 +531,7 @@ class RepositoryManager(Manager):
self._lock(True) self._lock(True)
self.repositories[path] = Repository(name, path, self.stdio) self.repositories[path] = Repository(name, path, self.stdio)
return self.repositories[path] return self.repositories[path]
repository = Repository(name, path, self.stdio) repository = Repository(name, path, self.stdio)
repository.set_version(version) repository.set_version(version)
return repository return repository
......
...@@ -24,12 +24,16 @@ import os ...@@ -24,12 +24,16 @@ import os
import signal import signal
import sys import sys
import traceback import traceback
import inspect2
import six
from enum import Enum from enum import Enum
from halo import Halo, cursor from halo import Halo, cursor
from colorama import Fore from colorama import Fore
from prettytable import PrettyTable from prettytable import PrettyTable
from progressbar import AdaptiveETA, Bar, SimpleProgress, ETA, FileTransferSpeed, Percentage, ProgressBar from progressbar import AdaptiveETA, Bar, SimpleProgress, ETA, FileTransferSpeed, Percentage, ProgressBar
from types import MethodType
from inspect2 import Parameter
if sys.version_info.major == 3: if sys.version_info.major == 3:
...@@ -74,8 +78,8 @@ class FormtatText(object): ...@@ -74,8 +78,8 @@ class FormtatText(object):
return FormtatText.format(text, Fore.RED) return FormtatText.format(text, Fore.RED)
class LogSymbols(Enum): class LogSymbols(Enum):
INFO = FormtatText.info('!') INFO = FormtatText.info('!')
SUCCESS = FormtatText.success('ok') SUCCESS = FormtatText.success('ok')
WARNING = FormtatText.warning('!!') WARNING = FormtatText.warning('!!')
...@@ -112,7 +116,7 @@ class IOTable(PrettyTable): ...@@ -112,7 +116,7 @@ class IOTable(PrettyTable):
val = 'l' val = 'l'
for field in self._field_names: for field in self._field_names:
self._align[field] = val self._align[field] = val
class IOHalo(Halo): class IOHalo(Halo):
...@@ -230,14 +234,14 @@ class IO(object): ...@@ -230,14 +234,14 @@ class IO(object):
WARNING_PREV = FormtatText.warning('[WARN]') WARNING_PREV = FormtatText.warning('[WARN]')
ERROR_PREV = FormtatText.error('[ERROR]') ERROR_PREV = FormtatText.error('[ERROR]')
IS_TTY = sys.stdin.isatty() IS_TTY = sys.stdin.isatty()
def __init__(self, def __init__(self,
level, level,
msg_lv=MsgLevel.DEBUG, msg_lv=MsgLevel.DEBUG,
trace_logger=None, trace_logger=None,
use_cache=False, use_cache=False,
track_limit=0, track_limit=0,
root_io=None, root_io=None,
stream=sys.stdout stream=sys.stdout
): ):
self.level = level self.level = level
...@@ -258,7 +262,7 @@ class IO(object): ...@@ -258,7 +262,7 @@ class IO(object):
if self._root_io: if self._root_io:
self._root_io.log_cache self._root_io.log_cache
return self._log_cache return self._log_cache
def before_close(self): def before_close(self):
if self._before_critical: if self._before_critical:
try: try:
...@@ -272,7 +276,7 @@ class IO(object): ...@@ -272,7 +276,7 @@ class IO(object):
def __del__(self): def __del__(self):
self._close() self._close()
def exit(self, code): def exit(self, code):
self._close() self._close()
sys.exit(code) sys.exit(code)
...@@ -280,14 +284,14 @@ class IO(object): ...@@ -280,14 +284,14 @@ class IO(object):
def set_cache(self, status): def set_cache(self, status):
if status: if status:
self._cache_on() self._cache_on()
def _cache_on(self): def _cache_on(self):
if self._root_io: if self._root_io:
return False return False
if self.log_cache is None: if self.log_cache is None:
self._log_cache = [] self._log_cache = []
return True return True
def _cache_off(self): def _cache_off(self):
if self._root_io: if self._root_io:
return False return False
...@@ -359,7 +363,7 @@ class IO(object): ...@@ -359,7 +363,7 @@ class IO(object):
finally: finally:
self._clear_sync_ctx() self._clear_sync_ctx()
return ret return ret
def start_loading(self, text, *arg, **kwargs): def start_loading(self, text, *arg, **kwargs):
if self.sync_obj: if self.sync_obj:
return False return False
...@@ -405,7 +409,7 @@ class IO(object): ...@@ -405,7 +409,7 @@ class IO(object):
if not isinstance(self.sync_obj, IOProgressBar): if not isinstance(self.sync_obj, IOProgressBar):
return False return False
return self._stop_sync_obj(IOProgressBar, 'interrupt') return self._stop_sync_obj(IOProgressBar, 'interrupt')
def sub_io(self, pid=None, msg_lv=None): def sub_io(self, pid=None, msg_lv=None):
if not pid: if not pid:
pid = os.getpid() pid = os.getpid()
...@@ -414,16 +418,20 @@ class IO(object): ...@@ -414,16 +418,20 @@ class IO(object):
key = "%s-%s" % (pid, msg_lv) key = "%s-%s" % (pid, msg_lv)
if key not in self.sub_ios: if key not in self.sub_ios:
self.sub_ios[key] = self.__class__( self.sub_ios[key] = self.__class__(
self.level + 1, self.level + 1,
msg_lv=msg_lv, msg_lv=msg_lv,
trace_logger=self.trace_logger, trace_logger=self.trace_logger,
track_limit=self.track_limit, track_limit=self.track_limit,
root_io=self._root_io if self._root_io else self root_io=self._root_io if self._root_io else self
) )
return self.sub_ios[key] return self.sub_ios[key]
def print_list(self, ary, field_names=None, exp=lambda x: x if isinstance(x, list) else [x], show_index=False, start=0, **kwargs): def print_list(self, ary, field_names=None, exp=lambda x: x if isinstance(x, (list, tuple)) else [x], show_index=False, start=0, **kwargs):
if not ary: if not ary:
title = kwargs.get("title", "")
empty_msg = kwargs.get("empty_msg", "{} is empty.".format(title))
if empty_msg:
self.print(empty_msg)
return return
show_index = field_names is not None and show_index show_index = field_names is not None and show_index
if show_index: if show_index:
...@@ -464,7 +472,7 @@ class IO(object): ...@@ -464,7 +472,7 @@ class IO(object):
kwargs['file'] and print(self._format(msg, *args), **kwargs) kwargs['file'] and print(self._format(msg, *args), **kwargs)
del kwargs['file'] del kwargs['file']
self.log(msg_lv, msg, *args, **kwargs) self.log(msg_lv, msg, *args, **kwargs)
def log(self, levelno, msg, *args, **kwargs): def log(self, levelno, msg, *args, **kwargs):
self._cache_log(levelno, msg, *args, **kwargs) self._cache_log(levelno, msg, *args, **kwargs)
...@@ -478,13 +486,11 @@ class IO(object): ...@@ -478,13 +486,11 @@ class IO(object):
else: else:
log_cache.append((levelno, line, args, kwargs)) log_cache.append((levelno, line, args, kwargs))
def _flush_log(self): def _flush_log(self):
if not self._root_io and self.trace_logger and self._log_cache: if not self._root_io and self.trace_logger and self._log_cache:
for levelno, line, args, kwargs in self._log_cache: for levelno, line, args, kwargs in self._log_cache:
self.trace_logger.log(levelno, line, *args, **kwargs) self.trace_logger.log(levelno, line, *args, **kwargs)
self._log_cache = [] self._log_cache = []
def _log(self, levelno, msg, *args, **kwargs): def _log(self, levelno, msg, *args, **kwargs):
if self.trace_logger: if self.trace_logger:
self.trace_logger.log(levelno, msg, *args, **kwargs) self.trace_logger.log(levelno, msg, *args, **kwargs)
...@@ -560,3 +566,144 @@ class IO(object): ...@@ -560,3 +566,144 @@ class IO(object):
msg and self.error(msg) msg and self.error(msg)
print_stack(''.join(lines)) print_stack(''.join(lines))
class _Empty(object):
pass
EMPTY = _Empty()
del _Empty
class FakeReturn(object):
def __call__(self, *args, **kwargs):
return None
def __len__(self):
return 0
FAKE_RETURN = FakeReturn()
class StdIO(object):
def __init__(self, io=None):
self.io = io
self._attrs = {}
self._warn_func = getattr(self.io, "warn", print)
def __getattr__(self, item):
if self.io is None:
return FAKE_RETURN
if item not in self._attrs:
attr = getattr(self.io, item, EMPTY)
if attr is not EMPTY:
self._attrs[item] = attr
else:
self._warn_func(FormtatText.warning("WARNING: {} has no attribute '{}'".format(self.io, item)))
self._attrs[item] = FAKE_RETURN
return self._attrs[item]
FAKE_IO = StdIO()
def get_stdio(io_obj):
if io_obj is None:
return FAKE_IO
elif isinstance(io_obj, StdIO):
return io_obj
else:
return StdIO(io_obj)
def safe_stdio_decorator(default_stdio=None):
def decorated(func):
is_bond_method = False
_type = None
if isinstance(func, (staticmethod, classmethod)):
is_bond_method = True
_type = type(func)
func = func.__func__
all_parameters = inspect2.signature(func).parameters
if "stdio" in all_parameters:
default_stdio_in_params = all_parameters["stdio"].default
if not isinstance(default_stdio_in_params, Parameter.empty):
_default_stdio = default_stdio_in_params or default_stdio
def func_wrapper(*args, **kwargs):
_params_keys = list(all_parameters.keys())
_index = _params_keys.index("stdio")
if "stdio" not in kwargs and len(args) > _index:
stdio = get_stdio(args[_index])
tmp_args = list(args)
tmp_args[_index] = stdio
args = tuple(tmp_args)
else:
stdio = get_stdio(kwargs.get("stdio", _default_stdio))
kwargs["stdio"] = stdio
return func(*args, **kwargs)
return _type(func_wrapper) if is_bond_method else func_wrapper
else:
return _type(func) if is_bond_method else func
return decorated
class SafeStdioMeta(type):
@staticmethod
def _init_wrapper_func(func):
def wrapper(*args, **kwargs):
setattr(args[0], "_wrapper_func", {})
func(*args, **kwargs)
if "stdio" in args[0].__dict__:
args[0].__dict__["stdio"] = get_stdio(args[0].__dict__["stdio"])
if func.__name__ != wrapper.__name__:
return wrapper
else:
return func
def __new__(mcs, name, bases, attrs):
for key, attr in attrs.items():
if key.startswith("__") and key.endswith("__"):
continue
if isinstance(attr, (staticmethod, classmethod)):
attrs[key] = safe_stdio_decorator()(attr)
cls = type.__new__(mcs, name, bases, attrs)
cls.__init__ = mcs._init_wrapper_func(cls.__init__)
return cls
class _StayTheSame(object):
pass
STAY_THE_SAME = _StayTheSame()
class SafeStdio(six.with_metaclass(SafeStdioMeta)):
_wrapper_func = {}
def __getattribute__(self, item):
_wrapper_func = super(SafeStdio, self).__getattribute__("_wrapper_func")
if item not in _wrapper_func:
attr = super(SafeStdio, self).__getattribute__(item)
if (not item.startswith("__") or not item.endswith("__")) and isinstance(attr, MethodType):
if "stdio" in inspect2.signature(attr).parameters:
_wrapper_func[item] = safe_stdio_decorator(default_stdio=getattr(self, "stdio", None))(attr)
return _wrapper_func[item]
_wrapper_func[item] = STAY_THE_SAME
return attr
if _wrapper_func[item] is STAY_THE_SAME:
return super(SafeStdio, self).__getattribute__(item)
return _wrapper_func[item]
def __setattr__(self, key, value):
if key in self._wrapper_func:
del self._wrapper_func[key]
return super(SafeStdio, self).__setattr__(key, value)
...@@ -30,7 +30,9 @@ from _deploy import ( ...@@ -30,7 +30,9 @@ from _deploy import (
ServerConfigFlyweightFactory, ServerConfigFlyweightFactory,
ClusterConfig, ClusterConfig,
ConfigParser, ConfigParser,
CommentedMap CommentedMap,
RsyncConfig,
ENV
) )
...@@ -85,11 +87,12 @@ class ClusterConfigParser(ConfigParser): ...@@ -85,11 +87,12 @@ class ClusterConfigParser(ConfigParser):
server_config['zone'] = zone_name server_config['zone'] = zone_name
servers[server] = server_config servers[server] = server_config
cluster_conf = ClusterConfig( cluster_config = ClusterConfig(
servers.keys(), servers.keys(),
component_name, component_name,
ConfigUtil.get_value_from_dict(conf, 'version', None, str), ConfigUtil.get_value_from_dict(conf, 'version', None, str),
ConfigUtil.get_value_from_dict(conf, 'tag', None, str), ConfigUtil.get_value_from_dict(conf, 'tag', None, str),
ConfigUtil.get_value_from_dict(conf, 'release', None, str),
ConfigUtil.get_value_from_dict(conf, 'package_hash', None, str) ConfigUtil.get_value_from_dict(conf, 'package_hash', None, str)
) )
global_config = {} global_config = {}
...@@ -99,11 +102,17 @@ class ClusterConfigParser(ConfigParser): ...@@ -99,11 +102,17 @@ class ClusterConfigParser(ConfigParser):
global_config['appname'] = str(conf['name']) global_config['appname'] = str(conf['name'])
if 'config' in conf: if 'config' in conf:
global_config.update(conf['config']) global_config.update(conf['config'])
cluster_conf.set_global_conf(global_config) cluster_config.set_global_conf(global_config)
if RsyncConfig.RSYNC in conf:
cluster_config.set_rsync_list(conf[RsyncConfig.RSYNC])
if ENV in conf:
cluster_config.set_environments(conf[ENV])
for server in servers: for server in servers:
cluster_conf.add_server_conf(server, servers[server]) cluster_config.add_server_conf(server, servers[server])
return cluster_conf return cluster_config
@classmethod @classmethod
def extract_inner_config(cls, cluster_config, config): def extract_inner_config(cls, cluster_config, config):
......
...@@ -33,14 +33,17 @@ from prettytable import PrettyTable ...@@ -33,14 +33,17 @@ from prettytable import PrettyTable
from halo import Halo from halo import Halo
from ssh import SshClient, SshConfig from ssh import SshClient, SshConfig
from tool import ConfigUtil, FileUtil, DirectoryUtil, YamlLoader from tool import ConfigUtil, FileUtil, DirectoryUtil, YamlLoader, timeout, COMMAND_ENV
from _stdio import MsgLevel from _stdio import MsgLevel
from _rpm import Version from _rpm import Version
from _mirror import MirrorRepositoryManager, PackageInfo from _mirror import MirrorRepositoryManager, PackageInfo
from _plugin import PluginManager, PluginType, InstallPlugin from _plugin import PluginManager, PluginType, InstallPlugin
from _repository import RepositoryManager, LocalPackage
from _deploy import DeployManager, DeployStatus, DeployConfig, DeployConfigStatus, BASE_DIR_KEY, InnerConfigKeywords
from _lock import LockManager
from _repository import RepositoryManager, LocalPackage, Repository from _repository import RepositoryManager, LocalPackage, Repository
from _deploy import ( from _deploy import (
DeployManager, DeployStatus, DeployManager, DeployStatus,
DeployConfig, DeployConfigStatus, DeployConfig, DeployConfigStatus,
ParserError, Deploy ParserError, Deploy
) )
...@@ -148,7 +151,7 @@ class ObdHome(object): ...@@ -148,7 +151,7 @@ class ObdHome(object):
def ssh_clients_connect(self, ssh_clients, servers, user_config): def ssh_clients_connect(self, ssh_clients, servers, user_config):
for server in servers: for server in servers:
if server.ip not in ssh_clients: if server not in ssh_clients:
ssh_clients[server] = SshClient( ssh_clients[server] = SshClient(
SshConfig( SshConfig(
server.ip, server.ip,
...@@ -226,9 +229,9 @@ class ObdHome(object): ...@@ -226,9 +229,9 @@ class ObdHome(object):
print_match and self._call_stdio( print_match and self._call_stdio(
'print_list', 'print_list',
matchs, matchs,
['name', 'version', 'release', 'arch', 'md5'], ['name', 'version', 'release', 'arch', 'md5'],
lambda x: [matchs[x].name, matchs[x].version, matchs[x].release, matchs[x].arch, matchs[x].md5], lambda x: [matchs[x].name, matchs[x].version, matchs[x].release, matchs[x].arch, matchs[x].md5],
title='Search %s %s Result' % (component_name, version) title='Search %s %s Result' % (component_name, version)
) )
for md5 in usable: for md5 in usable:
if md5 in matchs: if md5 in matchs:
...@@ -238,9 +241,9 @@ class ObdHome(object): ...@@ -238,9 +241,9 @@ class ObdHome(object):
usable_matchs = [info[1] for info in sorted(matchs.items())] usable_matchs = [info[1] for info in sorted(matchs.items())]
if release_first: if release_first:
usable_matchs = usable_matchs[:1] usable_matchs = usable_matchs[:1]
return usable_matchs return usable_matchs
def search_components_from_mirrors(self, deploy_config, fuzzy_match=False, only_info=True, update_if_need=None): def search_components_from_mirrors(self, deploy_config, fuzzy_match=False, only_info=True, update_if_need=None):
pkgs = [] pkgs = []
errors = [] errors = []
...@@ -251,14 +254,18 @@ class ObdHome(object): ...@@ -251,14 +254,18 @@ class ObdHome(object):
# First, check if the component exists in the repository. If exists, check if the version is available. If so, use the repository directly. # First, check if the component exists in the repository. If exists, check if the version is available. If so, use the repository directly.
self._call_stdio('verbose', 'Get %s repository' % component) self._call_stdio('verbose', 'Get %s repository' % component)
repository = self.repository_manager.get_repository(component, config.version, config.package_hash if config.package_hash else config.tag) repository = self.repository_manager.get_repository(name=component, version=config.version, tag=config.tag, release=config.release, package_hash=config.package_hash)
if repository and not repository.hash: if repository and not repository.hash:
repository = None repository = None
self._call_stdio('verbose', 'Search %s package from mirror' % component) if not config.tag:
pkg = self.mirror_manager.get_best_pkg(name=component, version=config.version, md5=config.package_hash, fuzzy_match=fuzzy_match, only_info=only_info) self._call_stdio('verbose', 'Search %s package from mirror' % component)
pkg = self.mirror_manager.get_best_pkg(
name=component, version=config.version, md5=config.package_hash, release=config.release, fuzzy_match=fuzzy_match, only_info=only_info)
else:
pkg = None
if repository or pkg: if repository or pkg:
if pkg: if pkg:
self._call_stdio('verbose', 'Found Package %s-%s-%s' % (pkg.name, pkg.version, pkg.md5)) self._call_stdio('verbose', 'Found Package %s-%s-%s-%s' % (pkg.name, pkg.version, pkg.release, pkg.md5))
if repository: if repository:
if repository >= pkg or ( if repository >= pkg or (
( (
...@@ -271,9 +278,9 @@ class ObdHome(object): ...@@ -271,9 +278,9 @@ class ObdHome(object):
self._call_stdio('print', '%s-%s already installed.' % (repository.name, repository.version)) self._call_stdio('print', '%s-%s already installed.' % (repository.name, repository.version))
continue continue
if config.version and pkg.version != config.version: if config.version and pkg.version != config.version:
self._call_stdio('warn', 'No such package %s-%s. Use similar package %s-%s.' % (component, config.version, pkg.name, pkg.version)) self._call_stdio('warn', 'No such package %s-%s-%s. Use similar package %s-%s-%s.' % (component, config.version, config.release, pkg.name, pkg.version, pkg.release))
else: else:
self._call_stdio('print', 'Package %s-%s is available.' % (pkg.name, pkg.version)) self._call_stdio('print', 'Package %s-%s-%s is available.' % (pkg.name, pkg.version, pkg.release))
repository = self.repository_manager.get_repository(pkg.name, pkg.md5) repository = self.repository_manager.get_repository(pkg.name, pkg.md5)
if repository: if repository:
repositories.append(repository) repositories.append(repository)
...@@ -282,12 +289,14 @@ class ObdHome(object): ...@@ -282,12 +289,14 @@ class ObdHome(object):
else: else:
pkg_name = [component] pkg_name = [component]
if config.version: if config.version:
pkg_name.append(config.version) pkg_name.append("version: %s" % config.version)
if config.release:
pkg_name.append("release: %s" % config.release)
if config.package_hash: if config.package_hash:
pkg_name.append(config.package_hash) pkg_name.append("package hash: %s" % config.package_hash)
elif config.tag: if config.tag:
pkg_name.append(config.tag) pkg_name.append("tag: %s" % config.tag)
errors.append('No such package %s.' % ('-'.join(pkg_name))) errors.append('No such package name: %s.' % (', '.join(pkg_name)))
return pkgs, repositories, errors return pkgs, repositories, errors
def load_local_repositories(self, deploy_info, allow_shadow=True): def load_local_repositories(self, deploy_info, allow_shadow=True):
...@@ -357,6 +366,7 @@ class ObdHome(object): ...@@ -357,6 +366,7 @@ class ObdHome(object):
initial_config = '' initial_config = ''
if deploy: if deploy:
try: try:
deploy.deploy_config.allow_include_error()
if deploy.deploy_info.config_status == DeployConfigStatus.UNCHNAGE: if deploy.deploy_info.config_status == DeployConfigStatus.UNCHNAGE:
path = deploy.deploy_config.yaml_path path = deploy.deploy_config.yaml_path
else: else:
...@@ -400,7 +410,12 @@ class ObdHome(object): ...@@ -400,7 +410,12 @@ class ObdHome(object):
subprocess_call([EDITOR, tf.name]) subprocess_call([EDITOR, tf.name])
self._call_stdio('verbose', 'Load %s' % tf.name) self._call_stdio('verbose', 'Load %s' % tf.name)
try: try:
deploy_config = DeployConfig(tf.name, yaml_loader=YamlLoader(self.stdio), config_parser_manager=self.deploy_manager.config_parser_manager) deploy_config = DeployConfig(
tf.name, yaml_loader=YamlLoader(self.stdio),
config_parser_manager=self.deploy_manager.config_parser_manager,
inner_config=deploy.deploy_config.inner_config if deploy else None
)
deploy_config.allow_include_error()
except Exception as e: except Exception as e:
if confirm(e): if confirm(e):
continue continue
...@@ -419,16 +434,37 @@ class ObdHome(object): ...@@ -419,16 +434,37 @@ class ObdHome(object):
if not self._call_stdio('confirm', 'Modifications to the deployment architecture take effect after you redeploy the architecture. Are you sure that you want to start a redeployment? '): if not self._call_stdio('confirm', 'Modifications to the deployment architecture take effect after you redeploy the architecture. Are you sure that you want to start a redeployment? '):
continue continue
config_status = DeployConfigStatus.NEED_REDEPLOY config_status = DeployConfigStatus.NEED_REDEPLOY
else:
if config_status != DeployConfigStatus.NEED_REDEPLOY:
comp_attr_changed = False
for component_name in deploy_config.components: for component_name in deploy_config.components:
old_cluster_config = deploy.deploy_config.components[component_name] old_cluster_config = deploy.deploy_config.components[component_name]
new_cluster_config = deploy_config.components[component_name] new_cluster_config = deploy_config.components[component_name]
if new_cluster_config.version != old_cluster_config.origin_version \ if new_cluster_config.version != old_cluster_config.config_version \
or new_cluster_config.package_hash != old_cluster_config.origin_package_hash \ or new_cluster_config.package_hash != old_cluster_config.config_package_hash \
or new_cluster_config.tag != old_cluster_config.origin_tag: or new_cluster_config.release != old_cluster_config.config_release \
or new_cluster_config.tag != old_cluster_config.tag:
comp_attr_changed = True
config_status = DeployConfigStatus.NEED_REDEPLOY config_status = DeployConfigStatus.NEED_REDEPLOY
break break
if comp_attr_changed:
if not self._call_stdio('confirm', 'Modifications to the version, release or hash of the component take effect after you redeploy the cluster. Are you sure that you want to start a redeployment? '):
continue
config_status = DeployConfigStatus.NEED_REDEPLOY
if config_status != DeployConfigStatus.NEED_REDEPLOY:
rsync_conf_changed = False
for component_name in deploy_config.components:
old_cluster_config = deploy.deploy_config.components[component_name]
new_cluster_config = deploy_config.components[component_name]
if new_cluster_config.get_rsync_list() != old_cluster_config.get_rsync_list():
rsync_conf_changed = True
break
if rsync_conf_changed:
if not self._call_stdio('confirm', 'Modifications to the rsync config of a deployed cluster take effect after you redeploy the cluster. Are you sure that you want to start a redeployment? '):
continue
config_status = DeployConfigStatus.NEED_REDEPLOY
# Loading the parameter plugins that are available to the application # Loading the parameter plugins that are available to the application
self._call_stdio('start_loading', 'Search param plugin and load') self._call_stdio('start_loading', 'Search param plugin and load')
if not is_deployed or config_status == DeployConfigStatus.NEED_REDEPLOY: if not is_deployed or config_status == DeployConfigStatus.NEED_REDEPLOY:
...@@ -492,7 +528,7 @@ class ObdHome(object): ...@@ -492,7 +528,7 @@ class ObdHome(object):
continue continue
else: else:
return False return False
for component_name in deploy_config.components: for component_name in deploy_config.components:
if config_status == DeployConfigStatus.NEED_REDEPLOY: if config_status == DeployConfigStatus.NEED_REDEPLOY:
break break
...@@ -817,7 +853,7 @@ class ObdHome(object): ...@@ -817,7 +853,7 @@ class ObdHome(object):
self._call_stdio('error', 'Deploy configuration is empty.\nIt may be caused by a failure to resolve the configuration.\nPlease check your configuration file.') self._call_stdio('error', 'Deploy configuration is empty.\nIt may be caused by a failure to resolve the configuration.\nPlease check your configuration file.')
return False return False
# Check the best suitable mirror for the components and installation plguins. Install locally # Check the best suitable mirror for the components and installation plugins. Install locally
repositories, install_plugins = self.search_components_from_mirrors_and_install(deploy_config) repositories, install_plugins = self.search_components_from_mirrors_and_install(deploy_config)
if not install_plugins or not repositories: if not install_plugins or not repositories:
return False return False
...@@ -869,13 +905,13 @@ class ObdHome(object): ...@@ -869,13 +905,13 @@ class ObdHome(object):
if not deploy: if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name) self._call_stdio('error', 'No such deploy: %s.' % name)
return False return False
deploy_info = deploy.deploy_info deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Deploy status judge') self._call_stdio('verbose', 'Deploy status judge')
if deploy_info.status != DeployStatus.STATUS_RUNNING: if deploy_info.status != DeployStatus.STATUS_RUNNING:
self._call_stdio('error', 'Deploy "%s" not RUNNING' % (name)) self._call_stdio('error', 'Deploy "%s" not RUNNING' % (name))
return False return False
version = getattr(options, 'version', '') version = getattr(options, 'version', '')
if not version: if not version:
self._call_stdio('error', 'Use the --version option to specify the required OCP version.') self._call_stdio('error', 'Use the --version option to specify the required OCP version.')
...@@ -912,7 +948,7 @@ class ObdHome(object): ...@@ -912,7 +948,7 @@ class ObdHome(object):
new_deploy_config = None new_deploy_config = None
self._call_stdio('stop_loading', 'succeed') self._call_stdio('stop_loading', 'succeed')
# Get the client # Get the client
ssh_clients = self.get_clients(deploy_config, repositories) ssh_clients = self.get_clients(deploy_config, repositories)
if new_deploy_config and deploy_config.user.username != new_deploy_config.user.username: if new_deploy_config and deploy_config.user.username != new_deploy_config.user.username:
...@@ -928,11 +964,11 @@ class ObdHome(object): ...@@ -928,11 +964,11 @@ class ObdHome(object):
component_num -= 1 component_num -= 1
self._call_stdio('print', '%s No check plugin available.' % repository.name) self._call_stdio('print', '%s No check plugin available.' % repository.name)
continue continue
cluster_config = deploy_config.components[repository.name] cluster_config = deploy_config.components[repository.name]
new_cluster_config = new_deploy_config.components[repository.name] if new_deploy_config else None new_cluster_config = new_deploy_config.components[repository.name] if new_deploy_config else None
cluster_servers = cluster_config.servers cluster_servers = cluster_config.servers
self._call_stdio('verbose', 'Call %s for %s' % (connect_plugins[repository], repository)) self._call_stdio('verbose', 'Call %s for %s' % (connect_plugins[repository], repository))
ret = connect_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, '', options, self.stdio) ret = connect_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, '', options, self.stdio)
if ret: if ret:
...@@ -940,12 +976,12 @@ class ObdHome(object): ...@@ -940,12 +976,12 @@ class ObdHome(object):
cursor = ret.get_return('cursor') cursor = ret.get_return('cursor')
else: else:
break break
self._call_stdio('verbose', 'Call %s for %s' % (ocp_check[repository], repository)) self._call_stdio('verbose', 'Call %s for %s' % (ocp_check[repository], repository))
if ocp_check[repository](deploy_config.components.keys(), ssh_clients, cluster_config, '', options, self.stdio, cursor=cursor, ocp_version=version, new_cluster_config=new_cluster_config, new_clients=new_ssh_clients): if ocp_check[repository](deploy_config.components.keys(), ssh_clients, cluster_config, '', options, self.stdio, cursor=cursor, ocp_version=version, new_cluster_config=new_cluster_config, new_clients=new_ssh_clients):
component_num -= 1 component_num -= 1
self._call_stdio('print', '%s Check passed.' % repository.name) self._call_stdio('print', '%s Check passed.' % repository.name)
return component_num == 0 return component_num == 0
def change_deploy_config_style(self, name, options=Values()): def change_deploy_config_style(self, name, options=Values()):
...@@ -954,7 +990,7 @@ class ObdHome(object): ...@@ -954,7 +990,7 @@ class ObdHome(object):
if not deploy: if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name) self._call_stdio('error', 'No such deploy: %s.' % name)
return False return False
deploy_info = deploy.deploy_info deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Deploy config status judge') self._call_stdio('verbose', 'Deploy config status judge')
if deploy_info.config_status != DeployConfigStatus.UNCHNAGE: if deploy_info.config_status != DeployConfigStatus.UNCHNAGE:
...@@ -995,11 +1031,10 @@ class ObdHome(object): ...@@ -995,11 +1031,10 @@ class ObdHome(object):
return True return True
except Exception as e: except Exception as e:
self._call_stdio('exception', e) self._call_stdio('exception', e)
self._call_stdio('stop_loading', 'fail') self._call_stdio('stop_loading', 'fail')
return False return False
def deploy_cluster(self, name, opt=Values()): def deploy_cluster(self, name, opt=Values()):
self._call_stdio('verbose', 'Get Deploy by name') self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name) deploy = self.deploy_manager.get_deploy_config(name)
...@@ -1015,7 +1050,7 @@ class ObdHome(object): ...@@ -1015,7 +1050,7 @@ class ObdHome(object):
if not deploy.apply_temp_deploy_config(): if not deploy.apply_temp_deploy_config():
self._call_stdio('error', 'Failed to apply new deploy configuration') self._call_stdio('error', 'Failed to apply new deploy configuration')
return False return False
config_path = getattr(opt, 'config', '') config_path = getattr(opt, 'config', '')
unuse_lib_repo = getattr(opt, 'unuselibrepo', False) unuse_lib_repo = getattr(opt, 'unuselibrepo', False)
auto_create_tenant = getattr(opt, 'auto_create_tenant', False) auto_create_tenant = getattr(opt, 'auto_create_tenant', False)
...@@ -1026,10 +1061,10 @@ class ObdHome(object): ...@@ -1026,10 +1061,10 @@ class ObdHome(object):
if not deploy: if not deploy:
self._call_stdio('error', 'Failed to create deploy: %s. please check you configuration file' % name) self._call_stdio('error', 'Failed to create deploy: %s. please check you configuration file' % name)
return False return False
if not deploy: if not deploy:
self._call_stdio('error', 'No such deploy: %s. you can input configuration path to create a new deploy' % name) self._call_stdio('error', 'No such deploy: %s. you can input configuration path to create a new deploy' % name)
return False return False
self._call_stdio('verbose', 'Get deploy configuration') self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config deploy_config = deploy.deploy_config
if not deploy_config: if not deploy_config:
...@@ -1045,8 +1080,28 @@ class ObdHome(object): ...@@ -1045,8 +1080,28 @@ class ObdHome(object):
self._call_stdio('error', '%s\'s servers list is empty.' % component_name) self._call_stdio('error', '%s\'s servers list is empty.' % component_name)
return False return False
# Check the best suitable mirror for the components and installation plguins. Install locally if self.dev_mode:
base_dir = COMMAND_ENV.get(BASE_DIR_KEY, '')
deploy_config.enable_cp_install_mode(save=False)
else:
base_dir = ''
deploy_config.enable_ln_install_mode(save=False)
deploy_config.set_base_dir(base_dir, save=False)
# Check the best suitable mirror for the components and installation plugins. Install locally
repositories, install_plugins = self.search_components_from_mirrors_and_install(deploy_config) repositories, install_plugins = self.search_components_from_mirrors_and_install(deploy_config)
if not repositories or not install_plugins:
return False
if unuse_lib_repo and not deploy_config.unuse_lib_repository:
deploy_config.set_unuse_lib_repository(True)
if auto_create_tenant and not deploy_config.auto_create_tenant:
deploy_config.set_auto_create_tenant(True)
return self._deploy_cluster(deploy, repositories, opt)
def _deploy_cluster(self, deploy, repositories, opt=Values()):
deploy_config = deploy.deploy_config
install_plugins = self.search_plugins(repositories, PluginType.INSTALL)
if not install_plugins: if not install_plugins:
return False return False
...@@ -1062,7 +1117,7 @@ class ObdHome(object): ...@@ -1062,7 +1117,7 @@ class ObdHome(object):
self._call_stdio('start_loading', 'Repository integrity check') self._call_stdio('start_loading', 'Repository integrity check')
for repository in repositories: for repository in repositories:
if not repository.file_check(install_plugins[repository]): if not repository.file_check(install_plugins[repository]):
errors.append('%s intstall failed' % repository.name) errors.append('%s install failed' % repository.name)
if errors: if errors:
self._call_stdio('stop_loading', 'fail') self._call_stdio('stop_loading', 'fail')
self._call_stdio('error', '\n'.join(errors)) self._call_stdio('error', '\n'.join(errors))
...@@ -1081,23 +1136,103 @@ class ObdHome(object): ...@@ -1081,23 +1136,103 @@ class ObdHome(object):
self._call_stdio('error', '\n'.join(errors)) self._call_stdio('error', '\n'.join(errors))
return False return False
self._call_stdio('stop_loading', 'succeed') self._call_stdio('stop_loading', 'succeed')
if unuse_lib_repo and not deploy_config.unuse_lib_repository:
deploy_config.set_unuse_lib_repository(True)
if auto_create_tenant and not deploy_config.auto_create_tenant:
deploy_config.set_auto_create_tenant(True)
# Get the client # Get the client
ssh_clients = self.get_clients(deploy_config, repositories) ssh_clients = self.get_clients(deploy_config, repositories)
# Check the status for the deployed cluster
if not getattr(opt, 'skip_cluster_status_check', False):
component_status = {}
cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status)
if cluster_status is False or cluster_status == 1:
if self.stdio:
self._call_stdio('error', 'Some of the servers in the cluster have been started')
for repository in component_status:
cluster_status = component_status[repository]
for server in cluster_status:
if cluster_status[server] == 1:
self._call_stdio('print', '%s %s is started' % (server, repository.name))
return False
self._call_stdio('verbose', 'Search init plugin')
init_plugins = self.search_py_script_plugin(repositories, 'init')
component_num = len(repositories)
for repository in repositories:
cluster_config = deploy_config.components[repository.name]
init_plugin = init_plugins[repository]
self._call_stdio('verbose', 'Exec %s init plugin' % repository)
self._call_stdio('verbose', 'Apply %s for %s-%s' % (init_plugin, repository.name, repository.version))
if init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opt, self.stdio, self.home_path, repository.repository_dir):
component_num -= 1
if component_num != 0:
return False
# Install repository to servers
if not self.install_repositories_to_servers(deploy_config, repositories, install_plugins, ssh_clients, opt):
return False
# Sync runtime dependencies
if not self.sync_runtime_dependencies(deploy_config, repositories, ssh_clients, opt):
return False
for repository in repositories:
deploy.use_model(repository.name, repository, False)
if deploy.update_deploy_status(DeployStatus.STATUS_DEPLOYED) and deploy_config.dump():
self._call_stdio('print', '%s deployed' % deploy.name)
return True
return False
def install_repository_to_servers(self, components, cluster_config, repository, ssh_clients, options=Values(), unuse_lib_repository=False):
install_repo_plugin = self.plugin_manager.get_best_py_script_plugin('install_repo', 'general', '0.1')
install_plugins = self.search_plugins([repository], PluginType.INSTALL)
if not install_plugins:
return False
install_plugin = install_plugins[repository]
check_file_map = install_plugin.file_map(repository)
ret = install_repo_plugin(components, ssh_clients, cluster_config, [], options, self.stdio,
obd_home=self.home_path, install_repository=repository,
install_plugin=install_plugin, check_repository=repository,
check_file_map=check_file_map,
msg_lv='error' if unuse_lib_repository else 'warn')
if not ret:
return False
elif ret.get_return('checked'):
return True
elif unuse_lib_repository:
return False
self._call_stdio('print', 'Try to get lib-repository')
repositories_lib_map = self.install_lib_for_repositories([repository])
if repositories_lib_map is False:
self._call_stdio('error', 'Failed to install lib package for local')
return False
lib_repository = repositories_lib_map[repository]['repositories']
install_plugin = repositories_lib_map[repository]['install_plugin']
ret = install_repo_plugin(components, ssh_clients, cluster_config, [], options,
self.stdio,
obd_home=self.home_path, install_repository=lib_repository,
install_plugin=install_plugin, check_repository=repository,
check_file_map=check_file_map, msg_lv='error')
if not ret or not ret.get_return('checked'):
self._call_stdio('error', 'Failed to install lib package for cluster servers')
return False
def install_repositories_to_servers(self, deploy_config, repositories, install_plugins, ssh_clients, options):
install_repo_plugin = self.plugin_manager.get_best_py_script_plugin('install_repo', 'general', '0.1')
check_file_maps = {}
need_lib_repositories = [] need_lib_repositories = []
for repository in repositories: for repository in repositories:
cluster_config = deploy_config.components[repository.name] cluster_config = deploy_config.components[repository.name]
# cluster files check install_plugin = install_plugins[repository]
self.servers_repository_install(ssh_clients, cluster_config.servers, repository, install_plugins[repository]) check_file_map = check_file_maps[repository] = install_plugin.file_map(repository)
# lib check ret = install_repo_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio,
msg_lv = 'error' if deploy_config.unuse_lib_repository else 'warn' obd_home=self.home_path, install_repository=repository,
if not self.servers_repository_lib_check(ssh_clients, cluster_config.servers, repository, install_plugins[repository], msg_lv): install_plugin=install_plugin, check_repository=repository,
check_file_map=check_file_map,
msg_lv='error' if deploy_config.unuse_lib_repository else 'warn')
if not ret:
return False
if not ret.get_return('checked'):
need_lib_repositories.append(repository) need_lib_repositories.append(repository)
if need_lib_repositories: if need_lib_repositories:
...@@ -1109,39 +1244,28 @@ class ObdHome(object): ...@@ -1109,39 +1244,28 @@ class ObdHome(object):
if repositories_lib_map is False: if repositories_lib_map is False:
self._call_stdio('error', 'Failed to install lib package for local') self._call_stdio('error', 'Failed to install lib package for local')
return False return False
if self.servers_apply_lib_repository_and_check(ssh_clients, deploy_config, need_lib_repositories, repositories_lib_map): for need_lib_repository in need_lib_repositories:
self._call_stdio('error', 'Failed to install lib package for cluster servers') cluster_config = deploy_config.components[need_lib_repository.name]
return False check_file_map = check_file_maps[need_lib_repository]
lib_repository = repositories_lib_map[need_lib_repository]['repositories']
install_plugin = repositories_lib_map[need_lib_repository]['install_plugin']
ret = install_repo_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], options,
self.stdio,
obd_home=self.home_path, install_repository=lib_repository,
install_plugin=install_plugin, check_repository=need_lib_repository,
check_file_map=check_file_map, msg_lv='error')
if not ret or not ret.get_return('checked'):
self._call_stdio('error', 'Failed to install lib package for cluster servers')
return False
return True
# Check the status for the deployed cluster def sync_runtime_dependencies(self, deploy_config, repositories, ssh_clients, option):
component_status = {} rsync_plugin = self.plugin_manager.get_best_py_script_plugin('rsync', 'general', '0.1')
cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status) ret = True
if cluster_status is False or cluster_status == 1:
if self.stdio:
self._call_stdio('error', 'Some of the servers in the cluster have been started')
for repository in component_status:
cluster_status = component_status[repository]
for server in cluster_status:
if cluster_status[server] == 1:
self._call_stdio('print', '%s %s is started' % (server, repository.name))
return False
self._call_stdio('verbose', 'Search init plugin')
init_plugins = self.search_py_script_plugin(repositories, 'init')
component_num = len(repositories)
for repository in repositories: for repository in repositories:
cluster_config = deploy_config.components[repository.name] cluster_config = deploy_config.components[repository.name]
init_plugin = init_plugins[repository] ret = rsync_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], option, self.stdio) and ret
self._call_stdio('verbose', 'Exec %s init plugin' % repository) return ret
self._call_stdio('verbose', 'Apply %s for %s-%s' % (init_plugin, repository.name, repository.version))
if init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opt, self.stdio, self.home_path, repository.repository_dir):
deploy.use_model(repository.name, repository, False)
component_num -= 1
if component_num == 0 and deploy.update_deploy_status(DeployStatus.STATUS_DEPLOYED):
self._call_stdio('print', '%s deployed' % name)
return True
return False
def start_cluster(self, name, cmd=[], options=Values()): def start_cluster(self, name, cmd=[], options=Values()):
self._call_stdio('verbose', 'Get Deploy by name') self._call_stdio('verbose', 'Get Deploy by name')
...@@ -1163,8 +1287,18 @@ class ObdHome(object): ...@@ -1163,8 +1287,18 @@ class ObdHome(object):
self._call_stdio('error', 'Deploy %s.%s\nIf you still need to start the cluster, use the `obd cluster start %s --wop` option to start the cluster without loading parameters. ' % (deploy_info.config_status.value, deploy.effect_tip(), name)) self._call_stdio('error', 'Deploy %s.%s\nIf you still need to start the cluster, use the `obd cluster start %s --wop` option to start the cluster without loading parameters. ' % (deploy_info.config_status.value, deploy.effect_tip(), name))
return False return False
self._call_stdio('start_loading', 'Get local repositories')
# Get the repository
repositories = self.load_local_repositories(deploy_info, False)
self._call_stdio('stop_loading', 'succeed')
return self._start_cluster(deploy, repositories, cmd, options)
def _start_cluster(self, deploy, repositories, cmd=None, options=Values()):
self._call_stdio('verbose', 'Get deploy config') self._call_stdio('verbose', 'Get deploy config')
deploy_config = deploy.deploy_config deploy_config = deploy.deploy_config
deploy_info = deploy.deploy_info
name = deploy.name
update_deploy_status = True update_deploy_status = True
components = getattr(options, 'components', '') components = getattr(options, 'components', '')
...@@ -1182,11 +1316,7 @@ class ObdHome(object): ...@@ -1182,11 +1316,7 @@ class ObdHome(object):
servers = getattr(options, 'servers', '') servers = getattr(options, 'servers', '')
server_list = servers.split(',') if servers else [] server_list = servers.split(',') if servers else []
self._call_stdio('start_loading', 'Get local repositories and plugins') self._call_stdio('start_loading', 'Search plugins')
# Get the repository
repositories = self.load_local_repositories(deploy_info, False)
start_check_plugins = self.search_py_script_plugin(repositories, 'start_check', no_found_act='warn') start_check_plugins = self.search_py_script_plugin(repositories, 'start_check', no_found_act='warn')
create_tenant_plugins = self.search_py_script_plugin(repositories, 'create_tenant', no_found_act='ignore') if deploy_config.auto_create_tenant else {} create_tenant_plugins = self.search_py_script_plugin(repositories, 'create_tenant', no_found_act='ignore') if deploy_config.auto_create_tenant else {}
start_plugins = self.search_py_script_plugin(repositories, 'start') start_plugins = self.search_py_script_plugin(repositories, 'start')
...@@ -1553,8 +1683,18 @@ class ObdHome(object): ...@@ -1553,8 +1683,18 @@ class ObdHome(object):
if deploy_info.status not in status: if deploy_info.status not in status:
self._call_stdio('error', 'Deploy "%s" is %s. You could not stop an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value)) self._call_stdio('error', 'Deploy "%s" is %s. You could not stop an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value))
return False return False
self._call_stdio('start_loading', 'Get local repositories')
# Get the repository
repositories = self.load_local_repositories(deploy_info)
self._call_stdio('stop_loading', 'succeed')
return self._stop_cluster(deploy, repositories, options)
def _stop_cluster(self, deploy, repositories, options=Values()):
self._call_stdio('verbose', 'Get deploy config') self._call_stdio('verbose', 'Get deploy config')
deploy_config = deploy.deploy_config deploy_config = deploy.deploy_config
deploy_info = deploy.deploy_info
name = deploy.name
update_deploy_status = True update_deploy_status = True
components = getattr(options, 'components', '') components = getattr(options, 'components', '')
...@@ -1572,10 +1712,7 @@ class ObdHome(object): ...@@ -1572,10 +1712,7 @@ class ObdHome(object):
servers = getattr(options, 'servers', '') servers = getattr(options, 'servers', '')
server_list = servers.split(',') if servers else [] server_list = servers.split(',') if servers else []
self._call_stdio('start_loading', 'Get local repositories and plugins') self._call_stdio('start_loading', 'Search plugins')
# Get the repository
repositories = self.load_local_repositories(deploy_info)
# Check whether the components have the parameter plugins and apply the plugins # Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config) self.search_param_plugin_and_apply(repositories, deploy_config)
...@@ -1657,7 +1794,7 @@ class ObdHome(object): ...@@ -1657,7 +1794,7 @@ class ObdHome(object):
self._call_stdio('stop_loading', 'succeed') self._call_stdio('stop_loading', 'succeed')
update_deploy_status = True update_deploy_status = True
components = getattr(options, 'components', '') components = getattr(options, 'components', '')
if components: if components:
components = components.split(',') components = components.split(',')
...@@ -1675,7 +1812,7 @@ class ObdHome(object): ...@@ -1675,7 +1812,7 @@ class ObdHome(object):
servers = getattr(options, 'servers', '') servers = getattr(options, 'servers', '')
if servers: if servers:
server_list = servers.split(',') server_list = servers.split(',')
if apply_change: if apply_change:
for repository in repositories: for repository in repositories:
cluster_config = deploy_config.components[repository.name] cluster_config = deploy_config.components[repository.name]
...@@ -1739,13 +1876,13 @@ class ObdHome(object): ...@@ -1739,13 +1876,13 @@ class ObdHome(object):
if restart_plugins[repository]( if restart_plugins[repository](
deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio,
local_home_path=self.home_path, local_home_path=self.home_path,
start_plugin=start_plugins[repository], start_plugin=start_plugins[repository],
reload_plugin=reload_plugins[repository], reload_plugin=reload_plugins[repository],
stop_plugin=stop_plugins[repository], stop_plugin=stop_plugins[repository],
connect_plugin=connect_plugins[repository], connect_plugin=connect_plugins[repository],
display_plugin=display_plugins[repository], display_plugin=display_plugins[repository],
repository=repository, repository=repository,
new_cluster_config=new_cluster_config, new_cluster_config=new_cluster_config,
new_clients=new_ssh_clients new_clients=new_ssh_clients
): ):
component_num -= 1 component_num -= 1
...@@ -1755,7 +1892,7 @@ class ObdHome(object): ...@@ -1755,7 +1892,7 @@ class ObdHome(object):
deploy_config.update_component(new_cluster_config) deploy_config.update_component(new_cluster_config)
else: else:
break break
if component_num == 0: if component_num == 0:
if len(components) != len(repositories) or servers: if len(components) != len(repositories) or servers:
self._call_stdio('print', "succeed") self._call_stdio('print', "succeed")
...@@ -1779,13 +1916,13 @@ class ObdHome(object): ...@@ -1779,13 +1916,13 @@ class ObdHome(object):
if restart_plugins[repository]( if restart_plugins[repository](
deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio,
local_home_path=self.home_path, local_home_path=self.home_path,
start_plugin=start_plugins[repository], start_plugin=start_plugins[repository],
reload_plugin=reload_plugins[repository], reload_plugin=reload_plugins[repository],
stop_plugin=stop_plugins[repository], stop_plugin=stop_plugins[repository],
connect_plugin=connect_plugins[repository], connect_plugin=connect_plugins[repository],
display_plugin=display_plugins[repository], display_plugin=display_plugins[repository],
repository=repository, repository=repository,
new_cluster_config=new_cluster_config, new_cluster_config=new_cluster_config,
new_clients=new_ssh_clients, new_clients=new_ssh_clients,
rollback=True, rollback=True,
bootstrap_plugin=bootstrap_plugins[repository], bootstrap_plugin=bootstrap_plugins[repository],
...@@ -1795,37 +1932,82 @@ class ObdHome(object): ...@@ -1795,37 +1932,82 @@ class ObdHome(object):
self._call_stdio('stop_loading', 'succeed') self._call_stdio('stop_loading', 'succeed')
return False return False
def redeploy_cluster(self, name, opt=Values()): def redeploy_cluster(self, name, opt=Values(), search_repo=True):
return self.destroy_cluster(name, opt) and self.deploy_cluster(name) and self.start_cluster(name)
def destroy_cluster(self, name, opt=Values()):
self._call_stdio('verbose', 'Get Deploy by name') self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name) deploy = self.deploy_manager.get_deploy_config(name)
if not deploy: if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name) self._call_stdio('error', 'No such deploy: %s.' % name)
return False return False
deploy_info = deploy.deploy_info deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
self._call_stdio('start_loading', 'Get local repositories')
# Get the repository
repositories = self.load_local_repositories(deploy_info)
self._call_stdio('stop_loading', 'succeed')
self._call_stdio('verbose', 'Check deploy status') self._call_stdio('verbose', 'Check deploy status')
if deploy_info.status in [DeployStatus.STATUS_RUNNING, DeployStatus.STATUS_UPRADEING]: if deploy_info.status in [DeployStatus.STATUS_RUNNING, DeployStatus.STATUS_UPRADEING]:
if not self.stop_cluster(name, Values({'force': True})): if not self._stop_cluster(deploy, repositories, options=Values({'force': True})):
return False return False
elif deploy_info.status not in [DeployStatus.STATUS_STOPPED, DeployStatus.STATUS_DEPLOYED]: elif deploy_info.status not in [DeployStatus.STATUS_STOPPED, DeployStatus.STATUS_DEPLOYED]:
self._call_stdio('error', 'Deploy "%s" is %s. You could not destroy an undeployed cluster' % (name, deploy_info.status.value)) self._call_stdio('error', 'Deploy "%s" is %s. You could not destroy an undeployed cluster' % (
name, deploy_info.status.value))
return False
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
if not self._destroy_cluster(deploy, repositories, opt):
return False
if search_repo:
if deploy_info.config_status != DeployConfigStatus.UNCHNAGE and not deploy.apply_temp_deploy_config():
self._call_stdio('error', 'Failed to apply new deploy configuration')
return False
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
repositories, install_plugins = self.search_components_from_mirrors_and_install(deploy_config)
if not repositories or not install_plugins:
return False
return self._deploy_cluster(deploy, repositories, opt) and self._start_cluster(deploy, repositories)
def destroy_cluster(self, name, opt=Values()):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False return False
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Get deploy configuration') self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config deploy_config = deploy.deploy_config
# allow included file not exist
deploy_config.allow_include_error()
self._call_stdio('start_loading', 'Get local repositories and plugins') self._call_stdio('start_loading', 'Get local repositories')
# Get the repository # Get the repository
repositories = self.load_local_repositories(deploy_info) repositories = self.load_local_repositories(deploy_info)
self._call_stdio('stop_loading', 'succeed')
self._call_stdio('verbose', 'Check deploy status')
if deploy_info.status in [DeployStatus.STATUS_RUNNING, DeployStatus.STATUS_UPRADEING]:
if not self._stop_cluster(deploy, repositories, Values({'force': True})):
return False
elif deploy_info.status not in [DeployStatus.STATUS_STOPPED, DeployStatus.STATUS_DEPLOYED]:
self._call_stdio('error', 'Deploy "%s" is %s. You could not destroy an undeployed cluster' % (name, deploy_info.status.value))
return False
# Check whether the components have the parameter plugins and apply the plugins # Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config) self.search_param_plugin_and_apply(repositories, deploy_config)
return self._destroy_cluster(deploy, repositories, opt)
def _destroy_cluster(self, deploy, repositories, opt=Values()):
deploy_config = deploy.deploy_config
self._call_stdio('start_loading', 'Search plugins')
# Get the repository
plugins = self.search_py_script_plugin(repositories, 'destroy') plugins = self.search_py_script_plugin(repositories, 'destroy')
self._call_stdio('stop_loading', 'succeed') self._call_stdio('stop_loading', 'succeed')
# Get the client # Get the client
ssh_clients = self.get_clients(deploy_config, repositories) ssh_clients = self.get_clients(deploy_config, repositories)
...@@ -1837,7 +2019,7 @@ class ObdHome(object): ...@@ -1837,7 +2019,7 @@ class ObdHome(object):
self._call_stdio('verbose', 'Try to stop cluster') self._call_stdio('verbose', 'Try to stop cluster')
status = deploy.deploy_info.status status = deploy.deploy_info.status
deploy.update_deploy_status(DeployStatus.STATUS_RUNNING) deploy.update_deploy_status(DeployStatus.STATUS_RUNNING)
if not self.stop_cluster(name): if not self._stop_cluster(deploy, repositories):
deploy.update_deploy_status(status) deploy.update_deploy_status(status)
self._call_stdio('error', 'Fail to stop cluster') self._call_stdio('error', 'Fail to stop cluster')
return False return False
...@@ -1857,13 +2039,13 @@ class ObdHome(object): ...@@ -1857,13 +2039,13 @@ class ObdHome(object):
self._call_stdio('verbose', 'Call %s for %s' % (plugins[repository], repository)) self._call_stdio('verbose', 'Call %s for %s' % (plugins[repository], repository))
plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio) plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio)
self._call_stdio('verbose', 'Set %s deploy status to destroyed' % name) self._call_stdio('verbose', 'Set %s deploy status to destroyed' % deploy.name)
if deploy.update_deploy_status(DeployStatus.STATUS_DESTROYED): if deploy.update_deploy_status(DeployStatus.STATUS_DESTROYED):
self._call_stdio('print', '%s destroyed' % name) self._call_stdio('print', '%s destroyed' % deploy.name)
return True return True
return False return False
def change_repository(self, name, options=Values()): def reinstall(self, name, options=Values()):
self._call_stdio('verbose', 'Get Deploy by name') self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name) deploy = self.deploy_manager.get_deploy_config(name)
if not deploy: if not deploy:
...@@ -1879,10 +2061,7 @@ class ObdHome(object): ...@@ -1879,10 +2061,7 @@ class ObdHome(object):
component = getattr(options, 'component') component = getattr(options, 'component')
usable = getattr(options, 'hash') usable = getattr(options, 'hash')
if not component: if not component:
self._call_stdio('error', 'Specify the components you want to change.') self._call_stdio('error', 'Specify the components you want to reinstall.')
return False
if not usable:
self._call_stdio('error', 'Specify the hash you want to upgrade.')
return False return False
if component not in deploy_info.components: if component not in deploy_info.components:
self._call_stdio('error', 'Not found %s in Deploy "%s" ' % (component, name)) self._call_stdio('error', 'Not found %s in Deploy "%s" ' % (component, name))
...@@ -1899,77 +2078,81 @@ class ObdHome(object): ...@@ -1899,77 +2078,81 @@ class ObdHome(object):
stop_plugins = self.search_py_script_plugin([current_repository], 'stop') stop_plugins = self.search_py_script_plugin([current_repository], 'stop')
start_plugins = self.search_py_script_plugin([current_repository], 'start') start_plugins = self.search_py_script_plugin([current_repository], 'start')
change_repo_plugin = self.plugin_manager.get_best_py_script_plugin('change_repo', 'general', '0.1')
self._call_stdio('stop_loading', 'succeed')
self._call_stdio('verbose', 'search target repository')
dest_repository = self.repository_manager.get_repository(current_repository.name, version=current_repository.version, tag=usable)
if not dest_repository:
pkg = self.mirror_manager.get_exact_pkg(name=current_repository.name, version=current_repository.version, md5=usable)
if not pkg:
self._call_stdio('error', 'No such package %s-%s-%s' % (component, current_repository.version, usable))
return False
repositories = []
install_plugins = self.get_install_plugin_and_install(repositories, [pkg])
if not install_plugins:
return False
dest_repository = repositories[0]
else:
install_plugins = self.search_plugins([dest_repository], PluginType.INSTALL)
if dest_repository is None: self._call_stdio('stop_loading', 'succeed')
self._call_stdio('error', 'Target version not found')
return False
if dest_repository == current_repository:
self._call_stdio('print', 'The current version is already %s.\nNoting to do.' % current_repository)
return False
# Get the client # Get the client
ssh_clients = self.get_clients(deploy_config, [current_repository]) ssh_clients = self.get_clients(deploy_config, [current_repository])
cluster_config = deploy_config.components[current_repository.name]
self._call_stdio('start_loading', 'Load cluster param plugin') current_cluster_config = deploy_config.components[current_repository.name]
# Check whether the components have the parameter plugins and apply the plugins need_sync = bool(current_cluster_config.get_rsync_list())
self.search_param_plugin_and_apply(repositories, deploy_config) need_change_repo = bool(usable)
self._call_stdio('stop_loading', 'succeed') sync_repositories = [current_repository]
repository = current_repository
cluster_config = current_cluster_config
# search repo and install
if usable:
self._call_stdio('verbose', 'search target repository')
dest_repository = self.repository_manager.get_repository(current_repository.name, version=current_repository.version, tag=usable)
if not dest_repository:
pkg = self.mirror_manager.get_exact_pkg(name=current_repository.name, version=current_repository.version, md5=usable)
if not pkg:
self._call_stdio('error', 'No such package %s-%s-%s' % (component, current_repository.version, usable))
return False
repositories = []
install_plugins = self.get_install_plugin_and_install(repositories, [pkg])
if not install_plugins:
return False
dest_repository = repositories[0]
else:
install_plugins = self.search_plugins([dest_repository], PluginType.INSTALL)
cluster_config = deploy_config.components[dest_repository.name] if dest_repository is None:
# cluster files check self._call_stdio('error', 'Target version not found')
self.servers_repository_install(ssh_clients, cluster_config.servers, dest_repository, install_plugins[dest_repository])
# lib check
if not self.servers_repository_lib_check(ssh_clients, cluster_config.servers, dest_repository, install_plugins[dest_repository], 'warn'):
self._call_stdio('print', 'Try to get lib-repository')
repositories_lib_map = self.install_lib_for_repositories([dest_repository])
if repositories_lib_map is False:
self._call_stdio('error', 'Failed to install lib package for local')
return False
if self.servers_apply_lib_repository_and_check(ssh_clients, deploy_config, [dest_repository], repositories_lib_map):
self._call_stdio('error', 'Failed to install lib package for cluster servers')
return False return False
if dest_repository == current_repository:
self._call_stdio('print', 'The current version is already %s.\nNoting to do.' % current_repository)
need_change_repo = False
else:
self._call_stdio('start_loading', 'Load cluster param plugin')
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
self._call_stdio('stop_loading', 'succeed')
cluster_config = deploy_config.components[dest_repository.name]
need_restart = need_sync or need_change_repo
# stop cluster if needed
if need_restart:
# Check the status for the deployed cluster
component_status = {}
cluster_status = self.cluster_status_check(ssh_clients, deploy_config, [current_repository], component_status)
if cluster_status is False or cluster_status == 1:
self._call_stdio('verbose', 'Call %s for %s' % (stop_plugins[current_repository], current_repository))
if not stop_plugins[current_repository](deploy_config.components.keys(), ssh_clients, current_cluster_config, [], options, self.stdio):
return False
# Check the status for the deployed cluster # install repo to remote servers
component_status = {} if need_change_repo:
cluster_status = self.cluster_status_check(ssh_clients, deploy_config, [current_repository], component_status) if not self.install_repositories_to_servers(deploy_config, [dest_repository, ], install_plugins, ssh_clients, options):
if cluster_status is False or cluster_status == 1:
self._call_stdio('verbose', 'Call %s for %s' % (stop_plugins[current_repository], current_repository))
if not stop_plugins[current_repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio):
return False return False
sync_repositories = [dest_repository]
repository = dest_repository
self._call_stdio('verbose', 'Call %s for %s' % (change_repo_plugin, dest_repository)) # sync runtime dependencies
if not change_repo_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, self.home_path, dest_repository): if not self.sync_runtime_dependencies(deploy_config, sync_repositories, ssh_clients, options):
return False return False
if deploy_info.status == DeployStatus.STATUS_RUNNING: # start cluster if needed
self._call_stdio('verbose', 'Call %s for %s' % (start_plugins[current_repository], dest_repository)) if need_restart and deploy_info.status == DeployStatus.STATUS_RUNNING:
self._call_stdio('verbose', 'Call %s for %s' % (start_plugins[current_repository], repository))
setattr(options, 'without_parameter', True) setattr(options, 'without_parameter', True)
if not start_plugins[current_repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, self.home_path, dest_repository.repository_dir) and getattr(options, 'force', False) is False: if not start_plugins[current_repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, self.home_path, repository.repository_dir) and getattr(options, 'force', False) is False:
self._call_stdio('verbose', 'Call %s for %s' % (change_repo_plugin, current_repository)) self.install_repositories_to_servers(deploy_config, [current_repository, ], install_plugins, ssh_clients, options)
change_repo_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, self.home_path, current_repository)
return False return False
deploy.update_component_repository(dest_repository) # update deploy info
if need_change_repo:
deploy.use_model(dest_repository.name, dest_repository)
return True return True
def upgrade_cluster(self, name, options=Values()): def upgrade_cluster(self, name, options=Values()):
...@@ -1978,13 +2161,13 @@ class ObdHome(object): ...@@ -1978,13 +2161,13 @@ class ObdHome(object):
if not deploy: if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name) self._call_stdio('error', 'No such deploy: %s.' % name)
return False return False
deploy_info = deploy.deploy_info deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Deploy status judge') self._call_stdio('verbose', 'Deploy status judge')
if deploy_info.status not in [DeployStatus.STATUS_UPRADEING, DeployStatus.STATUS_RUNNING]: if deploy_info.status not in [DeployStatus.STATUS_UPRADEING, DeployStatus.STATUS_RUNNING]:
self._call_stdio('error', 'Deploy "%s" is %s' % (name, deploy_info.status.value)) self._call_stdio('error', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
return False return False
deploy_config = deploy.deploy_config deploy_config = deploy.deploy_config
self._call_stdio('start_loading', 'Get local repositories and plugins') self._call_stdio('start_loading', 'Get local repositories and plugins')
...@@ -2037,9 +2220,9 @@ class ObdHome(object): ...@@ -2037,9 +2220,9 @@ class ObdHome(object):
self._call_stdio( self._call_stdio(
'print_list', 'print_list',
images, images,
['name', 'version', 'release', 'arch', 'md5'], ['name', 'version', 'release', 'arch', 'md5'],
lambda x: [x.name, x.version, x.release, x.arch, x.md5], lambda x: [x.name, x.version, x.release, x.arch, x.md5],
title='%s %s Candidates' % (component, version) title='%s %s Candidates' % (component, version)
) )
self._call_stdio('error', 'Too many match') self._call_stdio('error', 'Too many match')
return False return False
...@@ -2056,11 +2239,11 @@ class ObdHome(object): ...@@ -2056,11 +2239,11 @@ class ObdHome(object):
repositories = [] repositories = []
pkg = self.mirror_manager.get_exact_pkg(name=images[0].name, md5=images[0].md5) pkg = self.mirror_manager.get_exact_pkg(name=images[0].name, md5=images[0].md5)
pkgs = [pkg] pkgs = [pkg]
install_plugins = self.get_install_plugin_and_install(repositories, pkgs) install_plugins = self.get_install_plugin_and_install(repositories, pkgs)
if not install_plugins: if not install_plugins:
return False return False
dest_repository = repositories[0] dest_repository = repositories[0]
if dest_repository is None: if dest_repository is None:
self._call_stdio('error', 'Target version not found') self._call_stdio('error', 'Target version not found')
...@@ -2082,17 +2265,24 @@ class ObdHome(object): ...@@ -2082,17 +2265,24 @@ class ObdHome(object):
if not route: if not route:
return False return False
for node in route[1: -1]: for node in route[1: -1]:
images = self.search_images(component, version=node.get('version'), release=node.get('release'), disable=disable, usable=usable, release_first=True) _version = node.get('version')
_release = node.get('release')
images = self.search_images(component, version=_version, release=_release, disable=disable, usable=usable, release_first=True)
if not images: if not images:
self._call_stdio('error', 'No such package %s-%s' % (component, version)) pkg_name = component
if _version:
pkg_name = pkg_name + '-' + str(_version)
if _release:
pkg_name = pkg_name + '-' + str(_release)
self._call_stdio('error', 'No such package %s' % pkg_name)
return False return False
if len(images) > 1: if len(images) > 1:
self._call_stdio( self._call_stdio(
'print_list', 'print_list',
images, images,
['name', 'version', 'release', 'arch', 'md5'], ['name', 'version', 'release', 'arch', 'md5'],
lambda x: [x.name, x.version, x.release, x.arch, x.md5], lambda x: [x.name, x.version, x.release, x.arch, x.md5],
title='%s %s Candidates' % (component, version) title='%s %s Candidates' % (component, version)
) )
self._call_stdio('error', 'Too many match') self._call_stdio('error', 'Too many match')
return False return False
...@@ -2106,7 +2296,7 @@ class ObdHome(object): ...@@ -2106,7 +2296,7 @@ class ObdHome(object):
if isinstance(image, Repository): if isinstance(image, Repository):
upgrade_repositories.append(image) upgrade_repositories.append(image)
else: else:
repository = self.repository_manager.get_repository_by_version(name=image.name, version=image.version, tag=image.md5) repository = self.repository_manager.get_repository(name=image.name, version=image.version, package_hash=image.md5)
if repository: if repository:
upgrade_repositories.append(repository) upgrade_repositories.append(repository)
else: else:
...@@ -2132,7 +2322,7 @@ class ObdHome(object): ...@@ -2132,7 +2322,7 @@ class ObdHome(object):
return False return False
self._call_stdio('verbose', 'Call %s for %s' % (upgrade_check_plugins[current_repository], current_repository)) self._call_stdio('verbose', 'Call %s for %s' % (upgrade_check_plugins[current_repository], current_repository))
if not upgrade_check_plugins[current_repository]( if not upgrade_check_plugins[current_repository](
deploy_config.components.keys(), ssh_clients, cluster_config, {}, options, self.stdio, deploy_config.components.keys(), ssh_clients, cluster_config, {}, options, self.stdio,
current_repository=current_repository, current_repository=current_repository,
repositories=upgrade_repositories, repositories=upgrade_repositories,
route=route, route=route,
...@@ -2145,17 +2335,17 @@ class ObdHome(object): ...@@ -2145,17 +2335,17 @@ class ObdHome(object):
self._call_stdio( self._call_stdio(
'print_list', 'print_list',
upgrade_repositories, upgrade_repositories,
['name', 'version', 'release', 'arch', 'md5', 'mark'], ['name', 'version', 'release', 'arch', 'md5', 'mark'],
lambda x: [x.name, x.version, x.release, x.arch, x.md5, 'start' if x == current_repository else 'dest' if x == dest_repository else ''], lambda x: [x.name, x.version, x.release, x.arch, x.md5, 'start' if x == current_repository else 'dest' if x == dest_repository else ''],
title='Packages Will Be Used' title='Packages Will Be Used'
) )
if not self._call_stdio('confirm', 'If you use a non-official release, we cannot guarantee a successful upgrade or technical support when you fail. Make sure that you want to use the above package to upgrade.'): if not self._call_stdio('confirm', 'If you use a non-official release, we cannot guarantee a successful upgrade or technical support when you fail. Make sure that you want to use the above package to upgrade.'):
return False return False
index = 1 index = 1
upgrade_ctx = { upgrade_ctx = {
'route': route, 'route': route,
'upgrade_repositories': [ 'upgrade_repositories': [
{ {
'version': repository.version, 'version': repository.version,
...@@ -2178,29 +2368,13 @@ class ObdHome(object): ...@@ -2178,29 +2368,13 @@ class ObdHome(object):
# Get the client # Get the client
ssh_clients = self.get_clients(deploy_config, [current_repository]) ssh_clients = self.get_clients(deploy_config, [current_repository])
cluster_config = deploy_config.components[current_repository.name] cluster_config = deploy_config.components[current_repository.name]
install_plugins = self.get_install_plugin_and_install(upgrade_repositories, []) install_plugins = self.get_install_plugin_and_install(upgrade_repositories, [])
if not install_plugins: if not install_plugins:
return False return False
need_lib_repositories = [] if not self.install_repositories_to_servers(deploy_config, upgrade_repositories[1:], install_plugins, ssh_clients, options):
for repository in upgrade_repositories[1:]: return False
cluster_config = deploy_config.components[repository.name]
# cluster files check
self.servers_repository_install(ssh_clients, cluster_config.servers, repository, install_plugins[repository])
# lib check
if not self.servers_repository_lib_check(ssh_clients, cluster_config.servers, repository, install_plugins[repository], 'warn'):
need_lib_repositories.append(repository)
if need_lib_repositories:
self._call_stdio('print', 'Try to get lib-repository')
repositories_lib_map = self.install_lib_for_repositories(need_lib_repositories)
if repositories_lib_map is False:
self._call_stdio('error', 'Failed to install lib package for local')
return False
if self.servers_apply_lib_repository_and_check(ssh_clients, deploy_config, need_lib_repositories, repositories_lib_map):
self._call_stdio('error', 'Failed to install lib package for cluster servers')
return False
n = len(upgrade_repositories) n = len(upgrade_repositories)
while upgrade_ctx['index'] < n: while upgrade_ctx['index'] < n:
...@@ -2215,7 +2389,9 @@ class ObdHome(object): ...@@ -2215,7 +2389,9 @@ class ObdHome(object):
current_repository=current_repository, current_repository=current_repository,
upgrade_repositories=upgrade_repositories, upgrade_repositories=upgrade_repositories,
apply_param_plugin=lambda repository: self.search_param_plugin_and_apply([repository], deploy_config), apply_param_plugin=lambda repository: self.search_param_plugin_and_apply([repository], deploy_config),
upgrade_ctx=upgrade_ctx upgrade_ctx=upgrade_ctx,
install_repository_to_servers=self.install_repository_to_servers,
unuse_lib_repository=deploy_config.unuse_lib_repository
) )
deploy.update_upgrade_ctx(**upgrade_ctx) deploy.update_upgrade_ctx(**upgrade_ctx)
if not ret: if not ret:
...@@ -2250,13 +2426,17 @@ class ObdHome(object): ...@@ -2250,13 +2426,17 @@ class ObdHome(object):
for item in plugin.file_list(info): for item in plugin.file_list(info):
path = os.path.join(repo_path, item.src_path) path = os.path.join(repo_path, item.src_path)
path = os.path.normcase(path) path = os.path.normcase(path)
if not os.path.exists(path): if not os.path.exists(path) or os.path.isdir(path) != (item.type == InstallPlugin.FileItemType.DIR):
path = os.path.join(repo_path, item.target_path) path = os.path.join(repo_path, item.target_path)
path = os.path.normcase(path) path = os.path.normcase(path)
if not os.path.exists(path): if not os.path.exists(path):
self._call_stdio('error', 'need %s: %s ' % ('dir' if item.type == InstallPlugin.FileItemType.DIR else 'file', path)) self._call_stdio('error', 'need %s: %s ' % ('dir' if item.type == InstallPlugin.FileItemType.DIR else 'file', path))
success = False success = False
continue continue
if os.path.isdir(path) != (item.type == InstallPlugin.FileItemType.DIR):
self._call_stdio('error', 'need %s, but %s is %s' % (item.type, path, 'file' if item.type == InstallPlugin.FileItemType.DIR else 'dir'))
success = False
continue
files[item.src_path] = path files[item.src_path] = path
if success is False: if success is False:
return False return False
...@@ -2336,9 +2516,14 @@ class ObdHome(object): ...@@ -2336,9 +2516,14 @@ class ObdHome(object):
self._call_stdio('start_loading', 'Get local repositories and plugins') self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository # Get the repository
repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]}) # repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]})
repository = repositories[0] repositories = self.load_local_repositories(deploy_info)
for repository in repositories:
if repository.name == opts.component:
break
else:
self._call_stdio('error', 'Can not find the component for mysqltest, use `--component` to select component')
return False
# Check whether the components have the parameter plugins and apply the plugins # Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config) self.search_param_plugin_and_apply(repositories, deploy_config)
self._call_stdio('stop_loading', 'succeed') self._call_stdio('stop_loading', 'succeed')
...@@ -2370,6 +2555,7 @@ class ObdHome(object): ...@@ -2370,6 +2555,7 @@ class ObdHome(object):
mysqltest_check_opt_plugin = self.plugin_manager.get_best_py_script_plugin('check_opt', 'mysqltest', repository.version) mysqltest_check_opt_plugin = self.plugin_manager.get_best_py_script_plugin('check_opt', 'mysqltest', repository.version)
mysqltest_check_test_plugin = self.plugin_manager.get_best_py_script_plugin('check_test', 'mysqltest', repository.version) mysqltest_check_test_plugin = self.plugin_manager.get_best_py_script_plugin('check_test', 'mysqltest', repository.version)
mysqltest_run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'mysqltest', repository.version) mysqltest_run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'mysqltest', repository.version)
mysqltest_collect_log_plugin = self.plugin_manager.get_best_py_script_plugin('collect_log', 'mysqltest', repository.version)
env = opts.__dict__ env = opts.__dict__
env['cursor'] = cursor env['cursor'] = cursor
...@@ -2379,66 +2565,84 @@ class ObdHome(object): ...@@ -2379,66 +2565,84 @@ class ObdHome(object):
ret = mysqltest_check_opt_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env) ret = mysqltest_check_opt_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env)
if not ret: if not ret:
return False return False
self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_check_test_plugin, repository)) if not env['init_only']:
ret = mysqltest_check_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env) self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_check_test_plugin, repository))
if not ret: ret = mysqltest_check_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env)
self._call_stdio('error', 'Failed to get test set') if not ret:
return False self._call_stdio('error', 'Failed to get test set')
if not env['test_set']: return False
self._call_stdio('error', 'Test set is empty') if env['test_set'] is None:
return False self._call_stdio('error', 'Test set is empty')
return False
if env['need_init']: if env['need_init'] or env['init_only']:
self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, repository)) self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, repository))
if not mysqltest_init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env): if not mysqltest_init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env):
self._call_stdio('error', 'Failed to init for mysqltest') self._call_stdio('error', 'Failed to init for mysqltest')
return False return False
if env['init_only']:
result = [] return True
for test in env['test_set']:
self._call_stdio('verbose', 'test set: {}'.format(env['test_set']))
self._call_stdio('verbose', 'total: {}'.format(len(env['test_set'])))
reboot_success = True
while True:
self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_run_test_plugin, repository)) self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_run_test_plugin, repository))
ret = mysqltest_run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, test, env) ret = mysqltest_run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env)
if not ret: if not ret:
break break
case_result = ret.get_return('result') self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_collect_log_plugin, repository))
if case_result['ret'] != 0 and opts.auto_retry: mysqltest_collect_log_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {},
self.stdio, env)
if ret.get_return('finished'):
break
if ret.get_return('reboot') and not env['disable_reboot']:
cursor.close() cursor.close()
db.close() db.close()
if getattr(self.stdio, 'sub_io'): if getattr(self.stdio, 'sub_io'):
stdio = self.stdio.sub_io(msg_lv=MsgLevel.ERROR) stdio = self.stdio.sub_io(msg_lv=MsgLevel.ERROR)
else: else:
stdio = None stdio = None
self._call_stdio('start_loading', 'Reboot') reboot_timeout = getattr(opts, 'reboot_timeout', 0)
obd = ObdHome(self.home_path, self.dev_mode, stdio=stdio) reboot_retries = getattr(opts, 'reboot_retries', 5)
obd.lock_manager.set_try_times(-1) reboot_success = False
if obd.redeploy_cluster(name): while reboot_retries and not reboot_success:
self._call_stdio('stop_loading', 'succeed') reboot_retries -= 1
else: with timeout(reboot_timeout):
self._call_stdio('stop_loading', 'fail') self._call_stdio('start_loading', 'Reboot')
result.append(case_result) obd = ObdHome(self.home_path, self.dev_mode, stdio=stdio)
break obd.lock_manager.set_try_times(-1)
obd.lock_manager.set_try_times(6000) if obd.redeploy_cluster(
obd = None name,
connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository] opt=Values({'force_kill': True, 'force': True, 'force_delete': True}), search_repo=False):
ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, target_server=opts.test_server, sys_root=False) self._call_stdio('stop_loading', 'succeed')
if not ret or not ret.get_return('connect'): else:
break self._call_stdio('stop_loading', 'fail')
db = ret.get_return('connect') continue
cursor = ret.get_return('cursor') obd.lock_manager.set_try_times(6000)
env['cursor'] = cursor obd = None
self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, repository)) connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository]
if not mysqltest_init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env): ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {},
self._call_stdio('error', 'Failed to prepare for mysqltest') self.stdio, target_server=opts.test_server, sys_root=False)
break if not ret or not ret.get_return('connect'):
ret = mysqltest_run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, test, env) self._call_stdio('error', 'Failed to connect server')
if not ret: continue
db = ret.get_return('connect')
cursor = ret.get_return('cursor')
env['cursor'] = cursor
self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, repository))
if mysqltest_init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {},
self.stdio, env):
reboot_success = True
else:
self._call_stdio('error', 'Failed to prepare for mysqltest')
if not reboot_success:
env['collect_log'] = True
mysqltest_collect_log_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env, test_name='reboot_failed')
break break
case_result = ret.get_return('result') result = env.get('case_results', [])
result.append(case_result)
passcnt = len(list(filter(lambda x: x["ret"] == 0, result))) passcnt = len(list(filter(lambda x: x["ret"] == 0, result)))
totalcnt = len(env['test_set']) totalcnt = len(env.get('run_test_cases', []))
failcnt = totalcnt - passcnt failcnt = totalcnt - passcnt
if result: if result:
self._call_stdio( self._call_stdio(
...@@ -2447,7 +2651,9 @@ class ObdHome(object): ...@@ -2447,7 +2651,9 @@ class ObdHome(object):
title='Result (Total %d, Passed %d, Failed %s)' % (totalcnt, passcnt, failcnt), title='Result (Total %d, Passed %d, Failed %s)' % (totalcnt, passcnt, failcnt),
align={'Cost (s)': 'r'} align={'Cost (s)': 'r'}
) )
if failcnt: if failcnt or not reboot_success:
if not reboot_success:
self._call_stdio('error', 'reboot cluster failed')
self._call_stdio('print', 'Mysqltest failed') self._call_stdio('print', 'Mysqltest failed')
else: else:
self._call_stdio('print', 'Mysqltest passed') self._call_stdio('print', 'Mysqltest passed')
...@@ -2529,10 +2735,16 @@ class ObdHome(object): ...@@ -2529,10 +2735,16 @@ class ObdHome(object):
self._call_stdio('print', '%s %s is stopped' % (server, repository.name)) self._call_stdio('print', '%s %s is stopped' % (server, repository.name))
return False return False
for repository in repositories: ob_repository = None
if repository.name == opts.component: repository = None
break for tmp_repository in repositories:
if tmp_repository.name in ["oceanbase", "oceanbase-ce"]:
ob_repository = tmp_repository
if tmp_repository.name == opts.component:
repository = tmp_repository
plugin_version = ob_repository.version if ob_repository else repository.version
env = {'sys_root': False} env = {'sys_root': False}
db = None db = None
cursor = None cursor = None
...@@ -2542,7 +2754,6 @@ class ObdHome(object): ...@@ -2542,7 +2754,6 @@ class ObdHome(object):
connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository] connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository]
if repository.name in ['obproxy', 'obproxy-ce']: if repository.name in ['obproxy', 'obproxy-ce']:
ob_optimization = False ob_optimization = False
allow_components = ['oceanbase', 'oceanbase-ce'] allow_components = ['oceanbase', 'oceanbase-ce']
...@@ -2564,8 +2775,8 @@ class ObdHome(object): ...@@ -2564,8 +2775,8 @@ class ObdHome(object):
return False return False
db = ret.get_return('connect') db = ret.get_return('connect')
cursor = ret.get_return('cursor') cursor = ret.get_return('cursor')
run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'sysbench', repository.version) run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'sysbench', plugin_version)
setattr(opts, 'host', opts.test_server.ip) setattr(opts, 'host', opts.test_server.ip)
setattr(opts, 'port', db.port) setattr(opts, 'port', db.port)
...@@ -2685,6 +2896,95 @@ class ObdHome(object): ...@@ -2685,6 +2896,95 @@ class ObdHome(object):
return True return True
return False return False
def tpcds(self, name, opts):
    """Run the TPC-DS benchmark against deploy *name*.

    Resolves the target component (obproxy or oceanbase) plus the backing
    database component, verifies the whole cluster is running, then drives
    the tpcds plugins in order: check_opt -> load_data -> run_test.

    :param name: deploy name registered in the deploy manager
    :param opts: CLI options; reads/patches ``component`` and ``test_server``
    :return: True on success, False on any validation or plugin failure
    """
    self._call_stdio('verbose', 'Get Deploy by name')
    deploy = self.deploy_manager.get_deploy_config(name)
    if not deploy:
        self._call_stdio('error', 'No such deploy: %s.' % name)
        return False

    deploy_info = deploy.deploy_info
    self._call_stdio('verbose', 'Check deploy status')
    # The benchmark needs a live cluster; refuse anything not RUNNING.
    if deploy_info.status != DeployStatus.STATUS_RUNNING:
        self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
        return False
    self._call_stdio('verbose', 'Get deploy configuration')
    deploy_config = deploy.deploy_config

    db_component = None
    db_components = ['oceanbase', 'oceanbase-ce']
    allow_components = ['obproxy', 'obproxy-ce', 'oceanbase', 'oceanbase-ce']
    if opts.component is None:
        # No --component given: take the first allowed component present in
        # the deploy (obproxy variants are preferred by list order).
        for component_name in allow_components:
            if component_name in deploy_config.components:
                opts.component = component_name
                break
    elif opts.component not in allow_components:
        self._call_stdio('error', '%s not support. %s is allowed' % (opts.component, allow_components))
        return False
    if opts.component not in deploy_config.components:
        self._call_stdio('error', 'Can not find the component for tpcds, use `--component` to select component')
        return False

    # A database component must exist even when the test targets obproxy;
    # the last match wins if both oceanbase flavors are deployed.
    for component_name in db_components:
        if component_name in deploy_config.components:
            db_component = component_name
    if db_component is None:
        self._call_stdio('error', 'Missing database component (%s) in deploy' % ','.join(db_components))
        return False

    self._call_stdio('start_loading', 'Get local repositories and plugins')
    # Get the repository
    # repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]})
    repositories = self.load_local_repositories(deploy_info)

    # Check whether the components have the parameter plugins and apply the plugins
    self.search_param_plugin_and_apply(repositories, deploy_config)
    self._call_stdio('stop_loading', 'succeed')

    # Get the client
    ssh_clients = self.get_clients(deploy_config, repositories)

    # Check the status for the deployed cluster
    component_status = {}
    cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status)
    if cluster_status is False or cluster_status == 0:
        if self.stdio:
            self._call_stdio('error', EC_SOME_SERVER_STOPED)
            # Report every stopped server so the user can see what to start.
            for repository in component_status:
                cluster_status = component_status[repository]
                for server in cluster_status:
                    if cluster_status[server] == 0:
                        self._call_stdio('print', '%s %s is stopped' % (server, repository.name))
        return False

    db_cluster_config = deploy_config.components[db_component]
    cluster_config = deploy_config.components[opts.component]
    if opts.test_server is None:
        # Default to the first server of the target component.
        opts.test_server = cluster_config.servers[0]
    else:
        for server in cluster_config.servers:
            if server.name == opts.test_server:
                opts.test_server = server
                break
        else:
            self._call_stdio('error', '%s is not a server in %s' % (opts.test_server, opts.component))
            return False

    # NOTE(review): check_opt is picked by the DB component's version while
    # load_data/run_test use the target component's version — confirm this
    # asymmetry is intentional.
    check_opt_plugin = self.plugin_manager.get_best_py_script_plugin('check_opt', 'tpcds', db_cluster_config.version)
    load_data_plugin = self.plugin_manager.get_best_py_script_plugin('load_data', 'tpcds', cluster_config.version)
    run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'tpcds', cluster_config.version)

    self._call_stdio('verbose', 'Call %s for %s' % (check_opt_plugin, cluster_config.name))
    if not check_opt_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, db_cluster_config=db_cluster_config):
        return False

    # Data is loaded through the database component, not the proxy.
    self._call_stdio('verbose', 'Call %s for %s' % (load_data_plugin, db_cluster_config.name))
    if not load_data_plugin(deploy_config.components.keys(), ssh_clients, db_cluster_config, [], opts, self.stdio):
        return False

    self._call_stdio('verbose', 'Call %s for %s' % (run_test_plugin, cluster_config.name))
    return run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio)
def tpcc(self, name, opts): def tpcc(self, name, opts):
self._call_stdio('verbose', 'Get Deploy by name') self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name) deploy = self.deploy_manager.get_deploy_config(name)
...@@ -2730,8 +3030,8 @@ class ObdHome(object): ...@@ -2730,8 +3030,8 @@ class ObdHome(object):
self._call_stdio('start_loading', 'Get local repositories and plugins') self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository # Get the repository
repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]}) # repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]})
repository = repositories[0] repositories = self.load_local_repositories(deploy_info)
# Check whether the components have the parameter plugins and apply the plugins # Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config) self.search_param_plugin_and_apply(repositories, deploy_config)
...@@ -2753,9 +3053,15 @@ class ObdHome(object): ...@@ -2753,9 +3053,15 @@ class ObdHome(object):
self._call_stdio('print', '%s %s is stopped' % (server, repository.name)) self._call_stdio('print', '%s %s is stopped' % (server, repository.name))
return False return False
for repository in repositories: ob_repository = None
if repository.name == opts.component: repository = None
break for tmp_repository in repositories:
if tmp_repository.name in ["oceanbase", "oceanbase-ce"]:
ob_repository = tmp_repository
if tmp_repository.name == opts.component:
repository = tmp_repository
plugin_version = ob_repository.version if ob_repository else repository.version
env = {'sys_root': False} env = {'sys_root': False}
odp_db = None odp_db = None
...@@ -2796,12 +3102,11 @@ class ObdHome(object): ...@@ -2796,12 +3102,11 @@ class ObdHome(object):
return False return False
db = ret.get_return('connect') db = ret.get_return('connect')
cursor = ret.get_return('cursor') cursor = ret.get_return('cursor')
pre_test_plugin = self.plugin_manager.get_best_py_script_plugin('pre_test', 'tpcc', plugin_version)
pre_test_plugin = self.plugin_manager.get_best_py_script_plugin('pre_test', 'tpcc', repository.version) optimize_plugin = self.plugin_manager.get_best_py_script_plugin('optimize', 'tpcc', plugin_version)
optimize_plugin = self.plugin_manager.get_best_py_script_plugin('optimize', 'tpcc', repository.version) build_plugin = self.plugin_manager.get_best_py_script_plugin('build', 'tpcc', plugin_version)
build_plugin = self.plugin_manager.get_best_py_script_plugin('build', 'tpcc', repository.version) run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'tpcc', plugin_version)
run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'tpcc', repository.version) recover_plugin = self.plugin_manager.get_best_py_script_plugin('recover', 'tpcc', plugin_version)
recover_plugin = self.plugin_manager.get_best_py_script_plugin('recover', 'tpcc', repository.version)
setattr(opts, 'host', opts.test_server.ip) setattr(opts, 'host', opts.test_server.ip)
setattr(opts, 'port', db.port) setattr(opts, 'port', db.port)
...@@ -2917,5 +3222,103 @@ class ObdHome(object): ...@@ -2917,5 +3222,103 @@ class ObdHome(object):
if odp_db: if odp_db:
odp_db.close() odp_db.close()
def db_connect(self, name, opts):
    """Open an interactive database connection to deploy *name*.

    Picks a component (obproxy preferred) and a server, syncs the cluster
    configuration, then hands control to the generic ``db_connect`` plugin.

    :param name: deploy name registered in the deploy manager
    :param opts: CLI options; reads/patches ``component`` and ``server``
    :return: the plugin result on success, False on validation failure
    """
    self._call_stdio('verbose', 'Get Deploy by name')
    deploy = self.deploy_manager.get_deploy_config(name, read_only=True)
    if not deploy:
        self._call_stdio('error', 'No such deploy: %s.' % name)
        return False

    self._call_stdio('verbose', 'Get deploy configuration')
    deploy_config = deploy.deploy_config
    deploy_info = deploy.deploy_info
    # A destroyed or merely-configured deploy has nothing to connect to.
    if deploy_info.status in (DeployStatus.STATUS_DESTROYED, DeployStatus.STATUS_CONFIGURED):
        self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
        return False

    allow_components = ['obproxy', 'obproxy-ce', 'oceanbase', 'oceanbase-ce']
    if opts.component is None:
        # No --component given: take the first allowed component present in
        # the deploy (obproxy variants are preferred by list order).
        for component_name in allow_components:
            if component_name in deploy_config.components:
                opts.component = component_name
                break
    elif opts.component not in allow_components:
        self._call_stdio('error', '%s not support. %s is allowed' % (opts.component, allow_components))
        return False
    if opts.component not in deploy_config.components:
        # BUGFIX: the message previously said "tpch" — copy-paste from the
        # tpch runner; this is the db_connect command.
        self._call_stdio('error', 'Can not find the component for db_connect, use `--component` to select component')
        return False

    cluster_config = deploy_config.components[opts.component]
    if not cluster_config.servers:
        self._call_stdio('error', '%s server list is empty' % opts.component)
        return False
    if opts.server is None:
        # Default to the first server of the chosen component.
        opts.server = cluster_config.servers[0]
    else:
        for server in cluster_config.servers:
            if server.name == opts.server:
                opts.server = server
                break
        else:
            self._call_stdio('error', '%s is not a server in %s' % (opts.server, opts.component))
            return False

    self._call_stdio('start_loading', 'Get local repositories and plugins')
    # Get the repository
    repositories = self.load_local_repositories(deploy_info)

    # Check whether the components have the parameter plugins and apply the plugins
    self.search_param_plugin_and_apply(repositories, deploy_config)
    self._call_stdio('stop_loading', 'succeed')

    # Refresh cluster config before connecting so credentials are current.
    sync_config_plugin = self.plugin_manager.get_best_py_script_plugin('sync_cluster_config', 'general', '0.1')
    sync_config_plugin(deploy_config.components.keys(), [], cluster_config, [], opts, self.stdio)
    db_connect_plugin = self.plugin_manager.get_best_py_script_plugin('db_connect', 'general', '0.1')
    return db_connect_plugin(deploy_config.components.keys(), [], cluster_config, [], opts, self.stdio)
def commands(self, name, cmd_name, opts):
    """Dispatch the auxiliary command *cmd_name* (ssh/less/pid/gdb, ...)
    over the servers of deploy *name* via the ``commands`` plugin family.

    :param name: deploy name registered in the deploy manager
    :param cmd_name: command key looked up in command_template.yaml
    :param opts: CLI options forwarded to the plugins
    :return: True/False for batch commands, bool(plugin ret) for interactive
        ones; returns None (falsy) when check_opt/prepare_variables fail
    """
    self._call_stdio('verbose', 'Get Deploy by name')
    deploy = self.deploy_manager.get_deploy_config(name, read_only=True)
    if not deploy:
        self._call_stdio('error', 'No such deploy: %s.' % name)
        return False

    self._call_stdio('verbose', 'Get deploy configuration')
    deploy_config = deploy.deploy_config
    deploy_info = deploy.deploy_info
    # Commands need at least a deployed (not destroyed/just-configured) cluster.
    if deploy_info.status in (DeployStatus.STATUS_DESTROYED, DeployStatus.STATUS_CONFIGURED):
        self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
        return False

    self._call_stdio('start_loading', 'Get local repositories and plugins')
    # Get the repository
    repositories = self.load_local_repositories(deploy_info)
    # Check whether the components have the parameter plugins and apply the plugins
    self.search_param_plugin_and_apply(repositories, deploy_config)
    self._call_stdio('stop_loading', 'succeed')

    check_opt_plugin = self.plugin_manager.get_best_py_script_plugin('check_opt', 'commands', '0.1')
    prepare_variables_plugin = self.plugin_manager.get_best_py_script_plugin('prepare_variables', 'commands', '0.1')
    commands_plugin = self.plugin_manager.get_best_py_script_plugin('commands', 'commands', '0.1')

    ssh_clients = self.get_clients(deploy_config, repositories)
    sync_config_plugin = self.plugin_manager.get_best_py_script_plugin('sync_cluster_config', 'general', '0.1')
    # check_opt is run once against the first repository's component; it fills
    # `context` with the resolved component/server lists and command config.
    cluster_config = deploy_config.components[repositories[0].name]
    context = {}
    sync_config_plugin(deploy_config.components.keys(), [], cluster_config, [], opts, self.stdio)
    ret = check_opt_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, name=cmd_name, context=context)
    if not ret:
        return
    # Fan out over every resolved (component, server) pair; prepare_variables
    # may mark a pair as skipped (e.g. command not defined for the component).
    for component in context['components']:
        cluster_config = deploy_config.components[component]
        for server in context['servers']:
            ret = prepare_variables_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, name=cmd_name, component=component, server=server, context=context)
            if not ret:
                return
            if not ret.get_return("skip"):
                ret = commands_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, context=context)
    # Interactive commands run exactly one pair; report that run's outcome.
    if context.get('interactive'):
        return bool(ret)
    results = context.get('results', [])
    self._call_stdio("print_list", results, ["Component", "Server", cmd_name.title()], title=cmd_name.title())
    return not context.get('failed')
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
from tool import YamlLoader, ConfigUtil
ALLOWED_LEVEL = [0, 1, 2]
YAML_LOADER = YamlLoader()
YAML_TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), "command_template.yaml")
class CommandConfig(object):
    """Command/variable/wrapper definitions loaded from a YAML template.

    Parses ``command_template.yaml`` once at construction time and exposes
    its sections as plain lists so callers can iterate without re-parsing.
    """

    def __init__(self, yaml_path=YAML_TEMPLATE_PATH, loader=YAML_LOADER, stdio=None):
        self.yaml_path = yaml_path
        self.loader = loader
        self.stdio = stdio
        # Safe defaults so attribute access never raises AttributeError when
        # the template is missing or malformed (previously _load could bail
        # out before these attributes were ever assigned).
        self._data = {}
        self.all_variables = {}
        self.global_variables = []
        self.server_variables = []
        self.ssh_variables = []
        self.all_commands = []
        self.all_wrappers = []
        self._load()

    def _load(self):
        """Read and parse the YAML template, populating the section lists."""
        try:
            with open(self.yaml_path, 'rb') as f:
                self._data = self.loader.load(f)
            # `or {}` guards against a template without a `variables` section.
            self.all_variables = self._data.get('variables') or {}
            self.global_variables = self.all_variables.get('global', [])
            self.server_variables = self.all_variables.get('server', [])
            self.ssh_variables = self.all_variables.get('ssh', [])
            self.all_commands = self._data.get('commands', [])
            self.all_wrappers = self._data.get('wrappers', [])
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); log when a stdio is available.
            if self.stdio:
                self.stdio.exception('failed to load command template')
def check_opt(plugin_context, name, context, *args, **kwargs):
    """Resolve and validate --components/--servers for command *name*.

    Fills *context* with the resolved component list, server list and the
    parsed :class:`CommandConfig`, then returns via plugin_context.
    Interactive commands default to a single component/server; batch
    commands default to all of them.

    :param plugin_context: plugin runtime (stdio, options, clients, ...)
    :param name: command name looked up in the template config
    :param context: dict shared across the commands plugin family
    :return: plugin_context.return_true/return_false, or None on error
    """
    def get_option(key, default=''):
        # Missing or None option values fall back to `default`.
        value = getattr(options, key, default)
        if value is None:
            value = default
        stdio.verbose('get option: %s value %s' % (key, value))
        return value

    stdio = plugin_context.stdio
    cluster_config = plugin_context.cluster_config
    options = plugin_context.options
    clients = plugin_context.clients
    deployed_components = list(plugin_context.components)
    components = get_option("components", None)
    servers = get_option("servers", None)
    interactive = False
    command_config = CommandConfig()
    # Determine whether *name* is declared interactive in the template.
    # NOTE(review): no break — if the template ever held duplicate names,
    # the last entry would win.
    for command in command_config.all_commands:
        command_name = ConfigUtil.get_value_from_dict(command, 'name', transform_func=str)
        if command_name == name:
            interactive = ConfigUtil.get_value_from_dict(command, 'interactive', False, transform_func=bool)
    if components is None:
        if interactive:
            # Interactive commands target exactly one component: the first
            # one in deploy-configuration order.
            components = deployed_components[:1]
            stdio.verbose("Component {} will be used according to the order in the deploy configuration yaml.".format(components[0]))
        else:
            components = deployed_components
            stdio.verbose("Component {} will be used because {} is a non-interactive command".format(", ".join(components), name))
    elif components == "*":
        components = deployed_components
    else:
        components = components.split(',')
    if not clients:
        stdio.error("{} server list is empty".format(','.join(components)))
        return
    if servers is None:
        if interactive:
            # A single placeholder entry: prepare_variables picks the
            # concrete server for interactive commands.
            servers = [None, ]
        else:
            servers = list(clients.keys())
            stdio.verbose("Server {} will be used because {} is a non-interactive command".format(", ".join([str(s) for s in servers]), name))
    elif servers == '*':
        servers = list(clients.keys())
    else:
        # Map user-supplied server names onto known client objects; anything
        # left over in server_names was not found in the deployment.
        server_names = servers.split(',')
        servers = []
        for server in clients:
            if server.name in server_names:
                server_names.remove(server.name)
                servers.append(server)
        if server_names:
            stdio.error("Server {} not found in current deployment".format(','.join(server_names)))
            return
    # Reject components that are not part of this deployment.
    failed_components = []
    for component in components:
        if component not in deployed_components:
            failed_components.append(component)
    if failed_components:
        stdio.error('{} not support. {} is allowed'.format(','.join(failed_components), deployed_components))
        return plugin_context.return_false()
    context.update(components=components, servers=servers, command_config=command_config)
    return plugin_context.return_true(context=context)
# Command template for `obd cluster command`.
# - variables: values pulled from cluster/server config into {placeholders}
# - wrappers:  how a command line is executed (local shell vs ssh client)
# - commands:  the named commands users can invoke
variables:
  ssh:
    - name: host
      config_key: host
      components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce']
    - name: user
      config_key: username
      components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce']
  server:
    - name: home_path
      config_key: home_path
      components: ['oceanbase', 'oceanbase-ce', 'obproxy', 'obproxy-ce']
    - name: mysql_port
      config_key: mysql_port
      components: ['oceanbase', 'oceanbase-ce']
  global:
    - name: password
      config_key: root_password
      components: ['oceanbase', 'oceanbase-ce']
    - name: password
      config_key: observer_root_password
      components: ['obproxy', 'obproxy-ce']
wrappers:
  - name: ssh
    remote_command: ssh {user}@{host} -t '{cmd}'
    local_command: "{cmd}"
  - name: ssh_client
    command: "{cmd}"
    executor: "ssh_client"
commands:
  - name: ssh
    components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce']
    command: "cd {home_path}/log;bash --login"
    wrapper: "ssh"
    interactive: true
  - name: less
    command: "less {home_path}/log/observer.log"
    components: ['oceanbase', 'oceanbase-ce']
    wrapper: "ssh"
    interactive: true
    no_interruption: true
  - name: less
    command: "less {home_path}/log/obproxy.log"
    components: ['obproxy', 'obproxy-ce']
    wrapper: "ssh"
    interactive: true
    no_interruption: true
  - name: pid
    wrapper: ssh_client
    command: "pgrep -u {user} -f ^{home_path}/bin/observer"
    components: ['oceanbase', 'oceanbase-ce']
    # BUGFIX: key was misspelled "no_excption", so prepare_variables'
    # lookup of 'no_exception' never matched and empty pgrep output was
    # wrongly reported as a failure.
    no_exception: true
  - name: pid
    wrapper: ssh_client
    command: "pgrep -u {user} -f ^{home_path}/bin/obproxy"
    components: ['obproxy', 'obproxy-ce']
    no_exception: true
  - name: gdb
    wrapper: "ssh"
    command: "cd {home_path}; LD_LIBRARY_PATH=./lib:$LD_LIBRARY_PATH gdb --pid=`$pid`"
    components: ['oceanbase', 'oceanbase-ce']
    interactive: true
    no_interruption: true
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
try:
import subprocess32 as subprocess
except:
import subprocess
import signal
import os
from ssh import LocalClient
from tool import var_replace, COMMAND_ENV
def commands(plugin_context, context, *args, **kwargs):
    """Execute the command prepared by check_opt/prepare_variables.

    Interactive commands run in the local shell (optionally shielding
    SIGINT); batch commands run via the ssh client or LocalClient and
    append their stdout to ``context['results']``.

    :param plugin_context: plugin runtime (stdio, clients, ...)
    :param context: dict shared across the commands plugin family
    :return: plugin_context.return_true with the updated context
    """
    stdio = plugin_context.stdio

    def _ctx(key, default=None):
        # Read a value stashed by the earlier plugins, logging each access.
        value = context.get(key, default)
        stdio.verbose('get value from context: %s value %s' % (key, value))
        return value

    template = _ctx("command_template")
    variables = _ctx("command_variables", {})
    interactive = _ctx("interactive")
    results = _ctx("results", [])
    failed = _ctx("failed", False)
    no_exception = _ctx("no_exception", False)
    no_interruption = _ctx("no_interruption", False)
    executor = _ctx("executor", False)
    component = _ctx("component", False)
    server = _ctx("server", None)
    env = _ctx("env", {})

    # Substitute template placeholders, then environment-style variables.
    cmd = var_replace(template.format(**variables), env)

    if interactive:
        if no_interruption:
            stdio.verbose('ctrl c is not accepted in this command')

            def _ignore_sigint(signum, frame):
                stdio.verbose('ctrl c is not accepted in this command')

            signal.signal(signal.SIGINT, _ignore_sigint)
        stdio.verbose('exec cmd: {}'.format(cmd))
        # Hand the terminal to the command (e.g. an interactive shell/gdb).
        subprocess.call(cmd, env=os.environ.copy(), shell=True)
    else:
        client = plugin_context.clients[server]
        if executor == "ssh_client":
            ret = client.execute_command(cmd, stdio=stdio)
        else:
            ret = LocalClient.execute_command(cmd, env=client.env, stdio=stdio)
        if ret and ret.stdout:
            results.append([component, server, ret.stdout.strip()])
        elif not no_exception:
            # Empty/failed output counts as a failure unless the command is
            # flagged best-effort via no_exception.
            failed = True

    context.update(results=results, failed=failed)
    return plugin_context.return_true(context=context)
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from tool import ConfigUtil
class CommandVariables(dict):
    """A dict whose lookup falls back to the key itself when absent.

    Lets command templates reference placeholders that have no configured
    value: ``cv['missing']`` returns ``'missing'`` instead of raising.
    """

    def __getitem__(self, item):
        # BUGFIX: the original tested `item not in self.items()`, which
        # compares the key against (key, value) tuples and is therefore
        # always True — stored values were never returned. Membership must
        # be tested against the keys.
        if item in self:
            return super(CommandVariables, self).__getitem__(item)
        return item
def load_variables_from_config(variables, component, config, command_variables, stdio=None):
    """Populate *command_variables* from *config* for one component.

    Each entry in *variables* declares a placeholder name, the config key
    it maps to, and the components it applies to. Entries whose component
    list does not include *component* are ignored; None config values are
    not stored (but are still logged).

    :param variables: list of variable specs from the command template
    :param component: component name being processed
    :param config: cluster/server config object supporting .get()
    :param command_variables: mapping updated in place
    :param stdio: optional IO for verbose logging
    """
    for spec in variables:
        applicable = ConfigUtil.get_list_from_dict(spec, 'components', str)
        if component in applicable:
            var_name = ConfigUtil.get_value_from_dict(spec, 'name', transform_func=str)
            cfg_key = ConfigUtil.get_value_from_dict(spec, 'config_key', transform_func=str)
            value = config.get(cfg_key)
            if value is not None:
                command_variables[var_name] = str(value)
            if stdio:
                stdio.verbose('get variable %s for config key %s, value is %s' % (var_name, cfg_key, value))
def prepare_variables(plugin_context, name, context, component, server, *args, **kwargs):
    """Resolve the command template called *name* for *component*/*server*.

    Looks the command up in the command config, collects substitution
    variables from the global/server/ssh configuration, optionally wraps the
    command with a configured wrapper, and stores everything the executor
    needs back into *context*.  Returns ``plugin_context.return_true()`` on
    success (``skip=True`` when *server* does not belong to the component),
    ``return_false()``/``None`` on error.
    """
    def get_value_from_context(key, default=None):
        # Context values are produced by earlier plugin stages.
        value = context.get(key, default)
        stdio.verbose('get value from context: %s value %s' % (key, value))
        return value

    cluster_config = plugin_context.cluster_config
    stdio = plugin_context.stdio
    clients = plugin_context.clients
    components = get_value_from_context("components", [])
    servers = get_value_from_context("servers", [])
    cmd_conf = get_value_from_context("command_config")
    loading_env = {}
    if server is None:
        # default to the component's first server
        server = cluster_config.servers[0]
    # find command template
    command_template = None
    interactive = None
    wrapper_name = None
    no_exception = False
    no_interruption = False
    executor = None
    command_variables = CommandVariables()
    # Scan every configured command: collect all commands allowed for this
    # component into loading_env, and remember the one matching *name*.
    for command in cmd_conf.all_commands:
        cmd_name = ConfigUtil.get_value_from_dict(command, 'name', transform_func=str)
        allow_components = ConfigUtil.get_list_from_dict(command, 'components', str)
        if component in allow_components:
            current_command = ConfigUtil.get_value_from_dict(command, 'command', transform_func=str)
            loading_env[cmd_name] = current_command
            if name == cmd_name:
                command_template = current_command
                interactive = ConfigUtil.get_value_from_dict(command, 'interactive', transform_func=bool)
                wrapper_name = ConfigUtil.get_value_from_dict(command, 'wrapper', transform_func=str)
                no_exception = ConfigUtil.get_value_from_dict(command, 'no_exception', transform_func=bool)
                no_interruption = ConfigUtil.get_value_from_dict(command, 'no_interruption', transform_func=bool)
    if command_template is None:
        stdio.error(
            'There is no command {} in component {}. Please use --components to set the right component.'.format(name,
                                                                                                                 component))
        return
    if interactive and (len(components) > 1 or len(servers) > 1):
        # An interactive command attaches a TTY; it can only target one spot.
        stdio.error('Interactive commands do not support specifying multiple components or servers.')
        return
    cmd_input = None
    if server not in cluster_config.servers:
        if interactive:
            stdio.error("{} is not a server in {}".format(server, component))
            return plugin_context.return_false()
        else:
            # Non-interactive: silently skip servers not in this component.
            stdio.verbose("{} is not a server in {}".format(server, component))
            return plugin_context.return_true(skip=True)
    global_config = cluster_config.get_global_conf()
    server_config = cluster_config.get_server_conf(server)
    client = clients[server]
    ssh_config = vars(client.config)
    # load global config
    stdio.verbose('load variables from global config')
    load_variables_from_config(cmd_conf.global_variables, component, global_config, command_variables, stdio)
    # load server config
    stdio.verbose('load variables from server config')
    load_variables_from_config(cmd_conf.server_variables, component, server_config, command_variables, stdio)
    # load ssh config
    stdio.verbose('load variables from ssh config')
    load_variables_from_config(cmd_conf.ssh_variables, component, ssh_config, command_variables, stdio)
    if wrapper_name:
        # Wrap the resolved command (e.g. run it through ssh/sudo/a shell).
        for wrapper in cmd_conf.all_wrappers:
            if wrapper_name == ConfigUtil.get_value_from_dict(wrapper, 'name', transform_func=str):
                local_command = ConfigUtil.get_value_from_dict(wrapper, "local_command", transform_func=str)
                remote_command = ConfigUtil.get_value_from_dict(wrapper, "remote_command", transform_func=str)
                command = ConfigUtil.get_value_from_dict(wrapper, "command", transform_func=str)
                cmd_input = ConfigUtil.get_value_from_dict(wrapper, "input", transform_func=str)
                executor = ConfigUtil.get_value_from_dict(wrapper, "executor", transform_func=str)
                if local_command and remote_command:
                    # wrapper distinguishes local vs remote execution
                    if client.is_localhost():
                        command = local_command
                    else:
                        command = remote_command
                command_template = command.format(cmd=command_template, **command_variables)
                if cmd_input:
                    # NOTE: input is formatted with the already-wrapped template
                    cmd_input = cmd_input.format(cmd=command_template, **command_variables)
                break
        else:
            # NOTE(review): an unknown wrapper only logs an error and falls
            # through without returning — confirm this is intentional.
            stdio.error("Wrapper {} not found in component {}.".format(wrapper_name, component))
    # Substitute variables into every collected command for the env table.
    for key, value in loading_env.items():
        loading_env[key] = str(value).format(**command_variables)
    context.update(
        command_variables=command_variables, command_config=cmd_conf, command_template=command_template,
        interactive=interactive, cmd_input=cmd_input, no_exception=no_exception, no_interruption=no_interruption,
        component=component, server=server, env=loading_env, executor=executor)
    return plugin_context.return_true()
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from subprocess import call, Popen, PIPE
from ssh import LocalClient
def db_connect(plugin_context, *args, **kwargs):
    """Open an interactive obclient session to the chosen server.

    Resolves user/tenant/password from options (falling back to passwords
    stored in the deploy config), then hands the terminal to obclient via a
    shell subprocess.  Returns True when obclient exits with code 0.
    """
    def get_option(key, default=''):
        # Read a CLI option; fall back to *default* when unset.
        value = getattr(options, key)
        if value is None:
            value = default
        stdio.verbose('get option: %s value %s' % (key, value))
        return value

    def local_execute_command(command, env=None, timeout=None):
        return LocalClient.execute_command(command, env, timeout, stdio)

    def get_connect_cmd():
        # Build the obclient command line.
        # NOTE(review): password/database are interpolated into a shell
        # string unquoted — special characters would break the command.
        cmd = r"{obclient_bin} -h{host} -P{port} -u {user}@{tenant} --prompt 'OceanBase(\u@\d)>' -A".format(
            obclient_bin=obclient_bin,
            host=server.ip,
            port=port,
            user=user,
            tenant=tenant
        )
        if need_password:
            # -p without a value makes obclient prompt interactively
            cmd += " -p"
        elif password:
            cmd += " -p{}".format(password)
        if database:
            cmd += " -D{}".format(database)
        return cmd

    def test_connect():
        # Probe connectivity non-interactively with a trivial statement.
        return local_execute_command(get_connect_cmd() + " -e 'help'")

    def connect():
        # Run obclient attached to the user's terminal; return its exit code.
        conn_cmd = get_connect_cmd()
        stdio.verbose('execute cmd: {}'.format(conn_cmd))
        p = None
        return_code = 255
        try:
            p = Popen(conn_cmd, shell=True)
            return_code = p.wait()
        except:
            stdio.exception("")
            if p:
                p.kill()
        stdio.verbose('exit code: {}'.format(return_code))
        return return_code

    options = plugin_context.options
    cluster_config = plugin_context.cluster_config
    stdio = plugin_context.stdio
    user = get_option('user', 'root')
    tenant = get_option('tenant', 'sys')
    database = get_option('database')
    password = get_option('password')
    obclient_bin = get_option('obclient_bin')
    server = get_option('server')
    component = get_option('component')
    global_conf = cluster_config.get_global_conf()
    server_config = cluster_config.get_server_conf(server)
    need_password = False
    # use oceanbase if root@sys as default
    if not database and user == 'root' and tenant == 'sys':
        database = 'oceanbase'
    # observer listens on mysql_port; obproxy on listen_port
    if component in ["oceanbase", "oceanbase-ce"]:
        port = server_config.get("mysql_port")
    else:
        port = server_config.get("listen_port")
    if not obclient_bin:
        # NOTE(review): this guard looks inverted — the executability probe
        # only runs when obclient_bin is EMPTY, so '%s --help' expands to a
        # bare '--help' which always fails and triggers the error message.
        # Presumably the check was meant to run unconditionally; confirm.
        ret = local_execute_command('%s --help' % obclient_bin)
        if not ret:
            stdio.error(
                '%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % (
                    ret.stderr, obclient_bin))
            return
    if not password:
        # No password supplied: try passwordless, then fall back to the
        # passwords recorded in the deploy configuration for this identity.
        connected = test_connect()
        if not connected:
            if user == "root" and tenant == "sys":
                if component in ["oceanbase", "oceanbase-ce"]:
                    password = global_conf.get('root_password')
                elif component in ["obproxy", "obproxy-ce"]:
                    password = global_conf.get('observer_root_password')
            elif user == "root" and tenant == "proxysys":
                if component in ["obproxy", "obproxy-ce"]:
                    password = global_conf.get("obproxy_sys_password")
            elif user == "proxyro" and tenant == 'sys':
                if component in ["oceanbase", "oceanbase-ce"]:
                    password = global_conf.get("proxyro_password")
                elif component in ["obproxy", "obproxy-ce"]:
                    password = global_conf.get("observer_sys_password")
            if password:
                connected = test_connect()
            # Still not connected: let obclient prompt for the password.
            need_password = not connected
    try:
        code = connect()
    except KeyboardInterrupt:
        stdio.exception("")
        return False
    return code == 0
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
import re
from _plugin import InstallPlugin
from _deploy import InnerConfigKeywords
from tool import YamlLoader
def install_repo(plugin_context, obd_home, install_repository, install_plugin, check_repository, check_file_map,
                 msg_lv, *args, **kwargs):
    """Install a repository onto every server, then verify shared-library deps.

    In "ln" install mode the repository is mirrored under the remote OBD home
    and linked/copied into each server's home_path; otherwise files are put
    directly into home_path.  Afterwards every BIN file of *check_repository*
    is checked with ``ldd`` and unresolved libraries are reported at *msg_lv*
    ('error' or 'warn').  Returns ``return_true(checked=...)`` on completion,
    False/None on failure.
    """
    cluster_config = plugin_context.cluster_config

    def install_to_home_path():
        # Link (or copy) the per-server repository mirror into home_path.
        # Uses the enclosing loop's client/remote_home_path/remote_obd_home.
        repo_dir = install_repository.repository_dir.replace(obd_home, remote_obd_home, 1)
        if is_lib_repo:
            # library repositories land in <home_path>/lib
            home_path = os.path.join(remote_home_path, 'lib')
        else:
            home_path = remote_home_path
        client.add_env("_repo_dir", repo_dir, True)
        client.add_env("_home_path", home_path, True)
        # recreate the repository's directory tree under home_path
        mkdir_bash = "mkdir -p ${_home_path} && cd ${_repo_dir} && find -type d | xargs -i mkdir -p ${_home_path}/{}"
        if not client.execute_command(mkdir_bash):
            return False
        success = True
        for install_file_item in install_file_items:
            source = os.path.join(repo_dir, install_file_item.target_path)
            target = os.path.join(home_path, install_file_item.target_path)
            client.add_env("source", source, True)
            client.add_env("target", target, True)
            # CP items are copied; everything else is symlinked
            if install_file_item.install_method == InstallPlugin.InstallMethod.CP:
                install_cmd = "cp -f"
            else:
                install_cmd = "ln -fs"
            if install_file_item.type == InstallPlugin.FileItemType.DIR:
                # link/copy regular files and symlinks inside the directory
                if client.execute_command("ls -1 ${source}"):
                    success = client.execute_command("cd ${source} && find -type f | xargs -i %(install_cmd)s ${source}/{} ${target}/{}" % {"install_cmd": install_cmd}) and success
                    success = client.execute_command("cd ${source} && find -type l | xargs -i %(install_cmd)s ${source}/{} ${target}/{}" % {"install_cmd": install_cmd}) and success
            else:
                success = client.execute_command("%(install_cmd)s ${source} ${target}" % {"install_cmd": install_cmd}) and success
        return success

    stdio = plugin_context.stdio
    clients = plugin_context.clients
    servers = cluster_config.servers
    is_lib_repo = install_repository.name.endswith("-libs")
    home_path_map = {}
    for server in servers:
        server_config = cluster_config.get_server_conf(server)
        home_path_map[server] = server_config.get("home_path")
    is_ln_install_mode = cluster_config.is_ln_install_mode()
    # remote install repository
    stdio.start_loading('Remote %s repository install' % install_repository)
    stdio.verbose('Remote %s repository integrity check' % install_repository)
    for server in servers:
        client = clients[server]
        remote_home_path = home_path_map[server]
        install_file_items = install_plugin.file_map(install_repository).values()
        stdio.verbose('%s %s repository integrity check' % (server, install_repository))
        if is_ln_install_mode:
            # mirror under the remote user's OBD home, then link into place
            remote_obd_home = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
            install_path = install_repository.repository_dir.replace(obd_home, remote_obd_home, 1)
        else:
            if is_lib_repo:
                install_path = os.path.join(remote_home_path, 'lib')
            else:
                install_path = remote_home_path
        client.execute_command('mkdir -p {}'.format(install_path))
        # '.data' records which repository version is currently installed
        remote_repository_data_path = os.path.join(install_path, '.data')
        remote_repository_data = client.execute_command('cat %s' % remote_repository_data_path).stdout
        stdio.verbose('%s %s install check' % (server, install_repository))
        try:
            yaml_loader = YamlLoader(stdio=stdio)
            data = yaml_loader.load(remote_repository_data)
            if not data:
                stdio.verbose('%s %s need to be installed ' % (server, install_repository))
            elif data == install_repository:
                # Version sync. Check for damages (TODO)
                stdio.verbose('%s %s has installed ' % (server, install_repository))
                if not install_to_home_path():
                    # NOTE(review): this early return leaves the loading
                    # spinner running — confirm whether stop_loading('fail')
                    # was intended here.
                    stdio.error("Failed to install repository {} to {}".format(install_repository, remote_home_path))
                    return False
                continue
            else:
                stdio.verbose('%s %s need to be updated' % (server, install_repository))
        except:
            # unreadable/corrupt .data: fall through and (re)install
            stdio.exception('')
            stdio.verbose('%s %s need to be installed ' % (server, install_repository))
        stdio.verbose('%s %s installing' % (server, install_repository))
        for file_item in install_file_items:
            file_path = os.path.join(install_repository.repository_dir, file_item.target_path)
            remote_file_path = os.path.join(install_path, file_item.target_path)
            if file_item.type == InstallPlugin.FileItemType.DIR:
                if os.path.isdir(file_path) and not client.put_dir(file_path, remote_file_path):
                    stdio.stop_loading('fail')
                    return False
            else:
                if not client.put_file(file_path, remote_file_path):
                    stdio.stop_loading('fail')
                    return False
        if is_ln_install_mode:
            # save data file for later comparing
            client.put_file(install_repository.data_file_path, remote_repository_data_path)
            # link files to home_path
            install_to_home_path()
        stdio.verbose('%s %s installed' % (server, install_repository.name))
    stdio.stop_loading('succeed')
    # check lib
    lib_check = True
    stdio.start_loading('Remote %s repository lib check' % check_repository)
    for server in servers:
        stdio.verbose('%s %s repository lib check' % (server, check_repository))
        client = clients[server]
        remote_home_path = home_path_map[server]
        need_libs = set()
        client.add_env('LD_LIBRARY_PATH', '%s/lib:' % remote_home_path, True)
        for file_item in check_file_map.values():
            if file_item.type == InstallPlugin.FileItemType.BIN:
                remote_file_path = os.path.join(remote_home_path, file_item.target_path)
                ret = client.execute_command('ldd %s' % remote_file_path)
                # collect "libfoo.so => not found" entries from ldd output
                libs = re.findall('(/?[\w+\-/]+\.\w+[\.\w]+)[\s\\n]*\=\>[\s\\n]*not found', ret.stdout)
                if not libs:
                    libs = re.findall('(/?[\w+\-/]+\.\w+[\.\w]+)[\s\\n]*\=\>[\s\\n]*not found', ret.stderr)
                if not libs and not ret:
                    stdio.error('Failed to execute repository lib check.')
                    return
                need_libs.update(libs)
        if need_libs:
            for lib in need_libs:
                # Bug fix: the original called getattr(stdio, msg_lv, msg),
                # which only FETCHES the logger method (with the message as
                # the unused default) and never invokes it, so missing-lib
                # reports were silently dropped.
                getattr(stdio, msg_lv)('%s %s require: %s' % (server, check_repository, lib))
            lib_check = False
        client.add_env('LD_LIBRARY_PATH', '', True)
    if msg_lv == 'error':
        stdio.stop_loading('succeed' if lib_check else 'fail')
    elif msg_lv == 'warn':
        stdio.stop_loading('succeed' if lib_check else 'warn')
    return plugin_context.return_true(checked=lib_check)
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
from _deploy import RsyncConfig
def rsync(plugin_context, *args, **kwargs):
    """Push every configured runtime-dependency path to all servers.

    Each rsync entry names a local source and a target path relative to the
    server's home_path; absolute targets are first rewritten to be relative.
    Returns return_true() when everything was transferred, return_false()
    otherwise.
    """
    cluster_config = plugin_context.cluster_config
    stdio = plugin_context.stdio
    clients = plugin_context.clients
    rsync_configs = cluster_config.get_rsync_list()
    if not rsync_configs:
        # nothing declared — trivially successful
        return plugin_context.return_true()
    stdio.start_loading("Synchronizing runtime dependencies")
    succeed = True
    # Normalize absolute targets into home-relative paths (mutates entries).
    for entry in rsync_configs:
        target = entry.get(RsyncConfig.TARGET_PATH)
        if os.path.isabs(target):
            entry[RsyncConfig.TARGET_PATH] = os.path.normpath('./' + target)
    sub_io = stdio.sub_io()
    for server in cluster_config.servers:
        server_config = cluster_config.get_server_conf(server)
        client = clients[server]
        home_path = server_config['home_path']
        for entry in rsync_configs:
            source = entry.get(RsyncConfig.SOURCE_PATH)
            target = entry.get(RsyncConfig.TARGET_PATH)
            destination = os.path.join(home_path, target)
            if os.path.isdir(source):
                stdio.verbose('put local dir %s to %s: %s.' % (source, server, target))
                if not client.put_dir(source, destination, stdio=sub_io):
                    stdio.warn('failed to put local dir %s to %s: %s.' % (source, server, target))
                    succeed = False
            elif os.path.exists(source):
                stdio.verbose('put local file %s to %s: %s.' % (source, server, target))
                if not client.put_file(source, destination, stdio=sub_io):
                    stdio.warn('failed to put local file %s to %s: %s.' % (source, server, target))
                    succeed = False
            else:
                # missing sources are skipped, not treated as failures
                stdio.verbose('%s is not found.' % source)
    if succeed:
        stdio.stop_loading("succeed")
        return plugin_context.return_true()
    stdio.stop_loading("fail")
    return plugin_context.return_false()
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
def sync_cluster_config(plugin_context, *args, **kwargs):
    """Mirror settings from a depended OceanBase cluster into this config.

    For the first oceanbase/oceanbase-ce dependency found, derives rs_list
    from one server per zone and copies selected passwords/names into the
    proxy's global config when they are not already set (in memory only,
    save=False).
    """
    cluster_config = plugin_context.cluster_config
    stdio = plugin_context.stdio
    for comp in ['oceanbase', 'oceanbase-ce']:
        if comp not in cluster_config.depends:
            continue
        ob_config = cluster_config.get_depend_config(comp)
        if not ob_config:
            continue
        odp_config = cluster_config.get_global_conf()
        # one representative "ip:mysql_port" per zone (first server wins)
        root_servers = {}
        for server in cluster_config.get_depend_servers(comp):
            config = cluster_config.get_depend_config(comp, server)
            zone = config['zone']
            root_servers.setdefault(zone, '%s:%s' % (server.ip, config['mysql_port']))
        depend_rs_list = ';'.join(root_servers[zone] for zone in root_servers)
        cluster_config.update_global_conf('rs_list', depend_rs_list, save=False)
        # proxy key -> observer key to copy when the proxy side is unset
        config_map = {
            'observer_sys_password': 'proxyro_password',
            'cluster_name': 'appname',
            'observer_root_password': 'root_password'
        }
        for key, ob_key in config_map.items():
            if not odp_config.get(key) and ob_config.get(ob_key):
                stdio.verbose("update config, key: {}, value: {}".format(key, ob_config.get(ob_key)))
                cluster_config.update_global_conf(key, ob_config.get(ob_key), save=False)
        # only the first matching dependency is synchronized
        break
...@@ -30,6 +30,23 @@ def check_opt(plugin_context, opt, *args, **kwargs): ...@@ -30,6 +30,23 @@ def check_opt(plugin_context, opt, *args, **kwargs):
server = opt['test_server'] server = opt['test_server']
obclient_bin = opt['obclient_bin'] obclient_bin = opt['obclient_bin']
mysqltest_bin = opt['mysqltest_bin'] mysqltest_bin = opt['mysqltest_bin']
reboot_retries = opt['reboot_retries']
if int(reboot_retries) <= 0:
stdio.error('invalid reboot-retries')
return
case_filter = opt.get('case_filter')
default_case_filter = './mysql_test/filter.py'
if case_filter is None and os.path.exists(default_case_filter):
stdio.verbose('case-filter not set and {} exists, use it'.format(default_case_filter))
opt['case_filter'] = default_case_filter
case_filter = opt.get('reboot_cases')
default_reboot_case = './mysql_test/rebootcases.py'
if case_filter is None and os.path.exists(default_reboot_case):
stdio.verbose('reboot-cases not set and {} exists, use it'.format(default_reboot_case))
opt['reboot_cases'] = default_reboot_case
if not server: if not server:
stdio.error('test server is None. please use `--test-server` to set') stdio.error('test server is None. please use `--test-server` to set')
...@@ -42,7 +59,7 @@ def check_opt(plugin_context, opt, *args, **kwargs): ...@@ -42,7 +59,7 @@ def check_opt(plugin_context, opt, *args, **kwargs):
if not ret: if not ret:
mysqltest_bin = opt['mysqltest_bin'] = 'mysqltest' mysqltest_bin = opt['mysqltest_bin'] = 'mysqltest'
if not LocalClient.execute_command('%s --help' % mysqltest_bin, stdio=stdio): if not LocalClient.execute_command('%s --help' % mysqltest_bin, stdio=stdio):
stdio.error('%s\n%s is not an executable file. please use `--mysqltest-bin` to set\nYou may not have obclient installed' % (ret.stderr, mysqltest_bin)) stdio.error('%s\n%s is not an executable file. please use `--mysqltest-bin` to set\nYou may not have mysqltest installed' % (ret.stderr, mysqltest_bin))
return return
if 'suite_dir' not in opt or not os.path.exists(opt['suite_dir']): if 'suite_dir' not in opt or not os.path.exists(opt['suite_dir']):
...@@ -55,5 +72,37 @@ def check_opt(plugin_context, opt, *args, **kwargs): ...@@ -55,5 +72,37 @@ def check_opt(plugin_context, opt, *args, **kwargs):
if 'slb' in opt: if 'slb' in opt:
opt['slb_host'], opt['slb_id'] = opt['slb'].split(',') opt['slb_host'], opt['slb_id'] = opt['slb'].split(',')
if 'exclude' in opt and opt['exclude']:
opt['exclude'] = opt['exclude'].split(',')
cluster_config = plugin_context.cluster_config
is_obproxy = opt["component"].startswith("obproxy")
if is_obproxy:
intersection = list({'oceanbase', 'oceanbase-ce'}.intersection(set(cluster_config.depends)))
if not intersection:
stdio.warn('observer config not in the depends.')
return
ob_component = intersection[0]
global_config = cluster_config.get_depend_config(ob_component)
else:
global_config = cluster_config.get_global_conf()
cursor = opt['cursor']
opt['_enable_static_typing_engine'] = None
if '_enable_static_typing_engine' in global_config:
stdio.verbose('load engine from config')
opt['_enable_static_typing_engine'] = global_config['_enable_static_typing_engine']
else:
try:
sql = "select value from oceanbase.__all_virtual_sys_parameter_stat where name like '_enable_static_typing_engine';"
stdio.verbose('execute sql: {}'.format(sql))
cursor.execute(sql)
ret = cursor.fetchone()
stdio.verbose('query engine ret: {}'.format(ret))
if ret:
opt['_enable_static_typing_engine'] = ret.get('value')
except:
stdio.exception('')
stdio.verbose('_enable_static_typing_engine: {}'.format(opt['_enable_static_typing_engine']))
return plugin_context.return_true() return plugin_context.return_true()
...@@ -21,25 +21,93 @@ ...@@ -21,25 +21,93 @@
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
import os import os
import sys
import re
from glob import glob from glob import glob
from mysqltest_lib import case_filter, succtest import tool
from mysqltest_lib.psmallsource import psmall_source from mysqltest_lib import succtest
from mysqltest_lib.psmalltest import psmall_test
def get_variable_from_python_file(file_path, var_name, default_file=None, default_value=None, stdio=None):
    """Execute a python file and return the value it binds to *var_name*.

    If *file_path* cannot be read or executed, falls back to
    ``mysqltest_lib/<default_file>`` next to this plugin (when given);
    if the variable is still undefined, returns *default_value*.

    WARNING: the file content is exec'd verbatim — only pass trusted paths.
    """
    global_vars = {}
    try:
        stdio and stdio.verbose('read variable {} from {}'.format(var_name, file_path))
        # use a context manager so the handle is closed (was leaked before)
        with open(file_path) as f:
            exec(f.read(), global_vars, global_vars)
    except Exception as e:
        stdio and stdio.warn(str(e))
        if default_file:
            try:
                default_path = os.path.join(os.path.dirname(__file__), 'mysqltest_lib', default_file)
                # Bug fix: the fallback branch previously logged file_path
                # again instead of the default_path actually being read.
                stdio and stdio.verbose('read variable {} from {}'.format(var_name, default_path))
                with open(default_path) as f:
                    exec(f.read(), global_vars, global_vars)
            except Exception as ex:
                stdio and stdio.warn(str(ex))
    return global_vars.get(var_name, default_value)
def find_tag_test_with_file_pat(file_pattern, flag_pattern, tag, filelist):
    """Append to *filelist* every test under *file_pattern* carrying *tag*.

    Tests inside a ``test_suite/`` tree are tagged implicitly by their suite
    directory name (``.../<suite>/t/<case>.test``).  Other tests declare tags
    in a header line matching *flag_pattern* (e.g. ``# tags: a, b``) within
    the first 30 lines; matching is case-insensitive.
    """
    for test in glob(file_pattern):
        if "test_suite/" in test:
            # suite tests: the tag is the suite directory name
            if os.path.dirname(test).split('/')[-2] == tag:
                filelist.append(test)
            continue
        test_file = tool.FileUtil.open(test, 'rb')
        try:
            line_num = 0
            line = test_file.readline().decode('utf-8', 'ignore')
            # only scan the file header for a tag declaration
            while line and line_num <= 30:
                line_num += 1
                if re.search(flag_pattern, line):
                    tag_set = line.split(':')[1].split(',')
                    for tag_tmp in tag_set:
                        if tag.lower() == tag_tmp.strip().lower():
                            filelist.append(test)
                line = test_file.readline().decode('utf-8', 'ignore')
        finally:
            # Bug fix: the handle was never closed before, leaking one file
            # descriptor per scanned test.
            test_file.close()
def find_tag_tests(opt, flag_pattern, tags):
    """Return all test files in test_dir/suite_dir matching any of *tags*."""
    matched = []
    for tag in tags:
        # plain tests first, then suite tests — per tag, as before
        patterns = (
            os.path.join(opt['test_dir'], "*.test"),
            os.path.join(opt['suite_dir'], "*/t/*.test"),
        )
        for pattern in patterns:
            find_tag_test_with_file_pat(pattern, flag_pattern, tag, matched)
    return matched
def test_name(test_file):
if "test_suite/" in test_file:
suite_name = os.path.dirname(test_file).split('/')[-2]
base_name = os.path.basename(test_file).rsplit('.')[0]
return suite_name + '.' + base_name
else:
base_name = os.path.basename(test_file).rsplit('.')[0]
return base_name
def check_test(plugin_context, opt, *args, **kwargs): def check_test(plugin_context, opt, *args, **kwargs):
stdio = plugin_context.stdio
cluster_config = plugin_context.cluster_config
tags = []
regress_suites = []
if opt.get('tags'):
tags = opt['tags'].split(',')
if opt.get('regress_suite'):
regress_suites = opt['regress_suite'].split(',')
test_set = [] test_set = []
has_test_point = False has_test_point = False
basename = lambda path: os.path.basename(path) basename = lambda path: os.path.basename(path)
dirname =lambda path: os.path.dirname(path) dirname =lambda path: os.path.dirname(path)
if 'all' in opt and opt['all'] and os.path.isdir(os.path.realpath(opt['suite_dir'])): if 'all' in opt and opt['all'] and os.path.isdir(os.path.realpath(opt['suite_dir'])):
opt['suite'] = ','.join(os.listdir(os.path.realpath(opt['suite_dir']))) opt['suite'] = ','.join(os.listdir(os.path.realpath(opt['suite_dir'])))
if 'psmall' in opt and opt['psmall']: if 'psmall' in opt and opt['psmall']:
test_set = psmall_test test_set = get_variable_from_python_file(
opt['source_limit'] = psmall_source opt.get('psmall_test'), 'psmall_test', default_file='psmalltest.py', default_value=[], stdio=stdio)
opt['source_limit'] = get_variable_from_python_file(
opt.get('psmall_source'), 'psmall_source', default_file='psmallsource.py', default_value={}, stdio=stdio)
has_test_point = True
elif 'suite' not in opt or not opt['suite']: elif 'suite' not in opt or not opt['suite']:
if 'test_set' in opt and opt['test_set']: if 'test_set' in opt and opt['test_set']:
test_set = opt['test_set'].split(',') test_set = opt['test_set'].split(',')
...@@ -64,17 +132,79 @@ def check_test(plugin_context, opt, *args, **kwargs): ...@@ -64,17 +132,79 @@ def check_test(plugin_context, opt, *args, **kwargs):
opt['test_pattern'] = '*.test' opt['test_pattern'] = '*.test'
pat = os.path.join(path, opt['test_pattern']) pat = os.path.join(path, opt['test_pattern'])
test_set_tmp = [suitename + '.' + basename(test).rsplit('.', 1)[0] for test in glob(pat)] test_set_tmp = [suitename + '.' + basename(test).rsplit('.', 1)[0] for test in glob(pat)]
test_set.extend(test_set_tmp) test_set.extend(test_set_tmp)
if "all" in opt and opt["all"]:
pat = os.path.join(opt['test_dir'], "*.test")
test_set_t = [basename(test).rsplit('.', 1)[0] for test in glob(pat)]
test_set.extend(test_set_t)
if opt["cluster_mode"]:
opt["filter"] = opt["cluster_mode"]
else:
opt["filter"] = 'c'
if opt.get("java"):
opt["filter"] = 'j'
if opt.get("ps"):
opt["filter"] = opt["filter"] + 'p'
opt['ps_protocol'] = True
if opt["component"].startswith("obproxy"):
opt["filter"] = 'proxy'
else:
test_zone = cluster_config.get_server_conf(opt['test_server'])['zone']
query = 'select zone, count(*) as a from oceanbase.__all_virtual_zone_stat group by region order by a desc limit 1'
try:
stdio.verbose('execute sql: {}'.format(query))
cursor = opt['cursor']
cursor.execute(query)
ret = cursor.fetchone()
except:
msg = 'execute sql exception: %s' % query
raise Exception(msg)
primary_zone = ret.get('zone', '')
if test_zone != primary_zone:
opt["filter"] = 'slave'
if regress_suites:
suite2tags = get_variable_from_python_file(opt.get('regress_suite_map'), 'suite2tags', default_file='regress_suite_map.py', default_value={}, stdio=stdio)
composite_suite = get_variable_from_python_file(opt.get('regress_suite_map'), 'composite_suite', default_file='regress_suite_map.py', default_value={}, stdio=stdio)
for suitename in regress_suites:
if suitename in composite_suite.keys():
regress_suite_list = composite_suite[suitename].split(',')
else:
regress_suite_list = [suitename]
for name in regress_suite_list:
if name in suite2tags.keys():
if suite2tags[name]:
tags.extend(suite2tags[name].split(','))
else:
tags.append(name)
tags = list(set(tags))
if tags:
stdio.verbose('running mysqltest by tag, all tags: {}'.format(tags))
support_test_tags = get_variable_from_python_file(
opt.get('test_tags'), 'test_tags', default_file='test_tags.py', default_value=[], stdio=stdio)
support_test_tags = list(set(support_test_tags).union(set(os.listdir(os.path.join(opt["suite_dir"])))))
diff_tags = list(set(tags).difference(set(support_test_tags)))
if len(diff_tags) > 0:
stdio.error('%s not in test_tags' % ','.join(diff_tags))
return plugin_context.return_false()
test_set_by_tag = [test_name(test) for test in find_tag_tests(opt, r"#[ \t]*tags[ \t]*:", tags)]
if has_test_point:
test_set = list(set(test_set).intersection(set(test_set_by_tag)))
else:
test_set = list(set(test_set_by_tag))
has_test_point = True
stdio.verbose('filter mode: {}'.format(opt["filter"]))
# exclude somt tests. # exclude somt tests.
if 'exclude' not in opt or not opt['exclude']: if 'exclude' not in opt or not opt['exclude']:
opt['exclude'] = [] opt['exclude'] = []
test_set = filter(lambda k: k not in opt['exclude'], test_set) test_set = filter(lambda k: k not in opt['exclude'], test_set)
if 'filter' in opt and opt['filter']: if 'filter' in opt and opt['filter']:
exclude_list = getattr(case_filter, '%s_list' % opt['filter'], []) if opt.get('case_filter'):
exclude_list = get_variable_from_python_file(opt['case_filter'], var_name='%s_list' % opt['filter'],
default_file='case_filter.py', default_value=[], stdio=stdio)
else:
exclude_list = []
test_set = filter(lambda k: k not in exclude_list, test_set) test_set = filter(lambda k: k not in exclude_list, test_set)
##有all参数时重新排序,保证运行case的顺序 ##有all参数时重新排序,保证运行case的顺序
if 'all' in opt and opt['all'] == 'all': if 'all' in opt and opt['all'] == 'all':
test_set_suite = filter(lambda k: '.' in k, test_set) test_set_suite = filter(lambda k: '.' in k, test_set)
...@@ -86,11 +216,30 @@ def check_test(plugin_context, opt, *args, **kwargs): ...@@ -86,11 +216,30 @@ def check_test(plugin_context, opt, *args, **kwargs):
test_set = filter(lambda k: k not in succtest.succ_filter, test_set) test_set = filter(lambda k: k not in succtest.succ_filter, test_set)
else: else:
test_set = sorted(test_set) test_set = sorted(test_set)
slb_host = opt.get('slb_host')
exec_id = opt.get('exec_id')
use_slb = all([slb_host is not None, exec_id is not None])
slices = opt.get('slices')
slice_idx = opt.get('slice_idx')
use_slices = all([slices is not None, slice_idx is not None])
if not use_slb and use_slices:
slices = int(slices)
slice_idx = int(slice_idx)
test_set = test_set[slice_idx::slices]
if 'mode' in opt and opt['mode'] != 'both':
if opt['mode'] == 'oracle':
not_run = '_mysql'
# test_set = filter(lambda k: not k.endswith(not_run), test_set)
test_set = filter(lambda k: k.endswith('_oracle'), test_set)
if opt['mode'] == 'mysql':
not_run = '_oracle'
test_set = filter(lambda k: not k.endswith(not_run), test_set)
opt['test_set'] = list(set(test_set))
if 'slices' in opt and opt['slices'] and 'slice_idx' in opt and opt['slice_idx']: if opt.get('reboot_cases'):
slices = int(opt['slices']) reboot_cases = get_variable_from_python_file(opt['reboot_cases'], var_name='reboot_cases',
slice_idx = int(opt['slice_idx']) default_file='rebootcases.py', default_value=[], stdio=stdio)
test_set = test_set[slice_idx::slices] opt['reboot_cases'] = list(set(test_set).intersection(set(reboot_cases)))
else:
opt['test_set'] = test_set opt['reboot_cases'] = []
return plugin_context.return_true(test_set=test_set) return plugin_context.return_true(test_set=test_set)
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
def collect_log(plugin_context, env, test_name=None, *args, **kwargs):
    """Collect remote server logs after a mysqltest case run.

    Copies log files (and, for observer nodes, core dumps) from each remote
    server into ``<log_dir>/<test_name>/<ip>:<port>`` on the local host.

    :param plugin_context: plugin runtime context providing ``cluster_config``,
                           ``clients`` and ``stdio``.
    :param env: mysqltest environment dict; reads ``collect_log``,
                ``case_results``, ``log_pattern``, ``log_dir``, ``var_dir``,
                ``component`` and ``collect_components``.
    :param test_name: case name used as the local sub-directory; when None,
                      falls back to the name of the last recorded case result.
    :return: None. Returns early when collection is disabled, the test name
             cannot be determined, or (obproxy) no observer dependency exists.
    """
    cluster_config = plugin_context.cluster_config
    clients = plugin_context.clients
    stdio = plugin_context.stdio
    if not env.get('collect_log', False):
        stdio.verbose('collect_log is False')
        return
    if test_name is None:
        # Fall back to the most recently finished case.
        case_results = env.get('case_results', [])
        if case_results:
            test_name = case_results[-1].get('name')
    if test_name is None:
        stdio.verbose('Undefined: test_name')
        return
    log_pattern = env.get('log_pattern', '*.log')
    if not env.get('log_dir'):
        log_dir = os.path.join(env['var_dir'], 'log')
    else:
        log_dir = env['log_dir']
    is_obproxy = env["component"].startswith("obproxy")
    ob_component = env["component"]
    if is_obproxy:
        # When testing through obproxy, the observer logs live on the servers
        # of the depended oceanbase / oceanbase-ce component.
        intersection = list({'oceanbase', 'oceanbase-ce'}.intersection(set(cluster_config.depends)))
        if not intersection:
            stdio.warn('observer config not in the depends.')
            return
        ob_component = intersection[0]
        ob_services = cluster_config.get_depend_servers(ob_component)
        proxy_services = cluster_config.servers
    else:
        ob_services = cluster_config.servers
        proxy_services = []
    collect_components = env.get('collect_components')
    if not collect_components:
        collect_components = [ob_component]
    else:
        collect_components = collect_components.split(',')
    if ob_component in collect_components:
        for server in ob_services:
            if is_obproxy:
                server_config = cluster_config.get_depend_config(ob_component, server)
            else:
                server_config = cluster_config.get_server_conf(server)
            ip = server.ip
            port = server_config.get('mysql_port', 0)
            client = clients[server]
            home_path = server_config['home_path']
            remote_path = os.path.join(home_path, 'log', log_pattern)
            local_path = os.path.join(log_dir, test_name, '{}:{}'.format(ip, port))
            stdio.start_loading('Collect log for {}'.format(server.name))
            sub_io = stdio.sub_io()
            # Best-effort core dump collection; only the log transfer decides
            # the loading status below.
            client.get_dir(local_path, os.path.join(home_path, 'core.*'), stdio=sub_io)
            if client.get_dir(local_path, remote_path, stdio=sub_io):
                stdio.stop_loading('succeed')
            else:
                stdio.stop_loading('fail')
    if 'obproxy' in collect_components:
        if not is_obproxy:
            stdio.warn('No obproxy detected.')
            return
        for server in proxy_services:
            server_config = cluster_config.get_server_conf(server)
            ip = server.ip
            port = server_config.get('listen_port', 0)
            client = clients[server]
            home_path = server_config['home_path']
            remote_path = os.path.join(home_path, 'log')
            local_path = os.path.join(log_dir, test_name, '{}:{}'.format(ip, port))
            stdio.start_loading('Collect obproxy log for {}'.format(server.name))
            # Route transfer output through a sub IO so it does not clobber the
            # loading animation (consistent with the observer loop above).
            sub_io = stdio.sub_io()
            if client.get_dir(local_path, remote_path, stdio=sub_io):
                stdio.stop_loading('succeed')
            else:
                stdio.stop_loading('fail')
\ No newline at end of file
...@@ -24,12 +24,13 @@ import re ...@@ -24,12 +24,13 @@ import re
import os import os
import time import time
import shlex import shlex
import requests
import urllib
from subprocess import Popen, PIPE from subprocess import Popen, PIPE
from copy import deepcopy from copy import deepcopy
from ssh import LocalClient from ssh import LocalClient
from tool import DirectoryUtil from tool import DirectoryUtil
from _stdio import FormtatText
inner_dir = os.path.split(__file__)[0] inner_dir = os.path.split(__file__)[0]
inner_test_dir = os.path.join(inner_dir, 't') inner_test_dir = os.path.join(inner_dir, 't')
...@@ -39,11 +40,11 @@ inner_suite_dir = os.path.join(inner_dir, 'test_suite') ...@@ -39,11 +40,11 @@ inner_suite_dir = os.path.join(inner_dir, 'test_suite')
class Arguments: class Arguments:
def add(self, k, v=None): def add(self, k, v=None):
self.args.update({k:v}) self.args.update({k: v})
def __str__(self): def __str__(self):
s = [] s = []
for k,v in self.args.items(): for k, v in self.args.items():
if v != None: if v != None:
if re.match('^--\w', k): if re.match('^--\w', k):
s.append(' %s=%s' % (k, v)) s.append(' %s=%s' % (k, v))
...@@ -55,232 +56,391 @@ class Arguments: ...@@ -55,232 +56,391 @@ class Arguments:
def __init__(self, opt): def __init__(self, opt):
self.args = dict() self.args = dict()
if 'connector' in opt and 'java' in opt and opt['java']: if 'connector' in opt and opt.get('java'):
self.add('--connector', opt['connector']) self.add('--connector', opt['connector'])
self.add('--host', opt['host']) self.add('--host', opt['host'])
self.add('--port', opt['port']) self.add('--port', opt['port'])
self.add('--tmpdir', opt['tmp_dir']) self.add('--tmpdir', opt['tmp_dir'])
self.add('--logdir', '%s/log' % opt['var_dir']) if not opt.get('log_dir'):
log_dir = os.path.join(opt['var_dir'], 'log')
else:
log_dir = opt['log_dir']
self.add('--logdir', log_dir)
DirectoryUtil.mkdir(opt['tmp_dir']) DirectoryUtil.mkdir(opt['tmp_dir'])
DirectoryUtil.mkdir('%s/log' % opt['var_dir']) DirectoryUtil.mkdir(log_dir)
self.add('--silent') self.add('--silent')
# our mysqltest doesn't support this option # our mysqltest doesn't support this option
# self.add('--skip-safemalloc') # self.add('--skip-safemalloc')
self.add('--user', 'root') self.add('--user', 'root')
if 'user' in opt and opt['user']: if opt.get('user'):
user = opt['user'] user = opt['user']
if 'connector' not in opt or opt['connector'] == 'ob': if 'connector' not in opt or opt['connector'] == 'ob':
user = user + '@' + opt['case_mode'] user = user + '@' + opt['case_mode']
self.add('--user', user) self.add('--user', user)
if 'password' in opt and opt['password']: if opt.get('password'):
self.add('--password', opt['password']) self.add('--password', opt['password'])
if 'full_user' in opt and opt['full_user']: if opt.get('full_user'):
self.add('--full_username', opt['full_user'].replace('sys',opt['case_mode'])) self.add('--full_username', opt['full_user'].replace('sys', opt['case_mode']))
if 'tenant' in opt and opt['tenant']: if opt.get('tenant'):
self.add('--user', 'root@' + opt['tenant']) self.add('--user', 'root@' + opt['tenant'])
self.add('--password', '') self.add('--password', '')
if 'cluster' in opt and opt['cluster']: if opt.get('cluster'):
self.add('--full_username', 'root@' + opt['tenant'] + '#' + opt['cluster']) self.add('--full_username', 'root@' + opt['tenant'] + '#' + opt['cluster'])
else: else:
self.add('--full_username', 'root@' + opt['tenant']) self.add('--full_username', 'root@' + opt['tenant'])
if 'rslist_url' in opt and opt['rslist_url']: if opt.get('rslist_url'):
self.add('--rslist_url', opt['rslist_url']) self.add('--rslist_url', opt['rslist_url'])
if 'database' in opt and opt['database']: if opt.get('database'):
self.add('--database', opt['database']) self.add('--database', opt['database'])
if 'charsetdsdir' in opt and opt['charsetdsdir']: if opt.get('charsetdsdir'):
self.add('--character-sets-dir', opt['charsetsdir']) self.add('--character-sets-dir', opt['charsetsdir'])
if 'basedir' in opt and opt['basedir']: if opt.get('basedir'):
self.add('--basedir', opt['basedir']) self.add('--basedir', opt['basedir'])
if 'use_px' in opt and opt['use_px']: if opt.get('use_px'):
self.add('--use-px') self.add('--use-px')
if 'force_explain_as_px' in opt and opt['force_explain_as_px']: if opt.get('force_explain_as_px'):
self.add('--force-explain-as-px') self.add('--force-explain-as-px')
if 'force-explain-as-no-px' in opt: if 'force-explain-as-no-px' in opt:
self.add('--force-explain-as-no-px') self.add('--force-explain-as-no-px')
if 'mark_progress' in opt and opt['mark_progress']: if opt.get('mark_progress'):
self.add('--mark-progress') self.add('--mark-progress')
if 'ps_protocol' in opt and opt['ps_protocol']: if opt.get('ps_protocol'):
self.add('--ps-protocol') self.add('--ps-protocol')
if 'sp_protocol' in opt and opt['sp_protocol']: if opt.get('sp_protocol'):
self.add('--sp-protocol') self.add('--sp-protocol')
if 'view_protocol' in opt and opt['view_protocol']: if opt.get('view_protocol'):
self.add('--view-protocol') self.add('--view-protocol')
if 'cursor_protocol' in opt and opt['cursor_protocol']: if opt.get('cursor_protocol'):
self.add('--cursor-protocol') self.add('--cursor-protocol')
if opt.get('special_run'):
self.add('--timer-file', '%s/log/timer' % opt['var_dir']) self.add('--disable-explain')
if opt.get('sp_hint'):
if 'compress' in opt and opt['compress']: self.add('--sp-hint', '"%s"' % opt['sp_hint'])
if opt.get('sort_result'):
self.add('--sort-result')
self.add('--timer-file', os.path.join(log_dir, 'timer'))
if opt.get('compress'):
self.add('--compress') self.add('--compress')
if 'sleep' in opt and opt['sleep']: if opt.get('sleep'):
self.add('--sleep', '%d' % opt['sleep']) self.add('--sleep', '%d' % opt['sleep'])
if 'max_connections' in opt and opt['max_connections']: if opt.get('max_connections'):
self.add('--max-connections', '%d' % opt['max_connections']) self.add('--max-connections', '%d' % opt['max_connections'])
if 'test_file' in opt and opt['test_file']: if opt.get('test_file'):
self.add('--test-file', opt['test_file']) self.add('--test-file', opt['test_file'])
self.add('--tail-lines', ('tail_lines' in opt and opt['tail_lines']) or 20) self.add('--tail-lines', (opt.get('tail_lines')) or 20)
if 'oblog_diff' in opt and opt['oblog_diff']: if opt.get('oblog_diff'):
self.add('--oblog_diff') self.add('--oblog_diff')
if 'record' in opt and opt['record'] and 'record_file' in opt and opt['record_file']: if opt.get('record') and opt.get('record_file'):
self.add('--record') self.add('--record')
self.add('--result-file', opt['record_file']) self.add('--result-file', opt['record_file'])
DirectoryUtil.mkdir(os.path.dirname(opt['record_file']))
else: # diff result & file else: # diff result & file
self.add('--result-file', opt['result_file']) self.add('--result-file', opt['result_file'])
def _return(test, cmd, result): def _return(test, cmd, result):
return {'name' : test, 'ret' : result.code, 'output' : result.stdout, 'cmd' : cmd, 'errput': result.stderr} return {'name': test, 'ret': result.code, 'output': result.stdout, 'cmd': cmd, 'errput': result.stderr}
def slb_request(case_name, exec_id, slb_host, op='lock', stdio=None):
    """Notify the SLB case recorder service about a mysqltest case.

    ``op='lock'`` tries to acquire the distributed lock for *case_name* so
    that only one executor runs it; ``op='success'`` marks the case finished.

    :param case_name: name of the mysqltest case.
    :param exec_id: execution id shared by all executors of this run.
    :param slb_host: host[:port] of the recorder service.
    :param op: operation, one of ``'lock'`` or ``'success'``.
    :param stdio: IO object used for logging.
    :return: True when the recorder acknowledged the request (HTTP 200);
             False otherwise, including any request failure.
    :raises ValueError: when *op* is not a supported operation.
    """
    slb_data = {'eid': exec_id, 'case': case_name}
    slb_msg = {
        'lock': (
            'get lock for case {} successful.',
            'get lock for case {} failed.'),
        'success': (
            'mark successful for case {} successful.',
            'mark successful for case {} failed.')
    }
    # `assert` is stripped under `python -O`; validate explicitly instead.
    if op not in slb_msg:
        raise ValueError('unsupported slb op: {}'.format(op))
    try:
        url = 'http://{slb_host}/farm/mysqltest/recorder/{op}.php'.format(slb_host=slb_host, op=op)
        stdio.verbose('send request: {}, param: {}'.format(url, slb_data))
        resp = requests.get(url, params=slb_data)
        verbose_msg = 'response code: {}, content: {}'.format(resp.status_code, resp.content)
        stdio.verbose(verbose_msg)
        if resp.status_code == 200:
            stdio.verbose(slb_msg[op][0].format(case_name))
            return True
        elif resp.status_code in (202, 300):
            stdio.verbose(slb_msg[op][1].format(case_name))
            return False
        else:
            stdio.warn(slb_msg[op][1].format(case_name) + verbose_msg)
            return False
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; a recorder outage must not kill the whole test run.
        stdio.warn('send request failed')
        stdio.exception('')
        return False
def run_test(plugin_context, env, *args, **kwargs):
def return_true(**kw):
env['run_test_cases'] = run_test_cases
env['index'] = index
env['case_results'] = case_results
env['is_retry'] = is_retry
env['need_reboot'] = need_reboot
env['collect_log'] = collect_log
return plugin_context.return_true(**kw)
def run_test(plugin_context, test, env, *args, **kwargs):
cluster_config = plugin_context.cluster_config cluster_config = plugin_context.cluster_config
clients = plugin_context.clients
stdio = plugin_context.stdio stdio = plugin_context.stdio
stdio.start_loading('Runing case: %s' % test) slb_host = env.get('slb_host')
test_ori = test exec_id = env.get('exec_id')
opt = {} cursor = env.get('cursor')
for key in env: run_test_cases = env.get('run_test_cases', [])
if key != 'cursor': index = env.get('index', 0)
opt[key] = env[key] test_set = env.get('test_set', [])
case_results = env.get('case_results', [])
opt['connector'] = 'ob' auto_retry = env.get('auto_retry')
opt['mysql_mode'] = True is_retry = env.get('is_retry', False)
mysqltest_bin = opt['mysqltest_bin'] if 'mysqltest_bin' in opt and opt['mysqltest_bin'] else 'mysqltest' reboot_cases = env.get('reboot_cases', [])
obclient_bin = opt['obclient_bin'] if 'obclient_bin' in opt and opt['obclient_bin'] else 'obclient' need_reboot = env.get('need_reboot', False)
collect_all = env.get('collect_all', False)
soft = 3600 collect_log = False
buffer = 0
if 'source_limit' in opt and opt['source_limit']: total_test_count = len(test_set)
if test_ori in opt['source_limit']: while index < total_test_count:
soft = opt['source_limit'][test_ori] test = test_set[index]
elif 'g.default' in opt['source_limit']: if test not in run_test_cases:
soft = opt['source_limit']['g.default'] if slb_host and exec_id and not slb_request(test, exec_id=exec_id, slb_host=slb_host, stdio=stdio):
index += 1
if 'g.buffer' in opt['source_limit']: continue
buffer = opt['source_limit']['g.buffer'] run_test_cases.append(test)
case_timeout = soft + buffer if test in reboot_cases:
need_reboot = True
if need_reboot:
opt['filter'] = 'c' need_reboot = False
if 'profile' in args: return return_true(reboot=True)
opt['profile'] = True retry_msg = "in auto retry mode" if is_retry else ""
opt['record'] = True label = FormtatText.info("[ RUN ]")
if 'ps' in args: stdio.start_loading('%sRunning case: %s ( %s / %s ) %s' % (label, test, index+1, total_test_count, retry_msg))
opt['filter'] = opt['filter'] + 'p' test_name = test
opt = {}
if 'cluster-mode' in opt and opt['cluster-mode'] in ['slave', 'proxy']: for key in env:
opt['filter'] = opt['cluster-mode'] if key != 'cursor':
opt[key] = env[key]
# support explain select w/o px hit
# force-explain-xxxx 的结果文件目录为
# - explain_r/mysql
# 其余的结果文件目录为
# - r/mysql
suffix = ''
opt_explain_dir = ''
if 'force-explain-as-px' in opt:
suffix = '.use_px'
opt_explain_dir = 'explain_r/'
elif 'force-explain-as-no-px' in opt:
suffix = '.no_use_px'
opt_explain_dir = 'explain_r/'
opt['case_mode'] = 'mysql'
if 'mode' not in opt:
opt['mode'] = 'both'
if opt['mode'] == 'mysql':
opt['case_mode'] = opt['mode']
if opt['mode'] == 'both':
if test.endswith('_mysql'):
opt['case_mode'] = 'mysql'
get_result_dir = lambda path: os.path.join(path, opt_explain_dir, opt['case_mode'])
opt['result_dir'] = get_result_dir(opt['result_dir'])
if opt['filter'] == 'slave':
opt['slave_cmp'] = 1
result_file = os.path.join(opt['result_dir'], test + suffix + '.slave.result')
if os.path.exists(result_file):
opt['slave_cmp'] = 0
opt['result_file'] = result_file
if len(test.split('.')) == 2:
suite_name, test= test.split('.')
opt['result_dir'] = get_result_dir(os.path.join(opt['suite_dir'], suite_name, 'r'))
opt['test_file'] = os.path.join(opt['suite_dir'], suite_name, 't', test + '.test')
if not os.path.isfile(opt['test_file']):
inner_test_file = os.path.join(inner_suite_dir, suite_name, 't', test + '.test')
if os.path.isfile(inner_test_file):
opt['test_file'] = inner_test_file
opt['result_dir'] = get_result_dir(os.path.join(inner_suite_dir, suite_name, 'r'))
else:
opt['test_file'] = os.path.join(opt['test_dir'], test + '.test')
if not os.path.isfile(opt['test_file']):
inner_test_file = os.path.join(inner_test_dir, test + '.test')
if os.path.isfile(inner_test_file):
opt['test_file'] = inner_test_file
opt['result_dir'] = get_result_dir(inner_result_dir)
opt['record_file'] = os.path.join(opt['result_dir'], test + suffix + '.record')
opt['result_file'] = os.path.join(opt['result_dir'], test + suffix + '.result')
if 'my_host' in opt or 'oracle_host' in opt:
# compare mode
pass
sys_pwd = cluster_config.get_global_conf().get('root_password', '')
exec_sql_cmd = "%s -h%s -P%s -uroot %s -A -Doceanbase -e" % (obclient_bin, opt['host'], opt['port'], ("-p'%s'" % sys_pwd) if sys_pwd else '')
server_engine_cmd = '''%s "select value from __all_virtual_sys_parameter_stat where name like '_enable_static_typing_engine';"''' % exec_sql_cmd
result = LocalClient.execute_command(server_engine_cmd, timeout=3600, stdio=stdio)
if not result:
stdio.error('engine failed, exit code %s. error msg: %s' % (result.code, result.stderr))
env = {
'OBMYSQL_PORT': str(opt['port']),
'OBMYSQL_MS0': str(opt['host']),
'OBMYSQL_PWD': str(opt['password']),
'OBMYSQL_USR': opt['user'],
'PATH': os.getenv('PATH')
}
if 'case_mode' in opt and opt['case_mode']:
env['TENANT'] = opt['case_mode']
if 'user' in opt and opt['user']:
env['OBMYSQL_USR'] = str(opt['user'] + '@' + opt['case_mode'])
else:
env['OBMYSQL_USR'] = 'root'
if 'java' in opt:
opt['connector'] = 'ob'
LocalClient.execute_command('%s "alter system set _enable_static_typing_engine = True;select sleep(2);"' % (exec_sql_cmd), stdio=stdio) opt['connector'] = 'ob'
opt['mysql_mode'] = True
test_file_suffix = opt['test_file_suffix']
result_file_suffix = opt['result_file_suffix']
record_file_suffix = opt['record_file_suffix']
mysqltest_bin = opt.get('mysqltest_bin', 'mysqltest')
obclient_bin = opt.get('obclient_bin', 'obclient')
soft = 3600
buffer = 0
if opt.get('source_limit'):
if test_name in opt['source_limit']:
soft = opt['source_limit'][test_name]
elif 'g.default' in opt['source_limit']:
soft = opt['source_limit']['g.default']
if 'g.buffer' in opt['source_limit']:
buffer = opt['source_limit']['g.buffer']
case_timeout = soft + buffer
if opt.get('case_timeout'):
case_timeout = opt['case_timeout']
# support explain select w/o px hit
# force-explain-xxxx 的结果文件目录为
# - explain_r/mysql
# - explain_r/oracle
# 其余的结果文件目录为
# - r/mysql
# - r/oracle
suffix = ''
opt_explain_dir = ''
if 'force-explain-as-px' in opt:
suffix = '.use_px'
opt_explain_dir = 'explain_r/'
elif 'force-explain-as-no-px' in opt:
suffix = '.no_use_px'
opt_explain_dir = 'explain_r/'
opt['case_mode'] = 'mysql'
if 'mode' not in opt:
opt['mode'] = 'both'
if opt['mode'] == 'mysql' or opt['mode'] == 'oracle':
opt['case_mode'] = opt['mode']
if opt['mode'] == 'both':
if test.endswith('_mysql'):
opt['case_mode'] = 'mysql'
if test.endswith('_oracle'):
opt['case_mode'] = 'oracle'
get_result_dir = lambda path: os.path.join(path, opt_explain_dir, opt['case_mode'])
if len(test.split('.')) == 2:
suite_name, test = test.split('.')
result_dir = get_result_dir(os.path.join(opt['result_dir'], suite_name, 'r'))
if os.path.exists(result_dir):
opt['result_dir'] = result_dir
else:
opt['result_dir'] = get_result_dir(os.path.join(opt['suite_dir'], suite_name, 'r'))
opt['record_dir'] = get_result_dir(os.path.join(opt['record_dir'], suite_name, 'r'))
opt['test_file'] = os.path.join(opt['suite_dir'], suite_name, 't', test + test_file_suffix)
if not os.path.isfile(opt['test_file']):
inner_test_file = os.path.join(inner_suite_dir, suite_name, 't', test + test_file_suffix)
if os.path.isfile(inner_test_file):
opt['test_file'] = inner_test_file
opt['result_dir'] = get_result_dir(os.path.join(inner_suite_dir, suite_name, 'r'))
start_time = time.time() else:
cmd = 'timeout %s %s %s' % (case_timeout, mysqltest_bin, str(Arguments(opt))) opt['test_file'] = os.path.join(opt['test_dir'], test + test_file_suffix)
try: opt['record_dir'] = get_result_dir(os.path.join(opt['record_dir']))
stdio.verbose('local execute: %s ' % cmd, end='') if not os.path.isfile(opt['test_file']):
p = Popen(shlex.split(cmd), env=env, stdout=PIPE, stderr=PIPE) inner_test_file = os.path.join(inner_test_dir, test + test_file_suffix)
output, errput = p.communicate() if os.path.isfile(inner_test_file):
retcode = p.returncode opt['test_file'] = inner_test_file
if retcode == 124: opt['result_dir'] = get_result_dir(inner_result_dir)
output = ''
if 'source_limit' in opt and 'g.buffer' in opt['source_limit']:
errput = "%s secs out of soft limit (%s secs), sql may be hung, please check" % (opt['source_limit']['g.buffer'], case_timeout)
else: else:
errput = "%s seconds timeout, sql may be hung, please check" % case_timeout opt['result_dir'] = get_result_dir(opt['result_dir'])
elif isinstance(errput, bytes): # owner
errput = errput.decode(errors='replace') owner = "anonymous"
except Exception as e: try:
errput = str(e) cmd_t = "grep -E '#\s*owner\s*:' " + opt['test_file'] + " | awk -F':' '{print $2}' | head -n 1"
output = '' p = Popen(cmd_t, stdout=PIPE, stderr=PIPE, shell=True)
retcode = 255 output, errput = p.communicate()
verbose_msg = 'exited code %s' % retcode owner = output.decode("utf-8").strip()
if retcode: except:
verbose_msg += ', error output:\n%s' % errput stdio.verbose("fail open %s" % (opt['test_file']))
stdio.verbose(verbose_msg)
cost = time.time() - start_time opt['record_file'] = os.path.join(opt['record_dir'], test + suffix + record_file_suffix)
opt['result_file'] = os.path.join(opt['result_dir'], test + suffix + result_file_suffix)
LocalClient.execute_command('%s "alter system set _enable_static_typing_engine = False;select sleep(2);"' % (exec_sql_cmd), stdio=stdio) if opt['filter'] == 'slave':
result = {"name" : test_ori, "ret" : retcode, "output" : output, "cmd" : cmd, "errput" : errput, 'cost': cost} opt['slave_cmp'] = 1
stdio.stop_loading('fail' if retcode else 'succeed') result_file = os.path.join(opt['result_dir'], test + suffix + '.slave' + result_file_suffix)
return plugin_context.return_true(result=result) if os.path.exists(result_file):
opt['slave_cmp'] = 0
opt['result_file'] = result_file
if 'my_host' in opt or 'oracle_host' in opt:
# compare mode
pass
sys_pwd = cluster_config.get_global_conf().get('root_password', '')
exec_sql_cmd = "%s -h%s -P%s -uroot %s -A -Doceanbase -e" % (obclient_bin, opt['host'], opt['port'], ("-p'%s'" % sys_pwd) if sys_pwd else '')
server_engine_cmd = '''%s "select value from __all_virtual_sys_parameter_stat where name like '_enable_static_typing_engine';"''' % exec_sql_cmd
result = LocalClient.execute_command(server_engine_cmd, timeout=3600, stdio=stdio)
stdio.verbose('query engine result: {}'.format(result.stdout))
if not result:
stdio.error('engine failed, exit code %s. error msg: %s' % (result.code, result.stderr))
update_env = {
'OBMYSQL_PORT': str(opt['port']),
'OBMYSQL_MS0': str(opt['host']),
'OBMYSQL_PWD': str(opt['password']),
'OBMYSQL_USR': opt['user'],
'PATH': os.getenv('PATH'),
'OBSERVER_DIR': cluster_config.get_server_conf(opt['test_server'])['home_path']
}
test_env = deepcopy(os.environ.copy())
test_env.update(update_env)
if opt.get('case_mode'):
test_env['TENANT'] = opt['case_mode']
if opt.get('user'):
test_env['OBMYSQL_USR'] = str(opt['user'] + '@' + opt['case_mode'])
else:
test_env['OBMYSQL_USR'] = 'root'
if 'java' in opt:
opt['connector'] = 'ob'
if opt['_enable_static_typing_engine'] is not None:
ret = None
try:
sql = "select value from oceanbase.__all_virtual_sys_parameter_stat where name like '_enable_static_typing_engine';"
cursor.execute(sql)
ret = cursor.fetchone()
except:
pass
if ret and str(ret.get('value')).lower() != str(opt['_enable_static_typing_engine']).lower():
LocalClient.execute_command('%s "alter system set _enable_static_typing_engine = %s;select sleep(2);"' % (exec_sql_cmd, opt['_enable_static_typing_engine']), stdio=stdio)
start_time = time.time()
cmd = 'timeout %s %s %s' % (case_timeout, mysqltest_bin, str(Arguments(opt)))
try:
stdio.verbose('local execute: %s ' % cmd)
p = Popen(shlex.split(cmd), env=test_env, stdout=PIPE, stderr=PIPE)
output, errput = p.communicate()
retcode = p.returncode
if retcode == 124:
output = ''
if 'source_limit' in opt and 'g.buffer' in opt['source_limit']:
errput = "%s secs out of soft limit (%s secs), sql may be hung, please check" % (opt['source_limit']['g.buffer'], case_timeout)
else:
errput = "%s seconds timeout, sql may be hung, please check" % case_timeout
elif isinstance(errput, bytes):
errput = errput.decode(errors='replace')
except Exception as e:
errput = str(e)
output = ''
retcode = 255
cost = time.time() - start_time
case_info = "%s %s ( %f s )" % (test_name, owner, cost)
patterns = ['output', 'NAME', 'SORT', 'SCAN', 'LIMIT', 'EXCHANGE', 'GET', 'FUNCTION', 'MERGE', 'JOIN', 'MATERIAL',
'DISTINCT', 'SUBPLAN', 'UNION|ALL', 'EXPRESSION', 'SCALAR', 'HASH', 'VALUES', 'DELETE', 'result',
'reject', '=====', '-------', 'conds', 'output', 'access', 'GROUP', 'DELETE', 'UPDATE', 'INSERT',
'CONNECT', 'nil', 'values', 'COUNT', '^$']
count = 0
# 不处理liboblog的结果对比
if re.search("liboblog_r", errput):
stdio.verbose("do nothing for liboblog")
elif (opt['filter'] == 'slave' and opt['slave_cmp'] == 1) or opt['filter'] == 'j' or opt['filter'] == 'jp':
diff = errput.split('\n')
for line in diff:
match = 0
if re.search(r"^\+", line) or re.search(r"^\-", line):
for pattern in patterns:
if re.search(pattern, line):
match = match + 1
continue
if match == 0:
count = count + 1
break
if count == 0:
# 处理slave/java 模式下result文件不存在的情况
if re.search(r"\+", errput):
stdio.verbose('ignore explain plan diff')
retcode = 0
result = {"name": test_name, "ret": retcode, "output": output, "cmd": cmd, "errput": errput, 'cost': cost}
stdio.stop_loading('fail' if retcode else 'succeed')
stdio.verbose('exited code %s' % retcode)
if retcode:
# verbose_msg += ', error output:\n%s' % errput
stdio.print(errput)
case_status = FormtatText.error("[ FAILED ]")
else:
case_status = FormtatText.success('[ OK ]')
stdio.print("%s%s" % (case_status, case_info))
if retcode == 0 and slb_host and exec_id:
slb_request(test_name, exec_id=exec_id, slb_host=slb_host, op='success', stdio=stdio)
if retcode == 0:
# success
case_results.append(result)
index += 1
is_retry = False
elif is_retry or not auto_retry:
# failed and no chance to retry
case_results.append(result)
index += 1
is_retry = False
need_reboot = True
collect_log = collect_all
else:
# retry
is_retry = True
need_reboot = True
return return_true(finished=True)
...@@ -28,7 +28,7 @@ global_ret = True ...@@ -28,7 +28,7 @@ global_ret = True
def destroy(plugin_context, *args, **kwargs): def destroy(plugin_context, *args, **kwargs):
def clean(server, path): def clean(server, path):
client = clients[server] client = clients[server]
ret = client.execute_command('rm -fr %s/' % (path)) ret = client.execute_command('rm -fr %s/' % (path), timeout=-1)
if not ret: if not ret:
global global_ret global global_ret
global_ret = False global_ret = False
......
...@@ -4,4 +4,5 @@ ...@@ -4,4 +4,5 @@
mode: 755 mode: 755
- src_path: ./home/admin/obagent/conf - src_path: ./home/admin/obagent/conf
target_path: conf target_path: conf
type: dir type: dir
\ No newline at end of file install_method: cp
\ No newline at end of file
...@@ -29,6 +29,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -29,6 +29,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
stdio = plugin_context.stdio stdio = plugin_context.stdio
global_ret = True global_ret = True
force = getattr(plugin_context.options, 'force', False) force = getattr(plugin_context.options, 'force', False)
clean = getattr(plugin_context.options, 'clean', False)
stdio.start_loading('Initializes obagent work home') stdio.start_loading('Initializes obagent work home')
for server in cluster_config.servers: for server in cluster_config.servers:
server_config = cluster_config.get_server_conf(server) server_config = cluster_config.get_server_conf(server)
...@@ -37,7 +38,18 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -37,7 +38,18 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip() remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path) remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
stdio.verbose('%s init cluster work home', server) stdio.verbose('%s init cluster work home', server)
if force: need_clean = force
if clean and not force:
if client.execute_command('bash -c \'if [[ "$(ls -d {0} 2>/dev/null)" != "" && ! -O {0} ]]; then exit 0; else exit 1; fi\''.format(home_path)):
owner = client.execute_command("ls -ld %s | awk '{print $3}'" % home_path).stdout.strip()
global_ret = False
err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner)
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg))
continue
need_clean = True
if need_clean:
client.execute_command("pkill -9 -u `whoami` -f '^%s/bin/monagent -c conf/monagent.yaml'" % home_path)
ret = client.execute_command('rm -fr %s' % home_path) ret = client.execute_command('rm -fr %s' % home_path)
if not ret: if not ret:
global_ret = False global_ret = False
...@@ -55,10 +67,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -55,10 +67,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=home_path))) stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=home_path)))
continue continue
if not (client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib,conf,log}'" % (home_path)) \ if not client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib,conf,log}'" % home_path):
and client.execute_command("cp -r %s/conf %s/" % (remote_repository_dir, home_path)) \
and client.execute_command("if [ -d %s/bin ]; then ln -fs %s/bin/* %s/bin; fi" % (remote_repository_dir, remote_repository_dir, home_path)) \
and client.execute_command("if [ -d %s/lib ]; then ln -fs %s/lib/* %s/lib; fi" % (remote_repository_dir, remote_repository_dir, home_path))):
global_ret = False global_ret = False
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.PATH_ONLY.format(path=home_path))) stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.PATH_ONLY.format(path=home_path)))
......
...@@ -226,7 +226,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -226,7 +226,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
server_config[key] = '' server_config[key] = ''
if isinstance(server_config[key], bool): if isinstance(server_config[key], bool):
server_config[key] = str(server_config[key]).lower() server_config[key] = str(server_config[key]).lower()
if server_config.get('crypto_method', 'plain').lower() == 'aes': if server_config.get('crypto_method', 'plain').lower() == 'aes':
secret_key = generate_aes_b64_key() secret_key = generate_aes_b64_key()
crypto_path = server_config.get('crypto_path', 'conf/.config_secret.key') crypto_path = server_config.get('crypto_path', 'conf/.config_secret.key')
...@@ -247,20 +247,8 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -247,20 +247,8 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
if not client.put_file(tf.name, path.replace(repository_dir, home_path)): if not client.put_file(tf.name, path.replace(repository_dir, home_path)):
stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server)) stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server))
stdio.stop_loading('fail') stdio.stop_loading('fail')
return return
for path in glob(os.path.join(repository_dir, 'conf/*/*')):
if path.endswith('.yaml'):
continue
if os.path.isdir(path):
ret = client.put_dir(path, path.replace(repository_dir, home_path))
else:
ret = client.put_file(path, path.replace(repository_dir, home_path))
if not ret:
stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server))
stdio.stop_loading('fail')
return
config = { config = {
'log': { 'log': {
'level': server_config.get('log_level', 'info'), 'level': server_config.get('log_level', 'info'),
...@@ -287,7 +275,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -287,7 +275,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
if not client.put_file(tf.name, os.path.join(home_path, 'conf/monagent.yaml')): if not client.put_file(tf.name, os.path.join(home_path, 'conf/monagent.yaml')):
stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server)) stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server))
stdio.stop_loading('fail') stdio.stop_loading('fail')
return return
log_path = '%s/log/monagent_stdout.log' % home_path log_path = '%s/log/monagent_stdout.log' % home_path
client.execute_command('cd %s;nohup %s/bin/monagent -c conf/monagent.yaml >> %s 2>&1 & echo $! > %s' % (home_path, home_path, log_path, remote_pid_path)) client.execute_command('cd %s;nohup %s/bin/monagent -c conf/monagent.yaml >> %s 2>&1 & echo $! > %s' % (home_path, home_path, log_path, remote_pid_path))
......
...@@ -38,16 +38,6 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, ...@@ -38,16 +38,6 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
repository_dir = dest_repository.repository_dir repository_dir = dest_repository.repository_dir
kwargs['repository_dir'] = repository_dir kwargs['repository_dir'] = repository_dir
for server in cluster_config.servers:
client = clients[server]
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path))
stop_plugin = search_py_script_plugin([cur_repository], 'stop')[cur_repository] stop_plugin = search_py_script_plugin([cur_repository], 'stop')[cur_repository]
start_plugin = search_py_script_plugin([dest_repository], 'start')[dest_repository] start_plugin = search_py_script_plugin([dest_repository], 'start')[dest_repository]
connect_plugin = search_py_script_plugin([dest_repository], 'connect')[dest_repository] connect_plugin = search_py_script_plugin([dest_repository], 'connect')[dest_repository]
......
...@@ -243,18 +243,6 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -243,18 +243,6 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server)) stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server))
stdio.stop_loading('fail') stdio.stop_loading('fail')
return return
for path in glob(os.path.join(repository_dir, 'conf/*/*')):
if path.endswith('.yaml'):
continue
if os.path.isdir(path):
ret = client.put_dir(path, path.replace(repository_dir, home_path))
else:
ret = client.put_file(path, path.replace(repository_dir, home_path))
if not ret:
stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server))
stdio.stop_loading('fail')
return
config = { config = {
'log': { 'log': {
......
...@@ -28,7 +28,7 @@ global_ret = True ...@@ -28,7 +28,7 @@ global_ret = True
def destroy(plugin_context, *args, **kwargs): def destroy(plugin_context, *args, **kwargs):
def clean(server, path): def clean(server, path):
client = clients[server] client = clients[server]
ret = client.execute_command('rm -fr %s/' % (path)) ret = client.execute_command('rm -fr %s/' % (path), timeout=-1)
if not ret: if not ret:
# pring stderror # pring stderror
global global_ret global global_ret
......
...@@ -28,7 +28,9 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -28,7 +28,9 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
stdio = plugin_context.stdio stdio = plugin_context.stdio
global_ret = True global_ret = True
force = getattr(plugin_context.options, 'force', False) force = getattr(plugin_context.options, 'force', False)
clean = getattr(plugin_context.options, 'clean', False)
stdio.start_loading('Initializes obproxy work home') stdio.start_loading('Initializes obproxy work home')
for server in cluster_config.servers: for server in cluster_config.servers:
server_config = cluster_config.get_server_conf(server) server_config = cluster_config.get_server_conf(server)
client = clients[server] client = clients[server]
...@@ -36,15 +38,25 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -36,15 +38,25 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip() remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path) remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
stdio.verbose('%s init cluster work home', server) stdio.verbose('%s init cluster work home', server)
if force: need_clean = force
if clean and not force:
if client.execute_command('bash -c \'if [[ "$(ls -d {0} 2>/dev/null)" != "" && ! -O {0} ]]; then exit 0; else exit 1; fi\''.format(home_path)):
owner = client.execute_command("ls -ld %s | awk '{print $3}'" % home_path).stdout.strip()
global_ret = False
err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner)
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg))
continue
need_clean = True
if need_clean:
client.execute_command("pkill -9 -u `whoami` -f '^bash {home_path}/obproxyd.sh {home_path} {ip} {port} daemon$'".format(home_path=home_path, ip=server.ip, port=server_config.get('listen_port')))
client.execute_command("pkill -9 -u `whoami` -f '^%s/bin/obproxy --listen_port %s'" % (home_path, server_config.get('listen_port')))
ret = client.execute_command('rm -fr %s' % home_path) ret = client.execute_command('rm -fr %s' % home_path)
if not ret: if not ret:
global_ret = False global_ret = False
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=ret.stderr)) stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=ret.stderr))
continue continue
if not (client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib}'" % (home_path)) \ if not client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib}'" % home_path):
and client.execute_command("if [ -d %s/bin ]; then ln -fs %s/bin/* %s/bin; fi" % (remote_repository_dir, remote_repository_dir, home_path)) \
and client.execute_command("if [ -d %s/lib ]; then ln -fs %s/lib/* %s/lib; fi" % (remote_repository_dir, remote_repository_dir, home_path))):
global_ret = False global_ret = False
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path))) stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path)))
......
...@@ -22,7 +22,7 @@ from __future__ import absolute_import, division, print_function ...@@ -22,7 +22,7 @@ from __future__ import absolute_import, division, print_function
import os import os
import time import time
from copy import deepcopy
stdio = None stdio = None
...@@ -49,7 +49,7 @@ def confirm_port(client, pid, port): ...@@ -49,7 +49,7 @@ def confirm_port(client, pid, port):
def confirm_command(client, pid, command): def confirm_command(client, pid, command):
command = command.replace(' ', '').strip() command = command.replace(' ', '').strip()
if client.execute_command('bash -c \'cmd=`cat /proc/%s/cmdline`; if [ "$cmd" != "%s" ]; then exot 1; fi\'' % (pid, command)): if client.execute_command('bash -c \'cmd=`cat /proc/%s/cmdline`; if [ "$cmd" != "%s" ]; then exit 1; fi\'' % (pid, command)):
return True return True
return False return False
...@@ -86,6 +86,26 @@ def obproxyd(home_path, client, ip, port): ...@@ -86,6 +86,26 @@ def obproxyd(home_path, client, ip, port):
return False return False
class EnvVariables(object):
def __init__(self, environments, client):
self.environments = environments
self.client = client
self.env_done = {}
def __enter__(self):
for env_key, env_value in self.environments.items():
self.env_done[env_key] = self.client.get_env(env_key)
self.client.add_env(env_key, env_value, True)
def __exit__(self, *args, **kwargs):
for env_key, env_value in self.env_done.items():
if env_value is not None:
self.client.add_env(env_key, env_value, True)
else:
self.client.del_env(env_key)
def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False, *args, **kwargs): def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False, *args, **kwargs):
global stdio global stdio
cluster_config = plugin_context.cluster_config cluster_config = plugin_context.cluster_config
...@@ -152,13 +172,6 @@ def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False, ...@@ -152,13 +172,6 @@ def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False,
server_config = cluster_config.get_server_conf(server) server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path'] home_path = server_config['home_path']
if client.execute_command("bash -c 'if [ -f %s/bin/obproxy ]; then exit 1; else exit 0; fi;'" % home_path):
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path))
pid_path[server] = "%s/run/obproxy-%s-%s.pid" % (home_path, server.ip, server_config["listen_port"]) pid_path[server] = "%s/run/obproxy-%s-%s.pid" % (home_path, server.ip, server_config["listen_port"])
if use_parameter: if use_parameter:
...@@ -187,6 +200,7 @@ def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False, ...@@ -187,6 +200,7 @@ def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False,
clusters_cmd[server] = 'cd %s; %s' % (home_path, real_cmd[server]) clusters_cmd[server] = 'cd %s; %s' % (home_path, real_cmd[server])
for server in clusters_cmd: for server in clusters_cmd:
environments = deepcopy(cluster_config.get_environments())
client = clients[server] client = clients[server]
server_config = cluster_config.get_server_conf(server) server_config = cluster_config.get_server_conf(server)
port = int(server_config["listen_port"]) port = int(server_config["listen_port"])
...@@ -204,9 +218,10 @@ def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False, ...@@ -204,9 +218,10 @@ def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False,
return plugin_context.return_false() return plugin_context.return_false()
stdio.verbose('starting %s obproxy', server) stdio.verbose('starting %s obproxy', server)
client.add_env('LD_LIBRARY_PATH', '%s/lib:' % server_config['home_path'], True) if 'LD_LIBRARY_PATH' not in environments:
ret = client.execute_command(clusters_cmd[server]) environments['LD_LIBRARY_PATH'] = '%s/lib:' % server_config['home_path']
client.add_env('LD_LIBRARY_PATH', '', True) with EnvVariables(environments, client):
ret = client.execute_command(clusters_cmd[server])
if not ret: if not ret:
stdio.stop_loading('fail') stdio.stop_loading('fail')
stdio.error('failed to start %s obproxy: %s' % (server, ret.stderr)) stdio.error('failed to start %s obproxy: %s' % (server, ret.stderr))
......
...@@ -38,16 +38,6 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, ...@@ -38,16 +38,6 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
repository_dir = dest_repository.repository_dir repository_dir = dest_repository.repository_dir
kwargs['repository_dir'] = repository_dir kwargs['repository_dir'] = repository_dir
for server in cluster_config.servers:
client = clients[server]
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path))
stop_plugin = search_py_script_plugin([cur_repository], 'stop')[cur_repository] stop_plugin = search_py_script_plugin([cur_repository], 'stop')[cur_repository]
start_plugin = search_py_script_plugin([dest_repository], 'start')[dest_repository] start_plugin = search_py_script_plugin([dest_repository], 'start')[dest_repository]
connect_plugin = search_py_script_plugin([dest_repository], 'connect')[dest_repository] connect_plugin = search_py_script_plugin([dest_repository], 'connect')[dest_repository]
......
...@@ -29,11 +29,13 @@ from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN ...@@ -29,11 +29,13 @@ from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN
def parse_size(size): def parse_size(size):
_bytes = 0 _bytes = 0
if isinstance(size, str):
size = size.strip()
if not isinstance(size, str) or size.isdigit(): if not isinstance(size, str) or size.isdigit():
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) match = re.match(r'^([1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
...@@ -59,6 +61,16 @@ def create_tenant(plugin_context, cursor, *args, **kwargs): ...@@ -59,6 +61,16 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
if not value: if not value:
value = default value = default
return value return value
def get_parsed_option(key, default=''):
value = get_option(key=key, default=default)
try:
parsed_value = parse_size(value)
except:
stdio.exception("")
raise Exception("Invalid option {}: {}".format(key, value))
return parsed_value
def error(*arg, **kwargs): def error(*arg, **kwargs):
stdio.error(*arg, **kwargs) stdio.error(*arg, **kwargs)
stdio.stop_loading('fail') stdio.stop_loading('fail')
...@@ -70,6 +82,11 @@ def create_tenant(plugin_context, cursor, *args, **kwargs): ...@@ -70,6 +82,11 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
stdio = plugin_context.stdio stdio = plugin_context.stdio
options = plugin_context.options options = plugin_context.options
mode = get_option('mode', 'mysql').lower()
if not mode in ['mysql', 'oracle']:
error('No such tenant mode: %s.\n--mode must be `mysql` or `oracle`' % mode)
return
name = get_option('tenant_name', 'test') name = get_option('tenant_name', 'test')
unit_name = '%s_unit' % name unit_name = '%s_unit' % name
pool_name = '%s_pool' % name pool_name = '%s_pool' % name
...@@ -153,7 +170,7 @@ def create_tenant(plugin_context, cursor, *args, **kwargs): ...@@ -153,7 +170,7 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
exception('execute sql exception: %s' % sql) exception('execute sql exception: %s' % sql)
return return
units_id = set() units_id = {}
res = cursor.fetchall() res = cursor.fetchall()
for row in res: for row in res:
if str(row['name']) == unit_name: if str(row['name']) == unit_name:
...@@ -162,7 +179,8 @@ def create_tenant(plugin_context, cursor, *args, **kwargs): ...@@ -162,7 +179,8 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
continue continue
for zone in str(row['zone_list']).replace(';', ',').split(','): for zone in str(row['zone_list']).replace(';', ',').split(','):
if zone in zones: if zone in zones:
units_id.add(row['unit_config_id']) unit_config_id = row['unit_config_id']
units_id[unit_config_id] = units_id.get(unit_config_id, 0) + 1
break break
sql = 'select * from oceanbase.__all_unit_config order by name' sql = 'select * from oceanbase.__all_unit_config order by name'
...@@ -178,8 +196,8 @@ def create_tenant(plugin_context, cursor, *args, **kwargs): ...@@ -178,8 +196,8 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
if str(row['name']) == unit_name: if str(row['name']) == unit_name:
unit_name += '1' unit_name += '1'
if row['unit_config_id'] in units_id: if row['unit_config_id'] in units_id:
cpu_total -= row['max_cpu'] cpu_total -= row['max_cpu'] * units_id[row['unit_config_id']]
mem_total -= row['max_memory'] mem_total -= row['max_memory'] * units_id[row['unit_config_id']]
# disk_total -= row['max_disk_size'] # disk_total -= row['max_disk_size']
MIN_CPU = 2 MIN_CPU = 2
...@@ -194,13 +212,18 @@ def create_tenant(plugin_context, cursor, *args, **kwargs): ...@@ -194,13 +212,18 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
if disk_total < MIN_DISK_SIZE: if disk_total < MIN_DISK_SIZE:
return error('%s: resource not enough: disk space less than %s' % (zone_list, format_size(MIN_DISK_SIZE))) return error('%s: resource not enough: disk space less than %s' % (zone_list, format_size(MIN_DISK_SIZE)))
try:
max_memory = get_parsed_option('max_memory', mem_total)
max_disk_size = get_parsed_option('max_disk_size', disk_total)
min_memory = get_parsed_option('min_memory', max_memory)
except Exception as e:
error(e)
return
max_cpu = get_option('max_cpu', cpu_total) max_cpu = get_option('max_cpu', cpu_total)
max_memory = parse_size(get_option('max_memory', mem_total))
max_iops = get_option('max_iops', MIN_IOPS) max_iops = get_option('max_iops', MIN_IOPS)
max_disk_size = parse_size(get_option('max_disk_size', disk_total))
max_session_num = get_option('max_session_num', MIN_SESSION_NUM) max_session_num = get_option('max_session_num', MIN_SESSION_NUM)
min_cpu = get_option('min_cpu', max_cpu) min_cpu = get_option('min_cpu', max_cpu)
min_memory = parse_size(get_option('min_memory', max_memory))
min_iops = get_option('min_iops', max_iops) min_iops = get_option('min_iops', max_iops)
if cpu_total < max_cpu: if cpu_total < max_cpu:
...@@ -258,7 +281,7 @@ def create_tenant(plugin_context, cursor, *args, **kwargs): ...@@ -258,7 +281,7 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
stdio.verbose('execute sql: %s' % sql) stdio.verbose('execute sql: %s' % sql)
cursor.execute(sql) cursor.execute(sql)
except: except:
exception('faild to crate pool, execute sql exception: %s' % sql) exception('failed to create pool, execute sql exception: %s' % sql)
return return
# create tenant # create tenant
...@@ -274,8 +297,12 @@ def create_tenant(plugin_context, cursor, *args, **kwargs): ...@@ -274,8 +297,12 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
sql += ", default tablegroup ='%s'" % tablegroup sql += ", default tablegroup ='%s'" % tablegroup
if locality: if locality:
sql += ", locality = '%s'" % locality sql += ", locality = '%s'" % locality
set_mode = "ob_compatibility_mode = '%s'" % mode
if variables: if variables:
sql += "set %s" % variables sql += "set %s, %s" % (variables, set_mode)
else:
sql += "set %s" % set_mode
try: try:
stdio.verbose('execute sql: %s' % sql) stdio.verbose('execute sql: %s' % sql)
cursor.execute(sql) cursor.execute(sql)
......
...@@ -28,7 +28,7 @@ global_ret = True ...@@ -28,7 +28,7 @@ global_ret = True
def destroy(plugin_context, *args, **kwargs): def destroy(plugin_context, *args, **kwargs):
def clean(server, path): def clean(server, path):
client = clients[server] client = clients[server]
ret = client.execute_command('rm -fr %s/' % (path)) ret = client.execute_command('rm -fr %s/' % (path), timeout=-1)
if not ret: if not ret:
# print stderror # print stderror
global global_ret global global_ret
......
...@@ -4,4 +4,5 @@ ...@@ -4,4 +4,5 @@
mode: 755 mode: 755
- src_path: ./home/admin/oceanbase/etc - src_path: ./home/admin/oceanbase/etc
target_path: etc target_path: etc
type: dir type: dir
\ No newline at end of file install_method: cp
\ No newline at end of file
...@@ -34,6 +34,7 @@ def critical(*arg, **kwargs): ...@@ -34,6 +34,7 @@ def critical(*arg, **kwargs):
global_ret = False global_ret = False
stdio.error(*arg, **kwargs) stdio.error(*arg, **kwargs)
def init_dir(server, client, key, path, link_path=None): def init_dir(server, client, key, path, link_path=None):
if force: if force:
ret = client.execute_command('rm -fr %s' % path) ret = client.execute_command('rm -fr %s' % path)
...@@ -66,6 +67,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -66,6 +67,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
stdio = plugin_context.stdio stdio = plugin_context.stdio
servers_dirs = {} servers_dirs = {}
force = getattr(plugin_context.options, 'force', False) force = getattr(plugin_context.options, 'force', False)
clean = getattr(plugin_context.options, 'clean', False)
stdio.verbose('option `force` is %s' % force) stdio.verbose('option `force` is %s' % force)
stdio.start_loading('Initializes observer work home') stdio.start_loading('Initializes observer work home')
for server in cluster_config.servers: for server in cluster_config.servers:
...@@ -102,9 +104,20 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -102,9 +104,20 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
'server': server, 'server': server,
'key': key, 'key': key,
} }
stdio.verbose('%s initializes observer work home' % server) stdio.verbose('%s initializes observer work home' % server)
if force: need_clean = force
if clean and not force:
if client.execute_command('bash -c \'if [[ "$(ls -d {0} 2>/dev/null)" != "" && ! -O {0} ]]; then exit 0; else exit 1; fi\''.format(home_path)):
owner = client.execute_command("ls -ld %s | awk '{print $3}'" % home_path).stdout.strip()
err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner)
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg))
continue
need_clean = True
if need_clean:
client.execute_command(
"pkill -9 -u `whoami` -f '^%s/bin/observer -p %s'" % (home_path, server_config['mysql_port']))
ret = client.execute_command('rm -fr %s/*' % home_path) ret = client.execute_command('rm -fr %s/*' % home_path)
if not ret: if not ret:
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=ret.stderr)) critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=ret.stderr))
...@@ -117,12 +130,10 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -117,12 +130,10 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
continue continue
else: else:
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=home_path))) critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=home_path)))
ret = client.execute_command('bash -c "mkdir -p %s/{etc,admin,.conf,log,bin,lib}"' % home_path) \ ret = client.execute_command('bash -c "mkdir -p %s/{etc,admin,.conf,log,bin,lib}"' % home_path)
and client.execute_command("if [ -d %s/bin ]; then ln -fs %s/bin/* %s/bin; fi" % (remote_repository_dir, remote_repository_dir, home_path)) \
and client.execute_command("if [ -d %s/lib ]; then ln -fs %s/lib/* %s/lib; fi" % (remote_repository_dir, remote_repository_dir, home_path))
if ret: if ret:
data_path = server_config['data_dir'] data_path = server_config['data_dir']
if force: if need_clean:
ret = client.execute_command('rm -fr %s/*' % data_path) ret = client.execute_command('rm -fr %s/*' % data_path)
if not ret: if not ret:
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=data_path))) critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=data_path)))
...@@ -165,7 +176,6 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -165,7 +176,6 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.PATH_ONLY.format(path=data_path))) critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.PATH_ONLY.format(path=data_path)))
else: else:
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=home_path))) critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=home_path)))
if global_ret: if global_ret:
stdio.stop_loading('succeed') stdio.stop_loading('succeed')
plugin_context.return_true() plugin_context.return_true()
......
...@@ -28,6 +28,8 @@ from copy import deepcopy ...@@ -28,6 +28,8 @@ from copy import deepcopy
from _errno import EC_OBSERVER_FAIL_TO_START from _errno import EC_OBSERVER_FAIL_TO_START
from collections import OrderedDict
def config_url(ocp_config_server, appname, cid): def config_url(ocp_config_server, appname, cid):
cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname) cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname)
...@@ -57,7 +59,27 @@ def init_config_server(ocp_config_server, appname, cid, force_delete, stdio): ...@@ -57,7 +59,27 @@ def init_config_server(ocp_config_server, appname, cid, force_delete, stdio):
if post(register_to_config_url) != 200: if post(register_to_config_url) != 200:
return False return False
return cfg_url return cfg_url
class EnvVariables(object):
def __init__(self, environments, client):
self.environments = environments
self.client = client
self.env_done = {}
def __enter__(self):
for env_key, env_value in self.environments.items():
self.env_done[env_key] = self.client.get_env(env_key)
self.client.add_env(env_key, env_value, True)
def __exit__(self, *args, **kwargs):
for env_key, env_value in self.env_done.items():
if env_value is not None:
self.client.add_env(env_key, env_value, True)
else:
self.client.del_env(env_key)
def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
cluster_config = plugin_context.cluster_config cluster_config = plugin_context.cluster_config
...@@ -75,12 +97,12 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -75,12 +97,12 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
if obconfig_url: if obconfig_url:
if not appname or not cluster_id: if not appname or not cluster_id:
stdio.error('need appname and cluster_id') stdio.error('need appname and cluster_id')
return return
try: try:
cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio) cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio)
if not cfg_url: if not cfg_url:
stdio.error('failed to register cluster. %s may have been registered in %s.' % (appname, obconfig_url)) stdio.error('failed to register cluster. %s may have been registered in %s.' % (appname, obconfig_url))
return return
except: except:
stdio.exception('failed to register cluster') stdio.exception('failed to register cluster')
return return
...@@ -98,19 +120,12 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -98,19 +120,12 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
server_config = cluster_config.get_server_conf(server) server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path'] home_path = server_config['home_path']
if client.execute_command("bash -c 'if [ -f %s/bin/observer ]; then exit 1; else exit 0; fi;'" % home_path):
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path))
if not server_config.get('data_dir'): if not server_config.get('data_dir'):
server_config['data_dir'] = '%s/store' % home_path server_config['data_dir'] = '%s/store' % home_path
if client.execute_command('ls %s/ilog/' % server_config['data_dir']).stdout.strip(): if client.execute_command('ls %s/ilog/' % server_config['data_dir']).stdout.strip():
need_bootstrap = False need_bootstrap = False
remote_pid_path = '%s/run/observer.pid' % home_path remote_pid_path = '%s/run/observer.pid' % home_path
remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip() remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip()
if remote_pid: if remote_pid:
...@@ -125,10 +140,10 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -125,10 +140,10 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
cmd = [] cmd = []
if use_parameter: if use_parameter:
not_opt_str = { not_opt_str = OrderedDict({
'zone': '-z',
'mysql_port': '-p', 'mysql_port': '-p',
'rpc_port': '-P', 'rpc_port': '-P',
'zone': '-z',
'nodaemon': '-N', 'nodaemon': '-N',
'appname': '-n', 'appname': '-n',
'cluster_id': '-c', 'cluster_id': '-c',
...@@ -138,9 +153,9 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -138,9 +153,9 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
'ipv6': '-6', 'ipv6': '-6',
'mode': '-m', 'mode': '-m',
'scn': '-f' 'scn': '-f'
} })
not_cmd_opt = [ not_cmd_opt = [
'home_path', 'obconfig_url', 'root_password', 'proxyro_password', 'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc' 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc'
] ]
get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
...@@ -153,21 +168,25 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): ...@@ -153,21 +168,25 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
opt_str.append('obconfig_url=\'%s\'' % cfg_url) opt_str.append('obconfig_url=\'%s\'' % cfg_url)
else: else:
cmd.append(rs_list_opt) cmd.append(rs_list_opt)
cmd.append('-o %s' % ','.join(opt_str))
for key in not_opt_str: for key in not_opt_str:
if key in server_config: if key in server_config:
value = get_value(key) value = get_value(key)
cmd.append('%s %s' % (not_opt_str[key], value)) cmd.append('%s %s' % (not_opt_str[key], value))
cmd.append('-o %s' % ','.join(opt_str))
else:
cmd.append('-p %s' % server_config['mysql_port'])
clusters_cmd[server] = 'cd %s; %s/bin/observer %s' % (home_path, home_path, ' '.join(cmd)) clusters_cmd[server] = 'cd %s; %s/bin/observer %s' % (home_path, home_path, ' '.join(cmd))
for server in clusters_cmd: for server in clusters_cmd:
environments = deepcopy(cluster_config.get_environments())
client = clients[server] client = clients[server]
server_config = cluster_config.get_server_conf(server) server_config = cluster_config.get_server_conf(server)
stdio.verbose('starting %s observer', server) stdio.verbose('starting %s observer', server)
client.add_env('LD_LIBRARY_PATH', '%s/lib:' % server_config['home_path'], True) if 'LD_LIBRARY_PATH' not in environments:
ret = client.execute_command(clusters_cmd[server]) environments['LD_LIBRARY_PATH'] = '%s/lib:' % server_config['home_path']
client.add_env('LD_LIBRARY_PATH', '', True) with EnvVariables(environments, client):
ret = client.execute_command(clusters_cmd[server])
if not ret: if not ret:
stdio.stop_loading('fail') stdio.stop_loading('fail')
stdio.error(EC_OBSERVER_FAIL_TO_START.format(server=server) + ': ' + ret.stderr) stdio.error(EC_OBSERVER_FAIL_TO_START.format(server=server) + ': ' + ret.stderr)
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
import json
import time import time
import requests import requests
...@@ -29,9 +28,11 @@ def config_url(ocp_config_server, appname, cid): ...@@ -29,9 +28,11 @@ def config_url(ocp_config_server, appname, cid):
cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname) cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname)
proxy_cfg_url = '%s&Action=GetObProxyConfig&ObRegionGroup=%s' % (ocp_config_server, appname) proxy_cfg_url = '%s&Action=GetObProxyConfig&ObRegionGroup=%s' % (ocp_config_server, appname)
# 清除集群URL内容命令 # 清除集群URL内容命令
cleanup_config_url_content = '%s&Action=DeleteObRootServiceInfoByClusterName&ClusterName=%s' % (ocp_config_server, appname) cleanup_config_url_content = '%s&Action=DeleteObRootServiceInfoByClusterName&ClusterName=%s' % (
ocp_config_server, appname)
# 注册集群信息到Config URL命令 # 注册集群信息到Config URL命令
register_to_config_url = '%s&Action=ObRootServiceRegister&ObCluster=%s&ObClusterId=%s' % (ocp_config_server, appname, cid) register_to_config_url = '%s&Action=ObRootServiceRegister&ObCluster=%s&ObClusterId=%s' % (
ocp_config_server, appname, cid)
return cfg_url, cleanup_config_url_content, register_to_config_url return cfg_url, cleanup_config_url_content, register_to_config_url
...@@ -40,7 +41,7 @@ def get_port_socket_inode(client, port): ...@@ -40,7 +41,7 @@ def get_port_socket_inode(client, port):
cmd = "bash -c 'cat /proc/net/{tcp,udp}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port cmd = "bash -c 'cat /proc/net/{tcp,udp}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port
res = client.execute_command(cmd) res = client.execute_command(cmd)
if not res or not res.stdout.strip(): if not res or not res.stdout.strip():
return False return []
return res.stdout.strip().split('\n') return res.stdout.strip().split('\n')
...@@ -62,7 +63,6 @@ def stop(plugin_context, *args, **kwargs): ...@@ -62,7 +63,6 @@ def stop(plugin_context, *args, **kwargs):
clients = plugin_context.clients clients = plugin_context.clients
stdio = plugin_context.stdio stdio = plugin_context.stdio
global_config = cluster_config.get_global_conf() global_config = cluster_config.get_global_conf()
global_config = cluster_config.get_global_conf()
appname = global_config['appname'] if 'appname' in global_config else None appname = global_config['appname'] if 'appname' in global_config else None
cluster_id = global_config['cluster_id'] if 'cluster_id' in global_config else None cluster_id = global_config['cluster_id'] if 'cluster_id' in global_config else None
obconfig_url = global_config['obconfig_url'] if 'obconfig_url' in global_config else None obconfig_url = global_config['obconfig_url'] if 'obconfig_url' in global_config else None
...@@ -118,6 +118,14 @@ def stop(plugin_context, *args, **kwargs): ...@@ -118,6 +118,14 @@ def stop(plugin_context, *args, **kwargs):
servers = tmp_servers servers = tmp_servers
count -= 1 count -= 1
if count and servers: if count and servers:
if count == 5:
for server in servers:
data = servers[server]
server_config = cluster_config.get_server_conf(server)
client = clients[server]
client.execute_command(
"if [[ -d /proc/%s ]]; then pkill -9 -u `whoami` -f '%s/bin/observer -p %s';fi" %
(data['pid'], server_config['home_path'], server_config['mysql_port']))
time.sleep(3) time.sleep(3)
if servers: if servers:
......
...@@ -126,7 +126,7 @@ class Exector(object): ...@@ -126,7 +126,7 @@ class Exector(object):
class Upgrader(object): class Upgrader(object):
def __init__(self, plugin_context, search_py_script_plugin, apply_param_plugin, upgrade_ctx, upgrade_repositories, local_home_path, exector_path): def __init__(self, plugin_context, search_py_script_plugin, apply_param_plugin, upgrade_ctx, upgrade_repositories, local_home_path, exector_path, install_repository_to_servers, unuse_lib_repository):
self._search_py_script_plugin = search_py_script_plugin self._search_py_script_plugin = search_py_script_plugin
self.apply_param_plugin = apply_param_plugin self.apply_param_plugin = apply_param_plugin
self.plugin_context = plugin_context self.plugin_context = plugin_context
...@@ -138,6 +138,8 @@ class Upgrader(object): ...@@ -138,6 +138,8 @@ class Upgrader(object):
self._start_plugin = None self._start_plugin = None
self._stop_plugin = None self._stop_plugin = None
self._display_plugin = None self._display_plugin = None
self.install_repository_to_servers = install_repository_to_servers
self.unuse_lib_repository = unuse_lib_repository
self.local_home_path = local_home_path self.local_home_path = local_home_path
self.exector_path = exector_path self.exector_path = exector_path
self.components = plugin_context.components self.components = plugin_context.components
...@@ -386,18 +388,6 @@ class Upgrader(object): ...@@ -386,18 +388,6 @@ class Upgrader(object):
time.sleep(3) time.sleep(3)
return True return True
def _replace_repository(self, servers, repository):
repository_dir = repository.repository_dir
for server in servers:
client = self.clients[server]
server_config = self.cluster_config.get_server_conf(server)
home_path = server_config['home_path']
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(self.local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path))
def upgrade_zone(self): def upgrade_zone(self):
zones_servers = {} zones_servers = {}
for server in self.cluster_config.servers: for server in self.cluster_config.servers:
...@@ -426,7 +416,8 @@ class Upgrader(object): ...@@ -426,7 +416,8 @@ class Upgrader(object):
self.stdio.start_loading('Upgrade') self.stdio.start_loading('Upgrade')
repository = self.repositories[self.next_stage] repository = self.repositories[self.next_stage]
repository_dir = repository.repository_dir repository_dir = repository.repository_dir
self._replace_repository(self.cluster_config.servers, repository) self.install_repository_to_servers(self.components, self.cluster_config, repository, self.clients,
self.unuse_lib_repository)
if not self.stop_plugin(self.components, self.clients, self.cluster_config, self.plugin_context.cmd, self.plugin_context.options, self.stdio): if not self.stop_plugin(self.components, self.clients, self.cluster_config, self.plugin_context.cmd, self.plugin_context.options, self.stdio):
self.stdio.stop_loading('stop_loading', 'fail') self.stdio.stop_loading('stop_loading', 'fail')
...@@ -473,7 +464,8 @@ class Upgrader(object): ...@@ -473,7 +464,8 @@ class Upgrader(object):
self.stop_zone(zone) self.stop_zone(zone)
self.stdio.print('upgrade zone "%s"' % zone) self.stdio.print('upgrade zone "%s"' % zone)
self._replace_repository(self.cluster_config.servers, repository) self.install_repository_to_servers(self.components, self.cluster_config, repository, self.clients, self.unuse_lib_repository)
if pre_zone: if pre_zone:
self.apply_param_plugin(self.repositories[self.route_index - 1]) self.apply_param_plugin(self.repositories[self.route_index - 1])
...@@ -524,7 +516,7 @@ class Upgrader(object): ...@@ -524,7 +516,7 @@ class Upgrader(object):
return self._exec_script_dest_only('upgrade_post_checker.py') return self._exec_script_dest_only('upgrade_post_checker.py')
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs): def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, unuse_lib_repository, *args, **kwargs):
components = plugin_context.components components = plugin_context.components
clients = plugin_context.clients clients = plugin_context.clients
cluster_config = plugin_context.cluster_config cluster_config = plugin_context.cluster_config
...@@ -535,9 +527,18 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, ...@@ -535,9 +527,18 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
upgrade_ctx = kwargs.get('upgrade_ctx') upgrade_ctx = kwargs.get('upgrade_ctx')
local_home_path = kwargs.get('local_home_path') local_home_path = kwargs.get('local_home_path')
upgrade_repositories = kwargs.get('upgrade_repositories') upgrade_repositories = kwargs.get('upgrade_repositories')
exector_path = getattr(options, 'exector_path', '/usr/obd/lib/executer') exector_path = getattr(options, 'executer_path', '/usr/obd/lib/executer')
upgrader = Upgrader(plugin_context, search_py_script_plugin, apply_param_plugin, upgrade_ctx, upgrade_repositories, local_home_path, exector_path) upgrader = Upgrader(
plugin_context=plugin_context,
search_py_script_plugin=search_py_script_plugin,
apply_param_plugin=apply_param_plugin,
upgrade_ctx=upgrade_ctx,
upgrade_repositories=upgrade_repositories,
local_home_path=local_home_path,
exector_path=exector_path,
install_repository_to_servers=install_repository_to_servers,
unuse_lib_repository=unuse_lib_repository)
if upgrader.run(): if upgrader.run():
if upgrader.route_index >= len(upgrader.route): if upgrader.route_index >= len(upgrader.route):
upgrader.display_plugin(components, clients, cluster_config, cmd, options, stdio, upgrader.cursor, *args, **kwargs) upgrader.display_plugin(components, clients, cluster_config, cmd, options, stdio, upgrader.cursor, *args, **kwargs)
......
...@@ -212,7 +212,7 @@ def pre_test(plugin_context, cursor, odp_cursor, *args, **kwargs): ...@@ -212,7 +212,7 @@ def pre_test(plugin_context, cursor, odp_cursor, *args, **kwargs):
user = get_option('user', 'root') user = get_option('user', 'root')
password = get_option('password', '') password = get_option('password', '')
warehouses = get_option('warehouses', cpu_total * 20) warehouses = get_option('warehouses', cpu_total * 20)
load_workers = get_option('load_workers', int(min(min_cpu, (max_memory >> 30) / 2))) load_workers = get_option('load_workers', int(max(min(min_cpu, (max_memory >> 30) / 2), 1)))
terminals = get_option('terminals', min(cpu_total * 15, warehouses * 10)) terminals = get_option('terminals', min(cpu_total * 15, warehouses * 10))
run_mins = get_option('run_mins', 10) run_mins = get_option('run_mins', 10)
test_only = get_option('test_only') test_only = get_option('test_only')
......
#!/bin/bash #!/bin/bash
if [ -n "$BASH_VERSION" ]; then if [ -n "$BASH_VERSION" ]; then
complete -F _obd_complete_func obd complete -F _obd_complete_func obd
fi fi
function _obd_complete_func
{
local cur prev cmd obd_cmd cluster_cmd tenant_cmd mirror_cmd test_cmd devmode_cmd
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
obd_cmd="mirror cluster test update repo"
cluster_cmd="autodeploy tenant start deploy redeploy restart reload destroy stop edit-config list display upgrade chst check4ocp change-repo"
tenant_cmd="create drop"
mirror_cmd="clone create list update enable disable"
repo_cmd="list"
test_cmd="mysqltest sysbench tpch"
if [ -f "${OBD_HOME:-"$HOME"}/.obd/.dev_mode" ]; then
obd_cmd="$obd_cmd devmode"
devmode_cmd="enable disable"
fi
if [[ ${cur} == * ]] ; then function _obd_reply_current_files() {
case "${prev}" in filename=${cur##*/}
obd);& dirname=${cur%*$filename}
test);& res=`ls -a -p $dirname 2>/dev/null | sed "s#^#$dirname#"`
cluster);& compopt -o nospace
tenant);& COMPREPLY=( $(compgen -o filenames -W "${res}" -- ${cur}) )
mirror);& }
devmode);&
repo)
cmd=$(eval echo \$"${prev}_cmd") function _obd_reply_deploy_names() {
COMPREPLY=( $(compgen -W "${cmd}" -- ${cur}) ) res=`ls -p $obd_home/.obd/cluster 2>/dev/null | sed "s#/##"`
;; COMPREPLY=( $(compgen -o filenames -W "${res}" -- ${cur}) )
clone);& }
-p|--path);&
-c|--config) function _obd_reply_tool_commands() {
filename=${cur##*/} cmd_yaml=$obd_home/.obd/plugins/commands/0.1/command_template.yaml
dirname=${cur%*$filename} sections=`grep -En '^[0-9a-zA-Z]:' $cmd_yaml`
res=`ls -p $dirname 2>/dev/null | sed "s#^#$dirname#"` for line in sections
compopt -o nospace do
COMPREPLY=( $(compgen -o filenames -W "${res}" -- ${cur}) ) num=`echo $line | awk -F ':' '{print $1}'`
;; section=`echo $line | awk -F ':' '{print $2}'`
*) if [[ "$section" == "commands" ]];then
if [ "$prev" == "list" ]; then start_num=num
return 0 elif [[ "$start_num" != "" ]];then
else end_num=num
prev="${COMP_WORDS[COMP_CWORD-2]}" fi
obd_home=${OBD_HOME:-~} done
if [[ "$prev" == "cluster" || "$prev" == "test" || "$prev" == "tenant" ]]; then if [[ "$end_num" == "" ]]; then
res=`ls -p $obd_home/.obd/cluster 2>/dev/null | sed "s#/##"` end_num=`cat $cmd_yaml | wc -l`
compopt -o nospace
COMPREPLY=( $(compgen -o filenames -W "${res}" -- ${cur}) )
fi
fi
esac
return 0
fi fi
total_num=$((end_num - start_num))
res=`grep -E '^commands:' $cmd_yaml -A $total_num | grep name | awk -F 'name:' '{print $2}' | sort -u | tr '\n' ' '`
COMPREPLY=( $(compgen -o filenames -W "${res}" -- ${cur}) )
} }
function _obd_complete_func
{
local all_cmds
declare -A all_cmds
COMPREPLY=()
obd_home=${OBD_HOME:-~}
env_file=${obd_home}/.obd/.obd_environ
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
all_cmds["obd"]="mirror cluster test update repo"
all_cmds["obd cluster"]="autodeploy tenant start deploy redeploy restart reload destroy stop edit-config list display upgrade chst check4ocp reinstall"
all_cmds["obd cluster *"]="_obd_reply_deploy_names"
all_cmds["obd cluster tenant"]="create drop"
all_cmds["obd cluster tenant *"]="_obd_reply_deploy_names"
all_cmds["obd mirror"]="clone create list update enable disable"
all_cmds["obd mirror clone"]="_obd_reply_current_files"
all_cmds["obd repo"]="list"
all_cmds["obd test"]="mysqltest sysbench tpch tpcc"
all_cmds["obd test *"]="_obd_reply_deploy_names"
if [ -f "$env_file" ] && [ "$(grep '"OBD_DEV_MODE": "1"' "$env_file")" != "" ]; then
all_cmds["obd"]="${all_cmds[obd]} devmode env tool"
all_cmds["obd devmode"]="enable disable"
all_cmds["obd tool"]="command db_connect"
all_cmds["obd tool db_connect"]="_obd_reply_deploy_names"
all_cmds["obd tool command"]="_obd_reply_deploy_names"
all_cmds["obd tool command *"]="_obd_reply_tool_commands"
all_cmds["obd env"]="set unset show clear"
fi
case $prev in
list)
return 0
;;
-p|--path);&
-c|--config)
_obd_reply_current_files
;;
*)
valid_len=$COMP_CWORD
words=( ${COMP_WORDS[@]::valid_len} )
index=valid_len
while (( index >= 1 )); do
target="${words[*]}"
cmd=${all_cmds[$target]}
if [[ "$cmd" != "" ]]
then
if [[ $cmd =~ ^_obd_reply.* ]]
then
$cmd
break
else
COMPREPLY=( $(compgen -W "${cmd}" -- ${cur}) )
break
fi
fi
index=$(( index - 1))
tmp=${words[*]::index}
[[ "$tmp" != "" ]] && parent_cmd=${all_cmds[$tmp]}
if [[ "$parent_cmd" =~ ^_obd_reply.* || " $parent_cmd " =~ " ${words[index]} " ]]; then
words[index]='*'
else
break
fi
done
;;
esac
}
\ No newline at end of file
requests==2.24.0 requests==2.24.0
rpmfile==1.0.8 rpmfile==1.0.8
paramiko==2.10.1 paramiko==2.7.2
backports.lzma==0.0.14 backports.lzma==0.0.14
MySQL-python==1.2.5 MySQL-python==1.2.5
ruamel.yaml.clib==0.2.2 ruamel.yaml.clib==0.2.2
...@@ -11,3 +11,6 @@ enum34==1.1.6 ...@@ -11,3 +11,6 @@ enum34==1.1.6
progressbar==2.5 progressbar==2.5
halo==0.0.30 halo==0.0.30
pycryptodome==3.10.1 pycryptodome==3.10.1
inspect2==0.1.2
six==1.16.0
pyinstaller==3.6
\ No newline at end of file
rpmfile==1.0.8 rpmfile==1.0.8
paramiko==2.10.1 paramiko==2.7.2
requests==2.25.1 requests==2.25.1
PyMySQL==1.0.2 PyMySQL==1.0.2
ruamel.yaml==0.17.4 ruamel.yaml==0.17.4
...@@ -8,3 +8,6 @@ prettytable==2.1.0 ...@@ -8,3 +8,6 @@ prettytable==2.1.0
progressbar==2.5 progressbar==2.5
halo==0.0.31 halo==0.0.31
pycryptodome==3.10.1 pycryptodome==3.10.1
inspect2==0.1.2
six==1.16.0
pyinstaller>=4.3
...@@ -43,7 +43,7 @@ function pacakge_executer27() ...@@ -43,7 +43,7 @@ function pacakge_executer27()
rm -fr executer27 rm -fr executer27
mkdir -p ./executer27/{site-packages,bin} mkdir -p ./executer27/{site-packages,bin}
cd executer27 cd executer27
pip install mysql-connector-python==8.0.21 --target=./site-packages -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com || exit 1 pip install -r ../../executer27-requirements.txt --target=./site-packages -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com || exit 1
pyinstaller -F ../../executer27.py pyinstaller -F ../../executer27.py
if [ -e dist/executer27 ]; then if [ -e dist/executer27 ]; then
cp dist/executer27 ./bin/executer cp dist/executer27 ./bin/executer
...@@ -76,7 +76,6 @@ function get_python() ...@@ -76,7 +76,6 @@ function get_python()
{ {
if [ `id -u` != 0 ] ; then if [ `id -u` != 0 ] ; then
echo "Please use root to run" echo "Please use root to run"
exit 1
fi fi
obd_dir=`dirname $0` obd_dir=`dirname $0`
...@@ -122,6 +121,7 @@ function build() ...@@ -122,6 +121,7 @@ function build()
mkdir -p $BUILD_DIR/mirror/remote mkdir -p $BUILD_DIR/mirror/remote
wget https://mirrors.aliyun.com/oceanbase/OceanBase.repo -O $BUILD_DIR/mirror/remote/OceanBase.repo wget https://mirrors.aliyun.com/oceanbase/OceanBase.repo -O $BUILD_DIR/mirror/remote/OceanBase.repo
cat _cmd.py | sed "s/<CID>/$CID/" | sed "s/<B_BRANCH>/$BRANCH/" | sed "s/<B_TIME>/$DATE/" | sed "s/<DEBUG>/$OBD_DUBUG/" | sed "s/<VERSION>/$VERSION/" > obd.py cat _cmd.py | sed "s/<CID>/$CID/" | sed "s/<B_BRANCH>/$BRANCH/" | sed "s/<B_TIME>/$DATE/" | sed "s/<DEBUG>/$OBD_DUBUG/" | sed "s/<VERSION>/$VERSION/" > obd.py
sed -i "s|<DOC_LINK>|$OBD_DOC_LINK|" _errno.py
pip install -r $req_fn.txt || exit 1 pip install -r $req_fn.txt || exit 1
pip install -r plugins-$req_fn.txt --target=$BUILD_DIR/lib/site-packages || exit 1 pip install -r plugins-$req_fn.txt --target=$BUILD_DIR/lib/site-packages || exit 1
pyinstaller --hidden-import=decimal --hidden-import=configparser -F obd.py || exit 1 pyinstaller --hidden-import=decimal --hidden-import=configparser -F obd.py || exit 1
...@@ -129,15 +129,14 @@ function build() ...@@ -129,15 +129,14 @@ function build()
cp -r plugins $BUILD_DIR/plugins cp -r plugins $BUILD_DIR/plugins
cp -r config_parser $BUILD_DIR/config_parser cp -r config_parser $BUILD_DIR/config_parser
rm -fr $BUILD_DIR/plugins/oceanbase-ce rm -fr $BUILD_DIR/plugins/oceanbase-ce
rm -fr $BUILD_DIR/plugins/obproxy-ce
rm -fr $BUILD_DIR/config_parser/oceanbase-ce rm -fr $BUILD_DIR/config_parser/oceanbase-ce
rm -fr /usr/obd /usr/bin/obd rm -fr /usr/obd /usr/bin/obd
cp ./dist/obd /usr/bin/obd cp ./dist/obd /usr/bin/obd
cp -fr ./profile/* /etc/profile.d/ cp -fr ./profile/* /etc/profile.d/
cd $BUILD_DIR/plugins && ln -s oceanbase oceanbase-ce && mv obproxy obproxy-ce
cd $BUILD_DIR/config_parser && ln -s oceanbase oceanbase-ce
mv $BUILD_DIR /usr/obd mv $BUILD_DIR /usr/obd
rm -fr dist rm -fr dist
cd $BUILD_DIR/plugins && ln -s oceanbase oceanbase-ce && cp -rf obproxy/3.1.0 obproxy-ce/ && cp -rf $DIR/plugins/obproxy-ce/* obproxy-ce/
cd $BUILD_DIR/config_parser && ln -s oceanbase oceanbase-ce
chmod +x /usr/bin/obd chmod +x /usr/bin/obd
chmod -R 755 /usr/obd/* chmod -R 755 /usr/obd/*
chown -R root:root /usr/obd/* chown -R root:root /usr/obd/*
...@@ -168,4 +167,4 @@ case "x$1" in ...@@ -168,4 +167,4 @@ case "x$1" in
get_python get_python
build build
;; ;;
esac esac
\ No newline at end of file
...@@ -55,6 +55,7 @@ if [ "$OBD_DUBUG" ]; then ...@@ -55,6 +55,7 @@ if [ "$OBD_DUBUG" ]; then
VERSION=$VERSION".`date +%s`" VERSION=$VERSION".`date +%s`"
fi fi
cat _cmd.py | sed "s/<CID>/$CID/" | sed "s/<B_BRANCH>/$BRANCH/" | sed "s/<B_TIME>/$DATE/" | sed "s/<DEBUG>/$OBD_DUBUG/" | sed "s/<VERSION>/$VERSION/" > obd.py cat _cmd.py | sed "s/<CID>/$CID/" | sed "s/<B_BRANCH>/$BRANCH/" | sed "s/<B_TIME>/$DATE/" | sed "s/<DEBUG>/$OBD_DUBUG/" | sed "s/<VERSION>/$VERSION/" > obd.py
sed -i "s|<DOC_LINK>|$OBD_DOC_LINK|" _errno.py
mkdir -p $BUILD_DIR/SOURCES ${RPM_BUILD_ROOT} mkdir -p $BUILD_DIR/SOURCES ${RPM_BUILD_ROOT}
mkdir -p $BUILD_DIR/SOURCES/{site-packages} mkdir -p $BUILD_DIR/SOURCES/{site-packages}
mkdir -p ${RPM_BUILD_ROOT}/usr/bin mkdir -p ${RPM_BUILD_ROOT}/usr/bin
...@@ -82,7 +83,8 @@ mkdir -p ${RPM_BUILD_ROOT}/usr/obd/lib/executer ...@@ -82,7 +83,8 @@ mkdir -p ${RPM_BUILD_ROOT}/usr/obd/lib/executer
\cp -rf ${RPM_DIR}/executer27 ${RPM_BUILD_ROOT}/usr/obd/lib/executer/ \cp -rf ${RPM_DIR}/executer27 ${RPM_BUILD_ROOT}/usr/obd/lib/executer/
\cp -rf $BUILD_DIR/SOURCES/example ${RPM_BUILD_ROOT}/usr/obd/ \cp -rf $BUILD_DIR/SOURCES/example ${RPM_BUILD_ROOT}/usr/obd/
cd ${RPM_BUILD_ROOT}/usr/obd/plugins && ln -s oceanbase oceanbase-ce && mv obproxy obproxy-ce cd ${RPM_BUILD_ROOT}/usr/obd/plugins && ln -s oceanbase oceanbase-ce && mv obproxy obproxy-ce
cd ${RPM_BUILD_ROOT}/usr/obd/config_parser && ln -s oceanbase oceanbase-ce rm -rf obproxy
cd ${RPM_BUILD_ROOT}/usr/obd/config_parser && ln -s oceanbase oceanbase-ce
# package infomation # package infomation
%files %files
...@@ -116,6 +118,12 @@ echo -e 'Installation of obd finished successfully\nPlease source /etc/profile.d ...@@ -116,6 +118,12 @@ echo -e 'Installation of obd finished successfully\nPlease source /etc/profile.d
#/sbin/chkconfig obd on #/sbin/chkconfig obd on
%changelog %changelog
* Wed Aug 17 2022 obd 1.5.0
- new features: obd cluster reinstall
- new features: obd tool
- new features: support rsync
- new keyword: include
- more option: obd test mysqltest
* Sun Jul 17 2022 obd 1.4.0 * Sun Jul 17 2022 obd 1.4.0
- new features: support tpcc - new features: support tpcc
- new features: support mysqltest record - new features: support mysqltest record
......
...@@ -20,12 +20,14 @@ ...@@ -20,12 +20,14 @@
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
import os import enum
import sys
import getpass import getpass
import os
import warnings import warnings
from copy import deepcopy from glob import glob
from subprocess32 import Popen, PIPE from subprocess32 import Popen, PIPE
# paramiko import cryptography 模块在python2下会报不支持警报 # paramiko import cryptography 模块在python2下会报不支持警报
warnings.filterwarnings("ignore") warnings.filterwarnings("ignore")
...@@ -33,14 +35,18 @@ from paramiko import AuthenticationException, SFTPClient ...@@ -33,14 +35,18 @@ from paramiko import AuthenticationException, SFTPClient
from paramiko.client import SSHClient, AutoAddPolicy from paramiko.client import SSHClient, AutoAddPolicy
from paramiko.ssh_exception import NoValidConnectionsError, SSHException from paramiko.ssh_exception import NoValidConnectionsError, SSHException
from tool import DirectoryUtil from multiprocessing.queues import Empty
from multiprocessing import Queue, Process
from multiprocessing.pool import ThreadPool
from tool import COMMAND_ENV, DirectoryUtil
from _stdio import SafeStdio
__all__ = ("SshClient", "SshConfig", "LocalClient")
__all__ = ("SshClient", "SshConfig", "LocalClient", "ConcurrentExecutor")
class SshConfig(object):
class SshConfig(object):
def __init__(self, host, username='root', password=None, key_filename=None, port=22, timeout=30): def __init__(self, host, username='root', password=None, key_filename=None, port=22, timeout=30):
self.host = host self.host = host
...@@ -68,11 +74,60 @@ class SshReturn(object): ...@@ -68,11 +74,60 @@ class SshReturn(object):
return self.__bool__() return self.__bool__()
class LocalClient(object): class FutureSshReturn(SshReturn):
def __init__(self, client, command, timeout=None, stdio=None):
self.client = client
self.command = command
self.timeout = timeout
self.stdio = stdio if stdio else client.stdio
if self.stdio:
self.stdio = self.stdio.sub_io()
self.finsh = False
super(FutureSshReturn, self).__init__(127, '', '')
def set_return(self, ssh_return):
self.code = ssh_return.code
self.stdout = ssh_return.stdout
self.stderr = ssh_return.stderr
self.finsh = True
class ConcurrentExecutor(object):
def __init__(self, workers=None):
self.workers = workers
self.futures = []
def add_task(self, client, command, timeout=None, stdio=None):
ret = FutureSshReturn(client, command, timeout, stdio=stdio)
self.futures.append(ret)
return ret
@staticmethod
def execute(future):
client = SshClient(future.client.config, future.stdio)
future.set_return(client.execute_command(future.command, timeout=future.timeout))
return future
def submit(self):
rets = []
pool = ThreadPool(processes=self.workers)
try:
results = pool.map(ConcurrentExecutor.execute, tuple(self.futures))
for r in results:
rets.append(r)
finally:
pool.close()
self.futures = []
return rets
class LocalClient(SafeStdio):
@staticmethod @staticmethod
def execute_command(command, env=None, timeout=None, stdio=None): def execute_command(command, env=None, timeout=None, stdio=None):
stdio and getattr(stdio, 'verbose', print)('local execute: %s ' % command, end='') stdio.verbose('local execute: %s ' % command, end='')
try: try:
p = Popen(command, env=env, shell=True, stdout=PIPE, stderr=PIPE) p = Popen(command, env=env, shell=True, stdout=PIPE, stderr=PIPE)
output, error = p.communicate(timeout=timeout) output, error = p.communicate(timeout=timeout)
...@@ -82,14 +137,14 @@ class LocalClient(object): ...@@ -82,14 +137,14 @@ class LocalClient(object):
verbose_msg = 'exited code %s' % code verbose_msg = 'exited code %s' % code
if code: if code:
verbose_msg += ', error output:\n%s' % error verbose_msg += ', error output:\n%s' % error
stdio and getattr(stdio, 'verbose', print)(verbose_msg) stdio.verbose(verbose_msg)
except Exception as e: except Exception as e:
output = '' output = ''
error = str(e) error = str(e)
code = 255 code = 255
verbose_msg = 'exited code 255, error output:\n%s' % error verbose_msg = 'exited code 255, error output:\n%s' % error
stdio and getattr(stdio, 'verbose', print)(verbose_msg) stdio.verbose(verbose_msg)
stdio and getattr(stdio, 'exception', print)('') stdio.exception('')
return SshReturn(code, output, error) return SshReturn(code, output, error)
@staticmethod @staticmethod
...@@ -100,7 +155,12 @@ class LocalClient(object): ...@@ -100,7 +155,12 @@ class LocalClient(object):
@staticmethod @staticmethod
def put_dir(local_dir, remote_dir, stdio=None): def put_dir(local_dir, remote_dir, stdio=None):
if LocalClient.execute_command('mkdir -p %s && cp -fr %s %s' % (remote_dir, os.path.join(local_dir, '*'), remote_dir), stdio=stdio): if os.path.isdir(local_dir):
local_dir = os.path.join(local_dir, '*')
if os.path.exists(os.path.dirname(local_dir)) and not glob(local_dir):
stdio.verbose("%s is empty" % local_dir)
return True
if LocalClient.execute_command('mkdir -p %s && cp -fr %s %s' % (remote_dir, local_dir, remote_dir), stdio=stdio):
return True return True
return False return False
...@@ -113,7 +173,18 @@ class LocalClient(object): ...@@ -113,7 +173,18 @@ class LocalClient(object):
return LocalClient.put_dir(remote_path, local_path, stdio=stdio) return LocalClient.put_dir(remote_path, local_path, stdio=stdio)
class SshClient(object): class RemoteTransporter(enum.Enum):
CLIENT = 0
RSYNC = 1
def __lt__(self, other):
return self.value < other.value
def __gt__(self, other):
return self.value > other.value
class SshClient(SafeStdio):
def __init__(self, config, stdio=None): def __init__(self, config, stdio=None):
self.config = config self.config = config
...@@ -122,11 +193,19 @@ class SshClient(object): ...@@ -122,11 +193,19 @@ class SshClient(object):
self.is_connected = False self.is_connected = False
self.ssh_client = SSHClient() self.ssh_client = SSHClient()
self.env_str = '' self.env_str = ''
self._remote_transporter = None
self.task_queue = None
self.result_queue = None
if self._is_local(): if self._is_local():
self.env = deepcopy(os.environ.copy()) self.env = COMMAND_ENV.copy()
else: else:
self.env = {'PATH': '/sbin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:'} self.env = {'PATH': '/sbin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:'}
self._update_env() self._update_env()
super(SshClient, self).__init__()
def _init_queue(self):
self.task_queue = Queue()
self.result_queue = Queue()
def _update_env(self): def _update_env(self):
env = [] env = []
...@@ -136,18 +215,23 @@ class SshClient(object): ...@@ -136,18 +215,23 @@ class SshClient(object):
self.env_str = ''.join(env) self.env_str = ''.join(env)
def add_env(self, key, value, rewrite=False, stdio=None): def add_env(self, key, value, rewrite=False, stdio=None):
stdio = stdio if stdio else self.stdio
if key not in self.env or not self.env[key] or rewrite: if key not in self.env or not self.env[key] or rewrite:
stdio and getattr(stdio, 'verbose', print)('%s@%s set env %s to \'%s\'' % (self.config.username, self.config.host, key, value)) stdio.verbose('%s@%s set env %s to \'%s\'' % (self.config.username, self.config.host, key, value))
self.env[key] = value self.env[key] = value
else: else:
stdio and getattr(stdio, 'verbose', print)('%s@%s append \'%s\' to %s' % (self.config.username, self.config.host, value, key)) stdio.verbose('%s@%s append \'%s\' to %s' % (self.config.username, self.config.host, value, key))
self.env[key] += value self.env[key] += value
self._update_env() self._update_env()
def get_env(self, key): def get_env(self, key, stdio=None):
return self.env[key] if key in self.env else None return self.env[key] if key in self.env else None
def del_env(self, key, stdio=None):
if key in self.env:
stdio.verbose('%s@%s delete env %s' % (self.config.username, self.config.host, key))
del self.env[key]
self._update_env()
def __str__(self): def __str__(self):
return '%s@%s:%d' % (self.config.username, self.config.host, self.config.port) return '%s@%s:%d' % (self.config.username, self.config.host, self.config.port)
...@@ -160,33 +244,32 @@ class SshClient(object): ...@@ -160,33 +244,32 @@ class SshClient(object):
def _login(self, stdio=None): def _login(self, stdio=None):
if self.is_connected: if self.is_connected:
return True return True
stdio = stdio if stdio else self.stdio
try: try:
self.ssh_client.set_missing_host_key_policy(AutoAddPolicy()) self.ssh_client.set_missing_host_key_policy(AutoAddPolicy())
self.ssh_client.connect( self.ssh_client.connect(
self.config.host, self.config.host,
port=self.config.port, port=self.config.port,
username=self.config.username, username=self.config.username,
password=self.config.password, password=self.config.password,
key_filename=self.config.key_filename, key_filename=self.config.key_filename,
timeout=self.config.timeout timeout=self.config.timeout
) )
self.is_connected = True self.is_connected = True
except AuthenticationException: except AuthenticationException:
stdio and getattr(stdio, 'exception', print)('') stdio.exception('')
stdio and getattr(stdio, 'critical', print)('%s@%s username or password error' % (self.config.username, self.config.host)) stdio.critical('%s@%s username or password error' % (self.config.username, self.config.host))
except NoValidConnectionsError: except NoValidConnectionsError:
stdio and getattr(stdio, 'exception', print)('') stdio.exception('')
stdio and getattr(stdio, 'critical', print)('%s@%s connect failed: time out' % (self.config.username, self.config.host)) stdio.critical('%s@%s connect failed: time out' % (self.config.username, self.config.host))
except Exception as e: except Exception as e:
stdio and getattr(stdio, 'exception', print)('') stdio.exception('')
stdio and getattr(stdio, 'critical', print)('%s@%s connect failed: %s' % (self.config.username, self.config.host, e)) stdio.critical('%s@%s connect failed: %s' % (self.config.username, self.config.host, e))
return self.is_connected return self.is_connected
def _open_sftp(self, stdio=None): def _open_sftp(self, stdio=None):
if self.sftp: if self.sftp:
return True return True
if self._login(stdio): if self._login(stdio=stdio):
SFTPClient.from_transport(self.ssh_client.get_transport()) SFTPClient.from_transport(self.ssh_client.get_transport())
self.sftp = self.ssh_client.open_sftp() self.sftp = self.ssh_client.open_sftp()
return True return True
...@@ -195,11 +278,11 @@ class SshClient(object): ...@@ -195,11 +278,11 @@ class SshClient(object):
def connect(self, stdio=None): def connect(self, stdio=None):
if self._is_local(): if self._is_local():
return True return True
return self._login(stdio) return self._login(stdio=stdio)
def reconnect(self, stdio=None): def reconnect(self, stdio=None):
self.close(stdio) self.close(stdio=stdio)
return self.connect(stdio) return self.connect(stdio=stdio)
def close(self, stdio=None): def close(self, stdio=None):
if self._is_local(): if self._is_local():
...@@ -212,13 +295,11 @@ class SshClient(object): ...@@ -212,13 +295,11 @@ class SshClient(object):
def __del__(self): def __del__(self):
self.close() self.close()
def _execute_command(self, command, retry, stdio): def _execute_command(self, command, timeout=None, retry=3, stdio=None):
if not self._login(stdio): if not self._login(stdio):
return SshReturn(255, '', 'connect failed') return SshReturn(255, '', 'connect failed')
stdio = stdio if stdio else self.stdio
try: try:
stdin, stdout, stderr = self.ssh_client.exec_command(command) stdin, stdout, stderr = self.ssh_client.exec_command(command, timeout=timeout)
output = stdout.read().decode(errors='replace') output = stdout.read().decode(errors='replace')
error = stderr.read().decode(errors='replace') error = stderr.read().decode(errors='replace')
if output: if output:
...@@ -230,152 +311,332 @@ class SshClient(object): ...@@ -230,152 +311,332 @@ class SshClient(object):
code, stdout = 1, '' code, stdout = 1, ''
if code: if code:
verbose_msg = 'exited code %s, error output:\n%s' % (code, error) verbose_msg = 'exited code %s, error output:\n%s' % (code, error)
stdio and getattr(stdio, 'verbose', print)(verbose_msg) stdio.verbose(verbose_msg)
return SshReturn(code, stdout, error) return SshReturn(code, stdout, error)
except SSHException as e: except SSHException as e:
if retry: if retry:
self.close() self.close()
return self._execute_command(command, retry-1, stdio) return self._execute_command(command, retry-1, stdio)
else: else:
stdio and getattr(stdio, 'exception', print)('') stdio.exception('')
stdio and getattr(stdio, 'critical', print)('%s@%s connect failed: %s' % (self.config.username, self.config.host, e)) stdio.critical('%s@%s connect failed: %s' % (self.config.username, self.config.host, e))
raise e raise e
except Exception as e: except Exception as e:
stdio and getattr(stdio, 'exception', print)('') stdio.exception('')
stdio and getattr(stdio, 'critical', print)('%s@%s connect failed: %s' % (self.config.username, self.config.host, e)) stdio.critical('%s@%s connect failed: %s' % (self.config.username, self.config.host, e))
raise e raise e
def execute_command(self, command, stdio=None): def execute_command(self, command, timeout=None, stdio=None):
if timeout is None:
timeout = self.config.timeout
elif timeout <= 0:
timeout = None
if self._is_local(): if self._is_local():
return LocalClient.execute_command(command, self.env, self.config.timeout, stdio=stdio) return LocalClient.execute_command(command, self.env, timeout, stdio=stdio)
stdio = stdio if stdio else self.stdio
verbose_msg = '%s execute: %s ' % (self.config, command) verbose_msg = '%s execute: %s ' % (self.config, command)
stdio and getattr(stdio, 'verbose', print)(verbose_msg, end='') stdio.verbose(verbose_msg, end='')
command = '%s %s;echo -e "\n$?\c"' % (self.env_str, command.strip(';')) command = '%s %s;echo -e "\n$?\c"' % (self.env_str, command.strip(';'))
return self._execute_command(command, 3, stdio=stdio) return self._execute_command(command, retry=3, timeout=timeout, stdio=stdio)
@property
def disable_rsync(self):
return COMMAND_ENV.get("OBD_DISABLE_RSYNC") == "1"
@property
def remote_transporter(self):
    """Lazily choose how files are moved to/from the remote host.

    Prefers rsync when the connection is password-less (rsync cannot be
    fed a password non-interactively here), rsync is not disabled via the
    environment, and a local rsync binary exists; otherwise falls back to
    the paramiko-based client transporter. The decision is cached.
    """
    if self._remote_transporter is not None:
        return self._remote_transporter
    _transporter = RemoteTransporter.CLIENT
    # NOTE: the early return above guarantees self._remote_transporter is None,
    # so the original's re-test of that condition here was redundant.
    if not self._is_local() and not self.config.password and not self.disable_rsync:
        ret = LocalClient.execute_command('rsync -h', stdio=self.stdio)
        if ret:
            _transporter = RemoteTransporter.RSYNC
    self._remote_transporter = _transporter
    self.stdio.verbose("current remote_transporter {}".format(self._remote_transporter))
    return self._remote_transporter
def put_file(self, local_path, remote_path, stdio=None):
    """Upload one local file to `remote_path` on the target host."""
    stdio = stdio if stdio else self.stdio
    if not os.path.isfile(local_path):
        stdio.error('path: %s is not file' % local_path)
        return False
    if self._is_local():
        return LocalClient.put_file(local_path, remote_path, stdio=stdio)
    if not self._open_sftp(stdio=stdio):
        return False
    return self._put_file(local_path, remote_path, stdio=stdio)
@property
def _put_file(self):
    # Dispatch to the transporter-specific single-file upload.
    if self.remote_transporter == RemoteTransporter.RSYNC:
        return self._rsync_put_file
    return self._client_put_file
def _client_put_file(self, local_path, remote_path, stdio=None):
    # Recreate the target location, push the bytes over sftp, then mirror the
    # permission bits (last three octal digits of the local file mode).
    if self.execute_command('mkdir -p %s && rm -fr %s' % (os.path.dirname(remote_path), remote_path), stdio=stdio):
        stdio.verbose('send %s to %s' % (local_path, remote_path))
        if self.sftp.put(local_path, remote_path):
            return self.execute_command('chmod %s %s' % (oct(os.stat(local_path).st_mode)[-3:], remote_path))
    return False
def _rsync(self, source, target, stdio=None):
    """Copy `source` to `target` by spawning a local rsync process."""
    identity_option = ""
    if self.config.key_filename:
        # password-less key auth: hand the identity file to ssh explicitly
        identity_option = '-e "ssh -i {key_filename} "'.format(key_filename=self.config.key_filename)
    cmd = 'rsync -a -W {identity_option} {source} {target}'.format(
        identity_option=identity_option, source=source, target=target)
    return bool(LocalClient.execute_command(cmd, stdio=stdio))
def _rsync_put_dir(self, local_path, remote_path, stdio=None):
    """Push the *contents* of `local_path` into `remote_path` via rsync."""
    stdio.verbose('send %s to %s by rsync' % (local_path, remote_path))
    source = os.path.join(local_path, '*')
    # rsync would fail on an empty glob, but an empty directory is a success case
    if os.path.exists(os.path.dirname(source)) and not glob(source):
        stdio.verbose("%s is empty" % source)
        return True
    target = "{user}@{host}:{remote_path}".format(user=self.config.username, host=self.config.host, remote_path=remote_path)
    return bool(self._rsync(source, target, stdio=stdio))
def _rsync_put_file(self, local_path, remote_path, stdio=None):
    """Upload a single file via rsync, creating its remote parent first."""
    if not self.execute_command('mkdir -p %s' % os.path.dirname(remote_path), stdio=stdio):
        return False
    stdio.verbose('send %s to %s by rsync' % (local_path, remote_path))
    target = "{user}@{host}:{remote_path}".format(user=self.config.username, host=self.config.host, remote_path=remote_path)
    return bool(self._rsync(local_path, target, stdio=stdio))
def put_dir(self, local_dir, remote_dir, stdio=None):
    """Upload a whole directory tree, showing a progress spinner."""
    stdio = stdio if stdio else self.stdio
    if self._is_local():
        return LocalClient.put_dir(local_dir, remote_dir, stdio=stdio)
    if not self._open_sftp(stdio=stdio):
        return False
    if not self.execute_command('mkdir -p %s' % remote_dir, stdio=stdio):
        return False
    stdio.start_loading('Send %s to %s' % (local_dir, remote_dir))
    ret = self._put_dir(local_dir, remote_dir, stdio=stdio)
    stdio.stop_loading('succeed' if ret else 'fail')
    return ret
@property
def _put_dir(self):
    # Dispatch to the transporter-specific directory upload.
    if self.remote_transporter == RemoteTransporter.RSYNC:
        return self._rsync_put_dir
    return self._client_put_dir

def _client_put_dir(self, local_dir, remote_dir, stdio=None):
    """Upload a directory tree file-by-file over sftp.

    Enumerates local files/dirs with `find`, drops directories already
    implied by a file path, sends every file, then recreates the remaining
    (empty) directories remotely with their permission bits. Returns True
    only if every step succeeded.
    """
    has_failed = False
    ret = LocalClient.execute_command('find %s -type f' % local_dir)
    if not ret:
        has_failed = True
    all_files = ret.stdout.strip().split('\n') if ret.stdout else []
    ret = LocalClient.execute_command('find %s -type d' % local_dir)
    if not ret:
        has_failed = True
    all_dirs = ret.stdout.strip().split('\n') if ret.stdout else []
    self._filter_dir_in_file_path(all_files, all_dirs)
    for local_path in all_files:
        remote_path = os.path.join(remote_dir, os.path.relpath(local_path, local_dir))
        if not self._client_put_file(local_path, remote_path, stdio=stdio):
            # BUG FIX: original logged 'Fail to get %s' here although this is
            # the upload direction.
            stdio.error('Fail to send %s' % remote_path)
            has_failed = True
    for local_path in all_dirs:
        remote_path = os.path.join(remote_dir, os.path.relpath(local_path, local_dir))
        stat = oct(os.stat(local_path).st_mode)[-3:]
        cmd = '[ -d "{remote_path}" ] || (mkdir -p {remote_path}; chmod {stat} {remote_path})'.format(remote_path=remote_path, stat=stat)
        if not self.execute_command(cmd):
            has_failed = True
    return not has_failed
def get_file(self, local_path, remote_path, stdio=None):
    """Download one remote file to `local_path`, creating its directory."""
    stdio = stdio if stdio else self.stdio
    dirname, _ = os.path.split(local_path)
    if not dirname:
        # bare filename: anchor it in the current working directory
        dirname = os.getcwd()
        local_path = os.path.join(dirname, local_path)
    if os.path.exists(dirname):
        if not os.path.isdir(dirname):
            stdio.error('%s is not directory' % dirname)
            return False
    elif not DirectoryUtil.mkdir(dirname, stdio=stdio):
        return False
    if os.path.exists(local_path) and not os.path.isfile(local_path):
        stdio.error('path: %s is not file' % local_path)
        return False
    if self._is_local():
        return LocalClient.get_file(local_path, remote_path, stdio=stdio)
    if not self._open_sftp(stdio=stdio):
        return False
    return self._get_file(local_path, remote_path, stdio=stdio)
@property
def _get_file(self):
    # Dispatch to the transporter-specific single-file download.
    if self.remote_transporter == RemoteTransporter.RSYNC:
        return self._rsync_get_file
    return self._client_get_file
def _rsync_get_dir(self, local_path, remote_path, stdio=None):
    """Pull a remote directory's contents into `local_path` via rsync."""
    source = "{user}@{host}:{remote_path}".format(user=self.config.username, host=self.config.host, remote_path=remote_path)
    if "*" not in remote_path:
        # fetch the contents rather than the directory itself
        source = os.path.join(source, "*")
    stdio.verbose('get %s from %s by rsync' % (local_path, remote_path))
    made = LocalClient.execute_command('mkdir -p {}'.format(local_path), stdio=stdio)
    return bool(made and self._rsync(source, local_path, stdio=stdio))
def _rsync_get_file(self, local_path, remote_path, stdio=None):
    """Download a single remote file via rsync."""
    source = "{user}@{host}:{remote_path}".format(user=self.config.username, host=self.config.host, remote_path=remote_path)
    stdio.verbose('get %s from %s by rsync' % (local_path, remote_path))
    return bool(self._rsync(source, local_path, stdio=stdio))
def _client_get_file(self, local_path, remote_path, stdio=None):
    """Download one file over sftp and mirror its permission bits locally."""
    try:
        self.sftp.get(remote_path, local_path)
        stat = self.sftp.stat(remote_path)
        os.chmod(local_path, stat.st_mode)
        return True
    except Exception as e:
        stdio.exception('get %s from %s@%s:%s failed: %s' % (local_path, self.config.username, self.config.host, remote_path, e))
    return False
def get_dir(self, local_dir, remote_dir, stdio=None):
    """Download a whole remote directory tree, showing a progress spinner."""
    stdio = stdio if stdio else self.stdio
    dirname, _ = os.path.split(local_dir)
    if not dirname:
        dirname = os.getcwd()
        local_dir = os.path.join(dirname, local_dir)
    if "*" in dirname:
        # wildcards are only meaningful in the last path component
        stdio.error('Invalid directory {}'.format(dirname))
        return False
    if os.path.exists(dirname):
        if not os.path.isdir(dirname):
            stdio.error('%s is not directory' % dirname)
            return False
    elif not DirectoryUtil.mkdir(dirname, stdio=stdio):
        return False
    if os.path.exists(local_dir) and not os.path.isdir(local_dir):
        stdio.error('%s is not directory' % local_dir)
        return False
    if self._is_local():
        return LocalClient.get_dir(local_dir, remote_dir, stdio=stdio)
    if not self._open_sftp(stdio=stdio):
        return False
    stdio.start_loading('Get %s from %s' % (local_dir, remote_dir))
    ret = self._get_dir(local_dir, remote_dir, stdio=stdio)
    stdio.stop_loading('succeed' if ret else 'fail')
    return ret
@property
def _get_dir(self):
    # Dispatch to the transporter-specific directory download.
    if self.remote_transporter == RemoteTransporter.RSYNC:
        return self._rsync_get_dir
    return self._client_get_dir
def _client_get_dir(self, local_dir, remote_dir, stdio=None):
    """Download a directory tree file-by-file over sftp.

    Enumerates remote files/dirs with `find`, filters directories implied
    by file paths, fetches each file, then recreates the remaining (empty)
    directories locally with the remote permission bits.
    """
    task_queue = []
    has_failed = False
    if DirectoryUtil.mkdir(local_dir, stdio=stdio):
        try:
            ret = self.execute_command('find %s -type f' % remote_dir)
            if not ret:
                stdio.verbose(ret.stderr)
                has_failed = True
            all_files = ret.stdout.strip().split('\n') if ret.stdout else []
            ret = self.execute_command('find %s -type d' % remote_dir)
            if not ret:
                has_failed = True
            all_dirs = ret.stdout.strip().split('\n') if ret.stdout else []
            self._filter_dir_in_file_path(all_files, all_dirs)
            for f in all_files:
                task_queue.append(f)
            if "*" in remote_dir:
                # 'find /a/*' expands before running, so results are relative
                # to the wildcard's parent directory
                remote_base_dir = os.path.dirname(remote_dir)
            else:
                remote_base_dir = remote_dir
            for remote_path in task_queue:
                # BUG FIX: relpath must use remote_base_dir; computing it
                # against a wildcard remote_dir ('/a/*') produced broken
                # '../'-style local paths for every file.
                local_path = os.path.join(local_dir, os.path.relpath(remote_path, remote_base_dir))
                if not self._client_get_file(local_path, remote_path, stdio=stdio):
                    stdio.error('Fail to get %s' % remote_path)
                    has_failed = True
            # NOTE(review): empty directories are created after the file loop;
            # parents of fetched files are created implicitly by sftp failure
            # handling upstream -- confirm ordering against callers.
            for remote_path in all_dirs:
                try:
                    local_path = os.path.join(local_dir, os.path.relpath(remote_path, remote_base_dir))
                    if not os.path.exists(local_path):
                        stat = self.sftp.stat(remote_path)
                        os.makedirs(local_path, mode=stat.st_mode)
                except Exception as e:
                    stdio.exception('Fail to make directory %s in local: %s' % (remote_path, e))
                    has_failed = True
            return not has_failed
        except Exception as e:
            stdio.exception('Fail to get %s: %s' % (remote_dir, e))
    # mkdir failed or an unexpected exception escaped: report failure
    return False
@staticmethod
def _filter_dir_in_file_path(files, directories):
    """Remove from `directories` (in place) every directory that already
    appears as an ancestor of some path in `files`."""
    already_pruned = []
    for file_path in files:
        parent = os.path.dirname(file_path)
        while parent not in ("/", ".", ""):
            if parent in already_pruned:
                # this ancestor chain was handled by an earlier file
                break
            if parent in directories:
                directories.remove(parent)
                already_pruned.append(parent)
            parent = os.path.dirname(parent)
def file_downloader(self, local_dir, remote_dir, stdio=None):
    """Worker loop: drain self.task_queue of remote paths and fetch each one,
    reporting successes through self.result_queue. Exits when the queue is empty."""
    try:
        client = SshClient(config=self.config, stdio=None)
        client._open_sftp(stdio=stdio)
        client._remote_transporter = self.remote_transporter
        while True:
            remote_path = self.task_queue.get(block=False)
            local_path = os.path.join(local_dir, os.path.relpath(remote_path, remote_dir))
            if client.get_file(local_path, remote_path, stdio=stdio):
                self.result_queue.put(remote_path)
            else:
                stdio.error('Fail to get %s' % remote_path)
    except Empty:
        # queue drained: normal termination
        return
    except:
        stdio.exception("")
        stdio.exception('Failed to get %s' % remote_dir)
def file_uploader(self, local_dir, remote_dir, stdio=None):
    """Worker loop: drain self.task_queue of (local_path, is_dir) items and
    upload each one, reporting successes through self.result_queue.
    Exits when the queue is empty."""
    try:
        client = SshClient(config=self.config, stdio=None)
        client._remote_transporter = self.remote_transporter
        while True:
            local_path, is_dir = self.task_queue.get(block=False)
            remote_path = os.path.join(remote_dir, os.path.relpath(local_path, local_dir))
            if is_dir:
                # recreate the directory remotely with matching permission bits
                stat = oct(os.stat(local_path).st_mode)[-3:]
                cmd = '[ -d "{remote_path}" ] || (mkdir -p {remote_path}; chmod {stat} {remote_path})'.format(remote_path=remote_path, stat=stat)
                if client.execute_command(cmd):
                    self.result_queue.put(remote_path)
            else:
                if client.put_file(local_path, remote_path, stdio=stdio):
                    self.result_queue.put(remote_path)
                else:
                    # BUG FIX: original said 'Fail to get %s' in the uploader
                    stdio.error('Fail to send %s' % remote_path)
    except Empty:
        # queue drained: normal termination
        return
    except:
        stdio.exception("")
        # BUG FIX: upload direction, not 'get'
        stdio.verbose('Failed to send %s' % remote_dir)
...@@ -29,29 +29,68 @@ import gzip ...@@ -29,29 +29,68 @@ import gzip
import fcntl import fcntl
import signal import signal
import shutil import shutil
import re
import json
from ruamel.yaml import YAML, YAMLContextManager, representer from ruamel.yaml import YAML, YAMLContextManager, representer
from _stdio import SafeStdio
_open = open
if sys.version_info.major == 2: if sys.version_info.major == 2:
from collections import OrderedDict from collections import OrderedDict
from backports import lzma from backports import lzma
from io import open from io import open as _open
def encoding_open(path, _type, encoding=None, *args, **kwrags):
if encoding:
kwrags['encoding'] = encoding
return _open(path, _type, *args, **kwrags)
else:
return open(path, _type, *args, **kwrags)
class TimeoutError(OSError): class TimeoutError(OSError):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(TimeoutError, self).__init__(*args, **kwargs) super(TimeoutError, self).__init__(*args, **kwargs)
else: else:
import lzma import lzma
encoding_open = open
class OrderedDict(dict): class OrderedDict(dict):
pass pass
__all__ = ("timeout", "DynamicLoading", "ConfigUtil", "DirectoryUtil", "FileUtil", "YamlLoader", "OrderedDict") __all__ = ("timeout", "DynamicLoading", "ConfigUtil", "DirectoryUtil", "FileUtil", "YamlLoader", "OrderedDict", "COMMAND_ENV")
_WINDOWS = os.name == 'nt' _WINDOWS = os.name == 'nt'
class Timeout(object):
    """Context manager raising TimeoutError after `seconds` via SIGALRM.

    A falsy or non-positive `seconds` disables the alarm entirely, making
    the context manager a no-op.
    """

    def __init__(self, seconds=1, error_message='Timeout'):
        self.seconds = seconds
        self.error_message = error_message

    def handle_timeout(self, signum, frame):
        raise TimeoutError(self.error_message)

    def _is_timeout(self):
        # truthy only when a positive timeout was requested
        return self.seconds and self.seconds > 0

    def __enter__(self):
        if self._is_timeout():
            signal.signal(signal.SIGALRM, self.handle_timeout)
            signal.alarm(self.seconds)

    def __exit__(self, type, value, traceback):
        if self._is_timeout():
            # cancel any pending alarm on the way out
            signal.alarm(0)


# lowercase alias kept for the public `timeout` context-manager name
timeout = Timeout
...@@ -162,6 +201,17 @@ class ConfigUtil(object): ...@@ -162,6 +201,17 @@ class ConfigUtil(object):
except: except:
return default return default
@staticmethod
def get_list_from_dict(conf, key, transform_func=None):
try:
return_list = conf[key]
if transform_func:
return [transform_func(value) for value in return_list]
else:
return return_list
except:
return []
class DirectoryUtil(object): class DirectoryUtil(object):
...@@ -320,7 +370,7 @@ class FileUtil(object): ...@@ -320,7 +370,7 @@ class FileUtil(object):
stdio and getattr(stdio, 'verbose', print)('open %s for %s' % (path, _type)) stdio and getattr(stdio, 'verbose', print)('open %s for %s' % (path, _type))
if os.path.exists(path): if os.path.exists(path):
if os.path.isfile(path): if os.path.isfile(path):
return open(path, _type, encoding=encoding) return encoding_open(path, _type, encoding=encoding)
info = '%s is not file' % path info = '%s is not file' % path
if stdio: if stdio:
getattr(stdio, 'error', print)(info) getattr(stdio, 'error', print)(info)
...@@ -329,7 +379,7 @@ class FileUtil(object): ...@@ -329,7 +379,7 @@ class FileUtil(object):
raise IOError(info) raise IOError(info)
dir_path, file_name = os.path.split(path) dir_path, file_name = os.path.split(path)
if not dir_path or DirectoryUtil.mkdir(dir_path, stdio=stdio): if not dir_path or DirectoryUtil.mkdir(dir_path, stdio=stdio):
return open(path, _type, encoding=encoding) return encoding_open(path, _type, encoding=encoding)
info = '%s is not file' % path info = '%s is not file' % path
if stdio: if stdio:
getattr(stdio, 'error', print)(info) getattr(stdio, 'error', print)(info)
...@@ -422,3 +472,116 @@ class YamlLoader(YAML): ...@@ -422,3 +472,116 @@ class YamlLoader(YAML):
if getattr(self.stdio, 'exception', False): if getattr(self.stdio, 'exception', False):
self.stdio.exception('dump error:\n%s' % e) self.stdio.exception('dump error:\n%s' % e)
raise e raise e
# matches $name references; group 1 is the bare variable name
_KEYCRE = re.compile(r"\$(\w+)")


def var_replace(string, var, pattern=_KEYCRE):
    """Expand $name references in `string` from the mapping `var`.

    Names are looked up lower-cased; unknown names are left untouched.
    An empty mapping returns the string unchanged.
    """
    if not var:
        return string
    pieces = []
    while string:
        match = pattern.search(string)
        if not match:
            pieces.append(string)
            break
        key = match.group(1).lower()
        # fall back to the literal '$name' text when the name is unknown
        replacement = var.get(key, match.group())
        head, tail = match.span()
        pieces.append(string[:head])
        pieces.append(str(replacement))
        string = string[tail:]
    return ''.join(pieces)
class CommandEnv(SafeStdio):
    """Layered environment: a snapshot of os.environ overlaid with
    OBD-managed variables persisted as JSON at `source_path`.

    Lookup order is custom variables first, then the OS snapshot.
    NOTE(review): `stdio.*` is called without None-checks; presumably the
    SafeStdio base makes that safe when stdio is None -- confirm in _stdio.
    """

    def __init__(self):
        self.source_path = None          # JSON file backing the custom variables
        self._env = os.environ.copy()    # snapshot of the OS environment at startup
        self._cmd_env = {}               # OBD-managed overrides (str -> str)

    def load(self, source_path, stdio=None):
        """Load persisted variables from `source_path`; may only be called once."""
        if self.source_path:
            stdio.error("Source path of env already set.")
            return False
        self.source_path = source_path
        try:
            # a missing file is not an error: start with an empty override set
            if os.path.exists(source_path):
                with FileUtil.open(source_path, 'r') as f:
                    self._cmd_env = json.load(f)
        except:
            stdio.exception("Failed to load environments from {}".format(source_path))
            return False
        return True

    def save(self, stdio=None):
        """Persist the current custom variables as JSON; requires a prior load()."""
        if self.source_path is None:
            stdio.error("Command environments need to load at first.")
            return False
        stdio.verbose("save environment variables {}".format(self._cmd_env))
        try:
            with FileUtil.open(self.source_path, 'w', stdio=stdio) as f:
                json.dump(self._cmd_env, f)
        except:
            stdio.exception('Failed to save environment variables')
            return False
        return True

    def get(self, key, default=""):
        """Like __getitem__ but returning `default` instead of raising KeyError."""
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def set(self, key, value, save=False, stdio=None):
        """Set a custom variable (stored stringified); optionally persist now."""
        stdio.verbose("set environment variable {} value {}".format(key, value))
        self._cmd_env[key] = str(value)
        if save:
            return self.save(stdio=stdio)
        return True

    def delete(self, key, save=False, stdio=None):
        """Delete a custom variable if present; optionally persist now."""
        stdio.verbose("delete environment variable {}".format(key))
        if key in self._cmd_env:
            del self._cmd_env[key]
        if save:
            return self.save(stdio=stdio)
        return True

    def clear(self, save=True, stdio=None):
        """Drop all custom variables; persists by default."""
        self._cmd_env = {}
        if save:
            return self.save(stdio=stdio)
        return True

    def __getitem__(self, item):
        # custom variables shadow the OS snapshot
        value = self._cmd_env.get(item)
        if value is None:
            value = self._env.get(item)
        if value is None:
            raise KeyError(item)
        return value

    def __contains__(self, item):
        if item in self._cmd_env:
            return True
        elif item in self._env:
            return True
        else:
            return False

    def copy(self):
        """Return a plain dict of the merged view (custom wins over OS)."""
        result = dict(self._env)
        result.update(self._cmd_env)
        return result

    def show_env(self):
        # only the OBD-managed overrides, not the OS snapshot
        return self._cmd_env


# module-level singleton used across OBD commands
COMMAND_ENV = CommandEnv()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册