Unverified commit 274bc962 authored by LIN, committed by GitHub

Merge pull request #130 from frf12/v1.5.0

v1.5.0
......@@ -32,19 +32,28 @@ from optparse import OptionParser, OptionGroup, BadOptionError, Option
from core import ObdHome
from _stdio import IO
from log import Logger
from tool import DirectoryUtil, FileUtil, COMMAND_ENV
from _errno import DOC_LINK_MSG, LockError
from tool import DirectoryUtil, FileUtil
ROOT_IO = IO(1)
VERSION = u'<VERSION>'
VERSION = '<VERSION>'
REVISION = '<CID>'
BUILD_BRANCH = '<B_BRANCH>'
BUILD_TIME = '<B_TIME>'
DEBUG = True if '<DEBUG>' else False
CONST_OBD_HOME = "OBD_HOME"
CONST_OBD_INSTALL_PRE = "OBD_INSTALL_PRE"
FORBIDDEN_VARS = (CONST_OBD_HOME, CONST_OBD_INSTALL_PRE)
OBD_HOME_PATH = os.path.join(os.environ.get(CONST_OBD_HOME, os.getenv('HOME')), '.obd')
COMMAND_ENV.load(os.path.join(OBD_HOME_PATH, '.obd_environ'), ROOT_IO)
DEV_MODE = "OBD_DEV_MODE"
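COMMAND_ENV itself lives in tool.py and is not shown in this diff; conceptually it behaves like a small key-value store persisted to the `.obd_environ` file. A minimal sketch under that assumption (class name and file format here are hypothetical, not the real implementation):

import json
import os

class EnvStore(object):
    # Hypothetical stand-in for COMMAND_ENV: a dict persisted to disk as JSON.
    def __init__(self):
        self._path, self._env = None, {}

    def load(self, path, stdio=None):
        self._path = path
        if os.path.exists(path):
            with open(path) as f:
                self._env = json.load(f)

    def get(self, key, default=None):
        return self._env.get(key, default)

    def set(self, key, value, save=False, stdio=None):
        self._env[key] = str(value)
        return self._save() if save else True

    def _save(self):
        with open(self._path, 'w') as f:
            json.dump(self._env, f)
        return True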
class AllowUndefinedOptionParser(OptionParser):
IS_TTY = sys.stdin.isatty()
def __init__(self,
usage=None,
......@@ -65,7 +74,10 @@ class AllowUndefinedOptionParser(OptionParser):
self.allow_undefine = allow_undefine
def warn(self, msg, file=None):
print ('warn: %s' % msg)
if self.IS_TTY:
print("%s %s" % (IO.WARNING_PREV, msg))
else:
print('warn: %s' % msg)
def _process_long_opt(self, rargs, values):
try:
......@@ -88,7 +100,7 @@ class AllowUndefinedOptionParser(OptionParser):
if self.allow_undefine:
key = e.opt_str
value = value[len(key)+1:]
setattr(values, key.strip('-').replace('-', '_'), value if value != '' else True)
setattr(values, key.strip('-').replace('-', '_'), value if value != '' else True)
return self.warn(e)
else:
raise e
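For illustration, a self-contained sketch of the same allow-undefine fallback (parser and option names here are hypothetical): an unrecognized long option is stored on the values object instead of aborting, with `-` translated to `_` and a bare flag coerced to True.

from optparse import OptionParser, BadOptionError

class LenientParser(OptionParser):
    # Unknown long options are attached to the values object instead of erroring.
    def _process_long_opt(self, rargs, values):
        arg = rargs[0]
        try:
            OptionParser._process_long_opt(self, rargs, values)
        except BadOptionError as e:
            key = e.opt_str
            value = arg[len(key) + 1:]
            # '--fake-opt=1' -> values.fake_opt == '1'; bare '--flag' -> True
            setattr(values, key.strip('-').replace('-', '_'), value if value != '' else True)

opts, _ = LenientParser().parse_args(['--fake-opt=1', '--flag'])
assert opts.fake_opt == '1' and opts.flag is True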
......@@ -141,9 +153,8 @@ class BaseCommand(object):
class ObdCommand(BaseCommand):
OBD_PATH = os.path.join(os.environ.get('OBD_HOME', os.getenv('HOME')), '.obd')
OBD_INSTALL_PRE = os.environ.get('OBD_INSTALL_PRE', '/')
OBD_DEV_MODE_FILE = '.dev_mode'
OBD_PATH = OBD_HOME_PATH
OBD_INSTALL_PRE = os.environ.get(CONST_OBD_INSTALL_PRE, '/')
def init_home(self):
version_path = os.path.join(self.OBD_PATH, 'version')
......@@ -164,13 +175,13 @@ class ObdCommand(BaseCommand):
version_fobj.flush()
version_fobj.close()
@property
def dev_mode_path(self):
return os.path.join(self.OBD_PATH, self.OBD_DEV_MODE_FILE)
@property
def dev_mode(self):
return os.path.exists(self.dev_mode_path)
return COMMAND_ENV.get(DEV_MODE) == "1"
def parse_command(self):
self.parser.allow_undefine = self.dev_mode
return super(ObdCommand, self).parse_command()
def parse_command(self):
self.parser.allow_undefine = self.dev_mode
......@@ -203,6 +214,8 @@ class ObdCommand(BaseCommand):
ROOT_IO.exception('Another app is currently holding the obd lock.')
except SystemExit:
pass
except KeyboardInterrupt:
ROOT_IO.exception('Keyboard Interrupt')
except:
e = sys.exc_info()[1]
ROOT_IO.exception('Running Error: %s' % e)
......@@ -281,8 +294,7 @@ class DevModeEnableCommand(HiddenObdCommand):
super(DevModeEnableCommand, self).__init__('enable', 'Enable Dev Mode')
def _do_command(self, obd):
from tool import FileUtil
if FileUtil.open(self.dev_mode_path, _type='w', stdio=obd.stdio):
if COMMAND_ENV.set(DEV_MODE, "1", save=True, stdio=obd.stdio):
obd.stdio.print("Dev Mode: ON")
return True
return False
......@@ -294,8 +306,7 @@ class DevModeDisableCommand(HiddenObdCommand):
super(DevModeDisableCommand, self).__init__('disable', 'Disable Dev Mode')
def _do_command(self, obd):
from tool import FileUtil
if FileUtil.rm(self.dev_mode_path, stdio=obd.stdio):
if COMMAND_ENV.set(DEV_MODE, "0", save=True, stdio=obd.stdio):
obd.stdio.print("Dev Mode: OFF")
return True
return False
......@@ -309,6 +320,78 @@ class DevModeMajorCommand(HiddenMajorCommand):
self.register_command(DevModeDisableCommand())
class EnvironmentSetCommand(HiddenObdCommand):
def __init__(self):
super(EnvironmentSetCommand, self).__init__("set", "Set obd environment variable")
def init(self, cmd, args):
super(EnvironmentSetCommand, self).init(cmd, args)
self.parser.set_usage('%s [key] [value]' % self.prev_cmd)
return self
def _do_command(self, obd):
if len(self.cmds) == 2:
key = self.cmds[0]
if key in FORBIDDEN_VARS:
obd.stdio.error("Set the environment variable {} is not allowed.".format(key))
return False
return COMMAND_ENV.set(key, self.cmds[1], save=True, stdio=obd.stdio)
else:
return self._show_help()
class EnvironmentUnsetCommand(HiddenObdCommand):
def __init__(self):
super(EnvironmentUnsetCommand, self).__init__("unset", "Unset obd environment variable")
def init(self, cmd, args):
super(EnvironmentUnsetCommand, self).init(cmd, args)
self.parser.set_usage('%s [key]' % self.prev_cmd)
return self
def _do_command(self, obd):
if len(self.cmds) == 1:
return COMMAND_ENV.delete(self.cmds[0], save=True, stdio=obd.stdio)
else:
return self._show_help()
class EnvironmentShowCommand(HiddenObdCommand):
def __init__(self):
super(EnvironmentShowCommand, self).__init__("show", "Show obd environment variables")
self.parser.add_option('-A', '--all', action="store_true", help="Show all environment variables including system variables")
def _do_command(self, obd):
if self.opts.all:
envs = COMMAND_ENV.copy().items()
else:
envs = COMMAND_ENV.show_env().items()
obd.stdio.print_list(envs, ["Key", "Value"], title="Environ")
return True
class EnvironmentClearCommand(HiddenObdCommand):
def __init__(self):
super(EnvironmentClearCommand, self).__init__("clear", "Clear obd environment variables")
def _do_command(self, obd):
return COMMAND_ENV.clear(stdio=obd.stdio)
class EnvironmentMajorCommand(HiddenMajorCommand):
def __init__(self):
super(EnvironmentMajorCommand, self).__init__('env', 'Environment variables for OBD')
self.register_command(EnvironmentSetCommand())
self.register_command(EnvironmentUnsetCommand())
self.register_command(EnvironmentShowCommand())
self.register_command(EnvironmentClearCommand())
class MirrorCloneCommand(ObdCommand):
def __init__(self):
......@@ -382,7 +465,7 @@ class MirrorListCommand(ObdCommand):
repos = obd.mirror_manager.get_mirrors(is_enabled=None)
ROOT_IO.print_list(
repos,
['SectionName', 'Type', 'Enabled','Update Time'],
['SectionName', 'Type', 'Enabled','Update Time'],
lambda x: [x.section_name, x.mirror_type.value, x.enabled, time.strftime("%Y-%m-%d %H:%M", time.localtime(x.repo_age))],
title='Mirror Repository List'
)
......@@ -413,7 +496,7 @@ class MirrorEnableCommand(ObdCommand):
def __init__(self):
super(MirrorEnableCommand, self).__init__('enable', 'Enable remote mirror repository.')
def _do_command(self, obd):
name = self.cmds[0]
return obd.mirror_manager.set_remote_mirror_enabled(name, True)
......@@ -423,7 +506,7 @@ class MirrorDisableCommand(ObdCommand):
def __init__(self):
super(MirrorDisableCommand, self).__init__('disable', 'Disable remote mirror repository.')
def _do_command(self, obd):
name = self.cmds[0]
return obd.mirror_manager.set_remote_mirror_enabled(name, False)
......@@ -451,7 +534,7 @@ class RepositoryListCommand(ObdCommand):
repos,
['name', 'version', 'release', 'arch', 'md5', 'tags'],
lambda x: [x.name, x.version, x.release, x.arch, x.md5, ', '.join(x.tags)],
title='%s Local Repository List' % name if name else ''
title='%s Local Repository List' % name if name else 'Local Repository List'
)
def _do_command(self, obd):
......@@ -511,6 +594,7 @@ class ClusterAutoDeployCommand(ClusterMirrorCommand):
super(ClusterAutoDeployCommand, self).__init__('autodeploy', 'Deploy a cluster automatically by using a simple configuration file.')
self.parser.add_option('-c', '--config', type='string', help="Path to the configuration file.")
self.parser.add_option('-f', '--force', action='store_true', help="Force autodeploy, overwrite the home_path.")
self.parser.add_option('-C', '--clean', action='store_true', help="Clean the home path if the directory belongs to you.", default=False)
self.parser.add_option('-U', '--unuselibrepo', '--ulp', action='store_true', help="Disable OBD from installing the libs mirror automatically.")
self.parser.add_option('-A', '--auto-create-tenant', '--act', action='store_true', help="Automatically create a tenant named `test` by using all the available resource of the cluster.")
self.parser.add_option('--force-delete', action='store_true', help="Force delete, delete the registered cluster.")
......@@ -518,6 +602,8 @@ class ClusterAutoDeployCommand(ClusterMirrorCommand):
def _do_command(self, obd):
if self.cmds:
if getattr(self.opts, 'force', False) or getattr(self.opts, 'clean', False):
setattr(self.opts, 'skip_cluster_status_check', True)
name = self.cmds[0]
if obd.genconfig(name, self.opts):
self.opts.config = ''
......@@ -533,12 +619,15 @@ class ClusterDeployCommand(ClusterMirrorCommand):
super(ClusterDeployCommand, self).__init__('deploy', 'Deploy a cluster by using the current deploy configuration or a deploy yaml file.')
self.parser.add_option('-c', '--config', type='string', help="Path to the configuration yaml file.")
self.parser.add_option('-f', '--force', action='store_true', help="Force deploy, overwrite the home_path.", default=False)
self.parser.add_option('-C', '--clean', action='store_true', help="Clean the home path if the directory belongs to you.", default=False)
self.parser.add_option('-U', '--unuselibrepo', '--ulp', action='store_true', help="Disable OBD from installing the libs mirror automatically.")
self.parser.add_option('-A', '--auto-create-tenant', '--act', action='store_true', help="Automatically create a tenant named `test` by using all the available resource of the cluster.")
# self.parser.add_option('-F', '--fuzzymatch', action='store_true', help="enable fuzzy match when search package")
def _do_command(self, obd):
if self.cmds:
if getattr(self.opts, 'force', False) or getattr(self.opts, 'clean', False):
setattr(self.opts, 'skip_cluster_status_check', True)
return obd.deploy_cluster(self.cmds[0], self.opts)
else:
return self._show_help()
......@@ -669,14 +758,14 @@ class ClusterEditConfigCommand(ClusterMirrorCommand):
class ClusterChangeRepositoryCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterChangeRepositoryCommand, self).__init__('change-repo', 'Change repository for a deployed component')
super(ClusterChangeRepositoryCommand, self).__init__('reinstall', 'Reinstall a deployed component')
self.parser.add_option('-c', '--component', type='string', help="Component name to change repository.")
self.parser.add_option('--hash', type='string', help="Repository's hash")
self.parser.add_option('-f', '--force', action='store_true', help="force change even start failed.")
def _do_command(self, obd):
if self.cmds:
return obd.change_repository(self.cmds[0], self.opts)
return obd.reinstall(self.cmds[0], self.opts)
else:
return self._show_help()
......@@ -703,15 +792,18 @@ class ClusterTenantCreateCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterTenantCreateCommand, self).__init__('create', 'Create a tenant.')
self.parser.add_option('-n', '--tenant-name', type='string', help="The tenant name. The default tenant name is [test].", default='test')
self.parser.add_option('-t', '-n', '--tenant-name', type='string', help="The tenant name. The default tenant name is [test].", default='test')
self.parser.add_option('--max-cpu', type='float', help="Max CPU unit number.")
self.parser.add_option('--min-cpu', type='float', help="Min CPU unit number.")
self.parser.add_option('--max-memory', type='int', help="Max memory unit size.")
self.parser.add_option('--min-memory', type='int', help="Min memory unit size.")
self.parser.add_option('--max-disk-size', type='int', help="Max disk unit size.")
self.parser.add_option('--max-iops', type='int', help="Max IOPS unit number. [128].", default=128)
self.parser.add_option('--max-memory', type='string', help="Max memory unit size. Not supported after version 4.0; use `--memory-size` instead.")
self.parser.add_option('--min-memory', type='string', help="Min memory unit size. Not supported after version 4.0; use `--memory-size` instead.")
self.parser.add_option('--memory-size', type='string', help="Memory unit size. Supported since version 4.0.")
self.parser.add_option('--max-disk-size', type='string', help="Max disk unit size. Not supported after version 4.0.")
self.parser.add_option('--log-disk-size', type='string', help="Log disk unit size.")
self.parser.add_option('--max-iops', type='int', help="Max IOPS unit number.")
self.parser.add_option('--min-iops', type='int', help="Min IOPS unit number.")
self.parser.add_option('--max-session-num', type='int', help="Max session unit number. [64].", default=64)
self.parser.add_option('--iops-weight', type='int', help="The weight of IOPS. When Max IOPS is greater than Min IOPS, the weight of idle resources available to the current tenant. Supported since version 4.0.")
self.parser.add_option('--max-session-num', type='int', help="Max session unit number. Not supported after version 4.0")
self.parser.add_option('--unit-num', type='int', help="Pool unit number.")
self.parser.add_option('-z', '--zone-list', type='string', help="Tenant zone list.")
self.parser.add_option('--charset', type='string', help="Tenant charset.")
......@@ -734,7 +826,7 @@ class ClusterTenantDropCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterTenantDropCommand, self).__init__('drop', 'Drop a tenant.')
self.parser.add_option('-n', '--tenant-name', type='string', help="Tenant name.")
self.parser.add_option('-t', '-n', '--tenant-name', type='string', help="Tenant name.")
def _do_command(self, obd):
if self.cmds:
......@@ -793,23 +885,52 @@ class MySQLTestCommand(TestMirrorCommand):
self.parser.add_option('--mysqltest-bin', type='string', help='Mysqltest bin path. [/u01/obclient/bin/mysqltest]', default='/u01/obclient/bin/mysqltest')
self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')
self.parser.add_option('--test-dir', type='string', help='Test case file directory. [./mysql_test/t]', default='./mysql_test/t')
self.parser.add_option('--test-file-suffix', type='string', help='Test case file suffix. [.test]', default='.test')
self.parser.add_option('--result-dir', type='string', help='Result case file directory. [./mysql_test/r]', default='./mysql_test/r')
self.parser.add_option('--result-file-suffix', type='string', help='Result file suffix. [.result]', default='.result')
self.parser.add_option('--record', action='store_true', help='Record mysqltest execution results.', default=False)
self.parser.add_option('--record-dir', type='string', help='The directory of the result file for mysqltest.')
self.parser.add_option('--log-dir', type='string', help='The log file directory. [./log]', default='./log')
self.parser.add_option('--record-dir', type='string', help='The directory of the result file for mysqltest.', default='./record')
self.parser.add_option('--record-file-suffix', type='string', help='Record file suffix. [.record]', default='.record')
self.parser.add_option('--log-dir', type='string', help='The log file directory.')
self.parser.add_option('--tmp-dir', type='string', help='Temporary directory for mysqltest. [./tmp]', default='./tmp')
self.parser.add_option('--var-dir', type='string', help='Var directory to use when run mysqltest. [./var]', default='./var')
self.parser.add_option('--test-set', type='string', help='Test list. Multiple tests are separated with commas.')
self.parser.add_option('--exclude', type='string', help='Exclude list. Multiple tests are separated with commas.')
self.parser.add_option('--test-pattern', type='string', help='Pattern for test file.')
self.parser.add_option('--suite', type='string', help='Suite list. Multiple suites are separated with commas.')
self.parser.add_option('--suite-dir', type='string', help='Suite case directory. [./mysql_test/test_suite]', default='./mysql_test/test_suite')
self.parser.add_option('--init-sql-dir', type='string', help='Initiate sql directory. [../]', default='../')
self.parser.add_option('--init-sql-dir', type='string', help='Initiate sql directory. [./]', default='./')
self.parser.add_option('--init-sql-files', type='string', help='Initiate sql file list. Multiple files are separated with commas.')
self.parser.add_option('--need-init', action='store_true', help='Execute the init SQL file.', default=False)
self.parser.add_option('--init-only', action='store_true', help='Exit after executing init SQL.', default=False)
self.parser.add_option('--auto-retry', action='store_true', help='Auto retry when fails.', default=False)
self.parser.add_option('--all', action='store_true', help='Run all suite-dir cases.', default=False)
self.parser.add_option('--all', action='store_true', help='Run all cases.', default=False)
self.parser.add_option('--psmall', action='store_true', help='Run psmall cases.', default=False)
self.parser.add_option('--special-run', action='store_true', help='Run mysqltest in special mode.', default=False)
self.parser.add_option('--sp-hint', type='string', help='Run test with the specified hint.', default='')
self.parser.add_option('--sort-result', action='store_true', help='Sort query results.', default=False)
# self.parser.add_option('--java', action='store_true', help='use java sdk', default=False)
self.parser.add_option('--slices', type='int', help='The number of slices to split the test set into.')
self.parser.add_option('--slice-idx', type='int', help='The index of the slice to run.')
self.parser.add_option('--slb-host', type='string', help='The host of the soft load balancer.')
self.parser.add_option('--exec-id', type='string', help='The unique execute id.')
self.parser.add_option('--case-filter', type='string', help='The case filter file for mysqltest.')
self.parser.add_option('--psmall-test', type='string', help='The file that maintains psmall cases.', default='./mysql_test/psmalltest.py')
self.parser.add_option('--psmall-source', type='string', help='The file that maintains psmall source control.', default='./mysql_test/psmallsource.py')
self.parser.add_option('--ps', action='store_true', help='Run in ps mode.', default=False)
self.parser.add_option('--test-tags', type='string', help='The file that maintains basic tags.', default='./mysql_test/test_tags.py')
self.parser.add_option('--tags', type='string', help='Run cases by tag.', default='')
self.parser.add_option('--regress-suite-map', type='string', help='The file that maintains the basic regress suite map.', default='./regress_suite_map.py')
self.parser.add_option('--regress_suite', type='string', help='Run cases by regress_suite.', default='')
self.parser.add_option('--reboot-cases', type='string', help='The file that maintains reboot cases.')
self.parser.add_option('--reboot-timeout', type='int', help='The timeout of observer bootstrap.', default=0)
self.parser.add_option('--reboot-retries', type='int', help='The number of retries when a reboot fails.', default=5)
self.parser.add_option('--collect-all', action='store_true', help='Collect server logs.', default=False)
self.parser.add_option('--collect-components', type='string', help='The components whose logs need to be collected. Multiple components are separated with commas.')
self.parser.add_option('--case-timeout', type='int', help='The timeout of a mysqltest case.')
self.parser.add_option('--log-pattern', type='string', help='The pattern for collected server logs. [*.log]', default='*.log')
self.parser.add_option('--cluster-mode', type='string', help="The mode of mysqltest")
self.parser.add_option('--disable-reboot', action='store_true', help='Never reboot during test.', default=False)
def _do_command(self, obd):
if self.cmds:
......@@ -826,7 +947,7 @@ class SysBenchCommand(TestMirrorCommand):
self.parser.add_option('--test-server', type='string', help='The server for test. By default, the first root server in the component is the test server.')
self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root')
self.parser.add_option('--password', type='string', help='Password for a test.')
self.parser.add_option('--tenant', type='string', help='Tenant for a test. [test]', default='test')
self.parser.add_option('-t', '--tenant', type='string', help='Tenant for a test. [test]', default='test')
self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test')
self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')
self.parser.add_option('--sysbench-bin', type='string', help='Sysbench bin path. [sysbench]', default='sysbench')
......@@ -858,7 +979,7 @@ class TPCHCommand(TestMirrorCommand):
self.parser.add_option('--test-server', type='string', help='The server for a test. By default, the first root server in the component is the test server.')
self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root')
self.parser.add_option('--password', type='string', help='Password for a test.')
self.parser.add_option('--tenant', type='string', help='Tenant for a test. [test]', default='test')
self.parser.add_option('-t', '--tenant', type='string', help='Tenant for a test. [test]', default='test')
self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test')
self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')
self.parser.add_option('--dbgen-bin', type='string', help='dbgen bin path. [/usr/tpc-h-tools/tpc-h-tools/bin/dbgen]', default='/usr/tpc-h-tools/tpc-h-tools/bin/dbgen')
......@@ -888,7 +1009,7 @@ class TPCCCommand(TestMirrorCommand):
self.parser.add_option('--test-server', type='string', help='The server for a test. By default, the first root server in the component is the test server.')
self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root')
self.parser.add_option('--password', type='string', help='Password for a test.')
self.parser.add_option('--tenant', type='string', help='Tenant for a test. [test]', default='test')
self.parser.add_option('-t', '--tenant', type='string', help='Tenant for a test. [test]', default='test')
self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test')
self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')
self.parser.add_option('--java-bin', type='string', help='Java bin path. [java]', default='java')
......@@ -897,10 +1018,10 @@ class TPCCCommand(TestMirrorCommand):
self.parser.add_option('--bmsql-jar', type='string', help='BenchmarkSQL jar path.')
self.parser.add_option('--bmsql-libs', type='string', help='BenchmarkSQL libs path.')
self.parser.add_option('--bmsql-sql-dir', type='string', help='The directory of BenchmarkSQL sql scripts.')
self.parser.add_option('--warehouses', type='int', help='The number of warehouses.')
self.parser.add_option('--warehouses', type='int', help='The number of warehouses. [10]', default=10)
self.parser.add_option('--load-workers', type='int', help='The number of workers to load data.')
self.parser.add_option('--terminals', type='int', help='The number of terminals.')
self.parser.add_option('--run-mins', type='int', help='To run for specified minutes.', default=10)
self.parser.add_option('--run-mins', type='int', help='To run for specified minutes. [10]', default=10)
self.parser.add_option('--test-only', action='store_true', help='Only testing SQLs are executed. No initialization is executed.')
self.parser.add_option('-O', '--optimization', type='int', help='Optimization level {0/1/2}. [1] 0 - No optimization. 1 - Optimize some of the parameters which do not need to restart servers. 2 - Optimize all the parameters and maybe RESTART SERVERS for better performance.', default=1)
......@@ -909,7 +1030,7 @@ class TPCCCommand(TestMirrorCommand):
return obd.tpcc(self.cmds[0], self.opts)
else:
return self._show_help()
class TestMajorCommand(MajorCommand):
......@@ -921,6 +1042,59 @@ class TestMajorCommand(MajorCommand):
self.register_command(TPCCCommand())
class DbConnectCommand(HiddenObdCommand):
def init(self, cmd, args):
super(DbConnectCommand, self).init(cmd, args)
self.parser.set_usage('%s <deploy name> [options]' % self.prev_cmd)
return self
def __init__(self):
super(DbConnectCommand, self).__init__('db_connect', 'Establish a database connection to the deployment.')
self.parser.add_option('-c', '--component', type='string', help='The component used by database connection.')
self.parser.add_option('-s', '--server', type='string', help='The server used by database connection. The first server in the configuration will be used by default.')
self.parser.add_option('-u', '--user', type='string', help='The username used by database connection. [root]', default='root')
self.parser.add_option('-p', '--password', type='string', help='The password used by database connection.')
self.parser.add_option('-t', '--tenant', type='string', help='The tenant used by database connection. [sys]', default='sys')
self.parser.add_option('-D', '--database', type='string', help='The database name used by database connection.')
self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')
def _do_command(self, obd):
if self.cmds:
return obd.db_connect(self.cmds[0], self.opts)
else:
return self._show_help()
class CommandsCommand(HiddenObdCommand):
def init(self, cmd, args):
super(CommandsCommand, self).init(cmd, args)
self.parser.set_usage('%s <deploy name> <command> [options]' % self.prev_cmd)
return self
def __init__(self):
super(CommandsCommand, self).__init__('command', 'Common tool commands')
self.parser.add_option('-c', '--components', type='string', help='The components used by the command. The first component in the configuration will be used by default in interactive commands, and all available components will be used by default in non-interactive commands.')
self.parser.add_option('-s', '--servers', type='string', help='The servers used by the command. The first server in the configuration will be used by default in interactive commands, and all available servers will be used by default in non-interactive commands.')
def _do_command(self, obd):
if len(self.cmds) == 2:
return obd.commands(self.cmds[0], self.cmds[1], self.opts)
else:
return self._show_help()
class ToolCommand(HiddenMajorCommand):
def __init__(self):
super(ToolCommand, self).__init__('tool', 'Tools')
self.register_command(DbConnectCommand())
self.register_command(CommandsCommand())
class BenchMajorCommand(MajorCommand):
def __init__(self):
......@@ -952,6 +1126,8 @@ class MainCommand(MajorCommand):
self.register_command(RepositoryMajorCommand())
self.register_command(TestMajorCommand())
self.register_command(UpdateCommand())
self.register_command(EnvironmentMajorCommand())
self.register_command(ToolCommand())
self.parser.version = '''OceanBase Deploy: %s
REVISION: %s
BUILD_BRANCH: %s
......
......@@ -29,13 +29,16 @@ from enum import Enum
from ruamel.yaml.comments import CommentedMap
from tool import ConfigUtil, FileUtil, YamlLoader, OrderedDict
from tool import ConfigUtil, FileUtil, YamlLoader, OrderedDict, COMMAND_ENV
from _manager import Manager
from _repository import Repository
from _stdio import SafeStdio
yaml = YamlLoader()
DEFAULT_CONFIG_PARSER_MANAGER = None
ENV = 'env'
BASE_DIR_KEY = "OBD_DEPLOY_BASE_DIR"
class ParserError(Exception):
......@@ -96,24 +99,51 @@ class ServerConfigFlyweightFactory(object):
return ServerConfigFlyweightFactory._CACHE[_key]
class RsyncConfig(object):
RSYNC = 'runtime_dependencies'
SOURCE_PATH = 'src_path'
TARGET_PATH = 'target_path'
class InnerConfigItem(str):
pass
class InnerConfigKeywords(object):
DEPLOY_INSTALL_MODE = 'deploy_install_mode'
DEPLOY_BASE_DIR = 'deploy_base_dir'
class InnerConfig(object):
keyword_symbol = "$_"
def __init__(self, path, yaml_loader):
self.path = path
self.yaml_loader = yaml_loader
self.config = {}
self._load()
def is_keyword(self, s):
return s.startswith(self.keyword_symbol)
def to_keyword(self, key):
return "{}{}".format(self.keyword_symbol, key)
def keyword_to_str(self, _keyword):
return str(_keyword.replace(self.keyword_symbol, '', 1))
def _load(self):
self.config = {}
try:
with FileUtil.open(self.path, 'rb') as f:
config = self.yaml_loader.load(f)
for component_name in config:
if self.is_keyword(component_name):
self.config[InnerConfigItem(component_name)] = config[component_name]
continue
self.config[component_name] = {}
c_config = config[component_name]
for server in c_config:
......@@ -142,7 +172,14 @@ class InnerConfig(object):
return self.config.get(component_name, {})
def get_server_config(self, component_name, server):
return self.get_component(component_name).get(server, {})
return self.get_component_config(component_name).get(server, {})
def get_global_config(self, key, default=None):
key = self.to_keyword(key)
return self.config.get(key, default)
def update_global_config(self, key, value):
self.config[self.to_keyword(key)] = value
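The `$_` prefix lets deploy-level keys share one YAML mapping with component names without colliding. A toy round-trip of the convention used by the three helpers above:

keyword_symbol = "$_"
key = "deploy_base_dir"
keyword = "{}{}".format(keyword_symbol, key)          # to_keyword -> '$_deploy_base_dir'
assert keyword.startswith(keyword_symbol)             # is_keyword
assert keyword.replace(keyword_symbol, '', 1) == key  # keyword_to_str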
def update_component_config(self, component_name, config):
self.config[component_name] = {}
......@@ -165,11 +202,11 @@ class ConfigParser(object):
@classmethod
def _is_inner_item(cls, key):
return isinstance(key, InnerConfigItem) and key.startswith(cls.PREFIX)
@classmethod
def extract_inner_config(cls, cluster_config, config):
return {}
@classmethod
def _to_cluster_config(cls, component_name, config):
raise NotImplementedError
......@@ -177,18 +214,19 @@ class ConfigParser(object):
@classmethod
def to_cluster_config(cls, component_name, config):
cluster_config = cls._to_cluster_config(component_name, config)
cluster_config.set_include_file(config.get('include', ''))
cluster_config.parser = cls
return cluster_config
@classmethod
def _from_cluster_config(cls, conf, cluster_config):
raise NotImplementedError
@classmethod
def from_cluster_config(cls, cluster_config):
if not cls.STYLE:
raise NotImplementedError('undefined Style ConfigParser')
conf = CommentedMap()
conf['style'] = cls.STYLE
if cluster_config.origin_package_hash:
......@@ -205,7 +243,7 @@ class ConfigParser(object):
'inner_config': inner_config,
'config': conf
}
@classmethod
def get_server_src_conf(cls, cluster_config, component_config, server):
if server.name not in component_config:
......@@ -246,10 +284,18 @@ class DefaultConfigParser(ConfigParser):
component_name,
ConfigUtil.get_value_from_dict(conf, 'version', None, str),
ConfigUtil.get_value_from_dict(conf, 'tag', None, str),
ConfigUtil.get_value_from_dict(conf, 'release', None, str),
ConfigUtil.get_value_from_dict(conf, 'package_hash', None, str)
)
if 'global' in conf:
cluster_config.set_global_conf(conf['global'])
if RsyncConfig.RSYNC in conf:
cluster_config.set_rsync_list(conf[RsyncConfig.RSYNC])
if ENV in conf:
cluster_config.set_environments(conf[ENV])
for server in servers:
if server.name in conf:
cluster_config.add_server_conf(server, conf[server.name])
......@@ -269,7 +315,7 @@ class DefaultConfigParser(ConfigParser):
for server in cluster_config.servers:
inner_config[server.name][key] = global_config[key]
del global_config[key]
for server in cluster_config.servers:
if server.name not in config:
continue
......@@ -301,21 +347,32 @@ class DefaultConfigParser(ConfigParser):
class ClusterConfig(object):
def __init__(self, servers, name, version, tag, package_hash, parser=None):
self.version = version
def __init__(self, servers, name, version, tag, release, package_hash, parser=None):
self._version = version
self.origin_version = version
self.tag = tag
self.origin_tag = tag
self._release = release
self.origin_release = release
self.name = name
self.origin_package_hash = package_hash
self.package_hash = package_hash
self._package_hash = package_hash
self._temp_conf = {}
self._default_conf = {}
self._global_conf = {}
self._global_conf = None
self._server_conf = {}
self._cache_server = {}
self._original_global_conf = {}
self._rsync_list = None
self._include_config = None
self._origin_rsync_list = {}
self._include_file = None
self._origin_include_file = None
self._origin_include_config = None
self._environments = None
self._origin_environments = {}
self._inner_config = {}
self._base_dir = ''
servers = list(servers)
self.servers = servers
self._original_servers = servers # preserve the original order
......@@ -325,10 +382,12 @@ class ClusterConfig(object):
self._deploy_config = None
self._depends = {}
self.parser = parser
self._has_package_pattern = None
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
# todo: also check rsync, include, etc.
return self._global_conf == other._global_conf and self._server_conf == other._server_conf
def __deepcopy__(self, memo):
......@@ -344,8 +403,17 @@ class ClusterConfig(object):
def set_deploy_config(self, _deploy_config):
if self._deploy_config is None:
self._deploy_config = _deploy_config
self.set_base_dir(self._deploy_config.get_base_dir())
return True
return False
def set_base_dir(self, base_dir):
if self._base_dir != base_dir:
self._base_dir = base_dir
self._rsync_list = None
self._include_config = None
self._global_conf = None
@property
def original_servers(self):
return self._original_servers
......@@ -361,6 +429,12 @@ class ClusterConfig(object):
def get_inner_config(self):
return self._inner_config
def is_cp_install_mode(self):
return self._deploy_config.is_cp_install_mode()
def is_ln_install_mode(self):
return self._deploy_config.is_ln_install_mode()
def apply_inner_config(self, config):
self._inner_config = config
self._clear_cache_server()
......@@ -416,6 +490,23 @@ class ClusterConfig(object):
self._original_global_conf[key] = value
self._global_conf[key] = value
def update_rsync_list(self, rsync_list, save=True):
if self._deploy_config is None:
return False
if not self._deploy_config.update_component_rsync_list(self.name, rsync_list, save):
return False
self._rsync_list = rsync_list
return True
def update_environments(self, environments, save=True):
if self._deploy_config is None:
return False
if not self._deploy_config.update_component_environments(self.name, environments, save):
return False
self._origin_environments = environments
self._environments = None
return True
def get_unconfigured_require_item(self, server):
items = []
config = self.get_server_conf(server)
......@@ -464,18 +555,19 @@ class ClusterConfig(object):
for key in self._temp_conf:
if self._temp_conf[key].require and self._temp_conf[key].default is not None:
self._default_conf[key] = self._temp_conf[key].default
self.set_global_conf(self._global_conf) # update the global config
self._global_conf = None
self._clear_cache_server()
def get_temp_conf_item(self, key):
if self._temp_conf:
return self._temp_conf.get(key)
else:
return None
def check_param(self):
error = []
if self._temp_conf:
error += self._check_param(self._global_conf)
error += self._check_param(self.get_global_conf())
for server in self._server_conf:
error += self._check_param(self._server_conf[server])
return not error, set(error)
......@@ -493,10 +585,22 @@ class ClusterConfig(object):
def set_global_conf(self, conf):
self._original_global_conf = deepcopy(conf)
self._global_conf = deepcopy(self._default_conf)
self._global_conf.update(self._original_global_conf)
self._global_conf = None
self._clear_cache_server()
def set_rsync_list(self, configs):
self._origin_rsync_list = configs
def set_include_file(self, path):
if path != self._origin_include_file:
self._origin_include_file = path
self._include_file = None
self._include_config = None
def set_environments(self, config):
self._origin_environments = config
self._environments = None
def add_server_conf(self, server, conf):
if server not in self.servers:
self.servers.append(server)
......@@ -506,14 +610,115 @@ class ClusterConfig(object):
self._cache_server[server] = None
def get_global_conf(self):
if self._global_conf is None:
self._global_conf = deepcopy(self._default_conf)
self._global_conf.update(self._get_include_config('config', {}))
self._global_conf.update(self._original_global_conf)
return self._global_conf
def _add_base_dir(self, path):
if not os.path.isabs(path):
if self._base_dir:
path = os.path.join(self._base_dir, path)
else:
raise Exception("`{}` need to use absolute paths. If you want to use relative paths, please enable developer mode "
"and set environment variables {}".format(RsyncConfig.RSYNC, BASE_DIR_KEY))
return path
@property
def has_package_pattern(self):
if self._has_package_pattern is None:
patterns = (self.origin_package_hash, self.origin_version, self.origin_release, self.origin_tag)
self._has_package_pattern = any([x is not None for x in patterns])
return self._has_package_pattern
@property
def version(self):
if self._version is None:
self._version = self.config_version
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def config_version(self):
if not self.has_package_pattern:
return self._get_include_config('version', None)
else:
return self.origin_version
@property
def release(self):
if self._release is None:
self._release = self.config_release
return self._release
@release.setter
def release(self, value):
self._release = value
@property
def config_release(self):
if not self.has_package_pattern:
return self._get_include_config('release', None)
else:
return self.origin_release
@property
def package_hash(self):
if self._package_hash is None:
self._package_hash = self.config_package_hash
return self._package_hash
@package_hash.setter
def package_hash(self, value):
self._package_hash = value
@property
def config_package_hash(self):
if not self.has_package_pattern:
return self._get_include_config('package_hash', None)
else:
return self.origin_package_hash
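version, release, and package_hash above all follow the same lazy pattern: a value pinned in the deploy config wins, and only when no package pattern is pinned does the include file get to supply one. A condensed sketch of that pattern, simplified to a single field (class and values invented for illustration):

class PackageRef(object):
    # Simplified: an explicitly configured value wins; otherwise fall back
    # to whatever the include file provides.
    def __init__(self, version=None, include=None):
        self._version = version
        self.origin_version = version
        self._include = include or {}

    @property
    def version(self):
        if self._version is None:
            self._version = self._include.get('version')
        return self._version

assert PackageRef(version='4.0.0.0').version == '4.0.0.0'
assert PackageRef(include={'version': '3.1.4'}).version == '3.1.4'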
def _get_include_config(self, key=None, default=None, not_found_act="ignore"):
if self._include_config is None:
if self._origin_include_file:
if os.path.isabs(self._origin_include_file):
include_file = self._origin_include_file
else:
include_file = os.path.join(self._base_dir, self._origin_include_file)
if include_file != self._include_file:
self._include_file = include_file
self._origin_include_config = self._deploy_config.load_include_file(self._include_file)
if self._origin_include_config is None:
self._origin_include_config = {}
self._include_config = self._origin_include_config
value = self._include_config.get(key, default) if key else self._include_config
return deepcopy(value)
def get_rsync_list(self):
if self._rsync_list is None:
self._rsync_list = self._get_include_config(RsyncConfig.RSYNC, [])
self._rsync_list += self._origin_rsync_list
for item in self._rsync_list:
item[RsyncConfig.SOURCE_PATH] = self._add_base_dir(item[RsyncConfig.SOURCE_PATH])
return self._rsync_list
def get_environments(self):
if self._environments is None:
self._environments = self._get_include_config(ENV, OrderedDict())
self._environments.update(self._origin_environments)
return self._environments
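In get_environments, include-file values seed the dict and deploy-config values override them. A toy check of that precedence (paths invented for illustration):

from collections import OrderedDict

env = OrderedDict({'LD_LIBRARY_PATH': '/repo/lib'})  # seeded from the include file
env.update({'LD_LIBRARY_PATH': '/home/admin/lib'})   # origin (deploy config) wins
assert env['LD_LIBRARY_PATH'] == '/home/admin/lib'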
def get_server_conf(self, server):
if server not in self._server_conf:
return None
if self._cache_server[server] is None:
conf = deepcopy(self._inner_config.get(server.name, {}))
conf.update(self._global_conf)
conf.update(self.get_global_conf())
conf.update(self._server_conf[server])
self._cache_server[server] = conf
return self._cache_server[server]
......@@ -547,6 +752,12 @@ class DeployConfigStatus(Enum):
NEED_REDEPLOY = 'need redeploy'
class DeployInstallMode(object):
LN = 'ln'
CP = 'cp'
class DeployInfo(object):
def __init__(self, name, status, components=OrderedDict(), config_status=DeployConfigStatus.UNCHNAGE):
......@@ -562,9 +773,9 @@ class DeployInfo(object):
return '\n'.join(info)
class DeployConfig(object):
class DeployConfig(SafeStdio):
def __init__(self, yaml_path, yaml_loader=yaml, inner_config=None, config_parser_manager=None):
def __init__(self, yaml_path, yaml_loader=yaml, inner_config=None, config_parser_manager=None, stdio=None):
self._user = None
self.unuse_lib_repository = False
self.auto_create_tenant = False
......@@ -574,6 +785,8 @@ class DeployConfig(object):
self.yaml_path = yaml_path
self.yaml_loader = yaml_loader
self.config_parser_manager = config_parser_manager if config_parser_manager else DEFAULT_CONFIG_PARSER_MANAGER
self.stdio = stdio
self._ignore_include_error = False
if self.config_parser_manager is None:
raise ParserError('ConfigParserManager Not Set')
self._load()
......@@ -594,9 +807,13 @@ class DeployConfig(object):
else:
def get_inner_config(component_name):
return {}
for component_name in self.components:
self.components[component_name].apply_inner_config(get_inner_config(component_name))
self._inner_config = inner_config
base_dir = self.get_base_dir()
for component_name in self.components:
cluster_config = self.components[component_name]
cluster_config.apply_inner_config(get_inner_config(component_name))
cluster_config.set_base_dir(base_dir)
def set_unuse_lib_repository(self, status):
if self.unuse_lib_repository != status:
......@@ -624,7 +841,7 @@ class DeployConfig(object):
del src_data['version']
if 'tag' in src_data:
del src_data['tag']
self._src_data[component] = src_data
if self._dump():
cluster_config = self.components[component]
......@@ -665,6 +882,24 @@ class DeployConfig(object):
if not self.user:
self.set_user_conf(UserConfig())
def allow_include_error(self):
self.stdio.verbose("allow include file not exists")
self._ignore_include_error = True
def load_include_file(self, path):
if not os.path.isabs(path):
raise Exception("`{}` need to use absolute path. If you want to use relative paths, please enable developer mode "
"and set environment variables {}".format('include', BASE_DIR_KEY))
if os.path.isfile(path):
with open(path, 'rb') as f:
return self.yaml_loader.load(f)
else:
if self._ignore_include_error:
self.stdio.warn("include file: {} not found, some configurations may be lost".format(path))
return {}
else:
raise Exception('No such file: %s' % path)
def _separate_config(self):
if self.inner_config:
for component_name in self.components:
......@@ -674,7 +909,7 @@ class DeployConfig(object):
if parser:
inner_config = parser.extract_inner_config(cluster_config, src_data)
self.inner_config.update_component_config(component_name, inner_config)
def _dump_inner_config(self):
if self.inner_config:
self._separate_config()
......@@ -695,6 +930,47 @@ class DeployConfig(object):
def dump(self):
return self._dump()
def _update_global_inner_config(self, key, value, save=True):
if self.inner_config:
self.inner_config.update_global_config(key, value)
return self._dump_inner_config() if save else True
def _get_global_inner_config(self, key, default=None):
if self.inner_config:
return self.inner_config.get_global_config(key, default)
return default
def set_base_dir(self, path, save=True):
if path and not os.path.isabs(path):
raise Exception('%s is not an absolute path' % path)
if self._update_global_inner_config(InnerConfigKeywords.DEPLOY_BASE_DIR, path, save=save):
for component_name in self.components:
cluster_config = self.components[component_name]
cluster_config.set_base_dir(path)
return True
return False
def get_base_dir(self):
return self._get_global_inner_config(InnerConfigKeywords.DEPLOY_BASE_DIR, '')
def set_deploy_install_mode(self, mode, save=True):
return self._update_global_inner_config(InnerConfigKeywords.DEPLOY_INSTALL_MODE, mode, save=save)
def get_deploy_install_mode(self):
return self._get_global_inner_config(InnerConfigKeywords.DEPLOY_INSTALL_MODE, DeployInstallMode.CP)
def enable_ln_install_mode(self, save=True):
return self.set_deploy_install_mode(DeployInstallMode.LN, save=save)
def enable_cp_install_mode(self, save=True):
return self.set_deploy_install_mode(DeployInstallMode.CP, save=save)
def is_ln_install_mode(self):
return self.get_deploy_install_mode() == DeployInstallMode.LN
def is_cp_install_mode(self):
return self.get_deploy_install_mode() == DeployInstallMode.CP
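Presumably the two modes control how repository files land in home_path: `cp` makes an independent copy while `ln` links back into the local repository. A minimal sketch of the distinction (hypothetical helper, not from this patch):

import os
import shutil

def install_file(src, dst, mode):
    # 'ln': the home_path entry points back at the repository file;
    # 'cp': home_path gets an independent copy.
    if mode == 'ln':   # DeployInstallMode.LN
        os.symlink(src, dst)
    else:              # DeployInstallMode.CP
        shutil.copy(src, dst)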
def set_user_conf(self, conf):
self._user = conf
......@@ -855,7 +1131,7 @@ class Deploy(object):
def _load_deploy_config(self, path):
yaml_loader = YamlLoader(stdio=self.stdio)
deploy_config = DeployConfig(path, yaml_loader=yaml_loader, config_parser_manager=self.config_parser_manager)
deploy_config = DeployConfig(path, yaml_loader=yaml_loader, config_parser_manager=self.config_parser_manager, stdio=self.stdio)
deploy_info = self.deploy_info
for component_name in deploy_info.components:
if component_name not in deploy_config.components:
......@@ -866,10 +1142,9 @@ class Deploy(object):
cluster_config.version = config['version']
if 'hash' in config and config['hash']:
cluster_config.package_hash = config['hash']
deploy_config.inner_config = InnerConfig(self.get_inner_config_path(self.config_dir), yaml_loader=yaml_loader)
return deploy_config
@property
def temp_deploy_config(self):
path = self.get_temp_deploy_yaml_path(self.config_dir)
......@@ -1027,7 +1302,7 @@ class Deploy(object):
class ConfigParserManager(Manager):
RELATIVE_PATH = 'config_parser/'
def __init__(self, home_path, stdio=None):
super(ConfigParserManager, self).__init__(home_path, stdio)
self.global_parsers = {
......
......@@ -49,7 +49,8 @@ class InitDirFailedErrorMessage(object):
PERMISSION_DENIED = ': {path} permission denied .'
DOC_LINK_MSG = 'See https://open.oceanbase.com/docs/obd-cn/V1.4.0/10000000000436999 .'
DOC_LINK = '<DOC_LINK>'
DOC_LINK_MSG = 'See {}'.format(DOC_LINK if DOC_LINK else "https://open.oceanbase.com/docs/obd-cn/V1.4.0/10000000000436999 .")
EC_CONFIG_CONFLICT_PORT = OBDErrorCode(1000, 'Configuration conflict {server1}:{port} port is used for {server2}\'s {key}')
EC_CONFLICT_PORT = OBDErrorCode(1001, '{server}:{port} port is already used')
......
......@@ -64,7 +64,7 @@ class MixLock(object):
FileUtil.exclusive_lock_obj(self.lock_obj, stdio=self.stdio)
except Exception as e:
raise LockError(e)
def _sh_lock(self):
if self.lock_obj:
try:
......@@ -100,7 +100,7 @@ class MixLock(object):
except Exception as e:
self.stdio and getattr(self.stdio, 'stop_loading', print)('fail')
raise LockError(e)
def _lock_escalation(self, try_times):
stdio = self.stdio
while try_times:
......
......@@ -22,9 +22,10 @@ from __future__ import absolute_import, division, print_function
import os
from tool import DirectoryUtil
from _stdio import SafeStdio
class Manager(object):
class Manager(SafeStdio):
RELATIVE_PATH = ''
......
......@@ -40,11 +40,10 @@ except:
from _arch import getArchList, getBaseArch
from _rpm import Package, PackageInfo
from tool import ConfigUtil, FileUtil
from tool import ConfigUtil, FileUtil, var_replace
from _manager import Manager
_KEYCRE = re.compile(r"\$(\w+)")
_ARCH = getArchList()
_RELEASE = None
SUP_MAP = {
......@@ -118,7 +117,7 @@ class MirrorRepository(object):
self.stdio and getattr(self.stdio, 'verbose', print)('pkg %s is %s, but %s is required' % (key, getattr(pkg, key), pattern[key]))
return None
return pkg
def get_rpm_pkg_by_info(self, pkg_info):
return None
......@@ -286,14 +285,20 @@ class RemoteMirrorRepository(MirrorRepository):
if self._db is None:
fp = FileUtil.unzip(file_path, stdio=self.stdio)
if not fp:
FileUtil.rm(file_path, stdio=self.stdio)
return []
self._db = {}
parser = cElementTree.iterparse(fp)
for event, elem in parser:
if RemoteMirrorRepository.ns_cleanup(elem.tag) == 'package' and elem.attrib.get('type') == 'rpm':
info = RemotePackageInfo(elem)
self._db[info.md5] = info
self._dump_db_cache()
try:
parser = cElementTree.iterparse(fp)
for event, elem in parser:
if RemoteMirrorRepository.ns_cleanup(elem.tag) == 'package' and elem.attrib.get('type') == 'rpm':
info = RemotePackageInfo(elem)
self._db[info.md5] = info
self._dump_db_cache()
except:
FileUtil.rm(file_path, stdio=self.stdio)
self.stdio and self.stdio.critical('failed to parse file %s, please retry later.' % file_path)
return []
return self._db
def _load_db_cache(self, path):
......@@ -341,29 +346,6 @@ class RemoteMirrorRepository(MirrorRepository):
def get_db_cache_file(mirror_path):
return os.path.join(mirror_path, RemoteMirrorRepository.DB_CACHE_FILE)
@staticmethod
def var_replace(string, var):
if not var:
return string
done = []
while string:
m = _KEYCRE.search(string)
if not m:
done.append(string)
break
varname = m.group(1).lower()
replacement = var.get(varname, m.group())
start, end = m.span()
done.append(string[:start])
done.append(str(replacement))
string = string[end:]
return ''.join(done)
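The method body above moves verbatim to tool.py as a module-level var_replace (see the import change at the top of this file); its behavior is unchanged. A usage example, assuming the repo context:

from tool import var_replace  # moved here from RemoteMirrorRepository

server_vars = {'releasever': '7', 'basearch': 'x86_64'}
assert var_replace('el$releasever/$basearch', server_vars) == 'el7/x86_64'
assert var_replace('$unknown', server_vars) == '$unknown'  # unmatched names pass through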
def _load_repo_age(self):
try:
with open(self.get_repo_age_file(self.mirror_path), 'r') as f:
......@@ -817,8 +799,8 @@ class MirrorRepositorySection(object):
def get_mirror(self, server_vars, stdio=None):
meta_data = self.meta_data
meta_data['name'] = RemoteMirrorRepository.var_replace(meta_data['name'], server_vars)
meta_data['baseurl'] = RemoteMirrorRepository.var_replace(meta_data['baseurl'], server_vars)
meta_data['name'] = var_replace(meta_data['name'], server_vars)
meta_data['baseurl'] = var_replace(meta_data['baseurl'], server_vars)
mirror_path = os.path.join(self.remote_path, meta_data['name'])
mirror = RemoteMirrorRepository(mirror_path, meta_data, stdio)
return mirror
......@@ -947,9 +929,9 @@ class MirrorRepositoryManager(Manager):
def get_mirrors(self, is_enabled=True):
self._lock()
mirros = self.get_remote_mirrors(is_enabled=is_enabled)
mirros.append(self.local_mirror)
return mirros
mirrors = self.get_remote_mirrors(is_enabled=is_enabled)
mirrors.append(self.local_mirror)
return mirrors
def get_exact_pkg(self, **pattern):
only_info = 'only_info' in pattern and pattern['only_info']
......
......@@ -29,6 +29,7 @@ from copy import deepcopy
from _manager import Manager
from _rpm import Version
from ssh import ConcurrentExecutor
from tool import ConfigUtil, DynamicLoading, YamlLoader
......@@ -124,6 +125,7 @@ class PluginContext(object):
self.options = options
self.dev_mode = dev_mode
self.stdio = stdio
self.concurrent_exector = ConcurrentExecutor(32)
self._return = PluginReturn()
def get_return(self):
......@@ -164,7 +166,8 @@ class ScriptPlugin(Plugin):
def __getattr__(self, key):
def new_method(*args, **kwargs):
kwargs['stdio'] = self.stdio
if "stdio" not in kwargs:
kwargs['stdio'] = self.stdio
return attr(*args, **kwargs)
attr = getattr(self.client, key)
if hasattr(attr, '__call__'):
......@@ -595,12 +598,18 @@ class InstallPlugin(Plugin):
DIR = 1
BIN = 2
class InstallMethod(Enum):
ANY = 0
CP = 1
class FileItem(object):
def __init__(self, src_path, target_path, _type):
def __init__(self, src_path, target_path, _type, install_method):
self.src_path = src_path
self.target_path = target_path
self.type = _type if _type else InstallPlugin.FileItemType.FILE
self.install_method = install_method or InstallPlugin.InstallMethod.ANY
PLUGIN_TYPE = PluginType.INSTALL
FILES_MAP_YAML = 'file_map.yaml'
......@@ -611,6 +620,7 @@ class InstallPlugin(Plugin):
super(InstallPlugin, self).__init__(component_name, plugin_path, version, dev_mode)
self.file_map_path = os.path.join(self.plugin_path, self.FILES_MAP_YAML)
self._file_map = {}
self._file_map_data = None
@classmethod
def var_replace(cls, string, var):
......@@ -634,6 +644,13 @@ class InstallPlugin(Plugin):
return ''.join(done)
@property
def file_map_data(self):
if self._file_map_data is None:
with open(self.file_map_path, 'rb') as f:
self._file_map_data = yaml.load(f)
return self._file_map_data
def file_map(self, package_info):
var = {
'name': package_info.name,
......@@ -646,17 +663,17 @@ class InstallPlugin(Plugin):
if not self._file_map.get(key):
try:
file_map = {}
with open(self.file_map_path, 'rb') as f:
for data in yaml.load(f):
k = data['src_path']
if k[0] != '.':
k = '.%s' % os.path.join('/', k)
k = self.var_replace(k, var)
file_map[k] = InstallPlugin.FileItem(
k,
ConfigUtil.get_value_from_dict(data, 'target_path', k),
getattr(InstallPlugin.FileItemType, ConfigUtil.get_value_from_dict(data, 'type', 'FILE').upper(), None)
)
for data in self.file_map_data:
k = data['src_path']
if k[0] != '.':
k = '.%s' % os.path.join('/', k)
k = self.var_replace(k, var)
file_map[k] = InstallPlugin.FileItem(
k,
ConfigUtil.get_value_from_dict(data, 'target_path', k),
getattr(InstallPlugin.FileItemType, ConfigUtil.get_value_from_dict(data, 'type', 'FILE').upper(), None),
getattr(InstallPlugin.InstallMethod, ConfigUtil.get_value_from_dict(data, 'install_method', 'ANY').upper(), None),
)
self._file_map[key] = file_map
except:
pass
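For reference, each file_map.yaml entry is a small mapping consumed by the loop above; a hypothetical entry, shown as the dict yaml.load() would yield (paths invented for illustration):

entry = {
    'src_path': './home/admin/oceanbase/bin/observer',  # becomes the map key after var_replace
    'target_path': 'bin/observer',                      # defaults to src_path when omitted
    'type': 'BIN',                                      # -> InstallPlugin.FileItemType.BIN
    'install_method': 'CP',                             # -> InstallPlugin.InstallMethod.CP (new field in this patch)
}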
......
......@@ -31,6 +31,7 @@ from _arch import getBaseArch
from tool import DirectoryUtil, FileUtil, YamlLoader
from _manager import Manager
from _plugin import InstallPlugin
from ssh import LocalClient
class LocalPackage(Package):
......@@ -121,10 +122,15 @@ class LocalPackage(Package):
filelinktos.append(os.readlink(target_path))
filemodes.append(-24065)
else:
m = hashlib.md5()
with open(target_path, 'rb') as f:
m.update(f.read())
m_value = m.hexdigest().encode(sys.getdefaultencoding())
ret = LocalClient().execute_command('md5sum {}'.format(target_path))
if ret:
m_value = ret.stdout.strip().split(' ')[0].encode('utf-8')
else:
m = hashlib.md5()
with open(target_path, 'rb') as f:
m.update(f.read())
m_value = m.hexdigest().encode(sys.getdefaultencoding())
# raise Exception('Failed to get md5sum for {}, error: {}'.format(target_path, ret.stderr))
m_sum.update(m_value)
filemd5s.append(m_value)
filelinktos.append('')
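Shelling out to md5sum keeps the whole file out of Python memory; the pure-Python fallback above could achieve the same by hashing in fixed-size chunks (a sketch, not part of this patch):

import hashlib

def file_md5(path, chunk_size=1 << 20):
    # Hash incrementally instead of f.read()-ing the entire file at once.
    m = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            m.update(chunk)
    return m.hexdigest()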
......@@ -158,7 +164,7 @@ class Repository(PackageInfo):
return self.md5
def __str__(self):
return '%s-%s-%s' % (self.name, self.version, self.hash)
return '%s-%s-%s-%s' % (self.name, self.version, self.release, self.hash)
def __hash__(self):
return hash(self.repository_dir)
......@@ -380,48 +386,29 @@ class ComponentRepository(object):
repositories[repository.hash] = repository
return repositories
def get_repository_by_version(self, version, tag=None):
if tag:
return self.get_repository_by_tag(tag, version)
repository = self.get_repository_by_tag(self.name, version)
if repository:
return repository
path_partten = os.path.join(self.repository_dir, version, tag if tag else '*')
for path in glob(path_partten):
n_repository = Repository(self.name, path, self.stdio)
if n_repository.hash and n_repository > repository:
repository = n_repository
return repository
def get_repository_by_tag(self, tag, version=None):
path_partten = os.path.join(self.repository_dir, version if version else '*', tag)
def search_repository(self, version=None, tag=None, release=None):
path_pattern = os.path.join(self.repository_dir, version or '*', tag or '*')
repository = None
for path in glob(path_partten):
for path in glob(path_pattern):
n_repository = Repository(self.name, path, self.stdio)
if release and release != n_repository.release:
continue
if n_repository.hash and n_repository > repository:
repository = n_repository
return repository
def get_repository(self, version=None, tag=None):
if tag:
return self.get_repository_by_tag(tag, version)
if version:
return self.get_repository_by_version(version, tag)
version = None
for rep_version in os.listdir(self.repository_dir):
rep_version = Version(rep_version)
if rep_version > version:
version = rep_version
if version:
return self.get_repository_by_version(version, tag)
return None
def get_repository(self, version=None, tag=None, release=None):
if version or tag or release:
return self.search_repository(version=version, tag=tag, release=release)
else:
return self.search_repository(tag=self.name) or self.search_repository()
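search_repository collapses the old version/tag lookups into one glob walk: missing fields widen the pattern to '*', and the newest repository whose release also matches wins. A toy illustration of the pattern building (directory layout invented for illustration):

import os

repository_dir = '/root/.obd/repository/oceanbase-ce'  # hypothetical layout
version, tag = '3.1.0', None
path_pattern = os.path.join(repository_dir, version or '*', tag or '*')
assert path_pattern == '/root/.obd/repository/oceanbase-ce/3.1.0/*'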
def get_repositories(self, version=None):
if not version:
version = '*'
repositories = []
path_partten = os.path.join(self.repository_dir, version, '*')
for path in glob(path_partten):
path_pattern = os.path.join(self.repository_dir, version, '*')
for path in glob(path_pattern):
repository = Repository(self.name, path, self.stdio)
if repository.hash:
repositories.append(repository)
......@@ -436,7 +423,7 @@ class RepositoryManager(Manager):
def __init__(self, home_path, lock_manager=None, stdio=None):
super(RepositoryManager, self).__init__(home_path, stdio=stdio)
self.repositories = {}
self.component_repositoies = {}
self.component_repositories = {}
self.lock_manager = lock_manager
def _lock(self, read_only=False):
......@@ -460,20 +447,20 @@ class RepositoryManager(Manager):
def get_repositories(self, name, version=None, instance=True):
repositories = []
for repository in self.get_component_repositoy(name).get_repositories(version):
for repository in self.get_component_repository(name).get_repositories(version):
if instance and repository.is_shadow_repository() is False:
repositories.append(repository)
return repositories
def get_repositories_view(self, name=None):
if name:
repositories = self.get_component_repositoy(name).get_repositories()
repositories = self.get_component_repository(name).get_repositories()
else:
repositories = []
path_partten = os.path.join(self.path, '*')
for path in glob(path_partten):
path_pattern = os.path.join(self.path, '*')
for path in glob(path_pattern):
_, name = os.path.split(path)
repositories += self.get_component_repositoy(name).get_repositories()
repositories += self.get_component_repository(name).get_repositories()
repositories_vo = {}
for repository in repositories:
......@@ -487,36 +474,46 @@ class RepositoryManager(Manager):
repositories_vo[repository] = self._get_repository_vo(repository)
return list(repositories_vo.values())
def get_component_repositoy(self, name):
if name not in self.component_repositoies:
def get_component_repository(self, name):
if name not in self.component_repositories:
self._lock(True)
path = os.path.join(self.path, name)
self.component_repositoies[name] = ComponentRepository(name, path, self.stdio)
return self.component_repositoies[name]
def get_repository_by_version(self, name, version, tag=None, instance=True):
if not tag:
tag = name
path = os.path.join(self.path, name, version, tag)
if path not in self.repositories:
component_repositoy = self.get_component_repositoy(name)
repository = component_repositoy.get_repository(version, tag)
if repository:
self.repositories[repository.repository_dir] = repository
self.repositories[path] = repository
self.component_repositories[name] = ComponentRepository(name, path, self.stdio)
return self.component_repositories[name]
def get_repository(self, name, version=None, tag=None, release=None, package_hash=None, instance=True):
self.stdio.verbose(
"Search repository {name} version: {version}, tag: {tag}, release: {release}, package_hash: {package_hash}".format(
name=name, version=version, tag=tag, release=release, package_hash=package_hash))
tag = tag or package_hash
component_repository = self.get_component_repository(name)
if version and tag:
repository_dir = os.path.join(self.path, name, version, tag)
if repository_dir in self.repositories:
repository = self.repositories[repository_dir]
else:
repository = component_repository.get_repository(version=version, tag=tag, release=release)
else:
repository = component_repository.get_repository(version=version, tag=tag, release=release)
if not repository:
return None
else:
repository = self.repositories[path]
if repository.repository_dir not in self.repositories:
self.repositories[repository.repository_dir] = repository
else:
repository = self.repositories[repository.repository_dir]
if not self._check_repository_pattern(repository, version=version, release=release, hash=package_hash):
return None
self.stdio.verbose("Found repository {}".format(repository))
return self.get_instance_repository_from_shadow(repository) if instance else repository
def get_repository(self, name, version=None, tag=None, instance=True):
if version:
return self.get_repository_by_version(name, version, tag)
component_repositoy = self.get_component_repositoy(name)
repository = component_repositoy.get_repository(version, tag)
if repository:
self.repositories[repository.repository_dir] = repository
return self.get_instance_repository_from_shadow(repository) if repository and instance else repository
def _check_repository_pattern(self, repository, **kwargs):
for key in ["version", "release", "hash"]:
current_value = getattr(repository, key)
if kwargs.get(key) is not None and current_value != kwargs[key]:
self.stdio.verbose("repository {} is {}, but {} is required".format(key, current_value, kwargs[key]))
return False
return True
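_check_repository_pattern is the final gate after a candidate is found: every constraint the caller actually supplied must match the repository exactly, while omitted constraints (None) are skipped. The same rule in isolation, with a plain dict standing in for a Repository:

def check_pattern(repo, **constraints):
    for key in ("version", "release", "hash"):
        want = constraints.get(key)
        if want is not None and repo.get(key) != want:
            return False
    return True

repo = {"version": "3.1.3", "release": "10", "hash": "abc"}   # placeholder values
assert check_pattern(repo, version="3.1.3")    # supplied constraint matches
assert not check_pattern(repo, hash="other")   # supplied constraint differs
assert check_pattern(repo)                     # nothing supplied, nothing to fail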
def create_instance_repository(self, name, version, _hash):
path = os.path.join(self.path, name, version, _hash)
......@@ -534,7 +531,7 @@ class RepositoryManager(Manager):
self._lock(True)
self.repositories[path] = Repository(name, path, self.stdio)
return self.repositories[path]
repository = Repository(name, path, self.stdio)
repository.set_version(version)
return repository
......
......@@ -24,12 +24,16 @@ import os
import signal
import sys
import traceback
import inspect2
import six
from enum import Enum
from halo import Halo, cursor
from colorama import Fore
from prettytable import PrettyTable
from progressbar import AdaptiveETA, Bar, SimpleProgress, ETA, FileTransferSpeed, Percentage, ProgressBar
from types import MethodType
from inspect2 import Parameter
if sys.version_info.major == 3:
......@@ -74,8 +78,8 @@ class FormtatText(object):
return FormtatText.format(text, Fore.RED)
class LogSymbols(Enum):
INFO = FormtatText.info('!')
SUCCESS = FormtatText.success('ok')
WARNING = FormtatText.warning('!!')
......@@ -112,7 +116,7 @@ class IOTable(PrettyTable):
val = 'l'
for field in self._field_names:
self._align[field] = val
class IOHalo(Halo):
......@@ -230,14 +234,14 @@ class IO(object):
WARNING_PREV = FormtatText.warning('[WARN]')
ERROR_PREV = FormtatText.error('[ERROR]')
IS_TTY = sys.stdin.isatty()
def __init__(self,
level,
msg_lv=MsgLevel.DEBUG,
trace_logger=None,
use_cache=False,
track_limit=0,
root_io=None,
stream=sys.stdout
):
self.level = level
......@@ -258,7 +262,7 @@ class IO(object):
if self._root_io:
self._root_io.log_cache
return self._log_cache
def before_close(self):
if self._before_critical:
try:
......@@ -272,7 +276,7 @@ class IO(object):
def __del__(self):
self._close()
def exit(self, code):
self._close()
sys.exit(code)
......@@ -280,14 +284,14 @@ class IO(object):
def set_cache(self, status):
if status:
self._cache_on()
def _cache_on(self):
if self._root_io:
return False
if self.log_cache is None:
self._log_cache = []
return True
def _cache_off(self):
if self._root_io:
return False
......@@ -359,7 +363,7 @@ class IO(object):
finally:
self._clear_sync_ctx()
return ret
def start_loading(self, text, *arg, **kwargs):
if self.sync_obj:
return False
......@@ -405,7 +409,7 @@ class IO(object):
if not isinstance(self.sync_obj, IOProgressBar):
return False
return self._stop_sync_obj(IOProgressBar, 'interrupt')
def sub_io(self, pid=None, msg_lv=None):
if not pid:
pid = os.getpid()
......@@ -414,16 +418,20 @@ class IO(object):
key = "%s-%s" % (pid, msg_lv)
if key not in self.sub_ios:
self.sub_ios[key] = self.__class__(
self.level + 1,
msg_lv=msg_lv,
trace_logger=self.trace_logger,
track_limit=self.track_limit,
root_io=self._root_io if self._root_io else self
)
return self.sub_ios[key]
def print_list(self, ary, field_names=None, exp=lambda x: x if isinstance(x, list) else [x], show_index=False, start=0, **kwargs):
def print_list(self, ary, field_names=None, exp=lambda x: x if isinstance(x, (list, tuple)) else [x], show_index=False, start=0, **kwargs):
if not ary:
title = kwargs.get("title", "")
empty_msg = kwargs.get("empty_msg", "{} is empty.".format(title))
if empty_msg:
self.print(empty_msg)
return
show_index = field_names is not None and show_index
if show_index:
......@@ -464,7 +472,7 @@ class IO(object):
kwargs['file'] and print(self._format(msg, *args), **kwargs)
del kwargs['file']
self.log(msg_lv, msg, *args, **kwargs)
def log(self, levelno, msg, *args, **kwargs):
self._cache_log(levelno, msg, *args, **kwargs)
......@@ -478,13 +486,11 @@ class IO(object):
else:
log_cache.append((levelno, line, args, kwargs))
def _flush_log(self):
if not self._root_io and self.trace_logger and self._log_cache:
for levelno, line, args, kwargs in self._log_cache:
self.trace_logger.log(levelno, line, *args, **kwargs)
self._log_cache = []
def _log(self, levelno, msg, *args, **kwargs):
if self.trace_logger:
self.trace_logger.log(levelno, msg, *args, **kwargs)
......@@ -560,3 +566,144 @@ class IO(object):
msg and self.error(msg)
print_stack(''.join(lines))
class _Empty(object):
pass
EMPTY = _Empty()
del _Empty
class FakeReturn(object):
def __call__(self, *args, **kwargs):
return None
def __len__(self):
return 0
FAKE_RETURN = FakeReturn()
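FakeReturn does two jobs at once: it is callable, so any IO method that resolves to FAKE_RETURN can still be invoked, and its __len__ of 0 makes it falsy, so truth tests on the attribute itself behave like a failed lookup. A quick demonstration of both properties:

fr = FakeReturn()
assert fr("any", "args", key="word") is None   # invoking it is a silent no-op
assert not fr                                  # len(fr) == 0, so it is falsy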
class StdIO(object):
def __init__(self, io=None):
self.io = io
self._attrs = {}
self._warn_func = getattr(self.io, "warn", print)
def __getattr__(self, item):
if self.io is None:
return FAKE_RETURN
if item not in self._attrs:
attr = getattr(self.io, item, EMPTY)
if attr is not EMPTY:
self._attrs[item] = attr
else:
self._warn_func(FormtatText.warning("WARNING: {} has no attribute '{}'".format(self.io, item)))
self._attrs[item] = FAKE_RETURN
return self._attrs[item]
FAKE_IO = StdIO()
def get_stdio(io_obj):
if io_obj is None:
return FAKE_IO
elif isinstance(io_obj, StdIO):
return io_obj
else:
return StdIO(io_obj)
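StdIO is a memoizing proxy: attributes the wrapped io really has are forwarded and cached, unknown ones trigger a single warning and then degrade to FAKE_RETURN, and a None io short-circuits everything. A small usage sketch with a hypothetical backend that only implements print:

class PrintIO(object):   # hypothetical minimal backend
    def print(self, msg):
        print(msg)

io = get_stdio(PrintIO())
io.print("forwarded to PrintIO.print")
io.start_loading("missing method")   # warns once, then acts as a no-op
assert get_stdio(None) is FAKE_IO    # no backend: the shared no-op proxy
assert get_stdio(io) is io           # already a StdIO: returned unchanged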
def safe_stdio_decorator(default_stdio=None):
def decorated(func):
is_bond_method = False
_type = None
if isinstance(func, (staticmethod, classmethod)):
is_bond_method = True
_type = type(func)
func = func.__func__
all_parameters = inspect2.signature(func).parameters
if "stdio" in all_parameters:
default_stdio_in_params = all_parameters["stdio"].default
if not isinstance(default_stdio_in_params, Parameter.empty):
_default_stdio = default_stdio_in_params or default_stdio
def func_wrapper(*args, **kwargs):
_params_keys = list(all_parameters.keys())
_index = _params_keys.index("stdio")
if "stdio" not in kwargs and len(args) > _index:
stdio = get_stdio(args[_index])
tmp_args = list(args)
tmp_args[_index] = stdio
args = tuple(tmp_args)
else:
stdio = get_stdio(kwargs.get("stdio", _default_stdio))
kwargs["stdio"] = stdio
return func(*args, **kwargs)
return _type(func_wrapper) if is_bond_method else func_wrapper
else:
return _type(func) if is_bond_method else func
return decorated
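safe_stdio_decorator normalizes the stdio argument however it arrives: a positional stdio is rewritten in place, a keyword or defaulted one goes through get_stdio, so the decorated function can call stdio methods unconditionally. A minimal sketch on a free function:

@safe_stdio_decorator()
def greet(name, stdio=None):
    stdio.print("hello %s" % name)   # safe even when no stdio was supplied
    return True

greet("obd")                # stdio defaults to None and becomes FAKE_IO
greet("obd", stdio=None)    # the keyword path is normalized the same way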
class SafeStdioMeta(type):
@staticmethod
def _init_wrapper_func(func):
def wrapper(*args, **kwargs):
setattr(args[0], "_wrapper_func", {})
func(*args, **kwargs)
if "stdio" in args[0].__dict__:
args[0].__dict__["stdio"] = get_stdio(args[0].__dict__["stdio"])
if func.__name__ != wrapper.__name__:
return wrapper
else:
return func
def __new__(mcs, name, bases, attrs):
for key, attr in attrs.items():
if key.startswith("__") and key.endswith("__"):
continue
if isinstance(attr, (staticmethod, classmethod)):
attrs[key] = safe_stdio_decorator()(attr)
cls = type.__new__(mcs, name, bases, attrs)
cls.__init__ = mcs._init_wrapper_func(cls.__init__)
return cls
class _StayTheSame(object):
pass
STAY_THE_SAME = _StayTheSame()
class SafeStdio(six.with_metaclass(SafeStdioMeta)):
_wrapper_func = {}
def __getattribute__(self, item):
_wrapper_func = super(SafeStdio, self).__getattribute__("_wrapper_func")
if item not in _wrapper_func:
attr = super(SafeStdio, self).__getattribute__(item)
if (not item.startswith("__") or not item.endswith("__")) and isinstance(attr, MethodType):
if "stdio" in inspect2.signature(attr).parameters:
_wrapper_func[item] = safe_stdio_decorator(default_stdio=getattr(self, "stdio", None))(attr)
return _wrapper_func[item]
_wrapper_func[item] = STAY_THE_SAME
return attr
if _wrapper_func[item] is STAY_THE_SAME:
return super(SafeStdio, self).__getattribute__(item)
return _wrapper_func[item]
def __setattr__(self, key, value):
if key in self._wrapper_func:
del self._wrapper_func[key]
return super(SafeStdio, self).__setattr__(key, value)
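Taken together, SafeStdioMeta and SafeStdio guarantee a subclass never passes a raw None around: the wrapped __init__ turns self.stdio into a StdIO proxy, and any method whose signature declares stdio is lazily wrapped on first access and cached in _wrapper_func. A sketch of the intended subclass shape:

class Task(SafeStdio):
    def __init__(self, stdio=None):
        self.stdio = stdio   # rewritten to a StdIO proxy by the wrapped __init__

    def run(self, stdio=None):
        stdio.print("running")   # receives self.stdio when not passed explicitly
        return True

Task().run()   # with no real io attached, every stdio call degrades to a no-op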
......@@ -30,7 +30,9 @@ from _deploy import (
ServerConfigFlyweightFactory,
ClusterConfig,
ConfigParser,
CommentedMap
CommentedMap,
RsyncConfig,
ENV
)
......@@ -85,11 +87,12 @@ class ClusterConfigParser(ConfigParser):
server_config['zone'] = zone_name
servers[server] = server_config
cluster_conf = ClusterConfig(
cluster_config = ClusterConfig(
servers.keys(),
component_name,
ConfigUtil.get_value_from_dict(conf, 'version', None, str),
ConfigUtil.get_value_from_dict(conf, 'tag', None, str),
ConfigUtil.get_value_from_dict(conf, 'release', None, str),
ConfigUtil.get_value_from_dict(conf, 'package_hash', None, str)
)
global_config = {}
......@@ -99,11 +102,17 @@ class ClusterConfigParser(ConfigParser):
global_config['appname'] = str(conf['name'])
if 'config' in conf:
global_config.update(conf['config'])
cluster_conf.set_global_conf(global_config)
cluster_config.set_global_conf(global_config)
if RsyncConfig.RSYNC in conf:
cluster_config.set_rsync_list(conf[RsyncConfig.RSYNC])
if ENV in conf:
cluster_config.set_environments(conf[ENV])
for server in servers:
cluster_conf.add_server_conf(server, servers[server])
return cluster_conf
cluster_config.add_server_conf(server, servers[server])
return cluster_config
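Besides the cluster_conf to cluster_config rename, the parser now consumes three more per-component keys: release and package_hash join version and tag for exact package pinning, and the sections named by RsyncConfig.RSYNC and ENV feed set_rsync_list and set_environments. Roughly the mapping shape it reads (all values are placeholders; the exact section spellings are the constants from _deploy):

conf = {
    'version': '3.1.3',             # placeholder
    'release': '10.el7',            # placeholder
    'package_hash': '<md5>',        # placeholder
    'config': {'appname': 'demo'},  # merged into the global config
    # plus conf[RsyncConfig.RSYNC] for files to push to servers
    # and conf[ENV] for per-component environment variables
}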
@classmethod
def extract_inner_config(cls, cluster_config, config):
......
......@@ -33,14 +33,17 @@ from prettytable import PrettyTable
from halo import Halo
from ssh import SshClient, SshConfig
from tool import ConfigUtil, FileUtil, DirectoryUtil, YamlLoader
from tool import ConfigUtil, FileUtil, DirectoryUtil, YamlLoader, timeout, COMMAND_ENV
from _stdio import MsgLevel
from _rpm import Version
from _mirror import MirrorRepositoryManager, PackageInfo
from _plugin import PluginManager, PluginType, InstallPlugin
from _repository import RepositoryManager, LocalPackage
from _deploy import DeployManager, DeployStatus, DeployConfig, DeployConfigStatus, BASE_DIR_KEY, InnerConfigKeywords
from _lock import LockManager
from _repository import RepositoryManager, LocalPackage, Repository
from _deploy import (
DeployManager, DeployStatus,
DeployConfig, DeployConfigStatus,
ParserError, Deploy
)
......@@ -148,7 +151,7 @@ class ObdHome(object):
def ssh_clients_connect(self, ssh_clients, servers, user_config):
for server in servers:
if server.ip not in ssh_clients:
if server not in ssh_clients:
ssh_clients[server] = SshClient(
SshConfig(
server.ip,
......@@ -226,9 +229,9 @@ class ObdHome(object):
print_match and self._call_stdio(
'print_list',
matchs,
['name', 'version', 'release', 'arch', 'md5'],
lambda x: [matchs[x].name, matchs[x].version, matchs[x].release, matchs[x].arch, matchs[x].md5],
title='Search %s %s Result' % (component_name, version)
)
for md5 in usable:
if md5 in matchs:
......@@ -238,9 +241,9 @@ class ObdHome(object):
usable_matchs = [info[1] for info in sorted(matchs.items())]
if release_first:
usable_matchs = usable_matchs[:1]
return usable_matchs
def search_components_from_mirrors(self, deploy_config, fuzzy_match=False, only_info=True, update_if_need=None):
pkgs = []
errors = []
......@@ -251,14 +254,18 @@ class ObdHome(object):
# First, check if the component exists in the repository. If it exists, check if the version is available. If so, use the repository directly.
self._call_stdio('verbose', 'Get %s repository' % component)
repository = self.repository_manager.get_repository(component, config.version, config.package_hash if config.package_hash else config.tag)
repository = self.repository_manager.get_repository(name=component, version=config.version, tag=config.tag, release=config.release, package_hash=config.package_hash)
if repository and not repository.hash:
repository = None
self._call_stdio('verbose', 'Search %s package from mirror' % component)
pkg = self.mirror_manager.get_best_pkg(name=component, version=config.version, md5=config.package_hash, fuzzy_match=fuzzy_match, only_info=only_info)
if not config.tag:
self._call_stdio('verbose', 'Search %s package from mirror' % component)
pkg = self.mirror_manager.get_best_pkg(
name=component, version=config.version, md5=config.package_hash, release=config.release, fuzzy_match=fuzzy_match, only_info=only_info)
else:
pkg = None
if repository or pkg:
if pkg:
self._call_stdio('verbose', 'Found Package %s-%s-%s' % (pkg.name, pkg.version, pkg.md5))
self._call_stdio('verbose', 'Found Package %s-%s-%s-%s' % (pkg.name, pkg.version, pkg.release, pkg.md5))
if repository:
if repository >= pkg or (
(
......@@ -271,9 +278,9 @@ class ObdHome(object):
self._call_stdio('print', '%s-%s already installed.' % (repository.name, repository.version))
continue
if config.version and pkg.version != config.version:
self._call_stdio('warn', 'No such package %s-%s. Use similar package %s-%s.' % (component, config.version, pkg.name, pkg.version))
self._call_stdio('warn', 'No such package %s-%s-%s. Use similar package %s-%s-%s.' % (component, config.version, config.release, pkg.name, pkg.version, pkg.release))
else:
self._call_stdio('print', 'Package %s-%s is available.' % (pkg.name, pkg.version))
self._call_stdio('print', 'Package %s-%s-%s is available.' % (pkg.name, pkg.version, pkg.release))
repository = self.repository_manager.get_repository(pkg.name, pkg.md5)
if repository:
repositories.append(repository)
......@@ -282,12 +289,14 @@ class ObdHome(object):
else:
pkg_name = [component]
if config.version:
pkg_name.append(config.version)
pkg_name.append("version: %s" % config.version)
if config.release:
pkg_name.append("release: %s" % config.release)
if config.package_hash:
pkg_name.append(config.package_hash)
elif config.tag:
pkg_name.append(config.tag)
errors.append('No such package %s.' % ('-'.join(pkg_name)))
pkg_name.append("package hash: %s" % config.package_hash)
if config.tag:
pkg_name.append("tag: %s" % config.tag)
errors.append('No such package name: %s.' % (', '.join(pkg_name)))
return pkgs, repositories, errors
def load_local_repositories(self, deploy_info, allow_shadow=True):
......@@ -357,6 +366,7 @@ class ObdHome(object):
initial_config = ''
if deploy:
try:
deploy.deploy_config.allow_include_error()
if deploy.deploy_info.config_status == DeployConfigStatus.UNCHNAGE:
path = deploy.deploy_config.yaml_path
else:
......@@ -400,7 +410,12 @@ class ObdHome(object):
subprocess_call([EDITOR, tf.name])
self._call_stdio('verbose', 'Load %s' % tf.name)
try:
deploy_config = DeployConfig(tf.name, yaml_loader=YamlLoader(self.stdio), config_parser_manager=self.deploy_manager.config_parser_manager)
deploy_config = DeployConfig(
tf.name, yaml_loader=YamlLoader(self.stdio),
config_parser_manager=self.deploy_manager.config_parser_manager,
inner_config=deploy.deploy_config.inner_config if deploy else None
)
deploy_config.allow_include_error()
except Exception as e:
if confirm(e):
continue
......@@ -419,16 +434,37 @@ class ObdHome(object):
if not self._call_stdio('confirm', 'Modifications to the deployment architecture take effect after you redeploy the architecture. Are you sure that you want to start a redeployment? '):
continue
config_status = DeployConfigStatus.NEED_REDEPLOY
else:
if config_status != DeployConfigStatus.NEED_REDEPLOY:
comp_attr_changed = False
for component_name in deploy_config.components:
old_cluster_config = deploy.deploy_config.components[component_name]
new_cluster_config = deploy_config.components[component_name]
if new_cluster_config.version != old_cluster_config.origin_version \
or new_cluster_config.package_hash != old_cluster_config.origin_package_hash \
or new_cluster_config.tag != old_cluster_config.origin_tag:
if new_cluster_config.version != old_cluster_config.config_version \
or new_cluster_config.package_hash != old_cluster_config.config_package_hash \
or new_cluster_config.release != old_cluster_config.config_release \
or new_cluster_config.tag != old_cluster_config.tag:
comp_attr_changed = True
config_status = DeployConfigStatus.NEED_REDEPLOY
break
if comp_attr_changed:
if not self._call_stdio('confirm', 'Modifications to the version, release or hash of the component take effect after you redeploy the cluster. Are you sure that you want to start a redeployment? '):
continue
config_status = DeployConfigStatus.NEED_REDEPLOY
if config_status != DeployConfigStatus.NEED_REDEPLOY:
rsync_conf_changed = False
for component_name in deploy_config.components:
old_cluster_config = deploy.deploy_config.components[component_name]
new_cluster_config = deploy_config.components[component_name]
if new_cluster_config.get_rsync_list() != old_cluster_config.get_rsync_list():
rsync_conf_changed = True
break
if rsync_conf_changed:
if not self._call_stdio('confirm', 'Modifications to the rsync config of a deployed cluster take effect after you redeploy the cluster. Are you sure that you want to start a redeployment? '):
continue
config_status = DeployConfigStatus.NEED_REDEPLOY
# Loading the parameter plugins that are available to the application
self._call_stdio('start_loading', 'Search param plugin and load')
if not is_deployed or config_status == DeployConfigStatus.NEED_REDEPLOY:
......@@ -492,7 +528,7 @@ class ObdHome(object):
continue
else:
return False
for component_name in deploy_config.components:
if config_status == DeployConfigStatus.NEED_REDEPLOY:
break
......@@ -817,7 +853,7 @@ class ObdHome(object):
self._call_stdio('error', 'Deploy configuration is empty.\nIt may be caused by a failure to resolve the configuration.\nPlease check your configuration file.')
return False
# Check the best suitable mirror for the components and installation plguins. Install locally
# Check the best suitable mirror for the components and installation plugins. Install locally
repositories, install_plugins = self.search_components_from_mirrors_and_install(deploy_config)
if not install_plugins or not repositories:
return False
......@@ -869,13 +905,13 @@ class ObdHome(object):
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Deploy status judge')
if deploy_info.status != DeployStatus.STATUS_RUNNING:
self._call_stdio('error', 'Deploy "%s" not RUNNING' % (name))
return False
version = getattr(options, 'version', '')
if not version:
self._call_stdio('error', 'Use the --version option to specify the required OCP version.')
......@@ -912,7 +948,7 @@ class ObdHome(object):
new_deploy_config = None
self._call_stdio('stop_loading', 'succeed')
# Get the client
ssh_clients = self.get_clients(deploy_config, repositories)
if new_deploy_config and deploy_config.user.username != new_deploy_config.user.username:
......@@ -928,11 +964,11 @@ class ObdHome(object):
component_num -= 1
self._call_stdio('print', '%s No check plugin available.' % repository.name)
continue
cluster_config = deploy_config.components[repository.name]
new_cluster_config = new_deploy_config.components[repository.name] if new_deploy_config else None
cluster_servers = cluster_config.servers
self._call_stdio('verbose', 'Call %s for %s' % (connect_plugins[repository], repository))
ret = connect_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, '', options, self.stdio)
if ret:
......@@ -940,12 +976,12 @@ class ObdHome(object):
cursor = ret.get_return('cursor')
else:
break
self._call_stdio('verbose', 'Call %s for %s' % (ocp_check[repository], repository))
if ocp_check[repository](deploy_config.components.keys(), ssh_clients, cluster_config, '', options, self.stdio, cursor=cursor, ocp_version=version, new_cluster_config=new_cluster_config, new_clients=new_ssh_clients):
component_num -= 1
self._call_stdio('print', '%s Check passed.' % repository.name)
return component_num == 0
def change_deploy_config_style(self, name, options=Values()):
......@@ -954,7 +990,7 @@ class ObdHome(object):
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Deploy config status judge')
if deploy_info.config_status != DeployConfigStatus.UNCHNAGE:
......@@ -995,11 +1031,10 @@ class ObdHome(object):
return True
except Exception as e:
self._call_stdio('exception', e)
self._call_stdio('stop_loading', 'fail')
return False
def deploy_cluster(self, name, opt=Values()):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
......@@ -1015,7 +1050,7 @@ class ObdHome(object):
if not deploy.apply_temp_deploy_config():
self._call_stdio('error', 'Failed to apply new deploy configuration')
return False
config_path = getattr(opt, 'config', '')
unuse_lib_repo = getattr(opt, 'unuselibrepo', False)
auto_create_tenant = getattr(opt, 'auto_create_tenant', False)
......@@ -1026,10 +1061,10 @@ class ObdHome(object):
if not deploy:
self._call_stdio('error', 'Failed to create deploy: %s. please check you configuration file' % name)
return False
if not deploy:
self._call_stdio('error', 'No such deploy: %s. you can input configuration path to create a new deploy' % name)
return False
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
if not deploy_config:
......@@ -1045,8 +1080,28 @@ class ObdHome(object):
self._call_stdio('error', '%s\'s servers list is empty.' % component_name)
return False
# Check the best suitable mirror for the components and installation plguins. Install locally
if self.dev_mode:
base_dir = COMMAND_ENV.get(BASE_DIR_KEY, '')
deploy_config.enable_cp_install_mode(save=False)
else:
base_dir = ''
deploy_config.enable_ln_install_mode(save=False)
deploy_config.set_base_dir(base_dir, save=False)
# Check the best suitable mirror for the components and installation plugins. Install locally
repositories, install_plugins = self.search_components_from_mirrors_and_install(deploy_config)
if not repositories or not install_plugins:
return False
if unuse_lib_repo and not deploy_config.unuse_lib_repository:
deploy_config.set_unuse_lib_repository(True)
if auto_create_tenant and not deploy_config.auto_create_tenant:
deploy_config.set_auto_create_tenant(True)
return self._deploy_cluster(deploy, repositories, opt)
def _deploy_cluster(self, deploy, repositories, opt=Values()):
deploy_config = deploy.deploy_config
install_plugins = self.search_plugins(repositories, PluginType.INSTALL)
if not install_plugins:
return False
......@@ -1062,7 +1117,7 @@ class ObdHome(object):
self._call_stdio('start_loading', 'Repository integrity check')
for repository in repositories:
if not repository.file_check(install_plugins[repository]):
errors.append('%s intstall failed' % repository.name)
errors.append('%s install failed' % repository.name)
if errors:
self._call_stdio('stop_loading', 'fail')
self._call_stdio('error', '\n'.join(errors))
......@@ -1081,23 +1136,103 @@ class ObdHome(object):
self._call_stdio('error', '\n'.join(errors))
return False
self._call_stdio('stop_loading', 'succeed')
if unuse_lib_repo and not deploy_config.unuse_lib_repository:
deploy_config.set_unuse_lib_repository(True)
if auto_create_tenant and not deploy_config.auto_create_tenant:
deploy_config.set_auto_create_tenant(True)
# Get the client
ssh_clients = self.get_clients(deploy_config, repositories)
# Check the status for the deployed cluster
if not getattr(opt, 'skip_cluster_status_check', False):
component_status = {}
cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status)
if cluster_status is False or cluster_status == 1:
if self.stdio:
self._call_stdio('error', 'Some of the servers in the cluster have been started')
for repository in component_status:
cluster_status = component_status[repository]
for server in cluster_status:
if cluster_status[server] == 1:
self._call_stdio('print', '%s %s is started' % (server, repository.name))
return False
self._call_stdio('verbose', 'Search init plugin')
init_plugins = self.search_py_script_plugin(repositories, 'init')
component_num = len(repositories)
for repository in repositories:
cluster_config = deploy_config.components[repository.name]
init_plugin = init_plugins[repository]
self._call_stdio('verbose', 'Exec %s init plugin' % repository)
self._call_stdio('verbose', 'Apply %s for %s-%s' % (init_plugin, repository.name, repository.version))
if init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opt, self.stdio, self.home_path, repository.repository_dir):
component_num -= 1
if component_num != 0:
return False
# Install repository to servers
if not self.install_repositories_to_servers(deploy_config, repositories, install_plugins, ssh_clients, opt):
return False
# Sync runtime dependencies
if not self.sync_runtime_dependencies(deploy_config, repositories, ssh_clients, opt):
return False
for repository in repositories:
deploy.use_model(repository.name, repository, False)
if deploy.update_deploy_status(DeployStatus.STATUS_DEPLOYED) and deploy_config.dump():
self._call_stdio('print', '%s deployed' % deploy.name)
return True
return False
def install_repository_to_servers(self, components, cluster_config, repository, ssh_clients, options=Values(), unuse_lib_repository=False):
install_repo_plugin = self.plugin_manager.get_best_py_script_plugin('install_repo', 'general', '0.1')
install_plugins = self.search_plugins([repository], PluginType.INSTALL)
if not install_plugins:
return False
install_plugin = install_plugins[repository]
check_file_map = install_plugin.file_map(repository)
ret = install_repo_plugin(components, ssh_clients, cluster_config, [], options, self.stdio,
obd_home=self.home_path, install_repository=repository,
install_plugin=install_plugin, check_repository=repository,
check_file_map=check_file_map,
msg_lv='error' if unuse_lib_repository else 'warn')
if not ret:
return False
elif ret.get_return('checked'):
return True
elif unuse_lib_repository:
return False
self._call_stdio('print', 'Try to get lib-repository')
repositories_lib_map = self.install_lib_for_repositories([repository])
if repositories_lib_map is False:
self._call_stdio('error', 'Failed to install lib package for local')
return False
lib_repository = repositories_lib_map[repository]['repositories']
install_plugin = repositories_lib_map[repository]['install_plugin']
ret = install_repo_plugin(components, ssh_clients, cluster_config, [], options,
self.stdio,
obd_home=self.home_path, install_repository=lib_repository,
install_plugin=install_plugin, check_repository=repository,
check_file_map=check_file_map, msg_lv='error')
if not ret or not ret.get_return('checked'):
self._call_stdio('error', 'Failed to install lib package for cluster servers')
return False
return True
def install_repositories_to_servers(self, deploy_config, repositories, install_plugins, ssh_clients, options):
install_repo_plugin = self.plugin_manager.get_best_py_script_plugin('install_repo', 'general', '0.1')
check_file_maps = {}
need_lib_repositories = []
for repository in repositories:
cluster_config = deploy_config.components[repository.name]
# cluster files check
self.servers_repository_install(ssh_clients, cluster_config.servers, repository, install_plugins[repository])
# lib check
msg_lv = 'error' if deploy_config.unuse_lib_repository else 'warn'
if not self.servers_repository_lib_check(ssh_clients, cluster_config.servers, repository, install_plugins[repository], msg_lv):
install_plugin = install_plugins[repository]
check_file_map = check_file_maps[repository] = install_plugin.file_map(repository)
ret = install_repo_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio,
obd_home=self.home_path, install_repository=repository,
install_plugin=install_plugin, check_repository=repository,
check_file_map=check_file_map,
msg_lv='error' if deploy_config.unuse_lib_repository else 'warn')
if not ret:
return False
if not ret.get_return('checked'):
need_lib_repositories.append(repository)
if need_lib_repositories:
......@@ -1109,39 +1244,28 @@ class ObdHome(object):
if repositories_lib_map is False:
self._call_stdio('error', 'Failed to install lib package for local')
return False
if self.servers_apply_lib_repository_and_check(ssh_clients, deploy_config, need_lib_repositories, repositories_lib_map):
self._call_stdio('error', 'Failed to install lib package for cluster servers')
return False
for need_lib_repository in need_lib_repositories:
cluster_config = deploy_config.components[need_lib_repository.name]
check_file_map = check_file_maps[need_lib_repository]
lib_repository = repositories_lib_map[need_lib_repository]['repositories']
install_plugin = repositories_lib_map[need_lib_repository]['install_plugin']
ret = install_repo_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], options,
self.stdio,
obd_home=self.home_path, install_repository=lib_repository,
install_plugin=install_plugin, check_repository=need_lib_repository,
check_file_map=check_file_map, msg_lv='error')
if not ret or not ret.get_return('checked'):
self._call_stdio('error', 'Failed to install lib package for cluster servers')
return False
return True
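install_repositories_to_servers is a two-pass scheme: the first pass pushes each component's own repository and records those whose shared-library check fails, the second installs the matching lib repositories and re-checks with msg_lv='error' so a second failure is fatal. The control flow reduced to hypothetical callables:

def install_all(repos, install_repo, libs_ok, install_lib_repo):
    need_lib = []
    for repo in repos:                 # pass 1: the components' own repositories
        if not install_repo(repo):
            return False
        if not libs_ok(repo):
            need_lib.append(repo)
    for repo in need_lib:              # pass 2: lib repositories for the failures
        if not install_lib_repo(repo) or not libs_ok(repo):
            return False
    return True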
# Check the status for the deployed cluster
component_status = {}
cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status)
if cluster_status is False or cluster_status == 1:
if self.stdio:
self._call_stdio('error', 'Some of the servers in the cluster have been started')
for repository in component_status:
cluster_status = component_status[repository]
for server in cluster_status:
if cluster_status[server] == 1:
self._call_stdio('print', '%s %s is started' % (server, repository.name))
return False
self._call_stdio('verbose', 'Search init plugin')
init_plugins = self.search_py_script_plugin(repositories, 'init')
component_num = len(repositories)
def sync_runtime_dependencies(self, deploy_config, repositories, ssh_clients, option):
rsync_plugin = self.plugin_manager.get_best_py_script_plugin('rsync', 'general', '0.1')
ret = True
for repository in repositories:
cluster_config = deploy_config.components[repository.name]
init_plugin = init_plugins[repository]
self._call_stdio('verbose', 'Exec %s init plugin' % repository)
self._call_stdio('verbose', 'Apply %s for %s-%s' % (init_plugin, repository.name, repository.version))
if init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opt, self.stdio, self.home_path, repository.repository_dir):
deploy.use_model(repository.name, repository, False)
component_num -= 1
if component_num == 0 and deploy.update_deploy_status(DeployStatus.STATUS_DEPLOYED):
self._call_stdio('print', '%s deployed' % name)
return True
return False
ret = rsync_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], option, self.stdio) and ret
return ret
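Note the aggregation idiom above: ret = rsync_plugin(...) and ret evaluates the plugin call first, so every component gets synced even after one fails, while ret still latches the failure. Reversing the operands would short-circuit and skip the remaining components. A toy illustration with a hypothetical do_sync:

def do_sync(item):       # hypothetical stand-in for the rsync plugin call
    return item != "b"   # pretend the second component fails

ret = True
for item in ("a", "b", "c"):
    ret = do_sync(item) and ret   # do_sync always runs; ret stays False once False
assert ret is False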
def start_cluster(self, name, cmd=[], options=Values()):
self._call_stdio('verbose', 'Get Deploy by name')
......@@ -1163,8 +1287,18 @@ class ObdHome(object):
self._call_stdio('error', 'Deploy %s.%s\nIf you still need to start the cluster, use the `obd cluster start %s --wop` option to start the cluster without loading parameters. ' % (deploy_info.config_status.value, deploy.effect_tip(), name))
return False
self._call_stdio('start_loading', 'Get local repositories')
# Get the repository
repositories = self.load_local_repositories(deploy_info, False)
self._call_stdio('stop_loading', 'succeed')
return self._start_cluster(deploy, repositories, cmd, options)
def _start_cluster(self, deploy, repositories, cmd=None, options=Values()):
self._call_stdio('verbose', 'Get deploy config')
deploy_config = deploy.deploy_config
deploy_info = deploy.deploy_info
name = deploy.name
update_deploy_status = True
components = getattr(options, 'components', '')
......@@ -1182,11 +1316,7 @@ class ObdHome(object):
servers = getattr(options, 'servers', '')
server_list = servers.split(',') if servers else []
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
repositories = self.load_local_repositories(deploy_info, False)
self._call_stdio('start_loading', 'Search plugins')
start_check_plugins = self.search_py_script_plugin(repositories, 'start_check', no_found_act='warn')
create_tenant_plugins = self.search_py_script_plugin(repositories, 'create_tenant', no_found_act='ignore') if deploy_config.auto_create_tenant else {}
start_plugins = self.search_py_script_plugin(repositories, 'start')
......@@ -1553,8 +1683,18 @@ class ObdHome(object):
if deploy_info.status not in status:
self._call_stdio('error', 'Deploy "%s" is %s. You could not stop an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value))
return False
self._call_stdio('start_loading', 'Get local repositories')
# Get the repository
repositories = self.load_local_repositories(deploy_info)
self._call_stdio('stop_loading', 'succeed')
return self._stop_cluster(deploy, repositories, options)
def _stop_cluster(self, deploy, repositories, options=Values()):
self._call_stdio('verbose', 'Get deploy config')
deploy_config = deploy.deploy_config
deploy_info = deploy.deploy_info
name = deploy.name
update_deploy_status = True
components = getattr(options, 'components', '')
......@@ -1572,10 +1712,7 @@ class ObdHome(object):
servers = getattr(options, 'servers', '')
server_list = servers.split(',') if servers else []
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
repositories = self.load_local_repositories(deploy_info)
self._call_stdio('start_loading', 'Search plugins')
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
......@@ -1657,7 +1794,7 @@ class ObdHome(object):
self._call_stdio('stop_loading', 'succeed')
update_deploy_status = True
components = getattr(options, 'components', '')
if components:
components = components.split(',')
......@@ -1675,7 +1812,7 @@ class ObdHome(object):
servers = getattr(options, 'servers', '')
if servers:
server_list = servers.split(',')
if apply_change:
for repository in repositories:
cluster_config = deploy_config.components[repository.name]
......@@ -1739,13 +1876,13 @@ class ObdHome(object):
if restart_plugins[repository](
deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio,
local_home_path=self.home_path,
start_plugin=start_plugins[repository],
reload_plugin=reload_plugins[repository],
stop_plugin=stop_plugins[repository],
connect_plugin=connect_plugins[repository],
display_plugin=display_plugins[repository],
repository=repository,
new_cluster_config=new_cluster_config,
new_clients=new_ssh_clients
):
component_num -= 1
......@@ -1755,7 +1892,7 @@ class ObdHome(object):
deploy_config.update_component(new_cluster_config)
else:
break
if component_num == 0:
if len(components) != len(repositories) or servers:
self._call_stdio('print', "succeed")
......@@ -1779,13 +1916,13 @@ class ObdHome(object):
if restart_plugins[repository](
deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio,
local_home_path=self.home_path,
start_plugin=start_plugins[repository],
reload_plugin=reload_plugins[repository],
stop_plugin=stop_plugins[repository],
connect_plugin=connect_plugins[repository],
display_plugin=display_plugins[repository],
repository=repository,
new_cluster_config=new_cluster_config,
new_clients=new_ssh_clients,
rollback=True,
bootstrap_plugin=bootstrap_plugins[repository],
......@@ -1795,37 +1932,82 @@ class ObdHome(object):
self._call_stdio('stop_loading', 'succeed')
return False
def redeploy_cluster(self, name, opt=Values()):
return self.destroy_cluster(name, opt) and self.deploy_cluster(name) and self.start_cluster(name)
def destroy_cluster(self, name, opt=Values()):
def redeploy_cluster(self, name, opt=Values(), search_repo=True):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
self._call_stdio('start_loading', 'Get local repositories')
# Get the repository
repositories = self.load_local_repositories(deploy_info)
self._call_stdio('stop_loading', 'succeed')
self._call_stdio('verbose', 'Check deploy status')
if deploy_info.status in [DeployStatus.STATUS_RUNNING, DeployStatus.STATUS_UPRADEING]:
if not self.stop_cluster(name, Values({'force': True})):
if not self._stop_cluster(deploy, repositories, options=Values({'force': True})):
return False
elif deploy_info.status not in [DeployStatus.STATUS_STOPPED, DeployStatus.STATUS_DEPLOYED]:
self._call_stdio('error', 'Deploy "%s" is %s. You could not destroy an undeployed cluster' % (name, deploy_info.status.value))
self._call_stdio('error', 'Deploy "%s" is %s. You could not destroy an undeployed cluster' % (
name, deploy_info.status.value))
return False
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
if not self._destroy_cluster(deploy, repositories, opt):
return False
if search_repo:
if deploy_info.config_status != DeployConfigStatus.UNCHNAGE and not deploy.apply_temp_deploy_config():
self._call_stdio('error', 'Failed to apply new deploy configuration')
return False
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
repositories, install_plugins = self.search_components_from_mirrors_and_install(deploy_config)
if not repositories or not install_plugins:
return False
return self._deploy_cluster(deploy, repositories, opt) and self._start_cluster(deploy, repositories)
def destroy_cluster(self, name, opt=Values()):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
# allow the included file to be missing
deploy_config.allow_include_error()
self._call_stdio('start_loading', 'Get local repositories and plugins')
self._call_stdio('start_loading', 'Get local repositories')
# Get the repository
repositories = self.load_local_repositories(deploy_info)
self._call_stdio('stop_loading', 'succeed')
self._call_stdio('verbose', 'Check deploy status')
if deploy_info.status in [DeployStatus.STATUS_RUNNING, DeployStatus.STATUS_UPRADEING]:
if not self._stop_cluster(deploy, repositories, Values({'force': True})):
return False
elif deploy_info.status not in [DeployStatus.STATUS_STOPPED, DeployStatus.STATUS_DEPLOYED]:
self._call_stdio('error', 'Deploy "%s" is %s. You could not destroy an undeployed cluster' % (name, deploy_info.status.value))
return False
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
return self._destroy_cluster(deploy, repositories, opt)
def _destroy_cluster(self, deploy, repositories, opt=Values()):
deploy_config = deploy.deploy_config
self._call_stdio('start_loading', 'Search plugins')
# Get the repository
plugins = self.search_py_script_plugin(repositories, 'destroy')
self._call_stdio('stop_loading', 'succeed')
# Get the client
ssh_clients = self.get_clients(deploy_config, repositories)
......@@ -1837,7 +2019,7 @@ class ObdHome(object):
self._call_stdio('verbose', 'Try to stop cluster')
status = deploy.deploy_info.status
deploy.update_deploy_status(DeployStatus.STATUS_RUNNING)
if not self.stop_cluster(name):
if not self._stop_cluster(deploy, repositories):
deploy.update_deploy_status(status)
self._call_stdio('error', 'Fail to stop cluster')
return False
......@@ -1857,13 +2039,13 @@ class ObdHome(object):
self._call_stdio('verbose', 'Call %s for %s' % (plugins[repository], repository))
plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio)
self._call_stdio('verbose', 'Set %s deploy status to destroyed' % name)
self._call_stdio('verbose', 'Set %s deploy status to destroyed' % deploy.name)
if deploy.update_deploy_status(DeployStatus.STATUS_DESTROYED):
self._call_stdio('print', '%s destroyed' % name)
self._call_stdio('print', '%s destroyed' % deploy.name)
return True
return False
def change_repository(self, name, options=Values()):
def reinstall(self, name, options=Values()):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
if not deploy:
......@@ -1879,10 +2061,7 @@ class ObdHome(object):
component = getattr(options, 'component')
usable = getattr(options, 'hash')
if not component:
self._call_stdio('error', 'Specify the components you want to change.')
return False
if not usable:
self._call_stdio('error', 'Specify the hash you want to upgrade.')
self._call_stdio('error', 'Specify the components you want to reinstall.')
return False
if component not in deploy_info.components:
self._call_stdio('error', 'Not found %s in Deploy "%s" ' % (component, name))
......@@ -1899,77 +2078,81 @@ class ObdHome(object):
stop_plugins = self.search_py_script_plugin([current_repository], 'stop')
start_plugins = self.search_py_script_plugin([current_repository], 'start')
change_repo_plugin = self.plugin_manager.get_best_py_script_plugin('change_repo', 'general', '0.1')
self._call_stdio('stop_loading', 'succeed')
self._call_stdio('verbose', 'search target repository')
dest_repository = self.repository_manager.get_repository(current_repository.name, version=current_repository.version, tag=usable)
if not dest_repository:
pkg = self.mirror_manager.get_exact_pkg(name=current_repository.name, version=current_repository.version, md5=usable)
if not pkg:
self._call_stdio('error', 'No such package %s-%s-%s' % (component, current_repository.version, usable))
return False
repositories = []
install_plugins = self.get_install_plugin_and_install(repositories, [pkg])
if not install_plugins:
return False
dest_repository = repositories[0]
else:
install_plugins = self.search_plugins([dest_repository], PluginType.INSTALL)
if dest_repository is None:
self._call_stdio('error', 'Target version not found')
return False
if dest_repository == current_repository:
self._call_stdio('print', 'The current version is already %s.\nNothing to do.' % current_repository)
return False
self._call_stdio('stop_loading', 'succeed')
# Get the client
ssh_clients = self.get_clients(deploy_config, [current_repository])
cluster_config = deploy_config.components[current_repository.name]
self._call_stdio('start_loading', 'Load cluster param plugin')
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
self._call_stdio('stop_loading', 'succeed')
current_cluster_config = deploy_config.components[current_repository.name]
need_sync = bool(current_cluster_config.get_rsync_list())
need_change_repo = bool(usable)
sync_repositories = [current_repository]
repository = current_repository
cluster_config = current_cluster_config
# search repo and install
if usable:
self._call_stdio('verbose', 'search target repository')
dest_repository = self.repository_manager.get_repository(current_repository.name, version=current_repository.version, tag=usable)
if not dest_repository:
pkg = self.mirror_manager.get_exact_pkg(name=current_repository.name, version=current_repository.version, md5=usable)
if not pkg:
self._call_stdio('error', 'No such package %s-%s-%s' % (component, current_repository.version, usable))
return False
repositories = []
install_plugins = self.get_install_plugin_and_install(repositories, [pkg])
if not install_plugins:
return False
dest_repository = repositories[0]
else:
install_plugins = self.search_plugins([dest_repository], PluginType.INSTALL)
cluster_config = deploy_config.components[dest_repository.name]
# cluster files check
self.servers_repository_install(ssh_clients, cluster_config.servers, dest_repository, install_plugins[dest_repository])
# lib check
if not self.servers_repository_lib_check(ssh_clients, cluster_config.servers, dest_repository, install_plugins[dest_repository], 'warn'):
self._call_stdio('print', 'Try to get lib-repository')
repositories_lib_map = self.install_lib_for_repositories([dest_repository])
if repositories_lib_map is False:
self._call_stdio('error', 'Failed to install lib package for local')
return False
if self.servers_apply_lib_repository_and_check(ssh_clients, deploy_config, [dest_repository], repositories_lib_map):
self._call_stdio('error', 'Failed to install lib package for cluster servers')
if dest_repository is None:
self._call_stdio('error', 'Target version not found')
return False
if dest_repository == current_repository:
self._call_stdio('print', 'The current version is already %s.\nNothing to do.' % current_repository)
need_change_repo = False
else:
self._call_stdio('start_loading', 'Load cluster param plugin')
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
self._call_stdio('stop_loading', 'succeed')
cluster_config = deploy_config.components[dest_repository.name]
need_restart = need_sync or need_change_repo
# stop cluster if needed
if need_restart:
# Check the status for the deployed cluster
component_status = {}
cluster_status = self.cluster_status_check(ssh_clients, deploy_config, [current_repository], component_status)
if cluster_status is False or cluster_status == 1:
self._call_stdio('verbose', 'Call %s for %s' % (stop_plugins[current_repository], current_repository))
if not stop_plugins[current_repository](deploy_config.components.keys(), ssh_clients, current_cluster_config, [], options, self.stdio):
return False
# Check the status for the deployed cluster
component_status = {}
cluster_status = self.cluster_status_check(ssh_clients, deploy_config, [current_repository], component_status)
if cluster_status is False or cluster_status == 1:
self._call_stdio('verbose', 'Call %s for %s' % (stop_plugins[current_repository], current_repository))
if not stop_plugins[current_repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio):
# install repo to remote servers
if need_change_repo:
if not self.install_repositories_to_servers(deploy_config, [dest_repository, ], install_plugins, ssh_clients, options):
return False
sync_repositories = [dest_repository]
repository = dest_repository
self._call_stdio('verbose', 'Call %s for %s' % (change_repo_plugin, dest_repository))
if not change_repo_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, self.home_path, dest_repository):
# sync runtime dependencies
if not self.sync_runtime_dependencies(deploy_config, sync_repositories, ssh_clients, options):
return False
if deploy_info.status == DeployStatus.STATUS_RUNNING:
self._call_stdio('verbose', 'Call %s for %s' % (start_plugins[current_repository], dest_repository))
# start cluster if needed
if need_restart and deploy_info.status == DeployStatus.STATUS_RUNNING:
self._call_stdio('verbose', 'Call %s for %s' % (start_plugins[current_repository], repository))
setattr(options, 'without_parameter', True)
if not start_plugins[current_repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, self.home_path, dest_repository.repository_dir) and getattr(options, 'force', False) is False:
self._call_stdio('verbose', 'Call %s for %s' % (change_repo_plugin, current_repository))
change_repo_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, self.home_path, current_repository)
if not start_plugins[current_repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, self.home_path, repository.repository_dir) and getattr(options, 'force', False) is False:
self.install_repositories_to_servers(deploy_config, [current_repository, ], install_plugins, ssh_clients, options)
return False
deploy.update_component_repository(dest_repository)
# update deploy info
if need_change_repo:
deploy.use_model(dest_repository.name, dest_repository)
return True
def upgrade_cluster(self, name, options=Values()):
......@@ -1978,13 +2161,13 @@ class ObdHome(object):
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Deploy status judge')
if deploy_info.status not in [DeployStatus.STATUS_UPRADEING, DeployStatus.STATUS_RUNNING]:
self._call_stdio('error', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
return False
deploy_config = deploy.deploy_config
self._call_stdio('start_loading', 'Get local repositories and plugins')
......@@ -2037,9 +2220,9 @@ class ObdHome(object):
self._call_stdio(
'print_list',
images,
['name', 'version', 'release', 'arch', 'md5'],
lambda x: [x.name, x.version, x.release, x.arch, x.md5],
title='%s %s Candidates' % (component, version)
)
self._call_stdio('error', 'Too many match')
return False
......@@ -2056,11 +2239,11 @@ class ObdHome(object):
repositories = []
pkg = self.mirror_manager.get_exact_pkg(name=images[0].name, md5=images[0].md5)
pkgs = [pkg]
install_plugins = self.get_install_plugin_and_install(repositories, pkgs)
if not install_plugins:
return False
dest_repository = repositories[0]
if dest_repository is None:
self._call_stdio('error', 'Target version not found')
......@@ -2082,17 +2265,24 @@ class ObdHome(object):
if not route:
return False
for node in route[1: -1]:
images = self.search_images(component, version=node.get('version'), release=node.get('release'), disable=disable, usable=usable, release_first=True)
_version = node.get('version')
_release = node.get('release')
images = self.search_images(component, version=_version, release=_release, disable=disable, usable=usable, release_first=True)
if not images:
self._call_stdio('error', 'No such package %s-%s' % (component, version))
pkg_name = component
if _version:
pkg_name = pkg_name + '-' + str(_version)
if _release:
pkg_name = pkg_name + '-' + str(_release)
self._call_stdio('error', 'No such package %s' % pkg_name)
return False
if len(images) > 1:
self._call_stdio(
'print_list',
images,
['name', 'version', 'release', 'arch', 'md5'],
lambda x: [x.name, x.version, x.release, x.arch, x.md5],
title='%s %s Candidates' % (component, version)
)
self._call_stdio('error', 'Too many match')
return False
......@@ -2106,7 +2296,7 @@ class ObdHome(object):
if isinstance(image, Repository):
upgrade_repositories.append(image)
else:
repository = self.repository_manager.get_repository_by_version(name=image.name, version=image.version, tag=image.md5)
repository = self.repository_manager.get_repository(name=image.name, version=image.version, package_hash=image.md5)
if repository:
upgrade_repositories.append(repository)
else:
......@@ -2132,7 +2322,7 @@ class ObdHome(object):
return False
self._call_stdio('verbose', 'Call %s for %s' % (upgrade_check_plugins[current_repository], current_repository))
if not upgrade_check_plugins[current_repository](
deploy_config.components.keys(), ssh_clients, cluster_config, {}, options, self.stdio,
current_repository=current_repository,
repositories=upgrade_repositories,
route=route,
......@@ -2145,17 +2335,17 @@ class ObdHome(object):
self._call_stdio(
'print_list',
upgrade_repositories,
['name', 'version', 'release', 'arch', 'md5', 'mark'],
lambda x: [x.name, x.version, x.release, x.arch, x.md5, 'start' if x == current_repository else 'dest' if x == dest_repository else ''],
title='Packages Will Be Used'
)
if not self._call_stdio('confirm', 'If you use a non-official release, we cannot guarantee a successful upgrade or technical support when you fail. Make sure that you want to use the above package to upgrade.'):
return False
index = 1
upgrade_ctx = {
'route': route,
'upgrade_repositories': [
{
'version': repository.version,
......@@ -2178,29 +2368,13 @@ class ObdHome(object):
# Get the client
ssh_clients = self.get_clients(deploy_config, [current_repository])
cluster_config = deploy_config.components[current_repository.name]
install_plugins = self.get_install_plugin_and_install(upgrade_repositories, [])
if not install_plugins:
return False
need_lib_repositories = []
for repository in upgrade_repositories[1:]:
cluster_config = deploy_config.components[repository.name]
# cluster files check
self.servers_repository_install(ssh_clients, cluster_config.servers, repository, install_plugins[repository])
# lib check
if not self.servers_repository_lib_check(ssh_clients, cluster_config.servers, repository, install_plugins[repository], 'warn'):
need_lib_repositories.append(repository)
if need_lib_repositories:
self._call_stdio('print', 'Try to get lib-repository')
repositories_lib_map = self.install_lib_for_repositories(need_lib_repositories)
if repositories_lib_map is False:
self._call_stdio('error', 'Failed to install lib package for local')
return False
if self.servers_apply_lib_repository_and_check(ssh_clients, deploy_config, need_lib_repositories, repositories_lib_map):
self._call_stdio('error', 'Failed to install lib package for cluster servers')
return False
if not self.install_repositories_to_servers(deploy_config, upgrade_repositories[1:], install_plugins, ssh_clients, options):
return False
n = len(upgrade_repositories)
while upgrade_ctx['index'] < n:
......@@ -2215,7 +2389,9 @@ class ObdHome(object):
current_repository=current_repository,
upgrade_repositories=upgrade_repositories,
apply_param_plugin=lambda repository: self.search_param_plugin_and_apply([repository], deploy_config),
upgrade_ctx=upgrade_ctx
upgrade_ctx=upgrade_ctx,
install_repository_to_servers=self.install_repository_to_servers,
unuse_lib_repository=deploy_config.unuse_lib_repository
)
deploy.update_upgrade_ctx(**upgrade_ctx)
if not ret:
......@@ -2250,13 +2426,17 @@ class ObdHome(object):
for item in plugin.file_list(info):
path = os.path.join(repo_path, item.src_path)
path = os.path.normcase(path)
if not os.path.exists(path) or os.path.isdir(path) != (item.type == InstallPlugin.FileItemType.DIR):
path = os.path.join(repo_path, item.target_path)
path = os.path.normcase(path)
if not os.path.exists(path):
self._call_stdio('error', 'need %s: %s ' % ('dir' if item.type == InstallPlugin.FileItemType.DIR else 'file', path))
success = False
continue
if os.path.isdir(path) != (item.type == InstallPlugin.FileItemType.DIR):
self._call_stdio('error', 'need %s, but %s is %s' % (item.type, path, 'file' if item.type == InstallPlugin.FileItemType.DIR else 'dir'))
success = False
continue
files[item.src_path] = path
if success is False:
return False
......@@ -2336,9 +2516,14 @@ class ObdHome(object):
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
# repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]})
repositories = self.load_local_repositories(deploy_info)
for repository in repositories:
if repository.name == opts.component:
break
else:
self._call_stdio('error', 'Cannot find the component for mysqltest, use `--component` to select component')
return False
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
self._call_stdio('stop_loading', 'succeed')
......@@ -2370,6 +2555,7 @@ class ObdHome(object):
mysqltest_check_opt_plugin = self.plugin_manager.get_best_py_script_plugin('check_opt', 'mysqltest', repository.version)
mysqltest_check_test_plugin = self.plugin_manager.get_best_py_script_plugin('check_test', 'mysqltest', repository.version)
mysqltest_run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'mysqltest', repository.version)
mysqltest_collect_log_plugin = self.plugin_manager.get_best_py_script_plugin('collect_log', 'mysqltest', repository.version)
env = opts.__dict__
env['cursor'] = cursor
......@@ -2379,66 +2565,84 @@ class ObdHome(object):
ret = mysqltest_check_opt_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env)
if not ret:
return False
if not env['init_only']:
self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_check_test_plugin, repository))
ret = mysqltest_check_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env)
if not ret:
self._call_stdio('error', 'Failed to get test set')
return False
if env['test_set'] is None:
self._call_stdio('error', 'Test set is empty')
return False
if env['need_init'] or env['init_only']:
self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, repository))
if not mysqltest_init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env):
self._call_stdio('error', 'Failed to init for mysqltest')
return False
if env['init_only']:
return True
self._call_stdio('verbose', 'test set: {}'.format(env['test_set']))
self._call_stdio('verbose', 'total: {}'.format(len(env['test_set'])))
reboot_success = True
while True:
self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_run_test_plugin, repository))
ret = mysqltest_run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env)
if not ret:
break
self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_collect_log_plugin, repository))
mysqltest_collect_log_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {},
self.stdio, env)
if ret.get_return('finished'):
break
if ret.get_return('reboot') and not env['disable_reboot']:
cursor.close()
db.close()
if getattr(self.stdio, 'sub_io'):
stdio = self.stdio.sub_io(msg_lv=MsgLevel.ERROR)
else:
stdio = None
reboot_timeout = getattr(opts, 'reboot_timeout', 0)
reboot_retries = getattr(opts, 'reboot_retries', 5)
reboot_success = False
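# Retry the redeploy-and-reconnect cycle up to reboot_retries times, each
# attempt bounded by reboot_timeout; if every attempt fails, logs are
# collected and the run aborts.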
while reboot_retries and not reboot_success:
reboot_retries -= 1
with timeout(reboot_timeout):
self._call_stdio('start_loading', 'Reboot')
obd = ObdHome(self.home_path, self.dev_mode, stdio=stdio)
obd.lock_manager.set_try_times(-1)
if obd.redeploy_cluster(
name,
opt=Values({'force_kill': True, 'force': True, 'force_delete': True}), search_repo=False):
self._call_stdio('stop_loading', 'succeed')
else:
self._call_stdio('stop_loading', 'fail')
continue
obd.lock_manager.set_try_times(6000)
obd = None
connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository]
ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {},
self.stdio, target_server=opts.test_server, sys_root=False)
if not ret or not ret.get_return('connect'):
self._call_stdio('error', 'Failed to connect server')
continue
db = ret.get_return('connect')
cursor = ret.get_return('cursor')
env['cursor'] = cursor
self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_init_plugin, repository))
if mysqltest_init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {},
self.stdio, env):
reboot_success = True
else:
self._call_stdio('error', 'Failed to prepare for mysqltest')
if not reboot_success:
env['collect_log'] = True
mysqltest_collect_log_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, env, test_name='reboot_failed')
break
result = env.get('case_results', [])
passcnt = len(list(filter(lambda x: x["ret"] == 0, result)))
totalcnt = len(env.get('run_test_cases', []))
failcnt = totalcnt - passcnt
if result:
self._call_stdio(
......@@ -2447,7 +2651,9 @@ class ObdHome(object):
title='Result (Total %d, Passed %d, Failed %d)' % (totalcnt, passcnt, failcnt),
align={'Cost (s)': 'r'}
)
if failcnt or not reboot_success:
if not reboot_success:
self._call_stdio('error', 'reboot cluster failed')
self._call_stdio('print', 'Mysqltest failed')
else:
self._call_stdio('print', 'Mysqltest passed')
......@@ -2529,10 +2735,16 @@ class ObdHome(object):
self._call_stdio('print', '%s %s is stopped' % (server, repository.name))
return False
ob_repository = None
repository = None
for tmp_repository in repositories:
if tmp_repository.name in ["oceanbase", "oceanbase-ce"]:
ob_repository = tmp_repository
if tmp_repository.name == opts.component:
repository = tmp_repository
plugin_version = ob_repository.version if ob_repository else repository.version
env = {'sys_root': False}
db = None
cursor = None
......@@ -2542,7 +2754,6 @@ class ObdHome(object):
connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository]
if repository.name in ['obproxy', 'obproxy-ce']:
ob_optimization = False
allow_components = ['oceanbase', 'oceanbase-ce']
......@@ -2564,8 +2775,8 @@ class ObdHome(object):
return False
db = ret.get_return('connect')
cursor = ret.get_return('cursor')
run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'sysbench', plugin_version)
setattr(opts, 'host', opts.test_server.ip)
setattr(opts, 'port', db.port)
......@@ -2685,6 +2896,95 @@ class ObdHome(object):
return True
return False
def tpcds(self, name, opts):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Check deploy status')
if deploy_info.status != DeployStatus.STATUS_RUNNING:
self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
return False
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
db_component = None
db_components = ['oceanbase', 'oceanbase-ce']
allow_components = ['obproxy', 'obproxy-ce', 'oceanbase', 'oceanbase-ce']
if opts.component is None:
for component_name in allow_components:
if component_name in deploy_config.components:
opts.component = component_name
break
elif opts.component not in allow_components:
self._call_stdio('error', '%s is not supported. %s is allowed' % (opts.component, allow_components))
return False
if opts.component not in deploy_config.components:
self._call_stdio('error', 'Cannot find the component for tpcds, use `--component` to select component')
return False
for component_name in db_components:
if component_name in deploy_config.components:
db_component = component_name
if db_component is None:
self._call_stdio('error', 'Missing database component (%s) in deploy' % ','.join(db_components))
return False
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
# repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]})
repositories = self.load_local_repositories(deploy_info)
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
self._call_stdio('stop_loading', 'succeed')
# Get the client
ssh_clients = self.get_clients(deploy_config, repositories)
# Check the status for the deployed cluster
component_status = {}
cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status)
if cluster_status is False or cluster_status == 0:
if self.stdio:
self._call_stdio('error', EC_SOME_SERVER_STOPED)
for repository in component_status:
cluster_status = component_status[repository]
for server in cluster_status:
if cluster_status[server] == 0:
self._call_stdio('print', '%s %s is stopped' % (server, repository.name))
return False
db_cluster_config = deploy_config.components[db_component]
cluster_config = deploy_config.components[opts.component]
if opts.test_server is None:
opts.test_server = cluster_config.servers[0]
else:
for server in cluster_config.servers:
if server.name == opts.test_server:
opts.test_server = server
break
else:
self._call_stdio('error', '%s is not a server in %s' % (opts.test_server, opts.component))
return False
check_opt_plugin = self.plugin_manager.get_best_py_script_plugin('check_opt', 'tpcds', db_cluster_config.version)
load_data_plugin = self.plugin_manager.get_best_py_script_plugin('load_data', 'tpcds', cluster_config.version)
run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'tpcds', cluster_config.version)
self._call_stdio('verbose', 'Call %s for %s' % (check_opt_plugin, cluster_config.name))
if not check_opt_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, db_cluster_config=db_cluster_config):
return False
self._call_stdio('verbose', 'Call %s for %s' % (load_data_plugin, db_cluster_config.name))
if not load_data_plugin(deploy_config.components.keys(), ssh_clients, db_cluster_config, [], opts, self.stdio):
return False
self._call_stdio('verbose', 'Call %s for %s' % (run_test_plugin, cluster_config.name))
return run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio)
def tpcc(self, name, opts):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
......@@ -2730,8 +3030,8 @@ class ObdHome(object):
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
# repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]})
repositories = self.load_local_repositories(deploy_info)
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
......@@ -2753,9 +3053,15 @@ class ObdHome(object):
self._call_stdio('print', '%s %s is stopped' % (server, repository.name))
return False
ob_repository = None
repository = None
for tmp_repository in repositories:
if tmp_repository.name in ["oceanbase", "oceanbase-ce"]:
ob_repository = tmp_repository
if tmp_repository.name == opts.component:
repository = tmp_repository
plugin_version = ob_repository.version if ob_repository else repository.version
env = {'sys_root': False}
odp_db = None
......@@ -2796,12 +3102,11 @@ class ObdHome(object):
return False
db = ret.get_return('connect')
cursor = ret.get_return('cursor')
pre_test_plugin = self.plugin_manager.get_best_py_script_plugin('pre_test', 'tpcc', plugin_version)
optimize_plugin = self.plugin_manager.get_best_py_script_plugin('optimize', 'tpcc', plugin_version)
build_plugin = self.plugin_manager.get_best_py_script_plugin('build', 'tpcc', plugin_version)
run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'tpcc', plugin_version)
recover_plugin = self.plugin_manager.get_best_py_script_plugin('recover', 'tpcc', plugin_version)
setattr(opts, 'host', opts.test_server.ip)
setattr(opts, 'port', db.port)
......@@ -2917,5 +3222,103 @@ class ObdHome(object):
if odp_db:
odp_db.close()
def db_connect(self, name, opts):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name, read_only=True)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
deploy_info = deploy.deploy_info
if deploy_info.status in (DeployStatus.STATUS_DESTROYED, DeployStatus.STATUS_CONFIGURED):
self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
return False
allow_components = ['obproxy', 'obproxy-ce', 'oceanbase', 'oceanbase-ce']
if opts.component is None:
for component_name in allow_components:
if component_name in deploy_config.components:
opts.component = component_name
break
elif opts.component not in allow_components:
self._call_stdio('error', '%s is not supported. %s is allowed' % (opts.component, allow_components))
return False
if opts.component not in deploy_config.components:
self._call_stdio('error', 'Cannot find the component for db_connect, use `--component` to select component')
return False
cluster_config = deploy_config.components[opts.component]
if not cluster_config.servers:
self._call_stdio('error', '%s server list is empty' % opts.component)
return False
if opts.server is None:
opts.server = cluster_config.servers[0]
else:
for server in cluster_config.servers:
if server.name == opts.server:
opts.server = server
break
else:
self._call_stdio('error', '%s is not a server in %s' % (opts.server, opts.component))
return False
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
repositories = self.load_local_repositories(deploy_info)
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
self._call_stdio('stop_loading', 'succeed')
sync_config_plugin = self.plugin_manager.get_best_py_script_plugin('sync_cluster_config', 'general', '0.1')
sync_config_plugin(deploy_config.components.keys(), [], cluster_config, [], opts, self.stdio)
db_connect_plugin = self.plugin_manager.get_best_py_script_plugin('db_connect', 'general', '0.1')
return db_connect_plugin(deploy_config.components.keys(), [], cluster_config, [], opts, self.stdio)
def commands(self, name, cmd_name, opts):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name, read_only=True)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
deploy_info = deploy.deploy_info
if deploy_info.status in (DeployStatus.STATUS_DESTROYED, DeployStatus.STATUS_CONFIGURED):
self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
return False
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
repositories = self.load_local_repositories(deploy_info)
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
self._call_stdio('stop_loading', 'succeed')
check_opt_plugin = self.plugin_manager.get_best_py_script_plugin('check_opt', 'commands', '0.1')
prepare_variables_plugin = self.plugin_manager.get_best_py_script_plugin('prepare_variables', 'commands', '0.1')
commands_plugin = self.plugin_manager.get_best_py_script_plugin('commands', 'commands', '0.1')
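# Pipeline for a user command: check_opt resolves which components and
# servers the command targets, prepare_variables renders the command
# template for each (component, server) pair, and commands executes it.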
ssh_clients = self.get_clients(deploy_config, repositories)
sync_config_plugin = self.plugin_manager.get_best_py_script_plugin('sync_cluster_config', 'general', '0.1')
cluster_config = deploy_config.components[repositories[0].name]
context = {}
sync_config_plugin(deploy_config.components.keys(), [], cluster_config, [], opts, self.stdio)
ret = check_opt_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, name=cmd_name, context=context)
if not ret:
return
for component in context['components']:
cluster_config = deploy_config.components[component]
for server in context['servers']:
ret = prepare_variables_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, name=cmd_name, component=component, server=server, context=context)
if not ret:
return
if not ret.get_return("skip"):
ret = commands_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, context=context)
if context.get('interactive'):
return bool(ret)
results = context.get('results', [])
self._call_stdio("print_list", results, ["Component", "Server", cmd_name.title()], title=cmd_name.title())
return not context.get('failed')
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
from tool import YamlLoader, ConfigUtil
ALLOWED_LEVEL = [0, 1, 2]
YAML_LOADER = YamlLoader()
YAML_TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), "command_template.yaml")
class CommandConfig(object):
def __init__(self, yaml_path=YAML_TEMPLATE_PATH, loader=YAML_LOADER, stdio=None):
self.yaml_path = yaml_path
self.loader = loader
self.stdio = stdio
self._load()
def _load(self):
try:
with open(self.yaml_path, 'rb') as f:
self._data = self.loader.load(f)
self.all_variables = self._data.get('variables')
self.global_variables = self.all_variables.get('global', [])
self.server_variables = self.all_variables.get('server', [])
self.ssh_variables = self.all_variables.get('ssh', [])
self.all_commands = self._data.get('commands', [])
self.all_wrappers = self._data.get('wrappers', [])
except:
if self.stdio:
self.stdio.exception('failed to load command template')
def check_opt(plugin_context, name, context, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key, default)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
stdio = plugin_context.stdio
cluster_config = plugin_context.cluster_config
options = plugin_context.options
clients = plugin_context.clients
deployed_components = list(plugin_context.components)
components = get_option("components", None)
servers = get_option("servers", None)
interactive = False
command_config = CommandConfig()
for command in command_config.all_commands:
command_name = ConfigUtil.get_value_from_dict(command, 'name', transform_func=str)
if command_name == name:
interactive = ConfigUtil.get_value_from_dict(command, 'interactive', False, transform_func=bool)
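# Interactive commands target a single component and server; non-interactive
# ones fan out to every deployed component and server unless narrowed by
# --components/--servers.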
if components is None:
if interactive:
components = deployed_components[:1]
stdio.verbose("Component {} will be used according to the order in the deploy configuration yaml.".format(components[0]))
else:
components = deployed_components
stdio.verbose("Component {} will be used because {} is a non-interactive command".format(", ".join(components), name))
elif components == "*":
components = deployed_components
else:
components = components.split(',')
if not clients:
stdio.error("{} server list is empty".format(','.join(components)))
return
if servers is None:
if interactive:
servers = [None, ]
else:
servers = list(clients.keys())
stdio.verbose("Server {} will be used because {} is a non-interactive command".format(", ".join([str(s) for s in servers]), name))
elif servers == '*':
servers = list(clients.keys())
else:
server_names = servers.split(',')
servers = []
for server in clients:
if server.name in server_names:
server_names.remove(server.name)
servers.append(server)
if server_names:
stdio.error("Server {} not found in current deployment".format(','.join(server_names)))
return
failed_components = []
for component in components:
if component not in deployed_components:
failed_components.append(component)
if failed_components:
stdio.error('{} is not supported. {} is allowed'.format(','.join(failed_components), deployed_components))
return plugin_context.return_false()
context.update(components=components, servers=servers, command_config=command_config)
return plugin_context.return_true(context=context)
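# command_template.yaml: the template consumed by CommandConfig above. It has
# three sections: `variables` (config values exposed to command templates,
# scoped as ssh/server/global), `wrappers` (how a rendered command is
# executed), and `commands` (the command definitions themselves).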
variables:
ssh:
- name: host
config_key: host
components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce']
- name: user
config_key: username
components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce']
server:
- name: home_path
config_key: home_path
components: ['oceanbase', 'oceanbase-ce', 'obproxy', 'obproxy-ce']
- name: mysql_port
config_key: mysql_port
components: ['oceanbase', 'oceanbase-ce']
global:
- name: password
config_key: root_password
components: ['oceanbase', 'oceanbase-ce']
- name: password
config_key: observer_root_password
components: ['obproxy', 'obproxy-ce']
wrappers:
- name: ssh
remote_command: ssh {user}@{host} -t '{cmd}'
local_command: "{cmd}"
- name: ssh_client
command: "{cmd}"
executor: "ssh_client"
commands:
- name: ssh
components: ['oceanbase', 'obproxy', 'oceanbase-ce', 'obproxy-ce']
command: "cd {home_path}/log;bash --login"
wrapper: "ssh"
interactive: true
- name: less
command: "less {home_path}/log/observer.log"
components: ['oceanbase', 'oceanbase-ce']
wrapper: "ssh"
interactive: true
no_interruption: true
- name: less
command: "less {home_path}/log/obproxy.log"
components: ['obproxy', 'obproxy-ce']
wrapper: "ssh"
interactive: true
no_interruption: true
- name: pid
wrapper: ssh_client
command: "pgrep -u {user} -f ^{home_path}/bin/observer"
components: ['oceanbase', 'oceanbase-ce']
no_exception: true
- name: pid
wrapper: ssh_client
command: "pgrep -u {user} -f ^{home_path}/bin/obproxy"
components: ['obproxy', 'obproxy-ce']
no_exception: true
- name: gdb
wrapper: "ssh"
command: "cd {home_path}; LD_LIBRARY_PATH=./lib:$LD_LIBRARY_PATH gdb --pid=`$pid`"
components: ['oceanbase', 'oceanbase-ce']
interactive: true
no_interruption: true
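# Illustrative only (not part of the shipped template): a hypothetical `tail`
# command would follow the same schema as `less` above, e.g.
# - name: tail
#   command: "tail -f {home_path}/log/observer.log"
#   components: ['oceanbase', 'oceanbase-ce']
#   wrapper: "ssh"
#   interactive: true
#   no_interruption: true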
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
try:
import subprocess32 as subprocess
except:
import subprocess
import signal
import os
from ssh import LocalClient
from tool import var_replace, COMMAND_ENV
def commands(plugin_context, context, *args, **kwargs):
def get_value_from_context(key, default=None):
value = context.get(key, default)
stdio.verbose('get value from context: %s value %s' % (key, value))
return value
stdio = plugin_context.stdio
command_template = get_value_from_context("command_template")
command_variables = get_value_from_context("command_variables", {})
interactive = get_value_from_context("interactive")
results = get_value_from_context("results", [])
failed = get_value_from_context("failed", False)
no_exception = get_value_from_context("no_exception", False)
no_interruption = get_value_from_context("no_interruption", False)
executor = get_value_from_context("executor", False)
component = get_value_from_context("component", False)
server = get_value_from_context("server", None)
env = get_value_from_context("env", {})
cmd = command_template.format(**command_variables)
cmd = var_replace(cmd, env)
if interactive:
if no_interruption:
stdio.verbose('ctrl c is not accepted in this command')
def _no_interruption(signum, frame):
stdio.verbose('ctrl c is not accepted in this command')
signal.signal(signal.SIGINT, _no_interruption)
stdio.verbose('exec cmd: {}'.format(cmd))
subprocess.call(cmd, env=os.environ.copy(), shell=True)
else:
client = plugin_context.clients[server]
if executor == "ssh_client":
ret = client.execute_command(cmd, stdio=stdio)
else:
ret = LocalClient.execute_command(cmd, env=client.env, stdio=stdio)
if ret and ret.stdout:
results.append([component, server, ret.stdout.strip()])
elif not no_exception:
failed = True
context.update(results=results, failed=failed)
return plugin_context.return_true(context=context)
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from tool import ConfigUtil
class CommandVariables(dict):
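# A dict whose lookups fall back to the key itself, so a reference to a
# variable that was never loaded resolves to its own name instead of raising.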
def __getitem__(self, item):
if item not in self:
return item
else:
return super(CommandVariables, self).__getitem__(item)
def load_variables_from_config(variables, component, config, command_variables, stdio=None):
for variable in variables:
if component not in ConfigUtil.get_list_from_dict(variable, 'components', str):
continue
variable_name = ConfigUtil.get_value_from_dict(variable, 'name', transform_func=str)
config_key = ConfigUtil.get_value_from_dict(variable, 'config_key', transform_func=str)
value = config.get(config_key)
if value is not None:
command_variables[variable_name] = str(value)
if stdio:
stdio.verbose('get variable %s for config key %s, value is %s' % (variable_name, config_key, value))
def prepare_variables(plugin_context, name, context, component, server, *args, **kwargs):
def get_value_from_context(key, default=None):
value = context.get(key, default)
stdio.verbose('get value from context: %s value %s' % (key, value))
return value
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
clients = plugin_context.clients
components = get_value_from_context("components", [])
servers = get_value_from_context("servers", [])
cmd_conf = get_value_from_context("command_config")
loading_env = {}
if server is None:
server = cluster_config.servers[0]
# find command template
command_template = None
interactive = None
wrapper_name = None
no_exception = False
no_interruption = False
executor = None
command_variables = CommandVariables()
for command in cmd_conf.all_commands:
cmd_name = ConfigUtil.get_value_from_dict(command, 'name', transform_func=str)
allow_components = ConfigUtil.get_list_from_dict(command, 'components', str)
if component in allow_components:
current_command = ConfigUtil.get_value_from_dict(command, 'command', transform_func=str)
loading_env[cmd_name] = current_command
if name == cmd_name:
command_template = current_command
interactive = ConfigUtil.get_value_from_dict(command, 'interactive', transform_func=bool)
wrapper_name = ConfigUtil.get_value_from_dict(command, 'wrapper', transform_func=str)
no_exception = ConfigUtil.get_value_from_dict(command, 'no_exception', transform_func=bool)
no_interruption = ConfigUtil.get_value_from_dict(command, 'no_interruption', transform_func=bool)
if command_template is None:
stdio.error('There is no command {} in component {}. Please use --components to set the right component.'.format(name, component))
return
if interactive and (len(components) > 1 or len(servers) > 1):
stdio.error('Interactive commands do not support specifying multiple components or servers.')
return
cmd_input = None
if server not in cluster_config.servers:
if interactive:
stdio.error("{} is not a server in {}".format(server, component))
return plugin_context.return_false()
else:
stdio.verbose("{} is not a server in {}".format(server, component))
return plugin_context.return_true(skip=True)
global_config = cluster_config.get_global_conf()
server_config = cluster_config.get_server_conf(server)
client = clients[server]
ssh_config = vars(client.config)
# load global config
stdio.verbose('load variables from global config')
load_variables_from_config(cmd_conf.global_variables, component, global_config, command_variables, stdio)
# load server config
stdio.verbose('load variables from server config')
load_variables_from_config(cmd_conf.server_variables, component, server_config, command_variables, stdio)
# load ssh config
stdio.verbose('load variables from ssh config')
load_variables_from_config(cmd_conf.ssh_variables, component, ssh_config, command_variables, stdio)
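# Resolve the wrapper, if any: a wrapper with both local_command and
# remote_command picks one based on whether the client is localhost, and an
# `executor` of ssh_client makes the commands plugin run the result through
# the server's SSH client instead of a local shell.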
if wrapper_name:
for wrapper in cmd_conf.all_wrappers:
if wrapper_name == ConfigUtil.get_value_from_dict(wrapper, 'name', transform_func=str):
local_command = ConfigUtil.get_value_from_dict(wrapper, "local_command", transform_func=str)
remote_command = ConfigUtil.get_value_from_dict(wrapper, "remote_command", transform_func=str)
command = ConfigUtil.get_value_from_dict(wrapper, "command", transform_func=str)
cmd_input = ConfigUtil.get_value_from_dict(wrapper, "input", transform_func=str)
executor = ConfigUtil.get_value_from_dict(wrapper, "executor", transform_func=str)
if local_command and remote_command:
if client.is_localhost():
command = local_command
else:
command = remote_command
command_template = command.format(cmd=command_template, **command_variables)
if cmd_input:
cmd_input = cmd_input.format(cmd=command_template, **command_variables)
break
else:
stdio.error("Wrapper {} not found in component {}.".format(wrapper_name, component))
for key, value in loading_env.items():
loading_env[key] = str(value).format(**command_variables)
context.update(
command_variables=command_variables, command_config=cmd_conf, command_template=command_template,
interactive=interactive, cmd_input=cmd_input, no_exception=no_exception, no_interruption=no_interruption,
component=component, server=server, env=loading_env, executor=executor)
return plugin_context.return_true()
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from subprocess import call, Popen, PIPE
from ssh import LocalClient
def db_connect(plugin_context, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
def local_execute_command(command, env=None, timeout=None):
return LocalClient.execute_command(command, env, timeout, stdio)
def get_connect_cmd():
cmd = r"{obclient_bin} -h{host} -P{port} -u {user}@{tenant} --prompt 'OceanBase(\u@\d)>' -A".format(
obclient_bin=obclient_bin,
host=server.ip,
port=port,
user=user,
tenant=tenant
)
if need_password:
cmd += " -p"
elif password:
cmd += " -p{}".format(password)
if database:
cmd += " -D{}".format(database)
return cmd
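# Illustrative result (hypothetical values):
#   obclient -h127.0.0.1 -P2881 -u root@sys --prompt 'OceanBase(\u@\d)>' -A -p -Doceanbase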
def test_connect():
return local_execute_command(get_connect_cmd() + " -e 'help'")
def connect():
conn_cmd = get_connect_cmd()
stdio.verbose('execute cmd: {}'.format(conn_cmd))
p = None
return_code = 255
try:
p = Popen(conn_cmd, shell=True)
return_code = p.wait()
except:
stdio.exception("")
if p:
p.kill()
stdio.verbose('exit code: {}'.format(return_code))
return return_code
options = plugin_context.options
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
user = get_option('user', 'root')
tenant = get_option('tenant', 'sys')
database = get_option('database')
password = get_option('password')
obclient_bin = get_option('obclient_bin')
server = get_option('server')
component = get_option('component')
global_conf = cluster_config.get_global_conf()
server_config = cluster_config.get_server_conf(server)
need_password = False
# default to the oceanbase database when connecting as root@sys
if not database and user == 'root' and tenant == 'sys':
database = 'oceanbase'
if component in ["oceanbase", "oceanbase-ce"]:
port = server_config.get("mysql_port")
else:
port = server_config.get("listen_port")
ret = local_execute_command('%s --help' % obclient_bin)
if not ret:
stdio.error(
'%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % (
ret.stderr, obclient_bin))
return
if not password:
connected = test_connect()
if not connected:
if user == "root" and tenant == "sys":
if component in ["oceanbase", "oceanbase-ce"]:
password = global_conf.get('root_password')
elif component in ["obproxy", "obproxy-ce"]:
password = global_conf.get('observer_root_password')
elif user == "root" and tenant == "proxysys":
if component in ["obproxy", "obproxy-ce"]:
password = global_conf.get("obproxy_sys_password")
elif user == "proxyro" and tenant == 'sys':
if component in ["oceanbase", "oceanbase-ce"]:
password = global_conf.get("proxyro_password")
elif component in ["obproxy", "obproxy-ce"]:
password = global_conf.get("observer_sys_password")
if password:
connected = test_connect()
need_password = not connected
try:
code = connect()
except KeyboardInterrupt:
stdio.exception("")
return False
return code == 0
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
import re
from _plugin import InstallPlugin
from _deploy import InnerConfigKeywords
from tool import YamlLoader
def install_repo(plugin_context, obd_home, install_repository, install_plugin, check_repository, check_file_map,
msg_lv, *args, **kwargs):
cluster_config = plugin_context.cluster_config
def install_to_home_path():
repo_dir = install_repository.repository_dir.replace(obd_home, remote_obd_home, 1)
if is_lib_repo:
home_path = os.path.join(remote_home_path, 'lib')
else:
home_path = remote_home_path
client.add_env("_repo_dir", repo_dir, True)
client.add_env("_home_path", home_path, True)
mkdir_bash = "mkdir -p ${_home_path} && cd ${_repo_dir} && find -type d | xargs -i mkdir -p ${_home_path}/{}"
if not client.execute_command(mkdir_bash):
return False
success = True
for install_file_item in install_file_items:
source = os.path.join(repo_dir, install_file_item.target_path)
target = os.path.join(home_path, install_file_item.target_path)
client.add_env("source", source, True)
client.add_env("target", target, True)
if install_file_item.install_method == InstallPlugin.InstallMethod.CP:
install_cmd = "cp -f"
else:
install_cmd = "ln -fs"
if install_file_item.type == InstallPlugin.FileItemType.DIR:
if client.execute_command("ls -1 ${source}"):
success = client.execute_command("cd ${source} && find -type f | xargs -i %(install_cmd)s ${source}/{} ${target}/{}" % {"install_cmd": install_cmd}) and success
success = client.execute_command("cd ${source} && find -type l | xargs -i %(install_cmd)s ${source}/{} ${target}/{}" % {"install_cmd": install_cmd}) and success
else:
success = client.execute_command("%(install_cmd)s ${source} ${target}" % {"install_cmd": install_cmd}) and success
return success
stdio = plugin_context.stdio
clients = plugin_context.clients
servers = cluster_config.servers
is_lib_repo = install_repository.name.endswith("-libs")
home_path_map = {}
for server in servers:
server_config = cluster_config.get_server_conf(server)
home_path_map[server] = server_config.get("home_path")
is_ln_install_mode = cluster_config.is_ln_install_mode()
# remote install repository
stdio.start_loading('Remote %s repository install' % install_repository)
stdio.verbose('Remote %s repository integrity check' % install_repository)
for server in servers:
client = clients[server]
remote_home_path = home_path_map[server]
install_file_items = install_plugin.file_map(install_repository).values()
stdio.verbose('%s %s repository integrity check' % (server, install_repository))
if is_ln_install_mode:
remote_obd_home = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
install_path = install_repository.repository_dir.replace(obd_home, remote_obd_home, 1)
else:
if is_lib_repo:
install_path = os.path.join(remote_home_path, 'lib')
else:
install_path = remote_home_path
client.execute_command('mkdir -p {}'.format(install_path))
remote_repository_data_path = os.path.join(install_path, '.data')
remote_repository_data = client.execute_command('cat %s' % remote_repository_data_path).stdout
stdio.verbose('%s %s install check' % (server, install_repository))
try:
yaml_loader = YamlLoader(stdio=stdio)
data = yaml_loader.load(remote_repository_data)
if not data:
stdio.verbose('%s %s need to be installed ' % (server, install_repository))
elif data == install_repository:
# Version sync. Check for damages (TODO)
stdio.verbose('%s %s has installed ' % (server, install_repository))
if not install_to_home_path():
stdio.error("Failed to install repository {} to {}".format(install_repository, remote_home_path))
return False
continue
else:
stdio.verbose('%s %s need to be updated' % (server, install_repository))
except:
stdio.exception('')
stdio.verbose('%s %s need to be installed ' % (server, install_repository))
stdio.verbose('%s %s installing' % (server, install_repository))
for file_item in install_file_items:
file_path = os.path.join(install_repository.repository_dir, file_item.target_path)
remote_file_path = os.path.join(install_path, file_item.target_path)
if file_item.type == InstallPlugin.FileItemType.DIR:
if os.path.isdir(file_path) and not client.put_dir(file_path, remote_file_path):
stdio.stop_loading('fail')
return False
else:
if not client.put_file(file_path, remote_file_path):
stdio.stop_loading('fail')
return False
if is_ln_install_mode:
# save data file for later comparing
client.put_file(install_repository.data_file_path, remote_repository_data_path)
# link files to home_path
install_to_home_path()
stdio.verbose('%s %s installed' % (server, install_repository.name))
stdio.stop_loading('succeed')
# check lib
lib_check = True
stdio.start_loading('Remote %s repository lib check' % check_repository)
for server in servers:
stdio.verbose('%s %s repository lib check' % (server, check_repository))
client = clients[server]
remote_home_path = home_path_map[server]
need_libs = set()
client.add_env('LD_LIBRARY_PATH', '%s/lib:' % remote_home_path, True)
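# Run ldd against each installed binary and collect any shared libraries
# reported as "not found"; a non-empty set means a lib repository is still
# required on this server.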
for file_item in check_file_map.values():
if file_item.type == InstallPlugin.FileItemType.BIN:
remote_file_path = os.path.join(remote_home_path, file_item.target_path)
ret = client.execute_command('ldd %s' % remote_file_path)
libs = re.findall('(/?[\w+\-/]+\.\w+[\.\w]+)[\s\\n]*\=\>[\s\\n]*not found', ret.stdout)
if not libs:
libs = re.findall('(/?[\w+\-/]+\.\w+[\.\w]+)[\s\\n]*\=\>[\s\\n]*not found', ret.stderr)
if not libs and not ret:
stdio.error('Failed to execute repository lib check.')
return
need_libs.update(libs)
if need_libs:
for lib in need_libs:
getattr(stdio, msg_lv)('%s %s requires: %s' % (server, check_repository, lib))
lib_check = False
client.add_env('LD_LIBRARY_PATH', '', True)
if msg_lv == 'error':
stdio.stop_loading('succeed' if lib_check else 'fail')
elif msg_lv == 'warn':
stdio.stop_loading('succeed' if lib_check else 'warn')
return plugin_context.return_true(checked=lib_check)
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
from _deploy import RsyncConfig
def rsync(plugin_context, *args, **kwargs):
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
clients = plugin_context.clients
rsync_configs = cluster_config.get_rsync_list()
if not rsync_configs:
return plugin_context.return_true()
stdio.start_loading("Synchronizing runtime dependencies")
succeed = True
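# Absolute target paths are rewritten as relative ones so that every rsync
# target lands under the server's home_path.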
for rsync_config in rsync_configs:
source_path = rsync_config.get(RsyncConfig.SOURCE_PATH)
target_path = rsync_config.get(RsyncConfig.TARGET_PATH)
if os.path.isabs(target_path):
rsync_config[RsyncConfig.TARGET_PATH] = os.path.normpath('./' + target_path)
sub_io = stdio.sub_io()
for server in cluster_config.servers:
server_config = cluster_config.get_server_conf(server)
client = clients[server]
home_path = server_config['home_path']
for rsync_config in rsync_configs:
source_path = rsync_config.get(RsyncConfig.SOURCE_PATH)
target_path = rsync_config.get(RsyncConfig.TARGET_PATH)
if os.path.isdir(source_path):
stdio.verbose('put local dir %s to %s: %s.' % (source_path, server, target_path))
if not client.put_dir(source_path, os.path.join(home_path, target_path), stdio=sub_io):
stdio.warn('failed to put local dir %s to %s: %s.' % (source_path, server, target_path))
succeed = False
elif os.path.exists(source_path):
stdio.verbose('put local file %s to %s: %s.' % (source_path, server, target_path))
if not client.put_file(source_path, os.path.join(home_path, target_path), stdio=sub_io):
stdio.warn('failed to put local file %s to %s: %s.' % (source_path, server, target_path))
succeed = False
else:
stdio.verbose('%s is not found.' % source_path)
if succeed:
stdio.stop_loading("succeed")
return plugin_context.return_true()
else:
stdio.stop_loading("fail")
return plugin_context.return_false()
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
def sync_cluster_config(plugin_context, *args, **kwargs):
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
for comp in ['oceanbase', 'oceanbase-ce']:
if comp in cluster_config.depends:
root_servers = {}
ob_config = cluster_config.get_depend_config(comp)
if not ob_config:
continue
odp_config = cluster_config.get_global_conf()
for server in cluster_config.get_depend_servers(comp):
config = cluster_config.get_depend_config(comp, server)
zone = config['zone']
if zone not in root_servers:
root_servers[zone] = '%s:%s' % (server.ip, config['mysql_port'])
depend_rs_list = ';'.join([root_servers[zone] for zone in root_servers])
cluster_config.update_global_conf('rs_list', depend_rs_list, save=False)
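# Inherit these settings from the depended-on observer config when the
# proxy does not set them explicitly (proxy key <- observer key).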
config_map = {
'observer_sys_password': 'proxyro_password',
'cluster_name': 'appname',
'observer_root_password': 'root_password'
}
for key in config_map:
ob_key = config_map[key]
if not odp_config.get(key) and ob_config.get(ob_key):
stdio.verbose("update config, key: {}, value: {}".format(key, ob_config.get(ob_key)))
cluster_config.update_global_conf(key, ob_config.get(ob_key), save=False)
break
......@@ -30,6 +30,23 @@ def check_opt(plugin_context, opt, *args, **kwargs):
server = opt['test_server']
obclient_bin = opt['obclient_bin']
mysqltest_bin = opt['mysqltest_bin']
reboot_retries = opt['reboot_retries']
if int(reboot_retries) <= 0:
stdio.error('invalid reboot-retries')
return
case_filter = opt.get('case_filter')
default_case_filter = './mysql_test/filter.py'
if case_filter is None and os.path.exists(default_case_filter):
stdio.verbose('case-filter not set and {} exists, use it'.format(default_case_filter))
opt['case_filter'] = default_case_filter
reboot_cases = opt.get('reboot_cases')
default_reboot_case = './mysql_test/rebootcases.py'
if reboot_cases is None and os.path.exists(default_reboot_case):
stdio.verbose('reboot-cases not set and {} exists, use it'.format(default_reboot_case))
opt['reboot_cases'] = default_reboot_case
if not server:
stdio.error('test server is None. please use `--test-server` to set')
......@@ -42,7 +59,7 @@ def check_opt(plugin_context, opt, *args, **kwargs):
if not ret:
mysqltest_bin = opt['mysqltest_bin'] = 'mysqltest'
if not LocalClient.execute_command('%s --help' % mysqltest_bin, stdio=stdio):
stdio.error('%s\n%s is not an executable file. please use `--mysqltest-bin` to set\nYou may not have mysqltest installed' % (ret.stderr, mysqltest_bin))
return
if 'suite_dir' not in opt or not os.path.exists(opt['suite_dir']):
......@@ -55,5 +72,37 @@ def check_opt(plugin_context, opt, *args, **kwargs):
if 'slb' in opt:
opt['slb_host'], opt['slb_id'] = opt['slb'].split(',')
if 'exclude' in opt and opt['exclude']:
opt['exclude'] = opt['exclude'].split(',')
cluster_config = plugin_context.cluster_config
is_obproxy = opt["component"].startswith("obproxy")
if is_obproxy:
intersection = list({'oceanbase', 'oceanbase-ce'}.intersection(set(cluster_config.depends)))
if not intersection:
stdio.warn('observer config not found in depends.')
return
ob_component = intersection[0]
global_config = cluster_config.get_depend_config(ob_component)
else:
global_config = cluster_config.get_global_conf()
cursor = opt['cursor']
opt['_enable_static_typing_engine'] = None
if '_enable_static_typing_engine' in global_config:
stdio.verbose('load engine from config')
opt['_enable_static_typing_engine'] = global_config['_enable_static_typing_engine']
else:
try:
sql = "select value from oceanbase.__all_virtual_sys_parameter_stat where name like '_enable_static_typing_engine';"
stdio.verbose('execute sql: {}'.format(sql))
cursor.execute(sql)
ret = cursor.fetchone()
stdio.verbose('query engine ret: {}'.format(ret))
if ret:
opt['_enable_static_typing_engine'] = ret.get('value')
except:
stdio.exception('')
stdio.verbose('_enable_static_typing_engine: {}'.format(opt['_enable_static_typing_engine']))
return plugin_context.return_true()
......@@ -21,25 +21,93 @@
from __future__ import absolute_import, division, print_function
import os
import sys
import re
from glob import glob
import tool
from mysqltest_lib import succtest
def get_variable_from_python_file(file_path, var_name, default_file=None, default_value=None, stdio=None):
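# Execute file_path as Python and pull var_name from its globals; on failure,
# fall back to the bundled default_file under mysqltest_lib.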
global_vars = {}
try:
stdio and stdio.verbose('read variable {} from {}'.format(var_name, file_path))
exec(open(file_path).read(), global_vars, global_vars)
except Exception as e:
stdio and stdio.warn(str(e))
if default_file:
try:
default_path = os.path.join(os.path.dirname(__file__), 'mysqltest_lib', default_file)
stdio and stdio.verbose('read variable {} from {}'.format(var_name, default_path))
exec(open(default_path).read(), global_vars, global_vars)
except Exception as ex:
stdio and stdio.warn(str(ex))
return global_vars.get(var_name, default_value)
def find_tag_test_with_file_pat(file_pattern, flag_pattern, tag, filelist):
for test in glob(file_pattern):
if "test_suite/" in test:
if os.path.dirname(test).split('/')[-2] == tag:
filelist.append(test)
continue
test_file = tool.FileUtil.open(test, 'rb')
line_num = 0
line = test_file.readline().decode('utf-8', 'ignore')
while line and line_num <= 30:
line_num += 1
matchobj = re.search(flag_pattern, line)
if matchobj:
tag_set = line.split(':')[1].split(',')
for tag_tmp in tag_set:
tag_t = tag_tmp.strip()
if tag.lower() == tag_t.lower():
filelist.append(test)
line = test_file.readline().decode('utf-8', 'ignore')
def find_tag_tests(opt, flag_pattern, tags):
filelist = []
for tag in tags:
test_pattern = os.path.join(opt['test_dir'], "*.test")
find_tag_test_with_file_pat(test_pattern, flag_pattern, tag, filelist)
test_pattern = os.path.join(opt['suite_dir'], "*/t/*.test")
find_tag_test_with_file_pat(test_pattern, flag_pattern, tag, filelist)
return filelist
def test_name(test_file):
if "test_suite/" in test_file:
suite_name = os.path.dirname(test_file).split('/')[-2]
base_name = os.path.basename(test_file).rsplit('.')[0]
return suite_name + '.' + base_name
else:
base_name = os.path.basename(test_file).rsplit('.')[0]
return base_name
def check_test(plugin_context, opt, *args, **kwargs):
stdio = plugin_context.stdio
cluster_config = plugin_context.cluster_config
tags = []
regress_suites = []
if opt.get('tags'):
tags = opt['tags'].split(',')
if opt.get('regress_suite'):
regress_suites = opt['regress_suite'].split(',')
test_set = []
has_test_point = False
basename = lambda path: os.path.basename(path)
dirname = lambda path: os.path.dirname(path)
if 'all' in opt and opt['all'] and os.path.isdir(os.path.realpath(opt['suite_dir'])):
opt['suite'] = ','.join(os.listdir(os.path.realpath(opt['suite_dir'])))
if 'psmall' in opt and opt['psmall']:
test_set = get_variable_from_python_file(
opt.get('psmall_test'), 'psmall_test', default_file='psmalltest.py', default_value=[], stdio=stdio)
opt['source_limit'] = get_variable_from_python_file(
opt.get('psmall_source'), 'psmall_source', default_file='psmallsource.py', default_value={}, stdio=stdio)
has_test_point = True
elif 'suite' not in opt or not opt['suite']:
if 'test_set' in opt and opt['test_set']:
test_set = opt['test_set'].split(',')
......@@ -64,17 +132,79 @@ def check_test(plugin_context, opt, *args, **kwargs):
opt['test_pattern'] = '*.test'
pat = os.path.join(path, opt['test_pattern'])
test_set_tmp = [suitename + '.' + basename(test).rsplit('.', 1)[0] for test in glob(pat)]
test_set.extend(test_set_tmp)
if "all" in opt and opt["all"]:
pat = os.path.join(opt['test_dir'], "*.test")
test_set_t = [basename(test).rsplit('.', 1)[0] for test in glob(pat)]
test_set.extend(test_set_t)
if opt["cluster_mode"]:
opt["filter"] = opt["cluster_mode"]
else:
opt["filter"] = 'c'
if opt.get("java"):
opt["filter"] = 'j'
if opt.get("ps"):
opt["filter"] = opt["filter"] + 'p'
opt['ps_protocol'] = True
if opt["component"].startswith("obproxy"):
opt["filter"] = 'proxy'
else:
test_zone = cluster_config.get_server_conf(opt['test_server'])['zone']
query = 'select zone, count(*) as a from oceanbase.__all_virtual_zone_stat group by region order by a desc limit 1'
try:
stdio.verbose('execute sql: {}'.format(query))
cursor = opt['cursor']
cursor.execute(query)
ret = cursor.fetchone()
except:
msg = 'execute sql exception: %s' % query
raise Exception(msg)
primary_zone = ret.get('zone', '')
if test_zone != primary_zone:
opt["filter"] = 'slave'
if regress_suites:
suite2tags = get_variable_from_python_file(opt.get('regress_suite_map'), 'suite2tags', default_file='regress_suite_map.py', default_value={}, stdio=stdio)
composite_suite = get_variable_from_python_file(opt.get('regress_suite_map'), 'composite_suite', default_file='regress_suite_map.py', default_value={}, stdio=stdio)
for suitename in regress_suites:
if suitename in composite_suite.keys():
regress_suite_list = composite_suite[suitename].split(',')
else:
regress_suite_list = [suitename]
for name in regress_suite_list:
if name in suite2tags.keys():
if suite2tags[name]:
tags.extend(suite2tags[name].split(','))
else:
tags.append(name)
tags = list(set(tags))
if tags:
stdio.verbose('running mysqltest by tag, all tags: {}'.format(tags))
support_test_tags = get_variable_from_python_file(
opt.get('test_tags'), 'test_tags', default_file='test_tags.py', default_value=[], stdio=stdio)
support_test_tags = list(set(support_test_tags).union(set(os.listdir(os.path.join(opt["suite_dir"])))))
diff_tags = list(set(tags).difference(set(support_test_tags)))
if len(diff_tags) > 0:
stdio.error('%s not in test_tags' % ','.join(diff_tags))
return plugin_context.return_false()
test_set_by_tag = [test_name(test) for test in find_tag_tests(opt, r"#[ \t]*tags[ \t]*:", tags)]
if has_test_point:
test_set = list(set(test_set).intersection(set(test_set_by_tag)))
else:
test_set = list(set(test_set_by_tag))
has_test_point = True
stdio.verbose('filter mode: {}'.format(opt["filter"]))
# exclude some tests.
if 'exclude' not in opt or not opt['exclude']:
opt['exclude'] = []
test_set = filter(lambda k: k not in opt['exclude'], test_set)
if 'filter' in opt and opt['filter']:
exclude_list = getattr(case_filter, '%s_list' % opt['filter'], [])
if opt.get('case_filter'):
exclude_list = get_variable_from_python_file(opt['case_filter'], var_name='%s_list' % opt['filter'],
default_file='case_filter.py', default_value=[], stdio=stdio)
else:
exclude_list = []
test_set = filter(lambda k: k not in exclude_list, test_set)
## re-sort when --all is passed so the cases run in a stable order
if 'all' in opt and opt['all'] == 'all':
test_set_suite = filter(lambda k: '.' in k, test_set)
......@@ -86,11 +216,30 @@ def check_test(plugin_context, opt, *args, **kwargs):
test_set = filter(lambda k: k not in succtest.succ_filter, test_set)
else:
test_set = sorted(test_set)
slb_host = opt.get('slb_host')
exec_id = opt.get('exec_id')
use_slb = all([slb_host is not None, exec_id is not None])
slices = opt.get('slices')
slice_idx = opt.get('slice_idx')
use_slices = all([slices is not None, slice_idx is not None])
if not use_slb and use_slices:
slices = int(slices)
slice_idx = int(slice_idx)
test_set = test_set[slice_idx::slices]
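# Round-robin slicing sketch: with slices=3 and slice_idx=1, a sorted set
# [t0, t1, t2, t3, t4, t5] reduces to test_set[1::3] == [t1, t4], so three
# parallel executors each take a disjoint third of the cases.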
if 'mode' in opt and opt['mode'] != 'both':
if opt['mode'] == 'oracle':
not_run = '_mysql'
# test_set = filter(lambda k: not k.endswith(not_run), test_set)
test_set = filter(lambda k: k.endswith('_oracle'), test_set)
if opt['mode'] == 'mysql':
not_run = '_oracle'
test_set = filter(lambda k: not k.endswith(not_run), test_set)
opt['test_set'] = list(set(test_set))
if 'slices' in opt and opt['slices'] and 'slice_idx' in opt and opt['slice_idx']:
slices = int(opt['slices'])
slice_idx = int(opt['slice_idx'])
test_set = test_set[slice_idx::slices]
opt['test_set'] = test_set
if opt.get('reboot_cases'):
reboot_cases = get_variable_from_python_file(opt['reboot_cases'], var_name='reboot_cases',
default_file='rebootcases.py', default_value=[], stdio=stdio)
opt['reboot_cases'] = list(set(test_set).intersection(set(reboot_cases)))
else:
opt['reboot_cases'] = []
return plugin_context.return_true(test_set=test_set)
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
def collect_log(plugin_context, env, test_name=None, *args, **kwargs):
cluster_config = plugin_context.cluster_config
clients = plugin_context.clients
stdio = plugin_context.stdio
if not env.get('collect_log', False):
stdio.verbose('collect_log is False')
return
if test_name is None:
case_results = env.get('case_results', [])
if case_results:
test_name = case_results[-1].get('name')
if test_name is None:
stdio.verbose('Undefined: test_name')
return
log_pattern = env.get('log_pattern', '*.log')
if not env.get('log_dir'):
log_dir = os.path.join(env['var_dir'], 'log')
else:
log_dir = env['log_dir']
is_obproxy = env["component"].startswith("obproxy")
ob_component = env["component"]
if is_obproxy:
intersection = list({'oceanbase', 'oceanbase-ce'}.intersection(set(cluster_config.depends)))
if not intersection:
stdio.warn('observer config not in the depends.')
return
ob_component = intersection[0]
ob_services = cluster_config.get_depend_servers(ob_component)
proxy_services = cluster_config.servers
else:
ob_services = cluster_config.servers
proxy_services = []
collect_components = env.get('collect_components')
if not collect_components:
collect_components = [ob_component]
else:
collect_components = collect_components.split(',')
if ob_component in collect_components:
for server in ob_services:
if is_obproxy:
server_config = cluster_config.get_depend_config(ob_component, server)
else:
server_config = cluster_config.get_server_conf(server)
ip = server.ip
port = server_config.get('mysql_port', 0)
client = clients[server]
home_path = server_config['home_path']
remote_path = os.path.join(home_path, 'log', log_pattern)
local_path = os.path.join(log_dir, test_name, '{}:{}'.format(ip, port))
stdio.start_loading('Collect log for {}'.format(server.name))
sub_io = stdio.sub_io()
client.get_dir(local_path, os.path.join(home_path, 'core.*'), stdio=sub_io)
if client.get_dir(local_path, remote_path, stdio=sub_io):
stdio.stop_loading('succeed')
else:
stdio.stop_loading('fail')
if 'obproxy' in collect_components:
if not is_obproxy:
stdio.warn('No obproxy detected.')
return
for server in proxy_services:
server_config = cluster_config.get_server_conf(server)
ip = server.ip
port = server_config.get('listen_port', 0)
client = clients[server]
home_path = server_config['home_path']
remote_path = os.path.join(home_path, 'log')
local_path = os.path.join(log_dir, test_name, '{}:{}'.format(ip, port))
stdio.start_loading('Collect obproxy log for {}'.format(server.name))
if client.get_dir(local_path, remote_path):
stdio.stop_loading('succeed')
else:
stdio.stop_loading('fail')
\ No newline at end of file
......@@ -24,12 +24,13 @@ import re
import os
import time
import shlex
import requests
import urllib
from subprocess import Popen, PIPE
from copy import deepcopy
from ssh import LocalClient
from tool import DirectoryUtil
from _stdio import FormtatText
inner_dir = os.path.split(__file__)[0]
inner_test_dir = os.path.join(inner_dir, 't')
......@@ -39,11 +40,11 @@ inner_suite_dir = os.path.join(inner_dir, 'test_suite')
class Arguments:
def add(self, k, v=None):
self.args.update({k:v})
self.args.update({k: v})
def __str__(self):
s = []
for k,v in self.args.items():
for k, v in self.args.items():
if v != None:
if re.match('^--\w', k):
s.append(' %s=%s' % (k, v))
......@@ -55,232 +56,391 @@ class Arguments:
def __init__(self, opt):
self.args = dict()
if 'connector' in opt and 'java' in opt and opt['java']:
if 'connector' in opt and opt.get('java'):
self.add('--connector', opt['connector'])
self.add('--host', opt['host'])
self.add('--port', opt['port'])
self.add('--tmpdir', opt['tmp_dir'])
self.add('--logdir', '%s/log' % opt['var_dir'])
if not opt.get('log_dir'):
log_dir = os.path.join(opt['var_dir'], 'log')
else:
log_dir = opt['log_dir']
self.add('--logdir', log_dir)
DirectoryUtil.mkdir(opt['tmp_dir'])
DirectoryUtil.mkdir('%s/log' % opt['var_dir'])
DirectoryUtil.mkdir(log_dir)
self.add('--silent')
# our mysqltest doesn't support this option
# self.add('--skip-safemalloc')
self.add('--user', 'root')
if 'user' in opt and opt['user']:
if opt.get('user'):
user = opt['user']
if 'connector' not in opt or opt['connector'] == 'ob':
user = user + '@' + opt['case_mode']
self.add('--user', user)
if 'password' in opt and opt['password']:
if opt.get('password'):
self.add('--password', opt['password'])
if 'full_user' in opt and opt['full_user']:
self.add('--full_username', opt['full_user'].replace('sys',opt['case_mode']))
if 'tenant' in opt and opt['tenant']:
if opt.get('full_user'):
self.add('--full_username', opt['full_user'].replace('sys', opt['case_mode']))
if opt.get('tenant'):
self.add('--user', 'root@' + opt['tenant'])
self.add('--password', '')
if 'cluster' in opt and opt['cluster']:
if opt.get('cluster'):
self.add('--full_username', 'root@' + opt['tenant'] + '#' + opt['cluster'])
else:
self.add('--full_username', 'root@' + opt['tenant'])
if 'rslist_url' in opt and opt['rslist_url']:
if opt.get('rslist_url'):
self.add('--rslist_url', opt['rslist_url'])
if 'database' in opt and opt['database']:
if opt.get('database'):
self.add('--database', opt['database'])
if 'charsetdsdir' in opt and opt['charsetdsdir']:
if opt.get('charsetdsdir'):
self.add('--character-sets-dir', opt['charsetdsdir'])
if 'basedir' in opt and opt['basedir']:
if opt.get('basedir'):
self.add('--basedir', opt['basedir'])
if 'use_px' in opt and opt['use_px']:
if opt.get('use_px'):
self.add('--use-px')
if 'force_explain_as_px' in opt and opt['force_explain_as_px']:
if opt.get('force_explain_as_px'):
self.add('--force-explain-as-px')
if 'force-explain-as-no-px' in opt:
self.add('--force-explain-as-no-px')
if 'mark_progress' in opt and opt['mark_progress']:
if opt.get('mark_progress'):
self.add('--mark-progress')
if 'ps_protocol' in opt and opt['ps_protocol']:
if opt.get('ps_protocol'):
self.add('--ps-protocol')
if 'sp_protocol' in opt and opt['sp_protocol']:
if opt.get('sp_protocol'):
self.add('--sp-protocol')
if 'view_protocol' in opt and opt['view_protocol']:
if opt.get('view_protocol'):
self.add('--view-protocol')
if 'cursor_protocol' in opt and opt['cursor_protocol']:
if opt.get('cursor_protocol'):
self.add('--cursor-protocol')
self.add('--timer-file', '%s/log/timer' % opt['var_dir'])
if 'compress' in opt and opt['compress']:
if opt.get('special_run'):
self.add('--disable-explain')
if opt.get('sp_hint'):
self.add('--sp-hint', '"%s"' % opt['sp_hint'])
if opt.get('sort_result'):
self.add('--sort-result')
self.add('--timer-file', os.path.join(log_dir, 'timer'))
if opt.get('compress'):
self.add('--compress')
if 'sleep' in opt and opt['sleep']:
if opt.get('sleep'):
self.add('--sleep', '%d' % opt['sleep'])
if 'max_connections' in opt and opt['max_connections']:
if opt.get('max_connections'):
self.add('--max-connections', '%d' % opt['max_connections'])
if 'test_file' in opt and opt['test_file']:
if opt.get('test_file'):
self.add('--test-file', opt['test_file'])
self.add('--tail-lines', ('tail_lines' in opt and opt['tail_lines']) or 20)
if 'oblog_diff' in opt and opt['oblog_diff']:
self.add('--tail-lines', (opt.get('tail_lines')) or 20)
if opt.get('oblog_diff'):
self.add('--oblog_diff')
if 'record' in opt and opt['record'] and 'record_file' in opt and opt['record_file']:
if opt.get('record') and opt.get('record_file'):
self.add('--record')
self.add('--result-file', opt['record_file'])
DirectoryUtil.mkdir(os.path.dirname(opt['record_file']))
else: # diff result & file
self.add('--result-file', opt['result_file'])
def _return(test, cmd, result):
return {'name' : test, 'ret' : result.code, 'output' : result.stdout, 'cmd' : cmd, 'errput': result.stderr}
return {'name': test, 'ret': result.code, 'output': result.stdout, 'cmd': cmd, 'errput': result.stderr}
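# Shape of the returned record (values are illustrative):
#   {'name': 'px.join', 'ret': 0, 'output': '...', 'cmd': 'mysqltest ...', 'errput': ''}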
def slb_request(case_name, exec_id, slb_host, op='lock', stdio=None):
slb_data = {'eid': exec_id, 'case': case_name}
slb_msg = {
'lock': (
'get lock for case {} successful.',
'get lock for case {} failed.'),
'success': (
'mark successful for case {} successful.',
'mark successful for case {} failed.')
}
assert op in slb_msg
try:
url = 'http://{slb_host}/farm/mysqltest/recorder/{op}.php'.format(slb_host=slb_host, op=op)
stdio.verbose('send request: {}, param: {}'.format(url, slb_data))
resp = requests.get(url, params=slb_data)
verbose_msg = 'response code: {}, content: {}'.format(resp.status_code, resp.content)
stdio.verbose(verbose_msg)
if resp.status_code == 200:
stdio.verbose(slb_msg[op][0].format(case_name))
return True
elif resp.status_code in (202, 300):
stdio.verbose(slb_msg[op][1].format(case_name))
return False
else:
stdio.warn(slb_msg[op][1].format(case_name) + verbose_msg)
return False
except:
stdio.warn('send request failed')
stdio.exception('')
return False
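# Typical flow (host and id are hypothetical): take the lock before running a
# case and mark it on success, so concurrent executors sharing one exec_id
# never run the same case twice:
#   if slb_request('basic', exec_id='20220101', slb_host='farm.example.com',
#                  op='lock', stdio=stdio):
#       ...  # run the case, then:
#       slb_request('basic', exec_id='20220101', slb_host='farm.example.com',
#                   op='success', stdio=stdio)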
def run_test(plugin_context, env, *args, **kwargs):
def return_true(**kw):
env['run_test_cases'] = run_test_cases
env['index'] = index
env['case_results'] = case_results
env['is_retry'] = is_retry
env['need_reboot'] = need_reboot
env['collect_log'] = collect_log
return plugin_context.return_true(**kw)
def run_test(plugin_context, test, env, *args, **kwargs):
cluster_config = plugin_context.cluster_config
clients = plugin_context.clients
stdio = plugin_context.stdio
stdio.start_loading('Running case: %s' % test)
test_ori = test
opt = {}
for key in env:
if key != 'cursor':
opt[key] = env[key]
opt['connector'] = 'ob'
opt['mysql_mode'] = True
mysqltest_bin = opt['mysqltest_bin'] if 'mysqltest_bin' in opt and opt['mysqltest_bin'] else 'mysqltest'
obclient_bin = opt['obclient_bin'] if 'obclient_bin' in opt and opt['obclient_bin'] else 'obclient'
soft = 3600
buffer = 0
if 'source_limit' in opt and opt['source_limit']:
if test_ori in opt['source_limit']:
soft = opt['source_limit'][test_ori]
elif 'g.default' in opt['source_limit']:
soft = opt['source_limit']['g.default']
if 'g.buffer' in opt['source_limit']:
buffer = opt['source_limit']['g.buffer']
case_timeout = soft + buffer
opt['filter'] = 'c'
if 'profile' in args:
opt['profile'] = True
opt['record'] = True
if 'ps' in args:
opt['filter'] = opt['filter'] + 'p'
if 'cluster-mode' in opt and opt['cluster-mode'] in ['slave', 'proxy']:
opt['filter'] = opt['cluster-mode']
# support explain select w/o px hint
# result files for force-explain-xxxx go under
# - explain_r/mysql
# all other result files go under
# - r/mysql
suffix = ''
opt_explain_dir = ''
if 'force-explain-as-px' in opt:
suffix = '.use_px'
opt_explain_dir = 'explain_r/'
elif 'force-explain-as-no-px' in opt:
suffix = '.no_use_px'
opt_explain_dir = 'explain_r/'
opt['case_mode'] = 'mysql'
if 'mode' not in opt:
opt['mode'] = 'both'
if opt['mode'] == 'mysql':
opt['case_mode'] = opt['mode']
if opt['mode'] == 'both':
if test.endswith('_mysql'):
opt['case_mode'] = 'mysql'
get_result_dir = lambda path: os.path.join(path, opt_explain_dir, opt['case_mode'])
opt['result_dir'] = get_result_dir(opt['result_dir'])
if opt['filter'] == 'slave':
opt['slave_cmp'] = 1
result_file = os.path.join(opt['result_dir'], test + suffix + '.slave.result')
if os.path.exists(result_file):
opt['slave_cmp'] = 0
opt['result_file'] = result_file
if len(test.split('.')) == 2:
suite_name, test = test.split('.')
opt['result_dir'] = get_result_dir(os.path.join(opt['suite_dir'], suite_name, 'r'))
opt['test_file'] = os.path.join(opt['suite_dir'], suite_name, 't', test + '.test')
if not os.path.isfile(opt['test_file']):
inner_test_file = os.path.join(inner_suite_dir, suite_name, 't', test + '.test')
if os.path.isfile(inner_test_file):
opt['test_file'] = inner_test_file
opt['result_dir'] = get_result_dir(os.path.join(inner_suite_dir, suite_name, 'r'))
else:
opt['test_file'] = os.path.join(opt['test_dir'], test + '.test')
if not os.path.isfile(opt['test_file']):
inner_test_file = os.path.join(inner_test_dir, test + '.test')
if os.path.isfile(inner_test_file):
opt['test_file'] = inner_test_file
opt['result_dir'] = get_result_dir(inner_result_dir)
opt['record_file'] = os.path.join(opt['result_dir'], test + suffix + '.record')
opt['result_file'] = os.path.join(opt['result_dir'], test + suffix + '.result')
if 'my_host' in opt or 'oracle_host' in opt:
# compare mode
pass
sys_pwd = cluster_config.get_global_conf().get('root_password', '')
exec_sql_cmd = "%s -h%s -P%s -uroot %s -A -Doceanbase -e" % (obclient_bin, opt['host'], opt['port'], ("-p'%s'" % sys_pwd) if sys_pwd else '')
server_engine_cmd = '''%s "select value from __all_virtual_sys_parameter_stat where name like '_enable_static_typing_engine';"''' % exec_sql_cmd
result = LocalClient.execute_command(server_engine_cmd, timeout=3600, stdio=stdio)
if not result:
stdio.error('engine failed, exit code %s. error msg: %s' % (result.code, result.stderr))
env = {
'OBMYSQL_PORT': str(opt['port']),
'OBMYSQL_MS0': str(opt['host']),
'OBMYSQL_PWD': str(opt['password']),
'OBMYSQL_USR': opt['user'],
'PATH': os.getenv('PATH')
}
if 'case_mode' in opt and opt['case_mode']:
env['TENANT'] = opt['case_mode']
if 'user' in opt and opt['user']:
env['OBMYSQL_USR'] = str(opt['user'] + '@' + opt['case_mode'])
else:
env['OBMYSQL_USR'] = 'root'
if 'java' in opt:
opt['connector'] = 'ob'
slb_host = env.get('slb_host')
exec_id = env.get('exec_id')
cursor = env.get('cursor')
run_test_cases = env.get('run_test_cases', [])
index = env.get('index', 0)
test_set = env.get('test_set', [])
case_results = env.get('case_results', [])
auto_retry = env.get('auto_retry')
is_retry = env.get('is_retry', False)
reboot_cases = env.get('reboot_cases', [])
need_reboot = env.get('need_reboot', False)
collect_all = env.get('collect_all', False)
collect_log = False
total_test_count = len(test_set)
while index < total_test_count:
test = test_set[index]
if test not in run_test_cases:
if slb_host and exec_id and not slb_request(test, exec_id=exec_id, slb_host=slb_host, stdio=stdio):
index += 1
continue
run_test_cases.append(test)
if test in reboot_cases:
need_reboot = True
if need_reboot:
need_reboot = False
return return_true(reboot=True)
retry_msg = "in auto retry mode" if is_retry else ""
label = FormtatText.info("[ RUN ]")
stdio.start_loading('%sRunning case: %s ( %s / %s ) %s' % (label, test, index+1, total_test_count, retry_msg))
test_name = test
opt = {}
for key in env:
if key != 'cursor':
opt[key] = env[key]
LocalClient.execute_command('%s "alter system set _enable_static_typing_engine = True;select sleep(2);"' % (exec_sql_cmd), stdio=stdio)
opt['connector'] = 'ob'
opt['mysql_mode'] = True
test_file_suffix = opt['test_file_suffix']
result_file_suffix = opt['result_file_suffix']
record_file_suffix = opt['record_file_suffix']
mysqltest_bin = opt.get('mysqltest_bin', 'mysqltest')
obclient_bin = opt.get('obclient_bin', 'obclient')
soft = 3600
buffer = 0
if opt.get('source_limit'):
if test_name in opt['source_limit']:
soft = opt['source_limit'][test_name]
elif 'g.default' in opt['source_limit']:
soft = opt['source_limit']['g.default']
if 'g.buffer' in opt['source_limit']:
buffer = opt['source_limit']['g.buffer']
case_timeout = soft + buffer
if opt.get('case_timeout'):
case_timeout = opt['case_timeout']
# support explain select w/o px hint
# result files for force-explain-xxxx go under
# - explain_r/mysql
# - explain_r/oracle
# all other result files go under
# - r/mysql
# - r/oracle
suffix = ''
opt_explain_dir = ''
if 'force-explain-as-px' in opt:
suffix = '.use_px'
opt_explain_dir = 'explain_r/'
elif 'force-explain-as-no-px' in opt:
suffix = '.no_use_px'
opt_explain_dir = 'explain_r/'
opt['case_mode'] = 'mysql'
if 'mode' not in opt:
opt['mode'] = 'both'
if opt['mode'] == 'mysql' or opt['mode'] == 'oracle':
opt['case_mode'] = opt['mode']
if opt['mode'] == 'both':
if test.endswith('_mysql'):
opt['case_mode'] = 'mysql'
if test.endswith('_oracle'):
opt['case_mode'] = 'oracle'
get_result_dir = lambda path: os.path.join(path, opt_explain_dir, opt['case_mode'])
if len(test.split('.')) == 2:
suite_name, test = test.split('.')
result_dir = get_result_dir(os.path.join(opt['result_dir'], suite_name, 'r'))
if os.path.exists(result_dir):
opt['result_dir'] = result_dir
else:
opt['result_dir'] = get_result_dir(os.path.join(opt['suite_dir'], suite_name, 'r'))
opt['record_dir'] = get_result_dir(os.path.join(opt['record_dir'], suite_name, 'r'))
opt['test_file'] = os.path.join(opt['suite_dir'], suite_name, 't', test + test_file_suffix)
if not os.path.isfile(opt['test_file']):
inner_test_file = os.path.join(inner_suite_dir, suite_name, 't', test + test_file_suffix)
if os.path.isfile(inner_test_file):
opt['test_file'] = inner_test_file
opt['result_dir'] = get_result_dir(os.path.join(inner_suite_dir, suite_name, 'r'))
start_time = time.time()
cmd = 'timeout %s %s %s' % (case_timeout, mysqltest_bin, str(Arguments(opt)))
try:
stdio.verbose('local execute: %s ' % cmd, end='')
p = Popen(shlex.split(cmd), env=env, stdout=PIPE, stderr=PIPE)
output, errput = p.communicate()
retcode = p.returncode
if retcode == 124:
output = ''
if 'source_limit' in opt and 'g.buffer' in opt['source_limit']:
errput = "%s secs out of soft limit (%s secs), sql may be hung, please check" % (opt['source_limit']['g.buffer'], case_timeout)
else:
opt['test_file'] = os.path.join(opt['test_dir'], test + test_file_suffix)
opt['record_dir'] = get_result_dir(os.path.join(opt['record_dir']))
if not os.path.isfile(opt['test_file']):
inner_test_file = os.path.join(inner_test_dir, test + test_file_suffix)
if os.path.isfile(inner_test_file):
opt['test_file'] = inner_test_file
opt['result_dir'] = get_result_dir(inner_result_dir)
else:
errput = "%s seconds timeout, sql may be hung, please check" % case_timeout
elif isinstance(errput, bytes):
errput = errput.decode(errors='replace')
except Exception as e:
errput = str(e)
output = ''
retcode = 255
verbose_msg = 'exited code %s' % retcode
if retcode:
verbose_msg += ', error output:\n%s' % errput
stdio.verbose(verbose_msg)
cost = time.time() - start_time
LocalClient.execute_command('%s "alter system set _enable_static_typing_engine = False;select sleep(2);"' % (exec_sql_cmd), stdio=stdio)
result = {"name" : test_ori, "ret" : retcode, "output" : output, "cmd" : cmd, "errput" : errput, 'cost': cost}
stdio.stop_loading('fail' if retcode else 'succeed')
return plugin_context.return_true(result=result)
opt['result_dir'] = get_result_dir(opt['result_dir'])
# owner
owner = "anonymous"
try:
cmd_t = "grep -E '#\s*owner\s*:' " + opt['test_file'] + " | awk -F':' '{print $2}' | head -n 1"
p = Popen(cmd_t, stdout=PIPE, stderr=PIPE, shell=True)
output, errput = p.communicate()
owner = output.decode("utf-8").strip()
except:
stdio.verbose("fail open %s" % (opt['test_file']))
opt['record_file'] = os.path.join(opt['record_dir'], test + suffix + record_file_suffix)
opt['result_file'] = os.path.join(opt['result_dir'], test + suffix + result_file_suffix)
if opt['filter'] == 'slave':
opt['slave_cmp'] = 1
result_file = os.path.join(opt['result_dir'], test + suffix + '.slave' + result_file_suffix)
if os.path.exists(result_file):
opt['slave_cmp'] = 0
opt['result_file'] = result_file
if 'my_host' in opt or 'oracle_host' in opt:
# compare mode
pass
sys_pwd = cluster_config.get_global_conf().get('root_password', '')
exec_sql_cmd = "%s -h%s -P%s -uroot %s -A -Doceanbase -e" % (obclient_bin, opt['host'], opt['port'], ("-p'%s'" % sys_pwd) if sys_pwd else '')
server_engine_cmd = '''%s "select value from __all_virtual_sys_parameter_stat where name like '_enable_static_typing_engine';"''' % exec_sql_cmd
result = LocalClient.execute_command(server_engine_cmd, timeout=3600, stdio=stdio)
stdio.verbose('query engine result: {}'.format(result.stdout))
if not result:
stdio.error('engine failed, exit code %s. error msg: %s' % (result.code, result.stderr))
update_env = {
'OBMYSQL_PORT': str(opt['port']),
'OBMYSQL_MS0': str(opt['host']),
'OBMYSQL_PWD': str(opt['password']),
'OBMYSQL_USR': opt['user'],
'PATH': os.getenv('PATH'),
'OBSERVER_DIR': cluster_config.get_server_conf(opt['test_server'])['home_path']
}
test_env = deepcopy(os.environ.copy())
test_env.update(update_env)
if opt.get('case_mode'):
test_env['TENANT'] = opt['case_mode']
if opt.get('user'):
test_env['OBMYSQL_USR'] = str(opt['user'] + '@' + opt['case_mode'])
else:
test_env['OBMYSQL_USR'] = 'root'
if 'java' in opt:
opt['connector'] = 'ob'
if opt['_enable_static_typing_engine'] is not None:
ret = None
try:
sql = "select value from oceanbase.__all_virtual_sys_parameter_stat where name like '_enable_static_typing_engine';"
cursor.execute(sql)
ret = cursor.fetchone()
except:
pass
if ret and str(ret.get('value')).lower() != str(opt['_enable_static_typing_engine']).lower():
LocalClient.execute_command('%s "alter system set _enable_static_typing_engine = %s;select sleep(2);"' % (exec_sql_cmd, opt['_enable_static_typing_engine']), stdio=stdio)
start_time = time.time()
cmd = 'timeout %s %s %s' % (case_timeout, mysqltest_bin, str(Arguments(opt)))
try:
stdio.verbose('local execute: %s ' % cmd)
p = Popen(shlex.split(cmd), env=test_env, stdout=PIPE, stderr=PIPE)
output, errput = p.communicate()
retcode = p.returncode
if retcode == 124:
output = ''
if 'source_limit' in opt and 'g.buffer' in opt['source_limit']:
errput = "%s secs out of soft limit (%s secs), sql may be hung, please check" % (opt['source_limit']['g.buffer'], case_timeout)
else:
errput = "%s seconds timeout, sql may be hung, please check" % case_timeout
elif isinstance(errput, bytes):
errput = errput.decode(errors='replace')
except Exception as e:
errput = str(e)
output = ''
retcode = 255
cost = time.time() - start_time
case_info = "%s %s ( %f s )" % (test_name, owner, cost)
patterns = ['output', 'NAME', 'SORT', 'SCAN', 'LIMIT', 'EXCHANGE', 'GET', 'FUNCTION', 'MERGE', 'JOIN', 'MATERIAL',
'DISTINCT', 'SUBPLAN', 'UNION|ALL', 'EXPRESSION', 'SCALAR', 'HASH', 'VALUES', 'DELETE', 'result',
'reject', '=====', '-------', 'conds', 'output', 'access', 'GROUP', 'DELETE', 'UPDATE', 'INSERT',
'CONNECT', 'nil', 'values', 'COUNT', '^$']
count = 0
# skip result comparison for liboblog
if re.search("liboblog_r", errput):
stdio.verbose("do nothing for liboblog")
elif (opt['filter'] == 'slave' and opt['slave_cmp'] == 1) or opt['filter'] == 'j' or opt['filter'] == 'jp':
diff = errput.split('\n')
for line in diff:
match = 0
if re.search(r"^\+", line) or re.search(r"^\-", line):
for pattern in patterns:
if re.search(pattern, line):
match = match + 1
continue
if match == 0:
count = count + 1
break
if count == 0:
# handle the case where the result file is missing in slave/java mode
if re.search(r"\+", errput):
stdio.verbose('ignore explain plan diff')
retcode = 0
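# Sketch of the filter above: a diff made up solely of plan-shaped lines such
# as `+ SORT` or `- HASH JOIN` matches `patterns`, leaves count == 0, and the
# failure is downgraded to success; any `+`/`-` line matching no pattern keeps
# the original non-zero retcode.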
result = {"name": test_name, "ret": retcode, "output": output, "cmd": cmd, "errput": errput, 'cost': cost}
stdio.stop_loading('fail' if retcode else 'succeed')
stdio.verbose('exited code %s' % retcode)
if retcode:
# verbose_msg += ', error output:\n%s' % errput
stdio.print(errput)
case_status = FormtatText.error("[ FAILED ]")
else:
case_status = FormtatText.success('[ OK ]')
stdio.print("%s%s" % (case_status, case_info))
if retcode == 0 and slb_host and exec_id:
slb_request(test_name, exec_id=exec_id, slb_host=slb_host, op='success', stdio=stdio)
if retcode == 0:
# success
case_results.append(result)
index += 1
is_retry = False
elif is_retry or not auto_retry:
# failed and no chance to retry
case_results.append(result)
index += 1
is_retry = False
need_reboot = True
collect_log = collect_all
else:
# retry
is_retry = True
need_reboot = True
return return_true(finished=True)
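# Control-flow summary: each return_true() checkpoint persists the loop state
# (index, run_test_cases, case_results, is_retry, need_reboot, collect_log)
# back into `env`, so the caller can reboot the cluster and re-enter run_test
# where it left off. A failing case with auto_retry pending sets
# is_retry/need_reboot and is re-run once after the reboot; a second failure
# is recorded and the loop moves on.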
......@@ -28,7 +28,7 @@ global_ret = True
def destroy(plugin_context, *args, **kwargs):
def clean(server, path):
client = clients[server]
ret = client.execute_command('rm -fr %s/' % (path))
ret = client.execute_command('rm -fr %s/' % (path), timeout=-1)
if not ret:
global global_ret
global_ret = False
......
......@@ -4,4 +4,5 @@
mode: 755
- src_path: ./home/admin/obagent/conf
target_path: conf
type: dir
\ No newline at end of file
type: dir
install_method: cp
\ No newline at end of file
......@@ -29,6 +29,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
stdio = plugin_context.stdio
global_ret = True
force = getattr(plugin_context.options, 'force', False)
clean = getattr(plugin_context.options, 'clean', False)
stdio.start_loading('Initializes obagent work home')
for server in cluster_config.servers:
server_config = cluster_config.get_server_conf(server)
......@@ -37,7 +38,18 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
stdio.verbose('%s init cluster work home', server)
if force:
need_clean = force
if clean and not force:
if client.execute_command('bash -c \'if [[ "$(ls -d {0} 2>/dev/null)" != "" && ! -O {0} ]]; then exit 0; else exit 1; fi\''.format(home_path)):
owner = client.execute_command("ls -ld %s | awk '{print $3}'" % home_path).stdout.strip()
global_ret = False
err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner)
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg))
continue
need_clean = True
if need_clean:
client.execute_command("pkill -9 -u `whoami` -f '^%s/bin/monagent -c conf/monagent.yaml'" % home_path)
ret = client.execute_command('rm -fr %s' % home_path)
if not ret:
global_ret = False
......@@ -55,10 +67,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=home_path)))
continue
if not (client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib,conf,log}'" % (home_path)) \
and client.execute_command("cp -r %s/conf %s/" % (remote_repository_dir, home_path)) \
and client.execute_command("if [ -d %s/bin ]; then ln -fs %s/bin/* %s/bin; fi" % (remote_repository_dir, remote_repository_dir, home_path)) \
and client.execute_command("if [ -d %s/lib ]; then ln -fs %s/lib/* %s/lib; fi" % (remote_repository_dir, remote_repository_dir, home_path))):
if not client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib,conf,log}'" % home_path):
global_ret = False
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.PATH_ONLY.format(path=home_path)))
......
......@@ -226,7 +226,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
server_config[key] = ''
if isinstance(server_config[key], bool):
server_config[key] = str(server_config[key]).lower()
if server_config.get('crypto_method', 'plain').lower() == 'aes':
secret_key = generate_aes_b64_key()
crypto_path = server_config.get('crypto_path', 'conf/.config_secret.key')
......@@ -247,20 +247,8 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
if not client.put_file(tf.name, path.replace(repository_dir, home_path)):
stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server))
stdio.stop_loading('fail')
return
for path in glob(os.path.join(repository_dir, 'conf/*/*')):
if path.endswith('.yaml'):
continue
if os.path.isdir(path):
ret = client.put_dir(path, path.replace(repository_dir, home_path))
else:
ret = client.put_file(path, path.replace(repository_dir, home_path))
if not ret:
stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server))
stdio.stop_loading('fail')
return
return
config = {
'log': {
'level': server_config.get('log_level', 'info'),
......@@ -287,7 +275,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
if not client.put_file(tf.name, os.path.join(home_path, 'conf/monagent.yaml')):
stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server))
stdio.stop_loading('fail')
return
return
log_path = '%s/log/monagent_stdout.log' % home_path
client.execute_command('cd %s;nohup %s/bin/monagent -c conf/monagent.yaml >> %s 2>&1 & echo $! > %s' % (home_path, home_path, log_path, remote_pid_path))
......
......@@ -38,16 +38,6 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
repository_dir = dest_repository.repository_dir
kwargs['repository_dir'] = repository_dir
for server in cluster_config.servers:
client = clients[server]
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path))
stop_plugin = search_py_script_plugin([cur_repository], 'stop')[cur_repository]
start_plugin = search_py_script_plugin([dest_repository], 'start')[dest_repository]
connect_plugin = search_py_script_plugin([dest_repository], 'connect')[dest_repository]
......
......@@ -243,18 +243,6 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server))
stdio.stop_loading('fail')
return
for path in glob(os.path.join(repository_dir, 'conf/*/*')):
if path.endswith('.yaml'):
continue
if os.path.isdir(path):
ret = client.put_dir(path, path.replace(repository_dir, home_path))
else:
ret = client.put_file(path, path.replace(repository_dir, home_path))
if not ret:
stdio.error(EC_OBAGENT_SEND_CONFIG_FAILED.format(server=server))
stdio.stop_loading('fail')
return
config = {
'log': {
......
......@@ -28,7 +28,7 @@ global_ret = True
def destroy(plugin_context, *args, **kwargs):
def clean(server, path):
client = clients[server]
ret = client.execute_command('rm -fr %s/' % (path))
ret = client.execute_command('rm -fr %s/' % (path), timeout=-1)
if not ret:
# print stderr
global global_ret
......
......@@ -28,7 +28,9 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
stdio = plugin_context.stdio
global_ret = True
force = getattr(plugin_context.options, 'force', False)
clean = getattr(plugin_context.options, 'clean', False)
stdio.start_loading('Initializes obproxy work home')
for server in cluster_config.servers:
server_config = cluster_config.get_server_conf(server)
client = clients[server]
......@@ -36,15 +38,25 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
stdio.verbose('%s init cluster work home', server)
if force:
need_clean = force
if clean and not force:
if client.execute_command('bash -c \'if [[ "$(ls -d {0} 2>/dev/null)" != "" && ! -O {0} ]]; then exit 0; else exit 1; fi\''.format(home_path)):
owner = client.execute_command("ls -ld %s | awk '{print $3}'" % home_path).stdout.strip()
global_ret = False
err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner)
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg))
continue
need_clean = True
if need_clean:
client.execute_command("pkill -9 -u `whoami` -f '^bash {home_path}/obproxyd.sh {home_path} {ip} {port} daemon$'".format(home_path=home_path, ip=server.ip, port=server_config.get('listen_port')))
client.execute_command("pkill -9 -u `whoami` -f '^%s/bin/obproxy --listen_port %s'" % (home_path, server_config.get('listen_port')))
ret = client.execute_command('rm -fr %s' % home_path)
if not ret:
global_ret = False
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=ret.stderr))
continue
if not (client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib}'" % (home_path)) \
and client.execute_command("if [ -d %s/bin ]; then ln -fs %s/bin/* %s/bin; fi" % (remote_repository_dir, remote_repository_dir, home_path)) \
and client.execute_command("if [ -d %s/lib ]; then ln -fs %s/lib/* %s/lib; fi" % (remote_repository_dir, remote_repository_dir, home_path))):
if not client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib}'" % home_path):
global_ret = False
stdio.error(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.NOT_EMPTY.format(path=home_path)))
......
......@@ -22,7 +22,7 @@ from __future__ import absolute_import, division, print_function
import os
import time
from copy import deepcopy
stdio = None
......@@ -49,7 +49,7 @@ def confirm_port(client, pid, port):
def confirm_command(client, pid, command):
command = command.replace(' ', '').strip()
if client.execute_command('bash -c \'cmd=`cat /proc/%s/cmdline`; if [ "$cmd" != "%s" ]; then exot 1; fi\'' % (pid, command)):
if client.execute_command('bash -c \'cmd=`cat /proc/%s/cmdline`; if [ "$cmd" != "%s" ]; then exit 1; fi\'' % (pid, command)):
return True
return False
......@@ -86,6 +86,26 @@ def obproxyd(home_path, client, ip, port):
return False
class EnvVariables(object):
def __init__(self, environments, client):
self.environments = environments
self.client = client
self.env_done = {}
def __enter__(self):
for env_key, env_value in self.environments.items():
self.env_done[env_key] = self.client.get_env(env_key)
self.client.add_env(env_key, env_value, True)
def __exit__(self, *args, **kwargs):
for env_key, env_value in self.env_done.items():
if env_value is not None:
self.client.add_env(env_key, env_value, True)
else:
self.client.del_env(env_key)
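# Usage sketch: temporarily export LD_LIBRARY_PATH for one command and restore
# whatever value (or absence) the remote session had before:
#   with EnvVariables({'LD_LIBRARY_PATH': '/home/admin/obproxy/lib:'}, client):
#       client.execute_command(start_cmd)  # start_cmd is illustrative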
def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False, *args, **kwargs):
global stdio
cluster_config = plugin_context.cluster_config
......@@ -152,13 +172,6 @@ def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False,
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
if client.execute_command("bash -c 'if [ -f %s/bin/obproxy ]; then exit 1; else exit 0; fi;'" % home_path):
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path))
pid_path[server] = "%s/run/obproxy-%s-%s.pid" % (home_path, server.ip, server_config["listen_port"])
if use_parameter:
......@@ -187,6 +200,7 @@ def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False,
clusters_cmd[server] = 'cd %s; %s' % (home_path, real_cmd[server])
for server in clusters_cmd:
environments = deepcopy(cluster_config.get_environments())
client = clients[server]
server_config = cluster_config.get_server_conf(server)
port = int(server_config["listen_port"])
......@@ -204,9 +218,10 @@ def start(plugin_context, local_home_path, repository_dir, need_bootstrap=False,
return plugin_context.return_false()
stdio.verbose('starting %s obproxy', server)
client.add_env('LD_LIBRARY_PATH', '%s/lib:' % server_config['home_path'], True)
ret = client.execute_command(clusters_cmd[server])
client.add_env('LD_LIBRARY_PATH', '', True)
if 'LD_LIBRARY_PATH' not in environments:
environments['LD_LIBRARY_PATH'] = '%s/lib:' % server_config['home_path']
with EnvVariables(environments, client):
ret = client.execute_command(clusters_cmd[server])
if not ret:
stdio.stop_loading('fail')
stdio.error('failed to start %s obproxy: %s' % (server, ret.stderr))
......
......@@ -38,16 +38,6 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
repository_dir = dest_repository.repository_dir
kwargs['repository_dir'] = repository_dir
for server in cluster_config.servers:
client = clients[server]
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path))
stop_plugin = search_py_script_plugin([cur_repository], 'stop')[cur_repository]
start_plugin = search_py_script_plugin([dest_repository], 'start')[dest_repository]
connect_plugin = search_py_script_plugin([dest_repository], 'connect')[dest_repository]
......
......@@ -29,11 +29,13 @@ from _errno import EC_OBSERVER_CAN_NOT_MIGRATE_IN
def parse_size(size):
_bytes = 0
if isinstance(size, str):
size = size.strip()
if not isinstance(size, str) or size.isdigit():
_bytes = int(size)
else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
match = re.match(r'^([1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)]
return _bytes
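# Worked examples: parse_size('1024') == 1024, parse_size('10G') == 10 << 30
# == 10737418240, while parse_size('10X') finds no match and raises an
# AttributeError that get_parsed_option() below turns into a readable error.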
......@@ -59,6 +61,16 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
if not value:
value = default
return value
def get_parsed_option(key, default=''):
value = get_option(key=key, default=default)
try:
parsed_value = parse_size(value)
except:
stdio.exception("")
raise Exception("Invalid option {}: {}".format(key, value))
return parsed_value
def error(*arg, **kwargs):
stdio.error(*arg, **kwargs)
stdio.stop_loading('fail')
......@@ -70,6 +82,11 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
stdio = plugin_context.stdio
options = plugin_context.options
mode = get_option('mode', 'mysql').lower()
if mode not in ['mysql', 'oracle']:
error('No such tenant mode: %s.\n--mode must be `mysql` or `oracle`' % mode)
return
name = get_option('tenant_name', 'test')
unit_name = '%s_unit' % name
pool_name = '%s_pool' % name
......@@ -153,7 +170,7 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
exception('execute sql exception: %s' % sql)
return
units_id = set()
units_id = {}
res = cursor.fetchall()
for row in res:
if str(row['name']) == unit_name:
......@@ -162,7 +179,8 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
continue
for zone in str(row['zone_list']).replace(';', ',').split(','):
if zone in zones:
units_id.add(row['unit_config_id'])
unit_config_id = row['unit_config_id']
units_id[unit_config_id] = units_id.get(unit_config_id, 0) + 1
break
sql = 'select * from oceanbase.__all_unit_config order by name'
......@@ -178,8 +196,8 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
if str(row['name']) == unit_name:
unit_name += '1'
if row['unit_config_id'] in units_id:
cpu_total -= row['max_cpu']
mem_total -= row['max_memory']
cpu_total -= row['max_cpu'] * units_id[row['unit_config_id']]
mem_total -= row['max_memory'] * units_id[row['unit_config_id']]
# disk_total -= row['max_disk_size']
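# Example of the multiplicity fix above: if three resource pools placed in the
# target zones share unit_config_id 1001 with max_cpu=4, units_id[1001] == 3
# and the loop now subtracts 12 CPUs from cpu_total, where the old set-based
# logic subtracted only 4.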
MIN_CPU = 2
......@@ -194,13 +212,18 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
if disk_total < MIN_DISK_SIZE:
return error('%s: resource not enough: disk space less than %s' % (zone_list, format_size(MIN_DISK_SIZE)))
try:
max_memory = get_parsed_option('max_memory', mem_total)
max_disk_size = get_parsed_option('max_disk_size', disk_total)
min_memory = get_parsed_option('min_memory', max_memory)
except Exception as e:
error(e)
return
max_cpu = get_option('max_cpu', cpu_total)
max_memory = parse_size(get_option('max_memory', mem_total))
max_iops = get_option('max_iops', MIN_IOPS)
max_disk_size = parse_size(get_option('max_disk_size', disk_total))
max_session_num = get_option('max_session_num', MIN_SESSION_NUM)
min_cpu = get_option('min_cpu', max_cpu)
min_memory = parse_size(get_option('min_memory', max_memory))
min_iops = get_option('min_iops', max_iops)
if cpu_total < max_cpu:
......@@ -258,7 +281,7 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
stdio.verbose('execute sql: %s' % sql)
cursor.execute(sql)
except:
exception('faild to crate pool, execute sql exception: %s' % sql)
exception('failed to create pool, execute sql exception: %s' % sql)
return
# create tenant
......@@ -274,8 +297,12 @@ def create_tenant(plugin_context, cursor, *args, **kwargs):
sql += ", default tablegroup ='%s'" % tablegroup
if locality:
sql += ", locality = '%s'" % locality
set_mode = "ob_compatibility_mode = '%s'" % mode
if variables:
sql += "set %s" % variables
sql += "set %s, %s" % (variables, set_mode)
else:
sql += "set %s" % set_mode
try:
stdio.verbose('execute sql: %s' % sql)
cursor.execute(sql)
......
......@@ -28,7 +28,7 @@ global_ret = True
def destroy(plugin_context, *args, **kwargs):
def clean(server, path):
client = clients[server]
ret = client.execute_command('rm -fr %s/' % (path))
ret = client.execute_command('rm -fr %s/' % (path), timeout=-1)
if not ret:
# print stderr
global global_ret
......
......@@ -4,4 +4,5 @@
mode: 755
- src_path: ./home/admin/oceanbase/etc
target_path: etc
type: dir
\ No newline at end of file
type: dir
install_method: cp
\ No newline at end of file
......@@ -34,6 +34,7 @@ def critical(*arg, **kwargs):
global_ret = False
stdio.error(*arg, **kwargs)
def init_dir(server, client, key, path, link_path=None):
if force:
ret = client.execute_command('rm -fr %s' % path)
......@@ -66,6 +67,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
stdio = plugin_context.stdio
servers_dirs = {}
force = getattr(plugin_context.options, 'force', False)
clean = getattr(plugin_context.options, 'clean', False)
stdio.verbose('option `force` is %s' % force)
stdio.start_loading('Initializes observer work home')
for server in cluster_config.servers:
......@@ -102,9 +104,20 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
'server': server,
'key': key,
}
stdio.verbose('%s initializes observer work home' % server)
if force:
need_clean = force
if clean and not force:
if client.execute_command('bash -c \'if [[ "$(ls -d {0} 2>/dev/null)" != "" && ! -O {0} ]]; then exit 0; else exit 1; fi\''.format(home_path)):
owner = client.execute_command("ls -ld %s | awk '{print $3}'" % home_path).stdout.strip()
err_msg = ' {} is not empty, and the owner is {}'.format(home_path, owner)
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=err_msg))
continue
need_clean = True
if need_clean:
client.execute_command(
"pkill -9 -u `whoami` -f '^%s/bin/observer -p %s'" % (home_path, server_config['mysql_port']))
ret = client.execute_command('rm -fr %s/*' % home_path)
if not ret:
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=ret.stderr))
......@@ -117,12 +130,10 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
continue
else:
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.CREATE_FAILED.format(path=home_path)))
ret = client.execute_command('bash -c "mkdir -p %s/{etc,admin,.conf,log,bin,lib}"' % home_path) \
and client.execute_command("if [ -d %s/bin ]; then ln -fs %s/bin/* %s/bin; fi" % (remote_repository_dir, remote_repository_dir, home_path)) \
and client.execute_command("if [ -d %s/lib ]; then ln -fs %s/lib/* %s/lib; fi" % (remote_repository_dir, remote_repository_dir, home_path))
ret = client.execute_command('bash -c "mkdir -p %s/{etc,admin,.conf,log,bin,lib}"' % home_path)
if ret:
data_path = server_config['data_dir']
if force:
if need_clean:
ret = client.execute_command('rm -fr %s/*' % data_path)
if not ret:
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=data_path)))
......@@ -165,7 +176,6 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='data dir', msg=InitDirFailedErrorMessage.PATH_ONLY.format(path=data_path)))
else:
critical(EC_FAIL_TO_INIT_PATH.format(server=server, key='home path', msg=InitDirFailedErrorMessage.PERMISSION_DENIED.format(path=home_path)))
if global_ret:
stdio.stop_loading('succeed')
plugin_context.return_true()
......
......@@ -28,6 +28,8 @@ from copy import deepcopy
from _errno import EC_OBSERVER_FAIL_TO_START
from collections import OrderedDict
def config_url(ocp_config_server, appname, cid):
cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname)
......@@ -57,7 +59,27 @@ def init_config_server(ocp_config_server, appname, cid, force_delete, stdio):
if post(register_to_config_url) != 200:
return False
return cfg_url
class EnvVariables(object):
def __init__(self, environments, client):
self.environments = environments
self.client = client
self.env_done = {}
def __enter__(self):
for env_key, env_value in self.environments.items():
self.env_done[env_key] = self.client.get_env(env_key)
self.client.add_env(env_key, env_value, True)
def __exit__(self, *args, **kwargs):
for env_key, env_value in self.env_done.items():
if env_value is not None:
self.client.add_env(env_key, env_value, True)
else:
self.client.del_env(env_key)
def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
cluster_config = plugin_context.cluster_config
......@@ -75,12 +97,12 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
if obconfig_url:
if not appname or not cluster_id:
stdio.error('need appname and cluster_id')
return
return
try:
cfg_url = init_config_server(obconfig_url, appname, cluster_id, getattr(options, 'force_delete', False), stdio)
if not cfg_url:
stdio.error('failed to register cluster. %s may have been registered in %s.' % (appname, obconfig_url))
return
return
except:
stdio.exception('failed to register cluster')
return
......@@ -98,19 +120,12 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
if client.execute_command("bash -c 'if [ -f %s/bin/observer ]; then exit 1; else exit 0; fi;'" % home_path):
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path))
if not server_config.get('data_dir'):
server_config['data_dir'] = '%s/store' % home_path
if client.execute_command('ls %s/ilog/' % server_config['data_dir']).stdout.strip():
need_bootstrap = False
remote_pid_path = '%s/run/observer.pid' % home_path
remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip()
if remote_pid:
......@@ -125,10 +140,10 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
cmd = []
if use_parameter:
not_opt_str = {
'zone': '-z',
not_opt_str = OrderedDict({
'mysql_port': '-p',
'rpc_port': '-P',
'zone': '-z',
'nodaemon': '-N',
'appname': '-n',
'cluster_id': '-c',
......@@ -138,9 +153,9 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
'ipv6': '-6',
'mode': '-m',
'scn': '-f'
}
})
not_cmd_opt = [
'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
'home_path', 'obconfig_url', 'root_password', 'proxyro_password',
'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir', '$_zone_idc'
]
get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key]
......@@ -153,21 +168,25 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
opt_str.append('obconfig_url=\'%s\'' % cfg_url)
else:
cmd.append(rs_list_opt)
cmd.append('-o %s' % ','.join(opt_str))
for key in not_opt_str:
if key in server_config:
value = get_value(key)
cmd.append('%s %s' % (not_opt_str[key], value))
cmd.append('-o %s' % ','.join(opt_str))
else:
cmd.append('-p %s' % server_config['mysql_port'])
clusters_cmd[server] = 'cd %s; %s/bin/observer %s' % (home_path, home_path, ' '.join(cmd))
for server in clusters_cmd:
environments = deepcopy(cluster_config.get_environments())
client = clients[server]
server_config = cluster_config.get_server_conf(server)
stdio.verbose('starting %s observer', server)
client.add_env('LD_LIBRARY_PATH', '%s/lib:' % server_config['home_path'], True)
ret = client.execute_command(clusters_cmd[server])
client.add_env('LD_LIBRARY_PATH', '', True)
if 'LD_LIBRARY_PATH' not in environments:
environments['LD_LIBRARY_PATH'] = '%s/lib:' % server_config['home_path']
with EnvVariables(environments, client):
ret = client.execute_command(clusters_cmd[server])
if not ret:
stdio.stop_loading('fail')
stdio.error(EC_OBSERVER_FAIL_TO_START.format(server=server) + ': ' + ret.stderr)
......
......@@ -20,7 +20,6 @@
from __future__ import absolute_import, division, print_function
import json
import time
import requests
......@@ -29,9 +28,11 @@ def config_url(ocp_config_server, appname, cid):
cfg_url = '%s&Action=ObRootServiceInfo&ObCluster=%s' % (ocp_config_server, appname)
proxy_cfg_url = '%s&Action=GetObProxyConfig&ObRegionGroup=%s' % (ocp_config_server, appname)
# command to clear the cluster info registered under the config URL
cleanup_config_url_content = '%s&Action=DeleteObRootServiceInfoByClusterName&ClusterName=%s' % (ocp_config_server, appname)
cleanup_config_url_content = '%s&Action=DeleteObRootServiceInfoByClusterName&ClusterName=%s' % (
ocp_config_server, appname)
# command to register the cluster info to the config URL
register_to_config_url = '%s&Action=ObRootServiceRegister&ObCluster=%s&ObClusterId=%s' % (ocp_config_server, appname, cid)
register_to_config_url = '%s&Action=ObRootServiceRegister&ObCluster=%s&ObClusterId=%s' % (
ocp_config_server, appname, cid)
return cfg_url, cleanup_config_url_content, register_to_config_url
......@@ -40,7 +41,7 @@ def get_port_socket_inode(client, port):
cmd = "bash -c 'cat /proc/net/{tcp,udp}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port
res = client.execute_command(cmd)
if not res or not res.stdout.strip():
return False
return []
return res.stdout.strip().split('\n')
......@@ -62,7 +63,6 @@ def stop(plugin_context, *args, **kwargs):
clients = plugin_context.clients
stdio = plugin_context.stdio
global_config = cluster_config.get_global_conf()
global_config = cluster_config.get_global_conf()
appname = global_config['appname'] if 'appname' in global_config else None
cluster_id = global_config['cluster_id'] if 'cluster_id' in global_config else None
obconfig_url = global_config['obconfig_url'] if 'obconfig_url' in global_config else None
......@@ -118,6 +118,14 @@ def stop(plugin_context, *args, **kwargs):
servers = tmp_servers
count -= 1
if count and servers:
if count == 5:
for server in servers:
data = servers[server]
server_config = cluster_config.get_server_conf(server)
client = clients[server]
client.execute_command(
"if [[ -d /proc/%s ]]; then pkill -9 -u `whoami` -f '%s/bin/observer -p %s';fi" %
(data['pid'], server_config['home_path'], server_config['mysql_port']))
time.sleep(3)
if servers:
......
......@@ -126,7 +126,7 @@ class Exector(object):
class Upgrader(object):
def __init__(self, plugin_context, search_py_script_plugin, apply_param_plugin, upgrade_ctx, upgrade_repositories, local_home_path, exector_path):
def __init__(self, plugin_context, search_py_script_plugin, apply_param_plugin, upgrade_ctx, upgrade_repositories, local_home_path, exector_path, install_repository_to_servers, unuse_lib_repository):
self._search_py_script_plugin = search_py_script_plugin
self.apply_param_plugin = apply_param_plugin
self.plugin_context = plugin_context
......@@ -138,6 +138,8 @@ class Upgrader(object):
self._start_plugin = None
self._stop_plugin = None
self._display_plugin = None
self.install_repository_to_servers = install_repository_to_servers
self.unuse_lib_repository = unuse_lib_repository
self.local_home_path = local_home_path
self.exector_path = exector_path
self.components = plugin_context.components
......@@ -386,18 +388,6 @@ class Upgrader(object):
time.sleep(3)
return True
def _replace_repository(self, servers, repository):
repository_dir = repository.repository_dir
for server in servers:
client = self.clients[server]
server_config = self.cluster_config.get_server_conf(server)
home_path = server_config['home_path']
remote_home_path = client.execute_command('echo ${OBD_HOME:-"$HOME"}/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(self.local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path))
def upgrade_zone(self):
zones_servers = {}
for server in self.cluster_config.servers:
......@@ -426,7 +416,8 @@ class Upgrader(object):
self.stdio.start_loading('Upgrade')
repository = self.repositories[self.next_stage]
repository_dir = repository.repository_dir
self._replace_repository(self.cluster_config.servers, repository)
self.install_repository_to_servers(self.components, self.cluster_config, repository, self.clients,
self.unuse_lib_repository)
if not self.stop_plugin(self.components, self.clients, self.cluster_config, self.plugin_context.cmd, self.plugin_context.options, self.stdio):
self.stdio.stop_loading('stop_loading', 'fail')
......@@ -473,7 +464,8 @@ class Upgrader(object):
self.stop_zone(zone)
self.stdio.print('upgrade zone "%s"' % zone)
self._replace_repository(self.cluster_config.servers, repository)
self.install_repository_to_servers(self.components, self.cluster_config, repository, self.clients, self.unuse_lib_repository)
if pre_zone:
self.apply_param_plugin(self.repositories[self.route_index - 1])
......@@ -524,7 +516,7 @@ class Upgrader(object):
return self._exec_script_dest_only('upgrade_post_checker.py')
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args, **kwargs):
def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, install_repository_to_servers, unuse_lib_repository, *args, **kwargs):
components = plugin_context.components
clients = plugin_context.clients
cluster_config = plugin_context.cluster_config
......@@ -535,9 +527,18 @@ def upgrade(plugin_context, search_py_script_plugin, apply_param_plugin, *args,
upgrade_ctx = kwargs.get('upgrade_ctx')
local_home_path = kwargs.get('local_home_path')
upgrade_repositories = kwargs.get('upgrade_repositories')
exector_path = getattr(options, 'exector_path', '/usr/obd/lib/executer')
upgrader = Upgrader(plugin_context, search_py_script_plugin, apply_param_plugin, upgrade_ctx, upgrade_repositories, local_home_path, exector_path)
exector_path = getattr(options, 'executer_path', '/usr/obd/lib/executer')
upgrader = Upgrader(
plugin_context=plugin_context,
search_py_script_plugin=search_py_script_plugin,
apply_param_plugin=apply_param_plugin,
upgrade_ctx=upgrade_ctx,
upgrade_repositories=upgrade_repositories,
local_home_path=local_home_path,
exector_path=exector_path,
install_repository_to_servers=install_repository_to_servers,
unuse_lib_repository=unuse_lib_repository)
if upgrader.run():
if upgrader.route_index >= len(upgrader.route):
upgrader.display_plugin(components, clients, cluster_config, cmd, options, stdio, upgrader.cursor, *args, **kwargs)
......
......@@ -212,7 +212,7 @@ def pre_test(plugin_context, cursor, odp_cursor, *args, **kwargs):
user = get_option('user', 'root')
password = get_option('password', '')
warehouses = get_option('warehouses', cpu_total * 20)
load_workers = get_option('load_workers', int(min(min_cpu, (max_memory >> 30) / 2)))
load_workers = get_option('load_workers', int(max(min(min_cpu, (max_memory >> 30) / 2), 1)))
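# The max(..., 1) guard keeps load_workers from rounding down to 0 on hosts with few CPUs or little memory.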
terminals = get_option('terminals', min(cpu_total * 15, warehouses * 10))
run_mins = get_option('run_mins', 10)
test_only = get_option('test_only')
......
#!/bin/bash
if [ -n "$BASH_VERSION" ]; then
complete -F _obd_complete_func obd
fi
function _obd_complete_func
{
local cur prev cmd obd_cmd cluster_cmd tenant_cmd mirror_cmd test_cmd devmode_cmd
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
obd_cmd="mirror cluster test update repo"
cluster_cmd="autodeploy tenant start deploy redeploy restart reload destroy stop edit-config list display upgrade chst check4ocp change-repo"
tenant_cmd="create drop"
mirror_cmd="clone create list update enable disable"
repo_cmd="list"
test_cmd="mysqltest sysbench tpch"
if [ -f "${OBD_HOME:-"$HOME"}/.obd/.dev_mode" ]; then
obd_cmd="$obd_cmd devmode"
devmode_cmd="enable disable"
fi
if [[ ${cur} == * ]] ; then
case "${prev}" in
obd);&
test);&
cluster);&
tenant);&
mirror);&
devmode);&
repo)
cmd=$(eval echo \$"${prev}_cmd")
COMPREPLY=( $(compgen -W "${cmd}" -- ${cur}) )
;;
clone);&
-p|--path);&
-c|--config)
filename=${cur##*/}
dirname=${cur%*$filename}
res=`ls -p $dirname 2>/dev/null | sed "s#^#$dirname#"`
compopt -o nospace
COMPREPLY=( $(compgen -o filenames -W "${res}" -- ${cur}) )
;;
*)
if [ "$prev" == "list" ]; then
return 0
else
prev="${COMP_WORDS[COMP_CWORD-2]}"
obd_home=${OBD_HOME:-~}
if [[ "$prev" == "cluster" || "$prev" == "test" || "$prev" == "tenant" ]]; then
res=`ls -p $obd_home/.obd/cluster 2>/dev/null | sed "s#/##"`
compopt -o nospace
COMPREPLY=( $(compgen -o filenames -W "${res}" -- ${cur}) )
fi
fi
esac
return 0
}
function _obd_reply_current_files() {
filename=${cur##*/}
dirname=${cur%*$filename}
res=`ls -a -p $dirname 2>/dev/null | sed "s#^#$dirname#"`
compopt -o nospace
COMPREPLY=( $(compgen -o filenames -W "${res}" -- ${cur}) )
}
function _obd_reply_deploy_names() {
res=`ls -p $obd_home/.obd/cluster 2>/dev/null | sed "s#/##"`
COMPREPLY=( $(compgen -o filenames -W "${res}" -- ${cur}) )
}
function _obd_reply_tool_commands() {
cmd_yaml=$obd_home/.obd/plugins/commands/0.1/command_template.yaml
sections=`grep -En '^[0-9a-zA-Z]+:' $cmd_yaml`
for line in $sections
do
num=`echo $line | awk -F ':' '{print $1}'`
section=`echo $line | awk -F ':' '{print $2}'`
if [[ "$section" == "commands" ]];then
start_num=$num
elif [[ "$start_num" != "" && "$end_num" == "" ]];then
end_num=$num
fi
done
if [[ "$end_num" == "" ]]; then
end_num=`cat $cmd_yaml | wc -l`
fi
total_num=$((end_num - start_num))
res=`grep -E '^commands:' $cmd_yaml -A $total_num | grep name | awk -F 'name:' '{print $2}' | sort -u | tr '\n' ' '`
COMPREPLY=( $(compgen -o filenames -W "${res}" -- ${cur}) )
}
function _obd_complete_func
{
local all_cmds
declare -A all_cmds
COMPREPLY=()
obd_home=${OBD_HOME:-~}
env_file=${obd_home}/.obd/.obd_environ
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
all_cmds["obd"]="mirror cluster test update repo"
all_cmds["obd cluster"]="autodeploy tenant start deploy redeploy restart reload destroy stop edit-config list display upgrade chst check4ocp reinstall"
all_cmds["obd cluster *"]="_obd_reply_deploy_names"
all_cmds["obd cluster tenant"]="create drop"
all_cmds["obd cluster tenant *"]="_obd_reply_deploy_names"
all_cmds["obd mirror"]="clone create list update enable disable"
all_cmds["obd mirror clone"]="_obd_reply_current_files"
all_cmds["obd repo"]="list"
all_cmds["obd test"]="mysqltest sysbench tpch tpcc"
all_cmds["obd test *"]="_obd_reply_deploy_names"
if [ -f "$env_file" ] && [ "$(grep '"OBD_DEV_MODE": "1"' "$env_file")" != "" ]; then
all_cmds["obd"]="${all_cmds[obd]} devmode env tool"
all_cmds["obd devmode"]="enable disable"
all_cmds["obd tool"]="command db_connect"
all_cmds["obd tool db_connect"]="_obd_reply_deploy_names"
all_cmds["obd tool command"]="_obd_reply_deploy_names"
all_cmds["obd tool command *"]="_obd_reply_tool_commands"
all_cmds["obd env"]="set unset show clear"
fi
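# Lookup walk (illustrative trace): completing `obd cluster start <TAB>` misses the
# key "obd cluster start", but "start" is listed in all_cmds["obd cluster"], so that
# word is rewritten to '*' and "obd cluster *" resolves to _obd_reply_deploy_names.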
case $prev in
list)
return 0
;;
-p|--path);&
-c|--config)
_obd_reply_current_files
;;
*)
valid_len=$COMP_CWORD
words=( ${COMP_WORDS[@]::valid_len} )
index=valid_len
while (( index >= 1 )); do
target="${words[*]}"
cmd=${all_cmds[$target]}
if [[ "$cmd" != "" ]]
then
if [[ $cmd =~ ^_obd_reply.* ]]
then
$cmd
break
else
COMPREPLY=( $(compgen -W "${cmd}" -- ${cur}) )
break
fi
fi
index=$(( index - 1))
tmp=${words[*]::index}
[[ "$tmp" != "" ]] && parent_cmd=${all_cmds[$tmp]}
if [[ "$parent_cmd" =~ ^_obd_reply.* || " $parent_cmd " =~ " ${words[index]} " ]]; then
words[index]='*'
else
break
fi
done
;;
esac
}
\ No newline at end of file
requests==2.24.0
rpmfile==1.0.8
paramiko==2.10.1
paramiko==2.7.2
backports.lzma==0.0.14
MySQL-python==1.2.5
ruamel.yaml.clib==0.2.2
......@@ -11,3 +11,6 @@ enum34==1.1.6
progressbar==2.5
halo==0.0.30
pycryptodome==3.10.1
inspect2==0.1.2
six==1.16.0
pyinstaller==3.6
\ No newline at end of file
rpmfile==1.0.8
paramiko==2.10.1
paramiko==2.7.2
requests==2.25.1
PyMySQL==1.0.2
ruamel.yaml==0.17.4
......@@ -8,3 +8,6 @@ prettytable==2.1.0
progressbar==2.5
halo==0.0.31
pycryptodome==3.10.1
inspect2==0.1.2
six==1.16.0
pyinstaller>=4.3
......@@ -43,7 +43,7 @@ function pacakge_executer27()
rm -fr executer27
mkdir -p ./executer27/{site-packages,bin}
cd executer27
pip install mysql-connector-python==8.0.21 --target=./site-packages -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com || exit 1
pip install -r ../../executer27-requirements.txt --target=./site-packages -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com || exit 1
pyinstaller -F ../../executer27.py
if [ -e dist/executer27 ]; then
cp dist/executer27 ./bin/executer
......@@ -76,7 +76,6 @@ function get_python()
{
if [ `id -u` != 0 ] ; then
echo "Please use root to run"
exit 1
fi
obd_dir=`dirname $0`
......@@ -122,6 +121,7 @@ function build()
mkdir -p $BUILD_DIR/mirror/remote
wget https://mirrors.aliyun.com/oceanbase/OceanBase.repo -O $BUILD_DIR/mirror/remote/OceanBase.repo
cat _cmd.py | sed "s/<CID>/$CID/" | sed "s/<B_BRANCH>/$BRANCH/" | sed "s/<B_TIME>/$DATE/" | sed "s/<DEBUG>/$OBD_DUBUG/" | sed "s/<VERSION>/$VERSION/" > obd.py
sed -i "s|<DOC_LINK>|$OBD_DOC_LINK|" _errno.py
pip install -r $req_fn.txt || exit 1
pip install -r plugins-$req_fn.txt --target=$BUILD_DIR/lib/site-packages || exit 1
pyinstaller --hidden-import=decimal --hidden-import=configparser -F obd.py || exit 1
......@@ -129,15 +129,14 @@ function build()
cp -r plugins $BUILD_DIR/plugins
cp -r config_parser $BUILD_DIR/config_parser
rm -fr $BUILD_DIR/plugins/oceanbase-ce
rm -fr $BUILD_DIR/plugins/obproxy-ce
rm -fr $BUILD_DIR/config_parser/oceanbase-ce
rm -fr /usr/obd /usr/bin/obd
cp ./dist/obd /usr/bin/obd
cp -fr ./profile/* /etc/profile.d/
cd $BUILD_DIR/plugins && ln -s oceanbase oceanbase-ce && mv obproxy obproxy-ce
cd $BUILD_DIR/config_parser && ln -s oceanbase oceanbase-ce
mv $BUILD_DIR /usr/obd
rm -fr dist
cd $BUILD_DIR/plugins && ln -s oceanbase oceanbase-ce && cp -rf obproxy/3.1.0 obproxy-ce/ && cp -rf $DIR/plugins/obproxy-ce/* obproxy-ce/
cd $BUILD_DIR/config_parser && ln -s oceanbase oceanbase-ce
chmod +x /usr/bin/obd
chmod -R 755 /usr/obd/*
chown -R root:root /usr/obd/*
......@@ -168,4 +167,4 @@ case "x$1" in
get_python
build
;;
esac
esac
\ No newline at end of file
......@@ -55,6 +55,7 @@ if [ "$OBD_DUBUG" ]; then
VERSION=$VERSION".`date +%s`"
fi
cat _cmd.py | sed "s/<CID>/$CID/" | sed "s/<B_BRANCH>/$BRANCH/" | sed "s/<B_TIME>/$DATE/" | sed "s/<DEBUG>/$OBD_DUBUG/" | sed "s/<VERSION>/$VERSION/" > obd.py
sed -i "s|<DOC_LINK>|$OBD_DOC_LINK|" _errno.py
mkdir -p $BUILD_DIR/SOURCES ${RPM_BUILD_ROOT}
mkdir -p $BUILD_DIR/SOURCES/{site-packages}
mkdir -p ${RPM_BUILD_ROOT}/usr/bin
......@@ -82,7 +83,8 @@ mkdir -p ${RPM_BUILD_ROOT}/usr/obd/lib/executer
\cp -rf ${RPM_DIR}/executer27 ${RPM_BUILD_ROOT}/usr/obd/lib/executer/
\cp -rf $BUILD_DIR/SOURCES/example ${RPM_BUILD_ROOT}/usr/obd/
cd ${RPM_BUILD_ROOT}/usr/obd/plugins && ln -s oceanbase oceanbase-ce && mv obproxy obproxy-ce
cd ${RPM_BUILD_ROOT}/usr/obd/config_parser && ln -s oceanbase oceanbase-ce
rm -rf obproxy
cd ${RPM_BUILD_ROOT}/usr/obd/config_parser && ln -s oceanbase oceanbase-ce
# package information
%files
......@@ -116,6 +118,12 @@ echo -e 'Installation of obd finished successfully\nPlease source /etc/profile.d
#/sbin/chkconfig obd on
%changelog
* Wed Aug 17 2022 obd 1.5.0
- new features: obd cluster reinstall
- new features: obd tool
- new features: support rsync
- new keyword: include
- more option: obd test mysqltest
* Sun Jul 17 2022 obd 1.4.0
- new features: support tpcc
- new features: support mysqltest record
......
......@@ -20,12 +20,14 @@
from __future__ import absolute_import, division, print_function
import os
import sys
import enum
import getpass
import os
import warnings
from copy import deepcopy
from glob import glob
from subprocess32 import Popen, PIPE
# paramiko imports the cryptography module, which emits unsupported-version warnings under Python 2
warnings.filterwarnings("ignore")
......@@ -33,14 +35,18 @@ from paramiko import AuthenticationException, SFTPClient
from paramiko.client import SSHClient, AutoAddPolicy
from paramiko.ssh_exception import NoValidConnectionsError, SSHException
from tool import DirectoryUtil
from multiprocessing.queues import Empty
from multiprocessing import Queue, Process
from multiprocessing.pool import ThreadPool
from tool import COMMAND_ENV, DirectoryUtil
from _stdio import SafeStdio
__all__ = ("SshClient", "SshConfig", "LocalClient")
__all__ = ("SshClient", "SshConfig", "LocalClient", "ConcurrentExecutor")
class SshConfig(object):
class SshConfig(object):
def __init__(self, host, username='root', password=None, key_filename=None, port=22, timeout=30):
self.host = host
......@@ -68,11 +74,60 @@ class SshReturn(object):
return self.__bool__()
class LocalClient(object):
class FutureSshReturn(SshReturn):
def __init__(self, client, command, timeout=None, stdio=None):
self.client = client
self.command = command
self.timeout = timeout
self.stdio = stdio if stdio else client.stdio
if self.stdio:
self.stdio = self.stdio.sub_io()
self.finsh = False
super(FutureSshReturn, self).__init__(127, '', '')
def set_return(self, ssh_return):
self.code = ssh_return.code
self.stdout = ssh_return.stdout
self.stderr = ssh_return.stderr
self.finsh = True
class ConcurrentExecutor(object):
def __init__(self, workers=None):
self.workers = workers
self.futures = []
def add_task(self, client, command, timeout=None, stdio=None):
ret = FutureSshReturn(client, command, timeout, stdio=stdio)
self.futures.append(ret)
return ret
@staticmethod
def execute(future):
client = SshClient(future.client.config, future.stdio)
future.set_return(client.execute_command(future.command, timeout=future.timeout))
return future
def submit(self):
rets = []
pool = ThreadPool(processes=self.workers)
try:
results = pool.map(ConcurrentExecutor.execute, tuple(self.futures))
for r in results:
rets.append(r)
finally:
pool.close()
self.futures = []
return rets
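# Editor's usage sketch; ssh_clients is an assumed iterable of SshClient objects.
executor = ConcurrentExecutor(workers=4)
for ssh_client in ssh_clients:
    executor.add_task(ssh_client, 'uptime', timeout=30)
for ret in executor.submit():
    print(ret.code, ret.stdout)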
class LocalClient(SafeStdio):
@staticmethod
def execute_command(command, env=None, timeout=None, stdio=None):
stdio and getattr(stdio, 'verbose', print)('local execute: %s ' % command, end='')
stdio.verbose('local execute: %s ' % command, end='')
try:
p = Popen(command, env=env, shell=True, stdout=PIPE, stderr=PIPE)
output, error = p.communicate(timeout=timeout)
......@@ -82,14 +137,14 @@ class LocalClient(object):
verbose_msg = 'exited code %s' % code
if code:
verbose_msg += ', error output:\n%s' % error
stdio and getattr(stdio, 'verbose', print)(verbose_msg)
stdio.verbose(verbose_msg)
except Exception as e:
output = ''
error = str(e)
code = 255
verbose_msg = 'exited code 255, error output:\n%s' % error
stdio and getattr(stdio, 'verbose', print)(verbose_msg)
stdio and getattr(stdio, 'exception', print)('')
stdio.verbose(verbose_msg)
stdio.exception('')
return SshReturn(code, output, error)
@staticmethod
......@@ -100,7 +155,12 @@ class LocalClient(object):
@staticmethod
def put_dir(local_dir, remote_dir, stdio=None):
if LocalClient.execute_command('mkdir -p %s && cp -fr %s %s' % (remote_dir, os.path.join(local_dir, '*'), remote_dir), stdio=stdio):
if os.path.isdir(local_dir):
local_dir = os.path.join(local_dir, '*')
if os.path.exists(os.path.dirname(local_dir)) and not glob(local_dir):
stdio.verbose("%s is empty" % local_dir)
return True
if LocalClient.execute_command('mkdir -p %s && cp -fr %s %s' % (remote_dir, local_dir, remote_dir), stdio=stdio):
return True
return False
......@@ -113,7 +173,18 @@ class LocalClient(object):
return LocalClient.put_dir(remote_path, local_path, stdio=stdio)
class SshClient(object):
class RemoteTransporter(enum.Enum):
CLIENT = 0
RSYNC = 1
def __lt__(self, other):
return self.value < other.value
def __gt__(self, other):
return self.value > other.value
class SshClient(SafeStdio):
def __init__(self, config, stdio=None):
self.config = config
......@@ -122,11 +193,19 @@ class SshClient(object):
self.is_connected = False
self.ssh_client = SSHClient()
self.env_str = ''
self._remote_transporter = None
self.task_queue = None
self.result_queue = None
if self._is_local():
self.env = deepcopy(os.environ.copy())
self.env = COMMAND_ENV.copy()
else:
self.env = {'PATH': '/sbin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:'}
self._update_env()
super(SshClient, self).__init__()
def _init_queue(self):
self.task_queue = Queue()
self.result_queue = Queue()
def _update_env(self):
env = []
......@@ -136,18 +215,23 @@ class SshClient(object):
self.env_str = ''.join(env)
def add_env(self, key, value, rewrite=False, stdio=None):
stdio = stdio if stdio else self.stdio
if key not in self.env or not self.env[key] or rewrite:
stdio and getattr(stdio, 'verbose', print)('%s@%s set env %s to \'%s\'' % (self.config.username, self.config.host, key, value))
stdio.verbose('%s@%s set env %s to \'%s\'' % (self.config.username, self.config.host, key, value))
self.env[key] = value
else:
stdio and getattr(stdio, 'verbose', print)('%s@%s append \'%s\' to %s' % (self.config.username, self.config.host, value, key))
stdio.verbose('%s@%s append \'%s\' to %s' % (self.config.username, self.config.host, value, key))
self.env[key] += value
self._update_env()
def get_env(self, key):
def get_env(self, key, stdio=None):
return self.env[key] if key in self.env else None
def del_env(self, key, stdio=None):
if key in self.env:
stdio.verbose('%s@%s delete env %s' % (self.config.username, self.config.host, key))
del self.env[key]
self._update_env()
def __str__(self):
return '%s@%s:%d' % (self.config.username, self.config.host, self.config.port)
......@@ -160,33 +244,32 @@ class SshClient(object):
def _login(self, stdio=None):
if self.is_connected:
return True
stdio = stdio if stdio else self.stdio
try:
self.ssh_client.set_missing_host_key_policy(AutoAddPolicy())
self.ssh_client.connect(
self.config.host,
port=self.config.port,
username=self.config.username,
password=self.config.password,
key_filename=self.config.key_filename,
self.config.host,
port=self.config.port,
username=self.config.username,
password=self.config.password,
key_filename=self.config.key_filename,
timeout=self.config.timeout
)
self.is_connected = True
except AuthenticationException:
stdio and getattr(stdio, 'exception', print)('')
stdio and getattr(stdio, 'critical', print)('%s@%s username or password error' % (self.config.username, self.config.host))
stdio.exception('')
stdio.critical('%s@%s username or password error' % (self.config.username, self.config.host))
except NoValidConnectionsError:
stdio and getattr(stdio, 'exception', print)('')
stdio and getattr(stdio, 'critical', print)('%s@%s connect failed: time out' % (self.config.username, self.config.host))
stdio.exception('')
stdio.critical('%s@%s connect failed: time out' % (self.config.username, self.config.host))
except Exception as e:
stdio and getattr(stdio, 'exception', print)('')
stdio and getattr(stdio, 'critical', print)('%s@%s connect failed: %s' % (self.config.username, self.config.host, e))
stdio.exception('')
stdio.critical('%s@%s connect failed: %s' % (self.config.username, self.config.host, e))
return self.is_connected
def _open_sftp(self, stdio=None):
if self.sftp:
return True
if self._login(stdio):
if self._login(stdio=stdio):
SFTPClient.from_transport(self.ssh_client.get_transport())
self.sftp = self.ssh_client.open_sftp()
return True
......@@ -195,11 +278,11 @@ class SshClient(object):
def connect(self, stdio=None):
if self._is_local():
return True
return self._login(stdio)
return self._login(stdio=stdio)
def reconnect(self, stdio=None):
self.close(stdio)
return self.connect(stdio)
self.close(stdio=stdio)
return self.connect(stdio=stdio)
def close(self, stdio=None):
if self._is_local():
......@@ -212,13 +295,11 @@ class SshClient(object):
def __del__(self):
self.close()
def _execute_command(self, command, retry, stdio):
def _execute_command(self, command, timeout=None, retry=3, stdio=None):
if not self._login(stdio):
return SshReturn(255, '', 'connect failed')
stdio = stdio if stdio else self.stdio
try:
stdin, stdout, stderr = self.ssh_client.exec_command(command)
stdin, stdout, stderr = self.ssh_client.exec_command(command, timeout=timeout)
output = stdout.read().decode(errors='replace')
error = stderr.read().decode(errors='replace')
if output:
......@@ -230,152 +311,332 @@ class SshClient(object):
code, stdout = 1, ''
if code:
verbose_msg = 'exited code %s, error output:\n%s' % (code, error)
stdio and getattr(stdio, 'verbose', print)(verbose_msg)
stdio.verbose(verbose_msg)
return SshReturn(code, stdout, error)
except SSHException as e:
if retry:
self.close()
return self._execute_command(command, retry-1, stdio)
else:
stdio and getattr(stdio, 'exception', print)('')
stdio and getattr(stdio, 'critical', print)('%s@%s connect failed: %s' % (self.config.username, self.config.host, e))
stdio.exception('')
stdio.critical('%s@%s connect failed: %s' % (self.config.username, self.config.host, e))
raise e
except Exception as e:
stdio and getattr(stdio, 'exception', print)('')
stdio and getattr(stdio, 'critical', print)('%s@%s connect failed: %s' % (self.config.username, self.config.host, e))
stdio.exception('')
stdio.critical('%s@%s connect failed: %s' % (self.config.username, self.config.host, e))
raise e
def execute_command(self, command, stdio=None):
def execute_command(self, command, timeout=None, stdio=None):
if timeout is None:
timeout = self.config.timeout
elif timeout <= 0:
timeout = None
if self._is_local():
return LocalClient.execute_command(command, self.env, self.config.timeout, stdio=stdio)
return LocalClient.execute_command(command, self.env, timeout, stdio=stdio)
stdio = stdio if stdio else self.stdio
verbose_msg = '%s execute: %s ' % (self.config, command)
stdio and getattr(stdio, 'verbose', print)(verbose_msg, end='')
stdio.verbose(verbose_msg, end='')
command = '%s %s;echo -e "\n$?\c"' % (self.env_str, command.strip(';'))
return self._execute_command(command, 3, stdio=stdio)
return self._execute_command(command, retry=3, timeout=timeout, stdio=stdio)
@property
def disable_rsync(self):
return COMMAND_ENV.get("OBD_DISABLE_RSYNC") == "1"
@property
def remote_transporter(self):
if self._remote_transporter is not None:
return self._remote_transporter
_transporter = RemoteTransporter.CLIENT
if not self._is_local() and self._remote_transporter is None:
if not self.config.password and not self.disable_rsync:
ret = LocalClient.execute_command('rsync -h', stdio=self.stdio)
if ret:
_transporter = RemoteTransporter.RSYNC
self._remote_transporter = _transporter
self.stdio.verbose("current remote_transporter {}".format(self._remote_transporter))
return self._remote_transporter
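# Editor's sketch: rsync is only chosen for key-based logins when `rsync -h`
# succeeds locally; to pin the paramiko transport, flip the env switch through
# the CommandEnv store (COMMAND_ENV is already imported above):
COMMAND_ENV.set("OBD_DISABLE_RSYNC", "1", save=True)  # disable_rsync then returns True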
def put_file(self, local_path, remote_path, stdio=None):
stdio = stdio if stdio else self.stdio
if not os.path.isfile(local_path):
stdio and getattr(stdio, 'error', print)('%s is not file' % local_path)
stdio.error('path: %s is not file' % local_path)
return False
if self._is_local():
return LocalClient.put_file(local_path, remote_path, stdio=stdio)
if not self._open_sftp(stdio):
if not self._open_sftp(stdio=stdio):
return False
return self._put_file(local_path, remote_path, stdio=stdio)
def _put_file(self, local_path, remote_path, stdio=None):
@property
def _put_file(self):
if self.remote_transporter == RemoteTransporter.RSYNC:
return self._rsync_put_file
else:
return self._client_put_file
def _client_put_file(self, local_path, remote_path, stdio=None):
if self.execute_command('mkdir -p %s && rm -fr %s' % (os.path.dirname(remote_path), remote_path), stdio=stdio):
stdio and getattr(stdio, 'verbose', print)('send %s to %s' % (local_path, remote_path))
stdio.verbose('send %s to %s' % (local_path, remote_path))
if self.sftp.put(local_path, remote_path):
return self.execute_command('chmod %s %s' % (oct(os.stat(local_path).st_mode)[-3: ], remote_path))
return self.execute_command('chmod %s %s' % (oct(os.stat(local_path).st_mode)[-3:], remote_path))
return False
def _rsync(self, source, target, stdio=None):
identity_option = ""
if self.config.key_filename:
identity_option += '-e "ssh -i {key_filename} "'.format(key_filename=self.config.key_filename)
cmd = 'rsync -a -W {identity_option} {source} {target}'.format(
identity_option=identity_option,
source=source,
target=target
)
ret = LocalClient.execute_command(cmd, stdio=stdio)
return bool(ret)
def _rsync_put_dir(self, local_path, remote_path, stdio=None):
stdio.verbose('send %s to %s by rsync' % (local_path, remote_path))
source = os.path.join(local_path, '*')
if os.path.exists(os.path.dirname(source)) and not glob(source):
stdio.verbose("%s is empty" % source)
return True
target = "{user}@{host}:{remote_path}".format(user=self.config.username, host=self.config.host, remote_path=remote_path)
if self._rsync(source, target, stdio=stdio):
return True
else:
return False
def _rsync_put_file(self, local_path, remote_path, stdio=None):
if not self.execute_command('mkdir -p %s' % os.path.dirname(remote_path), stdio=stdio):
return False
stdio.verbose('send %s to %s by rsync' % (local_path, remote_path))
target = "{user}@{host}:{remote_path}".format(user=self.config.username, host=self.config.host, remote_path=remote_path)
if self._rsync(local_path, target, stdio=stdio):
return True
else:
return False
def put_dir(self, local_dir, remote_dir, stdio=None):
stdio = stdio if stdio else self.stdio
if self._is_local():
return LocalClient.put_dir(local_dir, remote_dir, stdio=stdio)
if not self._open_sftp(stdio):
if not self._open_sftp(stdio=stdio):
return False
if not self.execute_command('mkdir -p %s' % remote_dir, stdio=stdio):
return False
failed = []
failed_dirs = []
local_dir_path_len = len(local_dir)
for root, dirs, files in os.walk(local_dir):
for path in failed_dirs:
if root.find(path) == 0:
# The parent directory has already been marked as failed, so this level can be skipped
# break exits the loop without running the else block
break
else:
for name in files:
local_path = os.path.join(root, name)
remote_path = os.path.join(remote_dir, root[local_dir_path_len:].lstrip('/'), name)
if not self._put_file(local_path, remote_path, stdio=stdio):
failed.append(remote_path)
for name in dirs:
local_path = os.path.join(root, name)
remote_path = os.path.join(remote_dir, root[local_dir_path_len:].lstrip('/'), name)
if not self.execute_command('mkdir -p %s' % remote_path, stdio=stdio):
failed_dirs.append(local_dir)
failed.append(remote_path)
for path in failed:
stdio and getattr(stdio, 'error', print)('send %s to %s@%s failed' % (path, self.config.username, self.config.host))
return not failed
stdio.start_loading('Send %s to %s' % (local_dir, remote_dir))
ret = self._put_dir(local_dir, remote_dir, stdio=stdio)
stdio.stop_loading('succeed' if ret else 'fail')
return ret
@property
def _put_dir(self):
if self.remote_transporter == RemoteTransporter.RSYNC:
return self._rsync_put_dir
else:
return self._client_put_dir
def _client_put_dir(self, local_dir, remote_dir, stdio=None):
has_failed = False
ret = LocalClient.execute_command('find %s -type f' % local_dir)
if not ret:
has_failed = True
all_files = ret.stdout.strip().split('\n') if ret.stdout else []
ret = LocalClient.execute_command('find %s -type d' % local_dir)
if not ret:
has_failed = True
all_dirs = ret.stdout.strip().split('\n') if ret.stdout else []
self._filter_dir_in_file_path(all_files, all_dirs)
for local_path in all_files:
remote_path = os.path.join(remote_dir, os.path.relpath(local_path, local_dir))
if not self._client_put_file(local_path, remote_path, stdio=stdio):
stdio.error('Fail to send %s' % remote_path)
has_failed = True
for local_path in all_dirs:
remote_path = os.path.join(remote_dir, os.path.relpath(local_path, local_dir))
stat = oct(os.stat(local_path).st_mode)[-3:]
cmd = '[ -d "{remote_path}" ] || (mkdir -p {remote_path}; chmod {stat} {remote_path})'.format(remote_path=remote_path, stat=stat)
if not self.execute_command(cmd):
has_failed = True
return not has_failed
def get_file(self, local_path, remote_path, stdio=None):
stdio = stdio if stdio else self.stdio
dirname, _ = os.path.split(local_path)
if not dirname:
dirname = os.getcwd()
local_path = os.path.join(dirname, local_path)
if os.path.exists(dirname):
if not os.path.isdir(dirname):
stdio and getattr(stdio, 'error', print)('%s is not directory' % dirname)
stdio.error('%s is not directory' % dirname)
return False
elif not DirectoryUtil.mkdir(dirname, stdio=stdio):
return False
if os.path.exists(local_path) and not os.path.isfile(local_path):
stdio and getattr(stdio, 'error', print)('%s is not file' % local_path)
stdio.error('path: %s is not file' % local_path)
return False
if self._is_local():
return LocalClient.get_file(local_path, remote_path, stdio=stdio)
if not self._open_sftp(stdio):
if not self._open_sftp(stdio=stdio):
return False
return self._get_file(local_path, remote_path, stdio=stdio)
def _get_file(self, local_path, remote_path, stdio=None):
stdio and getattr(stdio, 'verbose', print)('get %s to %s' % (remote_path, local_path))
@property
def _get_file(self):
if self.remote_transporter == RemoteTransporter.RSYNC:
return self._rsync_get_file
else:
return self._client_get_file
def _rsync_get_dir(self, local_path, remote_path, stdio=None):
source = "{user}@{host}:{remote_path}".format(user=self.config.username, host=self.config.host, remote_path=remote_path)
if "*" not in remote_path:
source = os.path.join(source, "*")
target = local_path
stdio.verbose('get %s from %s by rsync' % (local_path, remote_path))
if LocalClient.execute_command('mkdir -p {}'.format(local_path), stdio=stdio) and self._rsync(source, target, stdio=stdio):
return True
else:
return False
def _rsync_get_file(self, local_path, remote_path, stdio=None):
source = "{user}@{host}:{remote_path}".format(user=self.config.username, host=self.config.host, remote_path=remote_path)
target = local_path
stdio.verbose('get %s from %s by rsync' % (local_path, remote_path))
if self._rsync(source, target, stdio=stdio):
return True
else:
return False
def _client_get_file(self, local_path, remote_path, stdio=None):
try:
self.sftp.get(remote_path, local_path)
stat = self.sftp.stat(remote_path)
os.chmod(local_path, stat.st_mode)
return True
except Exception as e:
stdio and getattr(stdio, 'exception', print)('from %s@%s get %s to %s failed: %s' % (self.config.username, self.config.host, remote_path, local_path, e))
stdio.exception('get %s from %s@%s:%s failed: %s' % (local_path, self.config.username, self.config.host, remote_path, e))
return False
def get_dir(self, local_dir, remote_dir, stdio=None):
stdio = stdio if stdio else self.stdio
dirname, _ = os.path.split(local_dir)
if not dirname:
dirname = os.getcwd()
local_dir = os.path.join(dirname, local_dir)
if "*" in dirname:
stdio.error('Invalid directory {}'.format(dirname))
return False
if os.path.exists(dirname):
if not os.path.isdir(dirname):
stdio and getattr(stdio, 'error', print)('%s is not directory' % dirname)
stdio.error('%s is not directory' % dirname)
return False
elif not DirectoryUtil.mkdir(dirname, stdio=stdio):
return False
if os.path.exists(local_dir) and not os.path.isdir(local_dir):
stdio and getattr(stdio, 'error', print)('%s is not directory' % local_dir)
stdio.error('%s is not directory' % local_dir)
return False
if self._is_local():
return LocalClient.get_dir(local_dir, remote_dir, stdio=stdio)
if not self._open_sftp(stdio):
if not self._open_sftp(stdio=stdio):
return False
return self._get_dir(local_dir, remote_dir, stdio=stdio)
stdio.start_loading('Get %s from %s' % (local_dir, remote_dir))
ret = self._get_dir(local_dir, remote_dir, stdio=stdio)
stdio.stop_loading('succeed' if ret else 'fail')
return ret
@property
def _get_dir(self):
if self.remote_transporter == RemoteTransporter.RSYNC:
return self._rsync_get_dir
else:
return self._client_get_dir
def _get_dir(self, local_dir, remote_dir, failed=[], stdio=None):
def _client_get_dir(self, local_dir, remote_dir, stdio=None):
task_queue = []
has_failed = False
if DirectoryUtil.mkdir(local_dir, stdio=stdio):
try:
for fn in self.sftp.listdir(remote_dir):
remote_path = os.path.join(remote_dir, fn)
local_path = os.path.join(local_dir, fn)
if self.execute_command('bash -c "if [ -f %s ]; then exit 0; else exit 1; fi;"' % remote_path):
if not self._get_file(local_path, remote_path, stdio=stdio):
failed.append(remote_path)
else:
self._get_dir(local_path, remote_path, failed=failed, stdio=stdio.sub_io())
ret = self.execute_command('find %s -type f' % remote_dir)
if not ret:
stdio.verbose(ret.stderr)
has_failed = True
all_files = ret.stdout.strip().split('\n') if ret.stdout else []
ret = self.execute_command('find %s -type d' % remote_dir)
if not ret:
has_failed = True
all_dirs = ret.stdout.strip().split('\n') if ret.stdout else []
self._filter_dir_in_file_path(all_files, all_dirs)
for f in all_files:
task_queue.append(f)
if "*" in remote_dir:
remote_base_dir = os.path.dirname(remote_dir)
else:
remote_base_dir = remote_dir
for remote_path in task_queue:
local_path = os.path.join(local_dir, os.path.relpath(remote_path, remote_dir))
if not self._client_get_file(local_path, remote_path, stdio=stdio):
stdio.error('Fail to get %s' % remote_path)
has_failed = True
for remote_path in all_dirs:
try:
local_path = os.path.join(local_dir, os.path.relpath(remote_path, remote_base_dir))
if not os.path.exists(local_path):
stat = self.sftp.stat(remote_path)
os.makedirs(local_path, mode=stat.st_mode)
except Exception as e:
stdio.exception('Fail to make directory %s in local: %s' % (remote_path, e))
has_failed = True
return not has_failed
except Exception as e:
stdio and getattr(stdio, 'exception', print)('Fail to get %s: %s' % (remote_dir, e))
failed.append(remote_dir)
else:
failed.append(remote_dir)
return not failed
stdio.exception('Fail to get %s: %s' % (remote_dir, e))
@staticmethod
def _filter_dir_in_file_path(files, directories):
skip_directories = []
for path in files:
dir_name = os.path.dirname(path)
while dir_name not in ["/", ".", ""]:
if dir_name in skip_directories:
break
if dir_name in directories:
directories.remove(dir_name)
skip_directories.append(dir_name)
dir_name = os.path.dirname(dir_name)
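# Worked example (editor's sketch): directories already implied by a file path
# are pruned in place, leaving only the directories that must be created empty.
files = ['/a/b/c.txt']
dirs = ['/a', '/a/b', '/a/empty']
SshClient._filter_dir_in_file_path(files, dirs)
assert dirs == ['/a/empty']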
def file_downloader(self, local_dir, remote_dir, stdio=None):
try:
client = SshClient(config=self.config, stdio=None)
client._open_sftp(stdio=stdio)
client._remote_transporter = self.remote_transporter
while True:
remote_path = self.task_queue.get(block=False)
local_path = os.path.join(local_dir, os.path.relpath(remote_path, remote_dir))
if client.get_file(local_path, remote_path, stdio=stdio):
self.result_queue.put(remote_path)
else:
stdio.error('Fail to get %s' % remote_path)
except Empty:
return
except:
stdio.exception("")
stdio.exception('Failed to get %s' % remote_dir)
def file_uploader(self, local_dir, remote_dir, stdio=None):
try:
client = SshClient(config=self.config, stdio=None)
client._remote_transporter = self.remote_transporter
while True:
local_path, is_dir = self.task_queue.get(block=False)
remote_path = os.path.join(remote_dir, os.path.relpath(local_path, local_dir))
if is_dir:
stat = oct(os.stat(local_path).st_mode)[-3:]
cmd = '[ -d "{remote_path}" ] || (mkdir -p {remote_path}; chmod {stat} {remote_path})'.format(remote_path=remote_path, stat=stat)
if client.execute_command(cmd):
self.result_queue.put(remote_path)
else:
if client.put_file(local_path, remote_path, stdio=stdio):
self.result_queue.put(remote_path)
else:
stdio.error('Fail to send %s' % remote_path)
except Empty:
return
except:
stdio.exception("")
stdio.verbose('Failed to send %s' % remote_dir)
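# Coordinator sketch (editor's assumption; the driving code is outside this hunk).
# task_queue is filled with (local_path, is_dir) tuples, mirroring the loop above:
client._init_queue()
client.task_queue.put(('/tmp/pkg/bin/observer', False))
worker = Process(target=client.file_uploader, args=('/tmp/pkg', '/home/admin/pkg'))
worker.start()
worker.join()
while not client.result_queue.empty():
    print('sent', client.result_queue.get())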
......@@ -29,29 +29,68 @@ import gzip
import fcntl
import signal
import shutil
import re
import json
from ruamel.yaml import YAML, YAMLContextManager, representer
from _stdio import SafeStdio
_open = open
if sys.version_info.major == 2:
from collections import OrderedDict
from backports import lzma
from io import open
from io import open as _open
def encoding_open(path, _type, encoding=None, *args, **kwargs):
if encoding:
kwargs['encoding'] = encoding
return _open(path, _type, *args, **kwargs)
else:
return open(path, _type, *args, **kwargs)
class TimeoutError(OSError):
def __init__(self, *args, **kwargs):
super(TimeoutError, self).__init__(*args, **kwargs)
else:
import lzma
encoding_open = open
class OrderedDict(dict):
pass
__all__ = ("timeout", "DynamicLoading", "ConfigUtil", "DirectoryUtil", "FileUtil", "YamlLoader", "OrderedDict")
__all__ = ("timeout", "DynamicLoading", "ConfigUtil", "DirectoryUtil", "FileUtil", "YamlLoader", "OrderedDict", "COMMAND_ENV")
_WINDOWS = os.name == 'nt'
class Timeout(object):
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def _is_timeout(self):
return self.seconds and self.seconds > 0
def __enter__(self):
if self._is_timeout():
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
if self._is_timeout():
signal.alarm(0)
timeout = Timeout
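# Usage (SIGALRM-based, so POSIX main-thread only; seconds <= 0 disables the alarm):
with timeout(5, error_message='remote call timed out'):
    do_slow_io()  # hypothetical long-running call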
class Timeout:
def __init__(self, seconds=1, error_message='Timeout'):
......@@ -162,6 +201,17 @@ class ConfigUtil(object):
except:
return default
@staticmethod
def get_list_from_dict(conf, key, transform_func=None):
try:
return_list = conf[key]
if transform_func:
return [transform_func(value) for value in return_list]
else:
return return_list
except:
return []
class DirectoryUtil(object):
......@@ -320,7 +370,7 @@ class FileUtil(object):
stdio and getattr(stdio, 'verbose', print)('open %s for %s' % (path, _type))
if os.path.exists(path):
if os.path.isfile(path):
return open(path, _type, encoding=encoding)
return encoding_open(path, _type, encoding=encoding)
info = '%s is not file' % path
if stdio:
getattr(stdio, 'error', print)(info)
......@@ -329,7 +379,7 @@ class FileUtil(object):
raise IOError(info)
dir_path, file_name = os.path.split(path)
if not dir_path or DirectoryUtil.mkdir(dir_path, stdio=stdio):
return open(path, _type, encoding=encoding)
return encoding_open(path, _type, encoding=encoding)
info = '%s is not file' % path
if stdio:
getattr(stdio, 'error', print)(info)
......@@ -422,3 +472,116 @@ class YamlLoader(YAML):
if getattr(self.stdio, 'exception', False):
self.stdio.exception('dump error:\n%s' % e)
raise e
_KEYCRE = re.compile(r"\$(\w+)")
def var_replace(string, var, pattern=_KEYCRE):
if not var:
return string
done = []
while string:
m = pattern.search(string)
if not m:
done.append(string)
break
varname = m.group(1).lower()
replacement = var.get(varname, m.group())
start, end = m.span()
done.append(string[:start])
done.append(str(replacement))
string = string[end:]
return ''.join(done)
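# Example: $name tokens are matched lower-cased against the mapping; unknown
# variables stay verbatim.
assert var_replace('$HOME/.obd and $unknown', {'home': '/root'}) == '/root/.obd and $unknown'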
class CommandEnv(SafeStdio):
def __init__(self):
self.source_path = None
self._env = os.environ.copy()
self._cmd_env = {}
def load(self, source_path, stdio=None):
if self.source_path:
stdio.error("Source path of env already set.")
return False
self.source_path = source_path
try:
if os.path.exists(source_path):
with FileUtil.open(source_path, 'r') as f:
self._cmd_env = json.load(f)
except:
stdio.exception("Failed to load environments from {}".format(source_path))
return False
return True
def save(self, stdio=None):
if self.source_path is None:
stdio.error("Command environments need to load at first.")
return False
stdio.verbose("save environment variables {}".format(self._cmd_env))
try:
with FileUtil.open(self.source_path, 'w', stdio=stdio) as f:
json.dump(self._cmd_env, f)
except:
stdio.exception('Failed to save environment variables')
return False
return True
def get(self, key, default=""):
try:
return self.__getitem__(key)
except KeyError:
return default
def set(self, key, value, save=False, stdio=None):
stdio.verbose("set environment variable {} value {}".format(key, value))
self._cmd_env[key] = str(value)
if save:
return self.save(stdio=stdio)
return True
def delete(self, key, save=False, stdio=None):
stdio.verbose("delete environment variable {}".format(key))
if key in self._cmd_env:
del self._cmd_env[key]
if save:
return self.save(stdio=stdio)
return True
def clear(self, save=True, stdio=None):
self._cmd_env = {}
if save:
return self.save(stdio=stdio)
return True
def __getitem__(self, item):
value = self._cmd_env.get(item)
if value is None:
value = self._env.get(item)
if value is None:
raise KeyError(item)
return value
def __contains__(self, item):
if item in self._cmd_env:
return True
elif item in self._env:
return True
else:
return False
def copy(self):
result = dict(self._env)
result.update(self._cmd_env)
return result
def show_env(self):
return self._cmd_env
COMMAND_ENV = CommandEnv()
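# Minimal end-to-end sketch (illustrative path): load once, then read and write
# through the store; set(save=True) persists to the backing JSON file.
COMMAND_ENV.load('/root/.obd/.obd_environ')
COMMAND_ENV.set('OBD_DEV_MODE', '1', save=True)
assert COMMAND_ENV.get('OBD_DEV_MODE') == '1'  # get() falls back to os.environ, then ''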