Unverified commit 5b551d65 authored by Rongfeng Fu, committed by GitHub

v1.1 (#28)

* 1.1

* auto deploy example

* update sysbench script name
Parent f42a7180
......@@ -35,7 +35,7 @@ from tool import DirectoryUtil, FileUtil
ROOT_IO = IO(1)
VERSION = '1.0.2'
VERSION = '1.1.0'
REVISION = '<CID>'
BUILD_BRANCH = '<B_BRANCH>'
BUILD_TIME = '<B_TIME>'
......@@ -52,8 +52,8 @@ class BaseCommand(object):
self.prev_cmd = ''
self.is_init = False
self.parser = OptionParser(add_help_option=False)
self.parser.add_option('-h', '--help', action='callback', callback=self._show_help, help='show this help message and exit')
self.parser.add_option('-v', '--verbose', action='callback', callback=self._set_verbose, help='verbose operation')
self.parser.add_option('-h', '--help', action='callback', callback=self._show_help, help='Show help and exit.')
self.parser.add_option('-v', '--verbose', action='callback', callback=self._set_verbose, help='Activate verbose output.')
def _set_verbose(self, *args, **kwargs):
ROOT_IO.set_verbose_level(0xfffffff)
......@@ -116,27 +116,31 @@ class ObdCommand(BaseCommand):
def do_command(self):
self.parse_command()
self.init_home()
trace_id = uuid()
ret = False
try:
log_dir = os.path.join(self.OBD_PATH, 'log')
DirectoryUtil.mkdir(log_dir)
log_path = os.path.join(log_dir, 'obd')
logger = Logger('obd')
handler = handlers.TimedRotatingFileHandler(log_path, when='midnight', interval=1, backupCount=30)
handler.setFormatter(logging.Formatter("[%%(asctime)s] [%s] [%%(levelname)s] %%(message)s" % uuid(), "%Y-%m-%d %H:%M:%S"))
handler.setFormatter(logging.Formatter("[%%(asctime)s] [%s] [%%(levelname)s] %%(message)s" % trace_id, "%Y-%m-%d %H:%M:%S"))
logger.addHandler(handler)
ROOT_IO.trace_logger = logger
obd = ObdHome(self.OBD_PATH, ROOT_IO)
ROOT_IO.track_limit += 1
return self._do_command(obd)
ret = self._do_command(obd)
except NotImplementedError:
ROOT_IO.exception('command \'%s\' is not implemented' % self.prev_cmd)
except IOError:
ROOT_IO.exception('obd is running')
ROOT_IO.exception('OBD is running')
except SystemExit:
pass
except:
ROOT_IO.exception('Run Error')
return False
ROOT_IO.exception('Runtime error.')
# if not ret:
# ROOT_IO.print('Trace ID: %s' % trace_id)
return ret
def _do_command(self, obd):
raise NotImplementedError
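Editor's note: the change above hoists the per-run `uuid()` into a `trace_id` variable so the same id both tags every log line and can later be echoed to the user (see the commented-out `Trace ID` print). A standalone sketch of the pattern, using the standard library's uuid in place of OBD's own helper:

import logging
import uuid
from logging import handlers

trace_id = uuid.uuid4().hex  # one id per process invocation
logger = logging.getLogger('obd-sketch')
logger.setLevel(logging.DEBUG)

handler = handlers.TimedRotatingFileHandler('obd.log', when='midnight', interval=1, backupCount=30)
# '%%' survives the outer %-substitution, so asctime/levelname are left for logging to fill in.
handler.setFormatter(logging.Formatter(
    "[%%(asctime)s] [%s] [%%(levelname)s] %%(message)s" % trace_id,
    "%Y-%m-%d %H:%M:%S"))
logger.addHandler(handler)

logger.info('every line from this run carries the same trace id')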
......@@ -150,7 +154,7 @@ class MajorCommand(BaseCommand):
def _mk_usage(self):
if self.commands:
usage = ['%s <command> [options]\n\nAvailable Commands:\n' % self.prev_cmd]
usage = ['%s <command> [options]\n\nAvailable commands:\n' % self.prev_cmd]
commands = [x for x in self.commands.values() if not (hasattr(x, 'hidden') and x.hidden)]
commands.sort(key=lambda x: x.name)
for command in commands:
......@@ -163,7 +167,7 @@ class MajorCommand(BaseCommand):
ROOT_IO.error('%s command not init' % self.prev_cmd)
raise SystemExit('command not init')
if len(self.args) < 1:
ROOT_IO.print('You need to give some command')
ROOT_IO.print('You need to specify a command.\n\nTry `obd --help` for more information.')
self._show_help()
return False
base, args = self.args[0], self.args[1:]
......@@ -182,12 +186,12 @@ class MajorCommand(BaseCommand):
class MirrorCloneCommand(ObdCommand):
def __init__(self):
super(MirrorCloneCommand, self).__init__('clone', 'clone remote mirror or local rpmfile as mirror.')
self.parser.add_option('-f', '--force', action='store_true', help="overwrite when mirror exist")
super(MirrorCloneCommand, self).__init__('clone', 'Clone an RPM package to the local mirror repository.')
self.parser.add_option('-f', '--force', action='store_true', help="Force clone, overwrite the mirror.")
def init(self, cmd, args):
super(MirrorCloneCommand, self).init(cmd, args)
self.parser.set_usage('%s [mirror source] [options]' % self.prev_cmd)
self.parser.set_usage('%s [mirror path] [options]' % self.prev_cmd)
return self
def _do_command(self, obd):
......@@ -203,14 +207,13 @@ class MirrorCloneCommand(ObdCommand):
class MirrorCreateCommand(ObdCommand):
def __init__(self):
super(MirrorCreateCommand, self).__init__('create', 'create a local mirror by local binary file')
super(MirrorCreateCommand, self).__init__('create', 'Create a local mirror by using the local binary file.')
self.parser.conflict_handler = 'resolve'
self.parser.add_option('-n', '--name', type='string', help="mirror's name")
self.parser.add_option('-t', '--tag', type='string', help="mirror's tag, use `,` interval")
self.parser.add_option('-V', '--version', type='string', help="mirror's version")
self.parser.add_option('-p','--path', type='string', help="mirror's path", default='./')
self.parser.add_option('-f', '--force', action='store_true', help="overwrite when mirror exist")
self.parser.add_option('-n', '--name', type='string', help="Mirror name.")
self.parser.add_option('-t', '--tag', type='string', help="Mirror tags. Multiple tags are separated with commas.")
self.parser.add_option('-V', '--version', type='string', help="Mirror version.")
self.parser.add_option('-p','--path', type='string', help="Mirror path. [./]", default='./')
self.parser.add_option('-f', '--force', action='store_true', help="Force create, overwrite the mirror.")
self.parser.conflict_handler = 'error'
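Editor's note on the `conflict_handler` dance around the option block above: `'resolve'` lets `add_option` silently replace an earlier definition of the same switch instead of raising `OptionConflictError`, and `'error'` is restored afterwards so later accidental redefinitions still fail loudly. A self-contained sketch of the two behaviors:

from optparse import OptionParser, OptionConflictError

parser = OptionParser(add_help_option=False)
parser.add_option('-n', '--name', help="first definition")

parser.conflict_handler = 'resolve'
parser.add_option('-n', '--name', help="second definition silently wins under 'resolve'")

parser.conflict_handler = 'error'
try:
    parser.add_option('-n', '--name', help="third definition raises under 'error'")
except OptionConflictError as e:
    print('conflict rejected: %s' % e)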
def _do_command(self, obd):
......@@ -220,7 +223,7 @@ class MirrorCreateCommand(ObdCommand):
class MirrorListCommand(ObdCommand):
def __init__(self):
super(MirrorListCommand, self).__init__('list', 'list mirror')
super(MirrorListCommand, self).__init__('list', 'List mirrors.')
def show_pkg(self, name, pkgs):
ROOT_IO.print_list(
......@@ -250,7 +253,7 @@ class MirrorListCommand(ObdCommand):
repos = obd.mirror_manager.get_mirrors()
ROOT_IO.print_list(
repos,
['name', 'type', 'update time'],
['Name', 'Type', 'Update Time'],
lambda x: [x.name, x.mirror_type.value, time.strftime("%Y-%m-%d %H:%M", time.localtime(x.repo_age))],
title='Mirror Repository List'
)
......@@ -260,7 +263,7 @@ class MirrorListCommand(ObdCommand):
class MirrorUpdateCommand(ObdCommand):
def __init__(self):
super(MirrorUpdateCommand, self).__init__('update', 'update remote mirror info')
super(MirrorUpdateCommand, self).__init__('update', 'Update remote mirror information.')
def _do_command(self, obd):
success = True
......@@ -271,35 +274,85 @@ class MirrorUpdateCommand(ObdCommand):
except:
success = False
ROOT_IO.stop_loading('fail')
ROOT_IO.exception('fail to synchronize mirorr (%s)' % repo.name)
ROOT_IO.exception('Fail to synchronize mirorr (%s)' % repo.name)
return success
class MirrorMajorCommand(MajorCommand):
def __init__(self):
super(MirrorMajorCommand, self).__init__('mirror', 'Manage a component repository for obd.')
super(MirrorMajorCommand, self).__init__('mirror', 'Manage a component repository for OBD.')
self.register_command(MirrorListCommand())
self.register_command(MirrorCloneCommand())
self.register_command(MirrorCreateCommand())
self.register_command(MirrorUpdateCommand())
class RepositoryListCommand(ObdCommand):
def __init__(self):
super(RepositoryListCommand, self).__init__('list', 'List local repositories.')
def show_repo(self, repos, name=None):
ROOT_IO.print_list(
repos,
['name', 'version', 'release', 'arch', 'md5', 'tags'],
lambda x: [x.name, x.version, x.release, x.arch, x.md5, ', '.join(x.tags)],
title='%s Local Repository List' % name if name else 'Local Repository List'
)
def _do_command(self, obd):
name = self.cmds[0] if self.cmds else None
repos = obd.repository_manager.get_repositories_view(name)
self.show_repo(repos, name)
return True
class RepositoryMajorCommand(MajorCommand):
def __init__(self):
super(RepositoryMajorCommand, self).__init__('repo', 'Manage local repositories for OBD.')
self.register_command(RepositoryListCommand())
class ClusterMirrorCommand(ObdCommand):
def init(self, cmd, args):
super(ClusterMirrorCommand, self).init(cmd, args)
self.parser.set_usage('%s [cluster name] [options]' % self.prev_cmd)
self.parser.set_usage('%s <deploy name> [options]' % self.prev_cmd)
return self
class ClusterAutoDeployCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterAutoDeployCommand, self).__init__('autodeploy', 'Deploy a cluster automatically by using a simple configuration file.')
self.parser.add_option('-c', '--config', type='string', help="Path to the configuration file.")
self.parser.add_option('-f', '--force', action='store_true', help="Force autodeploy, overwrite the home_path.")
self.parser.add_option('-U', '--unuselibrepo', '--ulp', action='store_true', help="Disable OBD from installing the libs mirror automatically.")
self.parser.add_option('-A', '--auto-create-tenant', '--act', action='store_true', help="Automatically create a tenant named `test` by using all the available resource of the cluster.")
self.parser.add_option('--force-delete', action='store_true', help="Force delete, delete the registered cluster.")
self.parser.add_option('-s', '--strict-check', action='store_true', help="Throw errors instead of warnings when a check fails.")
def _do_command(self, obd):
if self.cmds:
name = self.cmds[0]
if obd.genconfig(name, self.opts):
self.opts.config = ''
return obd.deploy_cluster(name, self.opts) and obd.start_cluster(name, self.cmds[1:], self.opts)
return False
else:
return self._show_help()
class ClusterDeployCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterDeployCommand, self).__init__('deploy', 'use current deploy config or an deploy yaml file to deploy a cluster')
self.parser.add_option('-c', '--config', type='string', help="cluster config yaml path")
self.parser.add_option('-f', '--force', action='store_true', help="remove all when home_path is not empty", default=False)
self.parser.add_option('-U', '--unuselibrepo', '--ulp', action='store_true', help="obd will not install libs when library is not found")
super(ClusterDeployCommand, self).__init__('deploy', 'Deploy a cluster by using the current deploy configuration or a deploy yaml file.')
self.parser.add_option('-c', '--config', type='string', help="Path to the configuration yaml file.")
self.parser.add_option('-f', '--force', action='store_true', help="Force deploy, overwrite the home_path.", default=False)
self.parser.add_option('-U', '--unuselibrepo', '--ulp', action='store_true', help="Disable OBD from installing the libs mirror automatically.")
self.parser.add_option('-A', '--auto-create-tenant', '--act', action='store_true', help="Automatically create a tenant named `test` by using all the available resource of the cluster.")
# self.parser.add_option('-F', '--fuzzymatch', action='store_true', help="enable fuzzy match when search package")
def _do_command(self, obd):
......@@ -312,9 +365,11 @@ class ClusterDeployCommand(ClusterMirrorCommand):
class ClusterStartCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterStartCommand, self).__init__('start', 'start a cluster had deployed')
self.parser.add_option('-f', '--force-delete', action='store_true', help="cleanup when cluster had registered")
self.parser.add_option('-s', '--strict-check', action='store_true', help="prompt for errors instead of warnings when the check fails")
super(ClusterStartCommand, self).__init__('start', 'Start a deployed cluster.')
self.parser.add_option('-s', '--servers', type='string', help="The list of servers to start. Multiple servers are separated with commas.")
self.parser.add_option('-c', '--components', type='string', help="The list of components to start. Multiple components are separated with commas.")
self.parser.add_option('-f', '--force-delete', action='store_true', help="Force delete, delete the registered cluster.")
self.parser.add_option('-S', '--strict-check', action='store_true', help="Throw errors instead of warnings when a check fails.")
def _do_command(self, obd):
if self.cmds:
......@@ -326,11 +381,13 @@ class ClusterStartCommand(ClusterMirrorCommand):
class ClusterStopCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterStopCommand, self).__init__('stop', 'stop a cluster had started')
super(ClusterStopCommand, self).__init__('stop', 'Stop a started cluster.')
self.parser.add_option('-s', '--servers', type='string', help="The list of servers to stop. Multiple servers are separated with commas.")
self.parser.add_option('-c', '--components', type='string', help="The list of components to stop. Multiple components are separated with commas.")
def _do_command(self, obd):
if self.cmds:
return obd.stop_cluster(self.cmds[0])
return obd.stop_cluster(self.cmds[0], self.opts)
else:
return self._show_help()
......@@ -338,8 +395,8 @@ class ClusterStopCommand(ClusterMirrorCommand):
class ClusterDestroyCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterDestroyCommand, self).__init__('destroy', 'start a cluster had deployed')
self.parser.add_option('-f', '--force-kill', action='store_true', help="force kill when observer is running")
super(ClusterDestroyCommand, self).__init__('destroy', 'Destroy a deployed cluster.')
self.parser.add_option('-f', '--force-kill', action='store_true', help="Force kill the running observer process in the working directory.")
def _do_command(self, obd):
if self.cmds:
......@@ -351,7 +408,7 @@ class ClusterDestroyCommand(ClusterMirrorCommand):
class ClusterDisplayCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterDisplayCommand, self).__init__('display', 'display a cluster info')
super(ClusterDisplayCommand, self).__init__('display', 'Display the information for a cluster.')
def _do_command(self, obd):
if self.cmds:
......@@ -363,11 +420,13 @@ class ClusterDisplayCommand(ClusterMirrorCommand):
class ClusterRestartCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterRestartCommand, self).__init__('restart', 'restart a cluster had started')
super(ClusterRestartCommand, self).__init__('restart', 'Restart a started cluster.')
self.parser.add_option('-s', '--servers', type='string', help="The list of servers to restart. Multiple servers are separated with commas.")
self.parser.add_option('-c', '--components', type='string', help="The list of components to restart. Multiple components are separated with commas.")
def _do_command(self, obd):
if self.cmds:
return obd.restart_cluster(self.cmds[0])
return obd.restart_cluster(self.cmds[0], self.opts)
else:
return self._show_help()
......@@ -375,8 +434,8 @@ class ClusterRestartCommand(ClusterMirrorCommand):
class ClusterRedeployCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterRedeployCommand, self).__init__('redeploy', 'redeploy a cluster had started')
self.parser.add_option('-f', '--force-kill', action='store_true', help="force kill when observer is running")
super(ClusterRedeployCommand, self).__init__('redeploy', 'Redeploy a started cluster.')
self.parser.add_option('-f', '--force-kill', action='store_true', help="Force kill the running observer process in the working directory.")
def _do_command(self, obd):
if self.cmds:
......@@ -388,7 +447,7 @@ class ClusterRedeployCommand(ClusterMirrorCommand):
class ClusterReloadCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterReloadCommand, self).__init__('reload', 'reload a cluster had started')
super(ClusterReloadCommand, self).__init__('reload', 'Reload a started cluster.')
def _do_command(self, obd):
if self.cmds:
......@@ -400,7 +459,7 @@ class ClusterReloadCommand(ClusterMirrorCommand):
class ClusterListCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterListCommand, self).__init__('list', 'show all deploy')
super(ClusterListCommand, self).__init__('list', 'List all the deployments.')
def _do_command(self, obd):
if self.cmds:
......@@ -412,7 +471,7 @@ class ClusterListCommand(ClusterMirrorCommand):
class ClusterEditConfigCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterEditConfigCommand, self).__init__('edit-config', 'edit a deploy config')
super(ClusterEditConfigCommand, self).__init__('edit-config', 'Edit a deploy configuration file.')
def _do_command(self, obd):
if self.cmds:
......@@ -424,9 +483,9 @@ class ClusterEditConfigCommand(ClusterMirrorCommand):
class CLusterUpgradeCommand(ClusterMirrorCommand):
def __init__(self):
super(CLusterUpgradeCommand, self).__init__('upgrade', 'upgrade cluster')
self.parser.add_option('-f', '--force', action='store_true', help="force upgrade")
self.parser.add_option('-c', '--components', type='string', help="the updated component list, use `,` interval")
super(CLusterUpgradeCommand, self).__init__('upgrade', 'Upgrade a cluster.')
self.parser.add_option('-f', '--force', action='store_true', help="Force upgrade.")
self.parser.add_option('-c', '--components', type='string', help="List the updated components. Multiple components are separated with commas.")
def _do_command(self, obd):
if self.cmds:
......@@ -434,10 +493,64 @@ class CLusterUpgradeCommand(ClusterMirrorCommand):
else:
return self._show_help()
class ClusterTenantCreateCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterTenantCreateCommand, self).__init__('create', 'Create a tenant.')
self.parser.add_option('-n', '--tenant-name', type='string', help="The tenant name.")
self.parser.add_option('--max-cpu', type='float', help="Unit max CPU number.")
self.parser.add_option('--min-cpu', type='float', help="Unit min CPU number.")
self.parser.add_option('--max-memory', type='int', help="Unit max memory size.")
self.parser.add_option('--min-memory', type='int', help="Unit min memory size.")
self.parser.add_option('--max-disk-size', type='int', help="Unit max disk size.")
self.parser.add_option('--max-iops', type='int', help="Unit max iops number. [128]", default=128)
self.parser.add_option('--min-iops', type='int', help="Unit min iops number.")
self.parser.add_option('--max-session-num', type='int', help="Unit max session number. [64]", default=64)
self.parser.add_option('--unit-num', type='int', help="Pool unit number.")
self.parser.add_option('-z', '--zone-list', type='string', help="Tenant zone list.")
self.parser.add_option('--charset', type='string', help="Tenant charset.")
self.parser.add_option('--collate', type='string', help="Tenant COLLATE.")
self.parser.add_option('--replica-num', type='int', help="Tenant replica number.")
self.parser.add_option('--logonly-replica-num', type='int', help="Tenant logonly replica number.")
self.parser.add_option('--tablegroup', type='string', help="Tenant tablegroup.")
self.parser.add_option('--primary-zone', type='string', help="Tenant primary zone. [RANDOM]", default='RANDOM')
self.parser.add_option('--locality', type='string', help="Tenant locality.")
self.parser.add_option('-s', '--variables', type='string', help="Set the variables for the system tenant. [ob_tcp_invited_nodes='%']", default="ob_tcp_invited_nodes='%'")
def _do_command(self, obd):
if self.cmds:
return obd.create_tenant(self.cmds[0], self.opts)
else:
return self._show_help()
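Editor's note: the bracketed values in the help strings above follow the file's convention of documenting the `default=` argument; whatever the user omits still appears on the parsed `Values` object. A toy round trip (illustrative parser, not OBD's):

from optparse import OptionParser

parser = OptionParser(add_help_option=False)
parser.add_option('--max-iops', type='int', help='Unit max iops number. [128]', default=128)
parser.add_option('--primary-zone', type='string', help='Tenant primary zone. [RANDOM]', default='RANDOM')

opts, args = parser.parse_args(['--max-iops', '256'])
print(opts.max_iops)      # 256: explicitly set
print(opts.primary_zone)  # 'RANDOM': falls back to the documented default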
class ClusterTenantDropCommand(ClusterMirrorCommand):
def __init__(self):
super(ClusterTenantDropCommand, self).__init__('drop', 'Drop a tenant.')
self.parser.add_option('-n', '--tenant-name', type='string', help="Tenant name.")
def _do_command(self, obd):
if self.cmds:
return obd.drop_tenant(self.cmds[0], self.opts)
else:
return self._show_help()
class ClusterTenantCommand(MajorCommand):
def __init__(self):
super(ClusterTenantCommand, self).__init__('tenant', 'Create or drop a tenant.')
self.register_command(ClusterTenantCreateCommand())
self.register_command(ClusterTenantDropCommand())
class ClusterMajorCommand(MajorCommand):
def __init__(self):
super(ClusterMajorCommand, self).__init__('cluster', 'deploy and manager cluster')
super(ClusterMajorCommand, self).__init__('cluster', 'Deploy and manage a cluster.')
self.register_command(ClusterAutoDeployCommand())
self.register_command(ClusterDeployCommand())
self.register_command(ClusterStartCommand())
self.register_command(ClusterStopCommand())
......@@ -449,43 +562,44 @@ class ClusterMajorCommand(MajorCommand):
self.register_command(ClusterEditConfigCommand())
self.register_command(ClusterReloadCommand())
self.register_command(CLusterUpgradeCommand())
self.register_command(ClusterTenantCommand())
class TestMirrorCommand(ObdCommand):
def init(self, cmd, args):
super(TestMirrorCommand, self).init(cmd, args)
self.parser.set_usage('%s [cluster name] [options]' % self.prev_cmd)
self.parser.set_usage('%s <deploy name> [options]' % self.prev_cmd)
return self
class MySQLTestCommand(TestMirrorCommand):
def __init__(self):
super(MySQLTestCommand, self).__init__('mysqltest', 'run mysqltest for a deploy')
self.parser.add_option('--component', type='string', help='the component for mysqltest')
self.parser.add_option('--test-server', type='string', help='the server for mysqltest, default the first root server in the component')
self.parser.add_option('--user', type='string', help='username for test', default='admin')
self.parser.add_option('--password', type='string', help='password for test', default='admin')
self.parser.add_option('--database', type='string', help='database for test', default='test')
self.parser.add_option('--mysqltest-bin', type='string', help='mysqltest bin path', default='/u01/obclient/bin/mysqltest')
self.parser.add_option('--obclient-bin', type='string', help='obclient bin path', default='obclient')
self.parser.add_option('--test-dir', type='string', help='test case file directory', default='./mysql_test/t')
self.parser.add_option('--result-dir', type='string', help='result case file directory', default='./mysql_test/r')
self.parser.add_option('--record-dir', type='string', help='the directory of the result file for mysqltest')
self.parser.add_option('--log-dir', type='string', help='the directory of the log file', default='./log')
self.parser.add_option('--tmp-dir', type='string', help='tmp dir to use when run mysqltest', default='./tmp')
self.parser.add_option('--var-dir', type='string', help='var dir to use when run mysqltest', default='./var')
super(MySQLTestCommand, self).__init__('mysqltest', 'Run mysqltest for a deployment.')
self.parser.add_option('--component', type='string', help='The component for mysqltest.')
self.parser.add_option('--test-server', type='string', help='The server for mysqltest. By default, the first root server in the component is the mysqltest server.')
self.parser.add_option('--user', type='string', help='Username for a test. [admin]', default='admin')
self.parser.add_option('--password', type='string', help='Password for a test. [admin]', default='admin')
self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test')
self.parser.add_option('--mysqltest-bin', type='string', help='Mysqltest bin path. [/u01/obclient/bin/mysqltest]', default='/u01/obclient/bin/mysqltest')
self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')
self.parser.add_option('--test-dir', type='string', help='Test case file directory. [./mysql_test/t]', default='./mysql_test/t')
self.parser.add_option('--result-dir', type='string', help='Result case file directory. [./mysql_test/r]', default='./mysql_test/r')
self.parser.add_option('--record-dir', type='string', help='The directory of the result file for mysqltest.')
self.parser.add_option('--log-dir', type='string', help='The log file directory. [./log]', default='./log')
self.parser.add_option('--tmp-dir', type='string', help='Temporary directory for mysqltest. [./tmp]', default='./tmp')
self.parser.add_option('--var-dir', type='string', help='Var directory to use when running mysqltest. [./var]', default='./var')
self.parser.add_option('--test-set', type='string', help='test list, use `,` interval')
self.parser.add_option('--test-pattern', type='string', help='pattern for test file')
self.parser.add_option('--suite', type='string', help='suite list, use `,` interval')
self.parser.add_option('--suite-dir', type='string', help='suite case directory', default='./mysql_test/test_suite')
self.parser.add_option('--init-sql-dir', type='string', help='init sql directory', default='../')
self.parser.add_option('--init-sql-files', type='string', help='init sql file list, use `,` interval')
self.parser.add_option('--need-init', action='store_true', help='exec init sql', default=False)
self.parser.add_option('--auto-retry', action='store_true', help='auto retry when failed', default=False)
self.parser.add_option('--all', action='store_true', help='run all suite-dir case', default=False)
self.parser.add_option('--psmall', action='store_true', help='run psmall case', default=False)
self.parser.add_option('--test-pattern', type='string', help='Pattern for test file.')
self.parser.add_option('--suite', type='string', help='Suite list. Multiple suites are separated with commas.')
self.parser.add_option('--suite-dir', type='string', help='Suite case directory. [./mysql_test/test_suite]', default='./mysql_test/test_suite')
self.parser.add_option('--init-sql-dir', type='string', help='Init SQL directory. [../]', default='../')
self.parser.add_option('--init-sql-files', type='string', help='Init SQL file list. Multiple files are separated with commas.')
self.parser.add_option('--need-init', action='store_true', help='Execute the init SQL files.', default=False)
self.parser.add_option('--auto-retry', action='store_true', help='Automatically retry when a test fails.', default=False)
self.parser.add_option('--all', action='store_true', help='Run all suite-dir cases.', default=False)
self.parser.add_option('--psmall', action='store_true', help='Run psmall cases.', default=False)
# self.parser.add_option('--java', action='store_true', help='use java sdk', default=False)
def _do_command(self, obd):
......@@ -495,11 +609,44 @@ class MySQLTestCommand(TestMirrorCommand):
return self._show_help()
class SysBenchCommand(TestMirrorCommand):
def __init__(self):
super(SysBenchCommand, self).__init__('sysbench', 'Run sysbench for a deployment.')
self.parser.add_option('--component', type='string', help='The component for sysbench.')
self.parser.add_option('--test-server', type='string', help='The server for sysbench. By default, the first root server in the component is the sysbench server.')
self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root')
self.parser.add_option('--password', type='string', help='Password for a test.')
self.parser.add_option('--tenant', type='string', help='Tenant for a test. [test]', default='test')
self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test')
self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient')
self.parser.add_option('--sysbench-bin', type='string', help='Sysbench bin path. [sysbench]', default='sysbench')
self.parser.add_option('--script-name', type='string', help='Sysbench lua script file name. [oltp_point_select.lua]', default='oltp_point_select.lua')
self.parser.add_option('--sysbench-script-dir', type='string', help='The directory of the sysbench lua script file. [/usr/sysbench/share/sysbench]', default='/usr/sysbench/share/sysbench')
self.parser.add_option('--table-size', type='int', help='Number of data initialized per table. [20000]', default=20000)
self.parser.add_option('--tables', type='int', help='Number of initialization tables. [30]', default=30)
self.parser.add_option('--threads', type='int', help='Number of threads to use. [16]', default=16)
self.parser.add_option('--time', type='int', help='Limit for total execution time in seconds. [60]', default=60)
self.parser.add_option('--interval', type='int', help='Periodically report intermediate statistics with a specified time interval in seconds. 0 disables intermediate reports. [10]', default=10)
self.parser.add_option('--events', type='int', help='Limit for total number of events.')
self.parser.add_option('--rand-type', type='string', help='Random numbers distribution {uniform,gaussian,special,pareto}.')
self.parser.add_option('--percentile', type='int', help='Percentile to calculate in latency statistics. Available values are 1-100. 0 means to disable percentile calculations.')
self.parser.add_option('--skip-trx', type='string', help='Open or close a transaction in a read-only test. {on/off}')
self.parser.add_option('-O', '--optimization', type='int', help='Optimization level. {0/1} [1]', default=1)
def _do_command(self, obd):
if self.cmds:
return obd.sysbench(self.cmds[0], self.opts)
else:
return self._show_help()
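Editor's note: `_do_command` hands the parsed options straight to `obd.sysbench(name, opts)`. A hedged sketch of driving that entry point programmatically, assuming an initialized `ObdHome` instance named `obd` and a running deployment called `test`; each attribute mirrors one of the flags registered above:

from optparse import Values

# Hypothetical driver; attribute names mirror the SysBenchCommand flags.
opts = Values({
    'component': 'oceanbase-ce', 'test_server': None,
    'user': 'root', 'password': '', 'tenant': 'test', 'database': 'test',
    'obclient_bin': 'obclient', 'sysbench_bin': 'sysbench',
    'script_name': 'oltp_point_select.lua',
    'sysbench_script_dir': '/usr/sysbench/share/sysbench',
    'table_size': 20000, 'tables': 30, 'threads': 16, 'time': 60,
    'interval': 10, 'events': None, 'rand_type': None, 'percentile': None,
    'skip_trx': None, 'optimization': 1,
})
ok = obd.sysbench('test', opts)  # True on success, as in _do_command above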
class TestMajorCommand(MajorCommand):
def __init__(self):
super(TestMajorCommand, self).__init__('test', 'run test for a running deploy')
super(TestMajorCommand, self).__init__('test', 'Run tests on a running deployment.')
self.register_command(MySQLTestCommand())
self.register_command(SysBenchCommand())
class BenchMajorCommand(MajorCommand):
......@@ -511,11 +658,11 @@ class BenchMajorCommand(MajorCommand):
class UpdateCommand(ObdCommand):
def __init__(self):
super(UpdateCommand, self).__init__('update', 'update obd')
super(UpdateCommand, self).__init__('update', 'Update OBD.')
def do_command(self):
if os.getuid() != 0:
ROOT_IO.error('You need to be root to perform this command.')
ROOT_IO.error('To update OBD, you must be a root user.')
return False
return super(UpdateCommand, self).do_command()
......@@ -529,6 +676,7 @@ class MainCommand(MajorCommand):
super(MainCommand, self).__init__('obd', '')
self.register_command(MirrorMajorCommand())
self.register_command(ClusterMajorCommand())
self.register_command(RepositoryMajorCommand())
self.register_command(TestMajorCommand())
self.register_command(UpdateCommand())
self.parser.version = '''OceanBase Deploy: %s
......
......@@ -142,7 +142,9 @@ class ClusterConfig(object):
def get_unconfigured_require_item(self, server):
items = []
config = self.get_server_conf(server)
for key in self._default_conf:
for key in self._temp_conf:
if not self._temp_conf[key].require:
continue
if key in config:
continue
items.append(key)
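Editor's note: switching the loop from `_default_conf` to `_temp_conf` means required parameters that deliberately ship without a default are now reported as unconfigured too. A reduced sketch of the same check with stand-in classes:

class TempItem(object):
    # Stand-in for one parameter template entry.
    def __init__(self, require, default=None):
        self.require = require
        self.default = default

temp_conf = {
    'home_path': TempItem(require=True),                # required, no default
    'mysql_port': TempItem(require=True, default=2881),
    'syslog_level': TempItem(require=False),
}
server_conf = {'mysql_port': 2881}

# Same shape as get_unconfigured_require_item above.
missing = [key for key in temp_conf
           if temp_conf[key].require and key not in server_conf]
print(missing)  # ['home_path']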
......@@ -176,7 +178,7 @@ class ClusterConfig(object):
self._default_conf = {}
self._temp_conf = temp_conf
for key in self._temp_conf:
if self._temp_conf[key].require:
if self._temp_conf[key].require and self._temp_conf[key].default is not None:
self._default_conf[key] = self._temp_conf[key].default
self.set_global_conf(self._global_conf) # 更新全局配置
......@@ -204,6 +206,9 @@ class ClusterConfig(object):
self._cache_server[server] = conf
return self._cache_server[server]
def get_original_server_conf(self, server):
return self._server_conf.get(server)
class DeployStatus(Enum):
......@@ -246,6 +251,7 @@ class DeployConfig(object):
def __init__(self, yaml_path, yaml_loader=yaml):
self._user = None
self.unuse_lib_repository = False
self.auto_create_tenant = False
self.components = {}
self._src_data = None
self.yaml_path = yaml_path
......@@ -263,6 +269,13 @@ class DeployConfig(object):
return self._dump()
return True
def set_auto_create_tenant(self, status):
if self.auto_create_tenant != status:
self.auto_create_tenant = status
self._src_data['auto_create_tenant'] = status
return self._dump()
return True
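Editor's note: `set_auto_create_tenant` copies the established `set_unuse_lib_repository` pattern: change the in-memory flag, mirror it into the raw `_src_data` mapping, and persist only when the value actually changed. A minimal sketch of that idempotent setter, with a plain YAML dump standing in for `_dump()` (assumes PyYAML):

import yaml

class ConfigSketch(object):
    def __init__(self, yaml_path, src_data):
        self.yaml_path = yaml_path
        self._src_data = src_data
        self.auto_create_tenant = src_data.get('auto_create_tenant', False)

    def _dump(self):
        with open(self.yaml_path, 'w') as f:
            yaml.safe_dump(self._src_data, f)
        return True

    def set_auto_create_tenant(self, status):
        if self.auto_create_tenant != status:
            self.auto_create_tenant = status
            self._src_data['auto_create_tenant'] = status
            return self._dump()  # persist only on an actual change
        return True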
def _load(self):
try:
with open(self.yaml_path, 'rb') as f:
......@@ -278,7 +291,9 @@ class DeployConfig(object):
))
elif key == 'unuse_lib_repository':
self.unuse_lib_repository = self._src_data['unuse_lib_repository']
else:
elif key == 'auto_create_tenant':
self.auto_create_tenant = self._src_data['auto_create_tenant']
elif issubclass(type(self._src_data[key]), dict):
self._add_component(key, self._src_data[key])
except:
pass
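Editor's note: with the new `elif issubclass(type(...), dict)` guard, `_load` treats the reserved top-level keys specially and hands only mapping values to `_add_component`, so a stray scalar key no longer reaches the component parser. A compact sketch of that dispatch:

src_data = {
    'user': {'username': 'admin'},        # reserved key
    'auto_create_tenant': True,           # reserved key
    'oceanbase-ce': {'servers': ['z1']},  # component: a mapping
    'comment': 'not a component',         # scalar: now skipped
}
reserved = {'user', 'unuse_lib_repository', 'auto_create_tenant'}
components = {key: value for key, value in src_data.items()
              if key not in reserved and issubclass(type(value), dict)}
print(sorted(components))  # ['oceanbase-ce']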
......
......@@ -299,6 +299,18 @@ class Repository(PackageInfo):
return True
class RepositoryVO(object):
def __init__(self, name, version, release, arch, md5, path, tags=None):
self.name = name
self.version = version
self.release = release
self.arch = arch
self.md5 = md5
self.path = path
self.tags = tags if tags else []  # avoid a shared mutable default
class ComponentRepository(object):
def __init__(self, name, repository_dir, stdio=None):
......@@ -367,6 +379,15 @@ class ComponentRepository(object):
if version:
return self.get_repository_by_version(version, tag)
return None
def get_repositories(self, version=None):
if not version:
version = '*'
repositories = []
path_pattern = os.path.join(self.repository_dir, version, '*')
for path in glob(path_pattern):
repositories.append(Repository(self.name, path, self.stdio))
return repositories
class RepositoryManager(Manager):
......@@ -379,21 +400,52 @@ class RepositoryManager(Manager):
self.repositories = {}
self.component_repositoies = {}
def get_repositoryies(self, name):
repositories = {}
path_partten = os.path.join(self.path, name, '*')
for path in glob(path_partten):
_, version = os.path.split(path)
Repository = Repository(name, path, version, self.stdio)
def _get_repository_vo(self, repository):
return RepositoryVO(
repository.name,
repository.version,
repository.release,
repository.arch,
repository.md5,
repository.repository_dir,
[]
)
def get_repositories_view(self, name=None):
if name:
repositories = self.get_component_repositoy(name).get_repositories()
else:
repositories = []
path_pattern = os.path.join(self.path, '*')
for path in glob(path_pattern):
_, name = os.path.split(path)
repositories += self.get_component_repositoy(name).get_repositories()
repositories_vo = {}
for repository in repositories:
if repository.is_shadow_repository():
repository_ist = self.get_instance_repository_from_shadow(repository)
if repository_ist not in repositories_vo:
repositories_vo[repository_ist] = self._get_repository_vo(repository)
_, tag = os.path.split(repository.repository_dir)
repositories_vo[repository_ist].tags.append(tag)
elif repository not in repositories_vo:
repositories_vo[repository] = self._get_repository_vo(repository)
return list(repositories_vo.values())
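Editor's note: `get_repositories_view` folds shadow repositories (tag directories) onto the instance repository they resolve to, accumulating the tag names rather than listing the same package twice. A toy version of that dict-keyed fold, with tuples standing in for `Repository` objects:

import os

# (directory, instance_target) pairs; a shadow entry points at an instance dir.
entries = [
    ('/repo/oceanbase-ce/3.1.0/<md5>', None),
    ('/repo/oceanbase-ce/3.1.0/oceanbase-ce', '/repo/oceanbase-ce/3.1.0/<md5>'),
]

view = {}
for path, target in entries:
    key = target or path  # shadows collapse onto the instance they point at
    vo = view.setdefault(key, {'path': key, 'tags': []})
    if target:
        vo['tags'].append(os.path.split(path)[1])  # the tag is the directory name

for vo in view.values():
    print(vo['path'], vo['tags'])  # one row, tagged 'oceanbase-ce'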
def get_component_repositoy(self, name):
if name not in self.component_repositoies:
path = os.path.join(self.path, name)
self.component_repositoies[name] = ComponentRepository(name, path, self.stdio)
return self.component_repositoies[name]
def get_repository_by_version(self, name, version, tag=None, instance=True):
if not tag:
tag = name
path = os.path.join(self.path, name, version, tag)
if path not in self.repositories:
if name not in self.component_repositoies:
self.component_repositoies[name] = ComponentRepository(name, os.path.join(self.path, name), self.stdio)
repository = self.component_repositoies[name].get_repository(version, tag)
component_repositoy = self.get_component_repositoy(name)
repository = component_repositoy.get_repository(version, tag)
if repository:
self.repositories[repository.repository_dir] = repository
self.repositories[path] = repository
......@@ -404,10 +456,9 @@ class RepositoryManager(Manager):
def get_repository(self, name, version=None, tag=None, instance=True):
if version:
return self.get_repository_by_version(name, version, tag)
if name not in self.component_repositoies:
path = os.path.join(self.path, name)
self.component_repositoies[name] = ComponentRepository(name, path, self.stdio)
repository = self.component_repositoies[name].get_repository(version, tag)
component_repositoy = self.get_component_repositoy(name)
repository = component_repositoy.get_repository(version, tag)
if repository:
self.repositories[repository.repository_dir] = repository
return self.get_instance_repository_from_shadow(repository) if repository and instance else repository
......
......@@ -445,7 +445,7 @@ class IO(object):
self.error(msg)
else:
msg and self.error(msg)
self._log(MsgLevel.VERBOSE, '\n'.join(exception_msg))
self._log(MsgLevel.ERROR, '\n'.join(exception_msg))
else:
def exception(self, msg, *args, **kwargs):
ei = sys.exc_info()
......@@ -462,5 +462,5 @@ class IO(object):
self.error(msg)
else:
msg and self.error(msg)
self._log(MsgLevel.VERBOSE, ''.join(lines))
self._log(MsgLevel.ERROR, ''.join(lines))
......@@ -635,6 +635,90 @@ class ObdHome(object):
self._call_stdio('stop_loading', 'succeed')
return status
def search_components_from_mirrors_and_install(self, deploy_config):
# Check the best suitable mirror for the components
self._call_stdio('verbose', 'Search best suitable repository')
pkgs, repositories, errors = self.search_components_from_mirrors(deploy_config, only_info=False)
if errors:
self._call_stdio('error', '\n'.join(errors))
return repositories, None
# Get the installation plugins. Install locally
install_plugins = self.get_install_plugin_and_install(repositories, pkgs)
return repositories, install_plugins
def genconfig(self, name, opt=Values()):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
if deploy:
deploy_info = deploy.deploy_info
if deploy_info.status not in [DeployStatus.STATUS_CONFIGURED, DeployStatus.STATUS_DESTROYED]:
self._call_stdio('error', 'Deploy "%s" is %s. You could not reload an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value))
return False
# self._call_stdio('error', 'Deploy name `%s` have been occupied.' % name)
# return False
config_path = getattr(opt, 'config', '')
if not config_path:
self._call_stdio('error', "Configuration file is need.\nPlease use -c to set configuration file")
return False
self._call_stdio('verbose', 'Create deploy by configuration path')
deploy = self.deploy_manager.create_deploy_config(name, config_path)
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
if not deploy_config:
self._call_stdio('error', 'Deploy configuration is empty.\nIt may be caused by a failure to resolve the configuration.\nPlease check your configuration file.')
return False
# Check the best suitable mirror for the components and installation plugins. Install locally
repositories, install_plugins = self.search_components_from_mirrors_and_install(deploy_config)
if not install_plugins or not repositories:
return False
for repository in repositories:
real_servers = set()
cluster_config = deploy_config.components[repository.name]
for server in cluster_config.servers:
if server.ip in real_servers:
self._call_stdio('error', 'Deploying multiple %s instances on the same server is not supported.' % repository.name)
return False
real_servers.add(server.ip)
self._call_stdio('start_loading', 'Cluster param config check')
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
# Parameter check
errors = self.deploy_param_check(repositories, deploy_config)
if errors:
self._call_stdio('stop_loading', 'fail')
self._call_stdio('error', '\n'.join(errors))
return False
self._call_stdio('stop_loading', 'succeed')
# Get the client
ssh_clients = self.get_clients(deploy_config, repositories)
gen_config_plugins = self.search_py_script_plugin(repositories, 'generate_config')
component_num = len(repositories)
for repository in repositories:
cluster_config = deploy_config.components[repository.name]
self._call_stdio('verbose', 'Call %s for %s' % (gen_config_plugins[repository], repository))
ret = gen_config_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], opt, self.stdio, deploy_config)
if ret:
component_num -= 1
if component_num == 0 and deploy_config.dump():
return True
self.deploy_manager.remove_deploy_config(name)
return False
def deploy_cluster(self, name, opt=Values()):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
......@@ -643,7 +727,7 @@ class ObdHome(object):
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Check the deploy status')
if deploy_info.status not in [DeployStatus.STATUS_CONFIGURED, DeployStatus.STATUS_DESTROYED]:
self._call_stdio('error', 'Deploy "%s" is %s. You could not realod an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value))
self._call_stdio('error', 'Deploy "%s" is %s. You could not reload an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value))
return False
if deploy_info.config_status != DeployConfigStatus.UNCHNAGE:
self._call_stdio('verbose', 'Apply temp deploy configuration')
......@@ -653,6 +737,7 @@ class ObdHome(object):
config_path = getattr(opt, 'config', '')
unuse_lib_repo = getattr(opt, 'unuselibrepo', False)
auto_create_tenant = getattr(opt, 'auto_create_tenant', False)
self._call_stdio('verbose', 'Check whether the config path is set')
if config_path:
self._call_stdio('verbose', 'Create deploy by configuration path')
......@@ -679,15 +764,8 @@ class ObdHome(object):
self._call_stdio('error', '%s\'s servers list is empty.' % component_name)
return False
# Check the best suitable mirror for the components
self._call_stdio('verbose', 'Search best suitable repository')
pkgs, repositories, errors = self.search_components_from_mirrors(deploy_config, only_info=False)
if errors:
self._call_stdio('error', '\n'.join(errors))
return False
# Get the installation plugins. Install locally
install_plugins = self.get_install_plugin_and_install(repositories, pkgs)
# Check the best suitable mirror for the components and installation plugins. Install locally
repositories, install_plugins = self.search_components_from_mirrors_and_install(deploy_config)
if not install_plugins:
return False
......@@ -700,14 +778,17 @@ class ObdHome(object):
)
errors = []
self._call_stdio('verbose', 'Repository integrity check')
self._call_stdio('start_loading', 'Repository integrity check')
for repository in repositories:
if not repository.file_check(install_plugins[repository]):
errors.append('%s install failed' % repository.name)
if errors:
self._call_stdio('stop_loading', 'fail')
self._call_stdio('error', '\n'.join(errors))
return False
self._call_stdio('stop_loading', 'succeed')
self._call_stdio('start_loading', 'Parameter check')
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
......@@ -715,11 +796,15 @@ class ObdHome(object):
self._call_stdio('verbose', 'Cluster param configuration check')
errors = self.deploy_param_check(repositories, deploy_config)
if errors:
self._call_stdio('stop_loading', 'fail')
self._call_stdio('error', '\n'.join(errors))
return False
self._call_stdio('stop_loading', 'succeed')
if unuse_lib_repo and not deploy_config.unuse_lib_repository:
deploy_config.set_unuse_lib_repository(True)
if auto_create_tenant and not deploy_config.auto_create_tenant:
deploy_config.set_auto_create_tenant(True)
# Get the client
ssh_clients = self.get_clients(deploy_config, repositories)
......@@ -765,12 +850,11 @@ class ObdHome(object):
component_num = len(repositories)
for repository in repositories:
cluster_config = deploy_config.components[repository.name]
init_plugin = self.plugin_manager.get_best_py_script_plugin('init', repository.name, repository.version)
if repository in init_plugins:
init_plugin = init_plugins[repository]
self._call_stdio('verbose', 'Exec %s init plugin' % repository)
self._call_stdio('verbose', 'Apply %s for %s-%s' % (init_plugin, repository.name, repository.version))
if init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opt, self.stdio):
if init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opt, self.stdio, self.home_path, repository.repository_dir):
deploy.use_model(repository.name, repository, False)
component_num -= 1
else:
......@@ -806,12 +890,29 @@ class ObdHome(object):
self._call_stdio('verbose', 'Get deploy config')
deploy_config = deploy.deploy_config
update_deploy_status = True
components = getattr(options, 'components', '')
if components:
deploy_info_components = {}
deploy_config_components = {}
for component in components.split(','):
deploy_info_components[component] = deploy_info.components[component]
deploy_config_components[component] = deploy_config.components[component]
if len(deploy_info.components) != len(deploy_info_components):
update_deploy_status = False
deploy_info.components = deploy_info_components
deploy_config.components = deploy_config_components
servers = getattr(options, 'servers', '')
server_list = servers.split(',') if servers else []
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
repositories = self.load_local_repositories(deploy_config, False)
start_check_plugins = self.search_py_script_plugin(repositories, 'start_check', False)
create_tenant_plugins = self.search_py_script_plugin(repositories, 'create_tenant', False) if deploy_config.auto_create_tenant else {}
start_plugins = self.search_py_script_plugin(repositories, 'start')
connect_plugins = self.search_py_script_plugin(repositories, 'connect')
bootstrap_plugins = self.search_py_script_plugin(repositories, 'bootstrap')
......@@ -859,6 +960,11 @@ class ObdHome(object):
component_num = len(repositories)
for repository in repositories:
cluster_config = deploy_config.components[repository.name]
cluster_servers = cluster_config.servers
if servers:
cluster_config.servers = [srv for srv in cluster_servers if srv.ip in server_list or srv.name in server_list]
start_all = cluster_servers == cluster_config.servers
update_deploy_status = update_deploy_status and start_all
self._call_stdio('verbose', 'Call %s for %s' % (start_plugins[repository], repository))
ret = start_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, cmd, options, self.stdio, self.home_path, repository.repository_dir)
......@@ -877,23 +983,130 @@ class ObdHome(object):
self._call_stdio('error', 'Failed to connect %s' % repository.name)
break
if need_bootstrap:
if need_bootstrap and start_all:
self._call_stdio('print', 'Initialize cluster')
self._call_stdio('verbose', 'Call %s for %s' % (bootstrap_plugins[repository], repository))
if not bootstrap_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, cmd, options, self.stdio, cursor):
self._call_stdio('print', 'Cluster init failed')
break
if repository in create_tenant_plugins:
create_tenant_options = Values({"variables": "ob_tcp_invited_nodes='%'"})
self._call_stdio('verbose', 'Call %s for %s' % (create_tenant_plugins[repository], repository))
create_tenant_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], create_tenant_options, self.stdio, cursor)
if not start_all:
component_num -= 1
continue
self._call_stdio('verbose', 'Call %s for %s' % (display_plugins[repository], repository))
if display_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, cmd, options, self.stdio, cursor):
component_num -= 1
if component_num == 0:
self._call_stdio('verbose', 'Set %s deploy status to running' % name)
if deploy.update_deploy_status(DeployStatus.STATUS_RUNNING):
self._call_stdio('print', '%s running' % name)
if update_deploy_status:
self._call_stdio('verbose', 'Set %s deploy status to running' % name)
if deploy.update_deploy_status(DeployStatus.STATUS_RUNNING):
self._call_stdio('print', '%s running' % name)
return True
else:
self._call_stdio('print', "succeed")
return True
return False
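Editor's note: the new `--servers`/`--components` filtering is careful about bookkeeping: `update_deploy_status` stays true only when every component and every server took part, so a partial start never flips the whole deploy to `running`, and bootstrap is skipped unless all servers of a component started. A condensed sketch of that logic:

def filter_servers(cluster_servers, server_list):
    # Mirror of the filtering above: keep servers matched by ip or name.
    if not server_list:
        return cluster_servers, True
    picked = [srv for srv in cluster_servers
              if srv['ip'] in server_list or srv['name'] in server_list]
    return picked, picked == cluster_servers

servers = [{'name': 'z1', 'ip': '172.19.33.2'}, {'name': 'z2', 'ip': '172.19.33.3'}]
picked, start_all = filter_servers(servers, ['z1'])
update_deploy_status = start_all  # partial start: deploy status is left unchanged
print(len(picked), update_deploy_status)  # 1 False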
def create_tenant(self, name, options=Values()):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Check the deploy status')
if deploy_info.status != DeployStatus.STATUS_RUNNING:
self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
return False
self._call_stdio('verbose', 'Get deploy config')
deploy_config = deploy.deploy_config
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
repositories = self.load_local_repositories(deploy_config)
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
connect_plugins = self.search_py_script_plugin(repositories, 'connect')
create_tenant_plugins = self.search_py_script_plugin(repositories, 'create_tenant', False)
self._call_stdio('stop_loading', 'succeed')
# Get the client
ssh_clients = self.get_clients(deploy_config, repositories)
for repository in create_tenant_plugins:
cluster_config = deploy_config.components[repository.name]
db = None
cursor = None
self._call_stdio('verbose', 'Call %s for %s' % (connect_plugins[repository], repository))
ret = connect_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio)
if ret:
db = ret.get_return('connect')
cursor = ret.get_return('cursor')
if not db:
self._call_stdio('error', 'Failed to connect %s' % repository.name)
return False
self._call_stdio('verbose', 'Call %s for %s' % (create_tenant_plugins[repository], repository))
if not create_tenant_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, cursor):
return False
return True
def drop_tenant(self, name, options=Values()):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Check the deploy status')
if deploy_info.status != DeployStatus.STATUS_RUNNING:
self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
return False
self._call_stdio('verbose', 'Get deploy config')
deploy_config = deploy.deploy_config
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
repositories = self.load_local_repositories(deploy_config)
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
connect_plugins = self.search_py_script_plugin(repositories, 'connect')
drop_tenant_plugins = self.search_py_script_plugin(repositories, 'drop_tenant', False)
self._call_stdio('stop_loading', 'succeed')
# Get the client
ssh_clients = self.get_clients(deploy_config, repositories)
for repository in drop_tenant_plugins:
cluster_config = deploy_config.components[repository.name]
db = None
cursor = None
self._call_stdio('verbose', 'Call %s for %s' % (connect_plugins[repository], repository))
ret = connect_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio)
if ret:
db = ret.get_return('connect')
cursor = ret.get_return('cursor')
if not db:
self._call_stdio('error', 'Failed to connect %s' % repository.name)
return False
self._call_stdio('verbose', 'Call %s for %s' % (drop_tenant_plugins[repository], repository))
if not drop_tenant_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], options, self.stdio, cursor):
return False
return True
def reload_cluster(self, name):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
......@@ -904,12 +1117,12 @@ class ObdHome(object):
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Deploy status judge')
if deploy_info.status != DeployStatus.STATUS_RUNNING:
self._call_stdio('error', 'Deploy "%s" is %s. You could not realod an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value))
self._call_stdio('error', 'Deploy "%s" is %s. You could not reload an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value))
return False
if deploy_info.config_status != DeployConfigStatus.NEED_RELOAD:
self._call_stdio('error', 'Deploy config %s' % deploy_info.config_status.value)
return False
# if deploy_info.config_status != DeployConfigStatus.NEED_RELOAD:
# self._call_stdio('error', 'Deploy config %s' % deploy_info.config_status.value)
# return False
self._call_stdio('verbose', 'Get deploy config')
deploy_config = deploy.deploy_config
......@@ -1032,7 +1245,7 @@ class ObdHome(object):
display_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, cursor)
return True
def stop_cluster(self, name):
def stop_cluster(self, name, options=Values()):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
if not deploy:
......@@ -1041,12 +1254,28 @@ class ObdHome(object):
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Check the deploy status')
if deploy_info.status != DeployStatus.STATUS_RUNNING:
if deploy_info.status not in [DeployStatus.STATUS_DEPLOYED, DeployStatus.STATUS_STOPPED, DeployStatus.STATUS_RUNNING]:
self._call_stdio('error', 'Deploy "%s" is %s. You could not stop an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value))
return False
self._call_stdio('verbose', 'Get deploy config')
deploy_config = deploy.deploy_config
update_deploy_status = True
components = getattr(options, 'components', '')
if components:
deploy_info_components = {}
deploy_config_components = {}
for component in components.split(','):
deploy_info_components[component] = deploy_info.components[component]
deploy_config_components[component] = deploy_config.components[component]
if len(deploy_info.components) != len(deploy_info_components):
update_deploy_status = False
deploy_info.components = deploy_info_components
deploy_config.components = deploy_config_components
servers = getattr(options, 'servers', '')
server_list = servers.split(',') if servers else []
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
repositories = self.load_local_repositories(deploy_config)
......@@ -1064,17 +1293,28 @@ class ObdHome(object):
component_num = len(repositories)
for repository in repositories:
cluster_config = deploy_config.components[repository.name]
cluster_servers = cluster_config.servers
if servers:
cluster_config.servers = [srv for srv in cluster_servers if srv.ip in server_list or srv.name in server_list]
start_all = cluster_servers == cluster_config.servers
update_deploy_status = update_deploy_status and start_all
self._call_stdio('verbose', 'Call %s for %s' % (stop_plugins[repository], repository))
if stop_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio):
component_num -= 1
self._call_stdio('verbose', 'Set %s deploy status to stopped' % name)
if component_num == 0 and deploy.update_deploy_status(DeployStatus.STATUS_STOPPED):
self._call_stdio('print', '%s stopped' % name)
return True
if component_num == 0:
if components or servers:
self._call_stdio('print', "succeed")
return True
else:
self._call_stdio('verbose', 'Set %s deploy status to stopped' % name)
if deploy.update_deploy_status(DeployStatus.STATUS_STOPPED):
self._call_stdio('print', '%s stopped' % name)
return True
return False
def restart_cluster(self, name):
def restart_cluster(self, name, opt=Values()):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
if not deploy:
......@@ -1086,9 +1326,9 @@ class ObdHome(object):
if deploy_info.status == DeployStatus.STATUS_RUNNING:
if deploy_info.config_status != DeployConfigStatus.UNCHNAGE:
self.reload_cluster(name)
if not self.stop_cluster(name):
if not self.stop_cluster(name, opt):
return False
return self.start_cluster(name)
return self.start_cluster(name, opt)
def redeploy_cluster(self, name, opt=Values()):
return self.destroy_cluster(name, opt) and self.deploy_cluster(name) and self.start_cluster(name)
......@@ -1479,6 +1719,7 @@ class ObdHome(object):
result = []
for test in env['test_set']:
self._call_stdio('verbose', 'Call %s for %s' % (mysqltest_run_test_plugin, repository))
ret = mysqltest_run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, test, env)
if not ret:
break
......@@ -1534,6 +1775,98 @@ class ObdHome(object):
return True
return False
def sysbench(self, name, opts):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
deploy_info = deploy.deploy_info
self._call_stdio('verbose', 'Check deploy status')
if deploy_info.status != DeployStatus.STATUS_RUNNING:
self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
return False
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
if opts.component is None:
for component_name in ['obproxy', 'oceanbase', 'oceanbase-ce']:
if component_name in deploy_config.components:
opts.component = component_name
break
if opts.component not in deploy_config.components:
self._call_stdio('error', 'Cannot find the component for sysbench. Use `--component` to select a component.')
return False
cluster_config = deploy_config.components[opts.component]
if not cluster_config.servers:
self._call_stdio('error', '%s server list is empty' % opts.component)
return False
if opts.test_server is None:
opts.test_server = cluster_config.servers[0]
else:
for server in cluster_config.servers:
if server.name == opts.test_server:
opts.test_server = server
break
else:
self._call_stdio('error', '%s is not a server in %s' % (opts.test_server, opts.component))
return False
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]})
repository = repositories[0]
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
self._call_stdio('stop_loading', 'succeed')
# Get the client
ssh_clients = self.get_clients(deploy_config, repositories)
# Check the status for the deployed cluster
component_status = {}
cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status)
if cluster_status is False or cluster_status == 0:
if self.stdio:
self._call_stdio('error', 'Some of the servers in the cluster have been stopped')
for repository in component_status:
cluster_status = component_status[repository]
for server in cluster_status:
if cluster_status[server] == 0:
self._call_stdio('print', '%s %s is stopped' % (server, repository.name))
return False
connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository]
ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, target_server=opts.test_server, sys_root=False)
if not ret or not ret.get_return('connect'):
self._call_stdio('error', 'Failed to connect to the server')
return False
db = ret.get_return('connect')
cursor = ret.get_return('cursor')
odp_db = None
odp_cursor = None
if repository.name == 'obproxy':
ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, target_server=opts.test_server, sys_root=False)
if not ret or not ret.get_return('connect'):
self._call_stdio('error', 'Failed to connect to the server')
return False
odp_db = ret.get_return('connect')
odp_cursor = ret.get_return('cursor')
run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'sysbench', repository.version)
setattr(opts, 'host', opts.test_server.ip)
setattr(opts, 'port', db.port)
self._call_stdio('verbose', 'Call %s for %s' % (run_test_plugin, repository))
if run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, db, cursor, odp_db, odp_cursor):
return True
return False
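A hedged invocation sketch (the option names mirror the attributes this method reads; the deploy name and values are illustrative):

from optparse import Values
opts = Values({'component': 'oceanbase-ce', 'test_server': 'z1'})  # assumed minimal options
obd.sysbench('demo', opts)  # resolves the target server, connects, then dispatches the sysbench run_test plugin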
def update_obd(self, version):
component_name = 'ob-deploy'
plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, component_name, '1.0.0')
......
## Only need to configure when remote login is required
# user:
# username: your username
# password: your password, if needed
# key_file: your ssh key file path, if needed
# port: your ssh port, default 22
# timeout: ssh connection timeout (seconds), default 30
oceanbase-ce:
servers:
- name: z1
# Do not use a hostname; only IP addresses are supported.
ip: 172.19.33.2
- name: z2
ip: 172.19.33.3
- name: z3
ip: 172.19.33.4
global:
# Set devname to the name of the network adapter whose IP is used in the servers setting.
# If servers is set to "127.0.0.1", set devname to "lo".
# If the current IP is 192.168.1.10 and that IP's network adapter is "eth0", use "eth0".
devname: eth0
# If the server has less than 50G of memory, start from the settings in "mini-single-example.yaml" and adjust as needed.
memory_limit: 64G
datafile_disk_percentage: 20
syslog_level: INFO
enable_syslog_wf: false
enable_syslog_recycle: true
max_syslog_file_count: 4
cluster_id: 1
# root_password: # root user password
# This example runs multiple observer processes on a single node, so each process uses different ports and paths.
# When the cluster is deployed across multiple nodes, the port and path settings can be the same.
z1:
mysql_port: 2881
rpc_port: 2882
home_path: /root/observer
zone: zone1
z2:
mysql_port: 2881
rpc_port: 2882
home_path: /root/observer
zone: zone2
z3:
mysql_port: 2881
rpc_port: 2882
home_path: /root/observer
zone: zone3
## Only need to configure when remote login is required
# user:
# username: your username
# password: your password, if needed
# key_file: your ssh key file path, if needed
# port: your ssh port, default 22
# timeout: ssh connection timeout (seconds), default 30
oceanbase-ce:
servers:
- name: z1
# Do not use a hostname; only IP addresses are supported.
ip: 192.168.1.2
- name: z2
ip: 192.168.1.3
- name: z3
ip: 192.168.1.4
global:
# The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
home_path: /root/observer
# The directory for data storage. The default value is $home_path/store.
# data_dir: /data
# The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
# redo_dir: /redo
# External port for OceanBase Database. The default value is 2881.
# mysql_port: 2881
# Internal port for OceanBase Database. The default value is 2882.
# rpc_port: 2882
# Defines the zone for an observer. The default value is zone1.
# zone: zone1
# The maximum running memory for an observer. When ignored, autodeploy calculates this value based on the current server available resource.
# memory_limit: 58G
# The percentage of the maximum available memory to the total memory. This value takes effect only when memory_limit is 0. The default value is 80.
# memory_limit_percentage: 80
# The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. Autodeploy calculates this value based on the current server available resource.
# system_memory: 22G
# The size of a data file. When ignored, autodeploy calculates this value based on the current server available resource.
# datafile_size: 200G
# The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90.
# datafile_disk_percentage: 90
# System log level. The default value is INFO.
# syslog_level: INFO
# Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false.
# enable_syslog_wf: false
# Whether to enable automatic system log recycling. The default value is false; the default value for autodeploy mode is true.
# enable_syslog_recycle: true
# The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4.
# max_syslog_file_count: 4
# Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy.
# appname: obcluster
# Password for root. The default value is empty.
# root_password:
# Password for proxyro. proxyro_password must be the same as observer_sys_password. The default value is empty.
# proxyro_password:
z1:
zone: zone1
z2:
zone: zone2
z3:
zone: zone3
obproxy:
servers:
- 192.168.1.5
global:
# The working directory for obproxy. Obproxy is started under this directory. This is a required field.
home_path: /root/obproxy
# External port. The default value is 2883.
# listen_port: 2883
# The Prometheus port. The default value is 2884.
# prometheus_listen_port: 2884
# rs_list is the root server list for observers. The default root server is the first server in the zone.
# The format for rs_list is observer_ip:observer_mysql_port;observer_ip:observer_mysql_port.
# Ignore this value in autodeploy mode.
# rs_list: 127.0.0.1:2881
# Cluster name for the proxied OceanBase Database. The default value is obcluster. This value must be the same as the appname of OceanBase Database.
# cluster_name: obcluster
# Password for obproxy system tenant. The default value is empty.
# obproxy_sys_password:
# Password for proxyro. observer_sys_password must be the same as the proxyro_password of OceanBase Database. The default value is empty.
# observer_sys_password:
\ No newline at end of file
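The template above is meant for autodeploy mode; assuming the v1.1 autodeploy subcommand, it would be consumed with something like `obd cluster autodeploy demo -c distributed-with-obproxy-example.yaml` (the deploy name and file name here are illustrative).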
## Only need to configure when remote login is required
# user:
# username: your username
# password: your password, if needed
# key_file: your ssh key file path, if needed
# port: your ssh port, default 22
# timeout: ssh connection timeout (seconds), default 30
oceanbase-ce:
servers:
# Do not use a hostname; only IP addresses are supported.
- 192.168.1.3
global:
# The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
home_path: /root/observer
# The directory for data storage. The default value is $home_path/store.
# data_dir: /data
# The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
# redo_dir: /redo
# External port for OceanBase Database. The default value is 2881.
# mysql_port: 2881
# Internal port for OceanBase Database. The default value is 2882.
# rpc_port: 2882
# Defines the zone for an observer. The default value is zone1.
# zone: zone1
# The maximum running memory for an observer. When ignored, autodeploy calculates this value based on the current server available resource.
# memory_limit: 58G
# The percentage of the maximum available memory to the total memory. This value takes effect only when memory_limit is 0. The default value is 80.
# memory_limit_percentage: 80
# The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. Autodeploy calculates this value based on the current server available resource.
# system_memory: 22G
# The size of a data file. When ignored, autodeploy calculates this value based on the current server available resource.
# datafile_size: 200G
# The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90.
# datafile_disk_percentage: 90
# System log level. The default value is INFO.
# syslog_level: INFO
# Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false.
# enable_syslog_wf: false
# Whether to enable automatic system log recycling. The default value is false; the default value for autodeploy mode is true.
# enable_syslog_recycle: true
# The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4.
# max_syslog_file_count: 4
# Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy.
# appname: obcluster
# Password for root. The default value is empty.
# root_password:
# Password for proxyro. proxyro_password must be the same as observer_sys_password. The default value is empty.
# proxyro_password:
\ No newline at end of file
## Only need to configure when remote login is required
# user:
# username: your username
# password: your password, if needed
# key_file: your ssh key file path, if needed
# port: your ssh port, default 22
# timeout: ssh connection timeout (seconds), default 30
oceanbase-ce:
servers:
# Do not use a hostname; only IP addresses are supported.
- 192.168.1.3
global:
# The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
home_path: /root/observer
# The directory for data storage. The default value is $home_path/store.
# data_dir: /data
# The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
# redo_dir: /redo
# External port for OceanBase Database. The default value is 2881.
# mysql_port: 2881
# Internal port for OceanBase Database. The default value is 2882.
# rpc_port: 2882
# Defines the zone for an observer. The default value is zone1.
# zone: zone1
# The maximum running memory for an observer. When ignored, autodeploy calculates this value based on the current server available resource.
# memory_limit: 58G
# The percentage of the maximum available memory to the total memory. This value takes effect only when memory_limit is 0. The default value is 80.
# memory_limit_percentage: 80
# The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. Autodeploy calculates this value based on the current server available resource.
# system_memory: 22G
# The size of a data file. When ignored, autodeploy calculates this value based on the current server available resource.
# datafile_size: 200G
# The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90.
# datafile_disk_percentage: 90
# System log level. The default value is INFO.
# syslog_level: INFO
# Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false.
# enable_syslog_wf: false
# Whether to enable automatic system log recycling. The default value is false; the default value for autodeploy mode is true.
# enable_syslog_recycle: true
# The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4.
# max_syslog_file_count: 4
# Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy.
# appname: obcluster
# Password for root. The default value is empty.
# root_password:
# Password for proxyro. proxyro_password must be the same as observer_sys_password. The default value is empty.
# proxyro_password:
obproxy:
servers:
- 192.168.1.2
global:
# The working directory for obproxy. Obproxy is started under this directory. This is a required field.
home_path: /root/obproxy
# External port. The default value is 2883.
# listen_port: 2883
# The Prometheus port. The default value is 2884.
# prometheus_listen_port: 2884
# rs_list is the root server list for observers. The default root server is the first server in the zone.
# The format for rs_list is observer_ip:observer_mysql_port;observer_ip:observer_mysql_port.
# Ignore this value in autodeploy mode.
# rs_list: 127.0.0.1:2881
# Cluster name for the proxied OceanBase Database. The default value is obcluster. This value must be the same as the appname of OceanBase Database.
# cluster_name: obcluster
# Password for obproxy system tenant. The default value is empty.
# obproxy_sys_password:
# Password for proxyro. observer_sys_password must be the same as the proxyro_password of OceanBase Database. The default value is empty.
# observer_sys_password:
\ No newline at end of file
......@@ -32,7 +32,6 @@ spm=['spm.spm_expr','spm.spm_with_acs_new_plan_not_ok']
merge_into=['merge_into.merge_insert','merge_into.merge_into_normal', 'merge_into.merge_subquery', 'merge_into.merge_update']
# TODO bin.lb:
# Temporary failure, remove this after updatable view commit.
updatable_view = ['view.is_views', 'create_frommysql']
......
--disable_query_log
set @@session.explicit_defaults_for_timestamp=off;
--enable_query_log
# owner: jim.wjh
# owner group: SQL3
# description: foobar
--echo case 1: commit
connect (conn1,$OBMYSQL_MS0,$OBMYSQL_USR,$OBMYSQL_PWD,test,$OBMYSQL_PORT);
connection conn1;
......
......@@ -27,13 +27,14 @@ def bootstrap(plugin_context, cursor, *args, **kwargs):
for server in cluster_config.servers:
server_config = cluster_config.get_server_conf(server)
for key in ['observer_sys_password', 'obproxy_sys_password']:
if key in server_config and server_config[key]:
if server_config.get(key):
sql = 'alter proxyconfig set %s = %%s' % key
value = None
try:
sql = 'alter proxyconfig set %s = %%s' % key
value = str(server_config[key])
stdio.verbose('execute sql: %s' % (sql % value))
cursor[server].execute(sql, [value])
except:
stdio.exception('execute sql exception')
stdio.exception('execute sql exception: %s' % (sql % (value)))
stdio.warn('failed to set %s for obproxy(%s)' % (key, server))
plugin_context.return_true()
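With observer_sys_password or obproxy_sys_password present in a server's configuration, the loop above renders, for example, alter proxyconfig set observer_sys_password = %s and binds the password value as a parameter; on failure it now logs the rendered statement and warns which obproxy server rejected it.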
......@@ -20,17 +20,27 @@
from __future__ import absolute_import, division, print_function
def init(plugin_context, *args, **kwargs):
def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
cluster_config = plugin_context.cluster_config
clients = plugin_context.clients
stdio = plugin_context.stdio
global_ret = True
stdio.start_loading('Initializes cluster work home')
for server in cluster_config.servers:
server_config = cluster_config.get_server_conf(server)
client = clients[server]
home_path = server_config['home_path']
stdio.print('%s init cluster work home', server)
if not client.execute_command('mkdir -p %s/run' % (home_path)):
remote_home_path = client.execute_command('echo $HOME/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
stdio.verbose('%s init cluster work home', server)
if not (client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib}'" % (home_path)) \
and client.execute_command("if [ -d %s/bin ]; then ln -s %s/bin/* %s/bin; fi" % (remote_repository_dir, remote_repository_dir, home_path)) \
and client.execute_command("if [ -d %s/lib ]; then ln -s %s/lib/* %s/lib; fi" % (remote_repository_dir, remote_repository_dir, home_path))):
global_ret = False
stdio.print('fail to init %s home path', server)
global_ret and plugin_context.return_true()
stdio.verbose('fail to init %s home path', server)
if global_ret:
stdio.stop_loading('succeed')
plugin_context.return_true()
else:
stdio.stop_loading('fail')
\ No newline at end of file
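After a successful init, each remote home_path is expected to look roughly like this (illustrative layout; the bin and lib entries are symlinks into the synchronized ~/.obd repository):

home_path/
    run/    # runtime files such as pid files
    bin/    # -> <remote_repository_dir>/bin/*
    lib/    # -> <remote_repository_dir>/lib/*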
......@@ -21,7 +21,7 @@
max_value: 65535
need_restart: true
description_en: obproxy prometheus listen port
description_local: SQL服务协议端口号
description_local: 提供prometheus服务端口号
- name: appname
require: false
type: STRING
......
......@@ -49,6 +49,7 @@ def reload(plugin_context, cursor, new_cluster_config, *args, **kwargs):
stdio.verbose('apply new configuration')
success_conf = {}
sql = ''
value = None
for key in global_change_conf:
success_conf[key] = []
for server in servers:
......@@ -56,13 +57,13 @@ def reload(plugin_context, cursor, new_cluster_config, *args, **kwargs):
continue
try:
sql = 'alter proxyconfig set %s = %%s' % key
value = change_conf[server][key]
value = change_conf[server][key] if change_conf[server].get(key) is not None else ''
stdio.verbose('execute sql: %s' % (sql % value))
cursor[server].execute(sql, [value])
success_conf[key].append(server)
except:
global_ret = False
stdio.exception('execute sql exception: %s' % sql)
stdio.exception('execute sql exception: %s' % (sql % value))
for key in success_conf:
if global_change_conf[key] == servers_num == len(success_conf):
cluster_config.update_global_conf(key, value, False)
......
......@@ -49,14 +49,13 @@ def confirm_port(client, pid, port):
def confirm_command(client, pid, command):
command = command.replace(' ', '').strip()
if client.execute_command('cmd=`cat /proc/%s/cmdline`; if [ "$cmd" != "%s" ]; then exit 1; fi' % (pid, command)):
if client.execute_command('bash -c \'cmd=`cat /proc/%s/cmdline`; if [ "$cmd" != "%s" ]; then exit 1; fi\'' % (pid, command)):
return True
return False
def confirm_home_path(client, pid, home_path):
if client.execute_command('path=`ls -l /proc/%s | grep cwd | awk -F\'-> \' \'{print $2}\'`; if [ "$path" != "%s" ]; then exit 1; fi' %
(pid, home_path)):
if client.execute_command('bash -c \'path=$(ls -l /proc/%s | grep cwd | awk -F"-> " "{print \\$2}"); if [ "$path" != "%s" ]; then exit 1; fi\'' % (pid, home_path)):
return True
return False
......@@ -77,6 +76,7 @@ def is_started(client, remote_bin_path, port, home_path, command):
return False
return confirm_home_path(client, pid, home_path) and confirm_command(client, pid, command)
def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
global stdio
cluster_config = plugin_context.cluster_config
......@@ -85,9 +85,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
clusters_cmd = {}
real_cmd = {}
pid_path = {}
remote_bin_path = {}
need_bootstrap = True
bin_path = os.path.join(repository_dir, 'bin/obproxy')
error = False
for server in cluster_config.servers:
......@@ -99,17 +97,20 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
if error:
return plugin_context.return_false()
servers_remote_home_path = {}
stdio.start_loading('Start obproxy')
for server in cluster_config.servers:
client = clients[server]
remote_home_path = client.execute_command('echo $HOME/.obd').stdout.strip()
servers_remote_home_path[server] = remote_home_path
remote_bin_path[server] = bin_path.replace(local_home_path, remote_home_path)
server_config = cluster_config.get_server_conf(server)
pid_path[server] = "%s/run/obproxy-%s-%s.pid" % (server_config['home_path'], server.ip, server_config["listen_port"])
home_path = server_config['home_path']
if client.execute_command("bash -c 'if [ -f %s/bin/obproxy ]; then exit 1; else exit 0; fi;'" % home_path):
remote_home_path = client.execute_command('echo $HOME/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -s %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -s %s/lib/* %s/lib" % (remote_repository_dir, home_path))
pid_path[server] = "%s/run/obproxy-%s-%s.pid" % (home_path, server.ip, server_config["listen_port"])
not_opt_str = [
'listen_port',
'prometheus_listen_port',
......@@ -128,8 +129,8 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
if key in server_config:
value = get_value(key)
cmd.append('--%s %s' % (key, value))
real_cmd[server] = '%s %s' % (remote_bin_path[server], ' '.join(cmd))
clusters_cmd[server] = 'cd %s; %s' % (server_config['home_path'], real_cmd[server])
real_cmd[server] = '%s/bin/obproxy %s' % (home_path, ' '.join(cmd))
clusters_cmd[server] = 'cd %s; %s' % (home_path, real_cmd[server])
for server in clusters_cmd:
client = clients[server]
......@@ -149,8 +150,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
return plugin_context.return_false()
stdio.verbose('starting %s obproxy', server)
remote_repository_path = repository_dir.replace(local_home_path, remote_home_path)
client.add_env('LD_LIBRARY_PATH', '%s/lib:' % remote_repository_path, True)
client.add_env('LD_LIBRARY_PATH', '%s/lib:' % server_config['home_path'], True)
ret = client.execute_command(clusters_cmd[server])
client.add_env('LD_LIBRARY_PATH', '', True)
if not ret:
......
......@@ -72,8 +72,8 @@ def start_check(plugin_context, strict_check=False, *args, **kwargs):
stdio.verbose('%s port check' % server)
for key in ['listen_port', 'prometheus_listen_port']:
port = int(server_config[key])
alert_f = alert if key == 'prometheus_listen_port' else critical
if port in ports:
alert_f('Configuration conflict %s: %s port is used for %s\'s %s' % (server, port, ports[port]['server'], ports[port]['key']))
continue
ports[port] = {
......@@ -81,7 +81,7 @@ def start_check(plugin_context, strict_check=False, *args, **kwargs):
'key': key
}
if get_port_socket_inode(client, port):
critical('%s:%s port is already used' % (ip, port))
alert_f('%s:%s port is already used' % (ip, port))
if success:
stdio.stop_loading('succeed')
......
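For reference, a minimal reconstruction of what a probe like get_port_socket_inode can do (an assumption for illustration; the actual helper is defined elsewhere in the plugin and may differ):

def get_port_socket_inode(client, port):
    # /proc/net/tcp lists sockets with hex-encoded local ports; any match means the port is taken
    hex_port = hex(port)[2:].zfill(4).upper()
    cmd = "bash -c 'cat /proc/net/{tcp,tcp6}' | awk '{print $2,$10}' | grep ':%s ' | awk '{print $2}'" % hex_port
    res = client.execute_command(cmd)
    return res.stdout.strip().split('\n') if res and res.stdout.strip() else False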
......@@ -37,6 +37,9 @@ def bootstrap(plugin_context, cursor, *args, **kwargs):
floor_servers[zone] = []
bootstrap.append('REGION "sys_region" ZONE "%s" SERVER "%s:%s"' % (server_config['zone'], server.ip, server_config['rpc_port']))
try:
sql = 'set session ob_query_timeout=1000000000'
stdio.verbose('execute sql: %s' % sql)
cursor.execute(sql)
sql = 'alter system bootstrap %s' % (','.join(bootstrap))
stdio.start_loading('Cluster bootstrap')
stdio.verbose('execute sql: %s' % sql)
......@@ -48,7 +51,7 @@ def bootstrap(plugin_context, cursor, *args, **kwargs):
cursor.execute(sql)
global_conf = cluster_config.get_global_conf()
if 'proxyro_password' in global_conf or 'obproxy' in plugin_context.components:
value = global_conf['proxyro_password'] if 'proxyro_password' in global_conf else ''
value = global_conf['proxyro_password'] if global_conf.get('proxyro_password') is not None else ''
sql = 'create user "proxyro" IDENTIFIED BY "%s"' % value
stdio.verbose(sql)
cursor.execute(sql)
......
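For a three-zone layout like the examples above, the rendered bootstrap statement would look like the following (addresses illustrative), issued after raising ob_query_timeout because bootstrap can far exceed the default statement timeout:

alter system bootstrap REGION "sys_region" ZONE "zone1" SERVER "192.168.1.2:2882",REGION "sys_region" ZONE "zone2" SERVER "192.168.1.3:2882",REGION "sys_region" ZONE "zone3" SERVER "192.168.1.4:2882"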
......@@ -40,8 +40,9 @@ def destroy(plugin_context, *args, **kwargs):
server_config = cluster_config.get_server_conf(server)
stdio.verbose('%s work path cleaning', server)
clean(server, server_config['home_path'])
if 'data_dir' in server_config:
clean(server, server_config['data_dir'])
for key in ['data_dir', 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir']:
if server_config.get(key):
clean(server, server_config[key])
if global_ret:
stdio.stop_loading('succeed')
plugin_context.return_true()
......
......@@ -20,9 +20,7 @@
from __future__ import absolute_import, division, print_function
import sys
import time
from prettytable import PrettyTable
def display(plugin_context, cursor, *args, **kwargs):
......
......@@ -57,7 +57,7 @@ def init_dir(server, client, key, path, link_path=None):
return False
def init(plugin_context, *args, **kwargs):
def init(plugin_context, local_home_path, repository_dir, *args, **kwargs):
global stdio, force
cluster_config = plugin_context.cluster_config
clients = plugin_context.clients
......@@ -65,6 +65,7 @@ def init(plugin_context, *args, **kwargs):
servers_dirs = {}
force = getattr(plugin_context.options, 'force', False)
stdio.verbose('option `force` is %s' % force)
stdio.start_loading('Initializes cluster work home')
for server in cluster_config.servers:
ip = server.ip
if ip not in servers_dirs:
......@@ -73,15 +74,24 @@ def init(plugin_context, *args, **kwargs):
server_config = cluster_config.get_server_conf(server)
client = clients[server]
home_path = server_config['home_path']
if 'data_dir' not in server_config:
remote_home_path = client.execute_command('echo $HOME/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
if not server_config.get('data_dir'):
server_config['data_dir'] = '%s/store' % home_path
if 'clog_dir' not in server_config:
server_config['clog_dir'] = '%s/clog' % server_config['data_dir']
if 'ilog_dir' not in server_config:
server_config['ilog_dir'] = '%s/ilog' % server_config['data_dir']
if 'slog_dir' not in server_config:
server_config['slog_dir'] = '%s/slog' % server_config['data_dir']
for key in ['home_path', 'data_dir', 'clog_dir', 'ilog_dir', 'slog_dir']:
if not server_config.get('redo_dir'):
server_config['redo_dir'] = server_config['data_dir']
if not server_config.get('clog_dir'):
server_config['clog_dir'] = '%s/clog' % server_config['redo_dir']
if not server_config.get('ilog_dir'):
server_config['ilog_dir'] = '%s/ilog' % server_config['redo_dir']
if not server_config.get('slog_dir'):
server_config['slog_dir'] = '%s/slog' % server_config['redo_dir']
if server_config['redo_dir'] == server_config['data_dir']:
keys = ['home_path', 'data_dir', 'clog_dir', 'ilog_dir', 'slog_dir']
else:
keys = ['home_path', 'data_dir', 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir']
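# Worked example (hypothetical values): with home_path=/root/observer and redo_dir=/redo,
# data_dir defaults to /root/observer/store while clog/ilog/slog resolve to /redo/clog,
# /redo/ilog and /redo/slog; since redo_dir then differs from data_dir, it is added to
# the conflict-checked key list as a path in its own right.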
for key in keys:
path = server_config[key]
if path in dirs:
critical('Configuration conflict %s: %s is used for %s\'s %s' % (server, path, dirs[path]['server'], dirs[path]['key']))
......@@ -91,7 +101,7 @@ def init(plugin_context, *args, **kwargs):
'key': key,
}
stdio.print('%s initializes cluster work home' % server)
stdio.verbose('%s initializes cluster work home' % server)
if force:
ret = client.execute_command('rm -fr %s/*' % home_path)
if not ret:
......@@ -105,7 +115,9 @@ def init(plugin_context, *args, **kwargs):
continue
else:
critical('fail to init %s home path: create %s failed' % (server, home_path))
ret = client.execute_command('bash -c "mkdir -p %s/{etc,admin,.conf,log}"' % home_path)
ret = client.execute_command('bash -c "mkdir -p %s/{etc,admin,.conf,log,bin,lib}"' % home_path) \
and client.execute_command("if [ -d %s/bin ]; then ln -s %s/bin/* %s/bin; fi" % (remote_repository_dir, remote_repository_dir, home_path)) \
and client.execute_command("if [ -d %s/lib ]; then ln -s %s/lib/* %s/lib; fi" % (remote_repository_dir, remote_repository_dir, home_path))
if ret:
data_path = server_config['data_dir']
if force:
......@@ -121,7 +133,7 @@ def init(plugin_context, *args, **kwargs):
continue
else:
critical('fail to init %s data path: create %s failed' % (server, data_path))
ret = client.execute_command('mkdir -p %s/sstable' % data_path)
ret = client.execute_command('bash -c "mkdir -p %s/sstable"' % data_path)
if ret:
link_path = '%s/store' % home_path
client.execute_command("if [ ! '%s' -ef '%s' ]; then ln -sf %s %s; fi" % (data_path, link_path, data_path, link_path))
......@@ -151,4 +163,9 @@ def init(plugin_context, *args, **kwargs):
critical('failed to initialize %s data path' % (server))
else:
critical('fail to init %s home path: %s permission denied' % (server, ret.stderr))
global_ret and plugin_context.return_true()
if global_ret:
stdio.stop_loading('succeed')
plugin_context.return_true()
else:
stdio.stop_loading('fail')
\ No newline at end of file
......@@ -9,6 +9,7 @@
- name: cluster_id
require: true
type: INT
default: 1
min_value: 1
max_value: 4294901759
need_restart: true
......@@ -21,13 +22,20 @@
need_redeploy: true
description_en: the directory for the data file
description_local: 存储sstable等数据的目录
- name: redo_dir
type: STRING
min_value: NULL
max_value: NULL
need_redeploy: true
description_en: the directory for the redo file
description_local: 存储clog, ilog, slog数据的目录
- name: clog_dir
type: STRING
min_value: NULL
max_value: NULL
need_redeploy: true
description_en: the directory for the clog file
description_local: 存储clog数据的目录
description_local: 存储clog数据的目录, clog 应该与 ilog 同盘
- name: slog_dir
type: STRING
min_value: NULL
......@@ -52,7 +60,7 @@
- name: rpc_port
require: true
type: INT
default: 2500
default: 2882
min_value: 1025
max_value: 65535
need_restart: true
......@@ -61,7 +69,7 @@
- name: mysql_port
require: true
type: INT
default: 2880
default: 2881
min_value: 1025
max_value: 65535
need_restart: true
......@@ -1641,7 +1649,7 @@
min_value: 0
max_value: NULL
section: OBSERVER
need_restart: false
need_restart: true
description_en: the number of CPUs in the system. If this parameter is set to zero, the number will be set according to sysconf; otherwise, this parameter is used.
description_local: 系统CPU总数,如果设置为0,将自动检测
- name: auto_delete_expired_backup
......
......@@ -39,32 +39,37 @@ def reload(plugin_context, cursor, new_cluster_config, *args, **kwargs):
cluster_server[server] = '%s:%s' % (server.ip, config['rpc_port'])
stdio.verbose('compare configuration of %s' % (server))
for key in new_config:
if key not in config or config[key] != new_config[key]:
change_conf[server][key] = new_config[key]
n_value = new_config[key]
if key not in config or config[key] != n_value:
change_conf[server][key] = n_value
if key not in global_change_conf:
global_change_conf[key] = 1
else:
global_change_conf[key] += 1
global_change_conf[key] = {'value': n_value, 'count': 1}
elif n_value == global_change_conf[key]['value']:
global_change_conf[key]['count'] += 1
servers_num = len(servers)
stdio.verbose('apply new configuration')
for key in global_change_conf:
sql = ''
msg = ''
try:
if key in ['proxyro_password', 'root_password']:
if global_change_conf[key] != servers_num:
if global_change_conf[key]['count'] != servers_num:
stdio.warn('Invalid: %s is not a single server configuration item' % key)
continue
value = change_conf[server][key]
value = change_conf[server][key] if change_conf[server].get(key) is not None else ''
user = key.split('_')[0]
sql = 'alter user "%s" IDENTIFIED BY "%s"' % (user, value if value else '')
msg = sql = 'CREATE USER IF NOT EXISTS %s IDENTIFIED BY "%s"' % (user, value)
stdio.verbose('execute sql: %s' % sql)
cursor.execute(sql)
msg = sql = 'alter user "%s" IDENTIFIED BY "%s"' % (user, value)
stdio.verbose('execute sql: %s' % sql)
cursor.execute(sql)
continue
if global_change_conf[key] == servers_num:
if global_change_conf[key]['count'] == servers_num:
sql = 'alter system set %s = %%s' % key
value = change_conf[server][key]
stdio.verbose('execute sql: %s' % (sql % value))
msg = sql % value
stdio.verbose('execute sql: %s' % msg)
cursor.execute(sql, [value])
cluster_config.update_global_conf(key, value, False)
continue
......@@ -72,13 +77,14 @@ def reload(plugin_context, cursor, new_cluster_config, *args, **kwargs):
if key not in change_conf[server]:
continue
sql = 'alter system set %s = %%s server=%%s' % key
value = change_conf[server][key]
stdio.verbose('execute sql: %s' % (sql % (value, server)))
cursor.execute(sql, [value, server])
value = (change_conf[server][key], cluster_server[server])
msg = sql % value
stdio.verbose('execute sql: %s' % msg)
cursor.execute(sql, value)
cluster_config.update_server_conf(server, key, change_conf[server][key], False)
except:
global_ret = False
stdio.exception('execute sql exception: %s' % sql)
stdio.exception('execute sql exception: %s' % msg)
cursor.execute('alter system reload server')
cursor.execute('alter system reload zone')
......
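A hedged walk-through of the counting scheme above: if all servers change memory_limit to the same value, global_change_conf['memory_limit'] ends up as {'value': '64G', 'count': 3} for a three-server cluster (the 64G figure is illustrative); count == servers_num selects the cluster-wide alter system set, while any per-server difference falls through to the server=ip:rpc_port variant.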
......@@ -64,7 +64,6 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
stdio = plugin_context.stdio
clusters_cmd = {}
need_bootstrap = True
bin_path = os.path.join(repository_dir, 'bin/observer')
root_servers = {}
global_config = cluster_config.get_global_conf()
appname = global_config['appname'] if 'appname' in global_config else None
......@@ -92,26 +91,22 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
root_servers[zone] = '%s:%s:%s' % (server.ip, config['rpc_port'], config['mysql_port'])
rs_list_opt = '-r \'%s\'' % ';'.join([root_servers[zone] for zone in root_servers])
servers_remote_home_path = {}
for server in cluster_config.servers:
client = clients[server]
remote_home_path = client.execute_command('echo $HOME/.obd').stdout.strip()
servers_remote_home_path[server] = remote_home_path
remote_bin_path = bin_path.replace(local_home_path, remote_home_path)
server_config = cluster_config.get_server_conf(server)
home_path = server_config['home_path']
req_check = ['home_path', 'cluster_id']
for key in req_check:
if key not in server_config:
stdio.stop_loading('fail')
stdio.print('%s %s is empty', server, key)
return plugin_context.return_false()
if client.execute_command("bash -c 'if [ -f %s/bin/observer ]; then exit 1; else exit 0; fi;'" % home_path):
remote_home_path = client.execute_command('echo $HOME/.obd').stdout.strip()
remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path)
client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path))
client.execute_command("ln -s %s/bin/* %s/bin" % (remote_repository_dir, home_path))
client.execute_command("ln -s %s/lib/* %s/lib" % (remote_repository_dir, home_path))
home_path = server_config['home_path']
if 'data_dir' not in server_config:
if not server_config.get('data_dir'):
server_config['data_dir'] = '%s/store' % home_path
if client.execute_command('ls %s/clog' % server_config['data_dir']).stdout.strip():
if client.execute_command('ls %s/ilog/' % server_config['data_dir']).stdout.strip():
need_bootstrap = False
remote_pid_path = '%s/run/observer.pid' % home_path
......@@ -151,13 +146,13 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs):
if key in server_config:
value = get_value(key)
cmd.append('%s %s' % (not_opt_str[key], value))
clusters_cmd[server] = 'cd %s; %s %s' % (home_path, remote_bin_path, ' '.join(cmd))
clusters_cmd[server] = 'cd %s; %s/bin/observer %s' % (home_path, home_path, ' '.join(cmd))
for server in clusters_cmd:
client = clients[server]
server_config = cluster_config.get_server_conf(server)
stdio.verbose('starting %s observer', server)
remote_repository_path = repository_dir.replace(local_home_path, remote_home_path)
client.add_env('LD_LIBRARY_PATH', '%s/lib:' % remote_repository_path, True)
client.add_env('LD_LIBRARY_PATH', '%s/lib:' % server_config['home_path'], True)
ret = client.execute_command(clusters_cmd[server])
client.add_env('LD_LIBRARY_PATH', '', True)
if not ret:
......
......@@ -22,6 +22,7 @@ from __future__ import absolute_import, division, print_function
import os
import re
import time
stdio = None
......@@ -58,7 +59,17 @@ def formate_size(size):
return '%.1f%s' % (size, units[idx])
def start_check(plugin_context, strict_check=False, *args, **kwargs):
def time_delta(client):
time_st = time.time() * 1000
time_srv = int(client.execute_command('date +%s%N').stdout) / 1000000
time_ed = time.time() * 1000
time_it = time_ed - time_st
time_srv -= time_it
return time_srv - time_st
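# A rough clock-offset probe: time_srv is the remote epoch in milliseconds, and subtracting
# the measured round-trip (time_it) projects it back to approximately when time_st was
# sampled, so the result estimates remote_clock - local_clock. Illustrative numbers: local
# clock 1000.0 ms, remote reports 1230.0 after a 30 ms round trip -> offset ~ 200 ms; the
# NTP check below alerts when the offsets across servers spread by more than 200 ms.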
def _start_check(plugin_context, strict_check=False, *args, **kwargs):
def alert(*arg, **kwargs):
global success
if strict_check:
......@@ -78,6 +89,8 @@ def start_check(plugin_context, strict_check=False, *args, **kwargs):
servers_port = {}
servers_memory = {}
servers_disk = {}
servers_clog_mount = {}
servers_net_inferface = {}
server_num = len(cluster_config.servers)
stdio.start_loading('Check before start observer')
for server in cluster_config.servers:
......@@ -95,10 +108,14 @@ def start_check(plugin_context, strict_check=False, *args, **kwargs):
if ip not in servers_port:
servers_disk[ip] = {}
servers_port[ip] = {}
servers_clog_mount[ip] = {}
servers_net_inferface[ip] = {}
servers_memory[ip] = {'num': 0, 'percentage': 0}
memory = servers_memory[ip]
ports = servers_port[ip]
disk = servers_disk[ip]
clog_mount = servers_clog_mount[ip]
inferfaces = servers_net_inferface[ip]
stdio.verbose('%s port check' % server)
for key in ['mysql_port', 'rpc_port']:
port = int(server_config[key])
......@@ -112,24 +129,49 @@ def start_check(plugin_context, strict_check=False, *args, **kwargs):
if get_port_socket_inode(client, port):
critical('%s:%s port is already used' % (ip, port))
if 'memory_limit' in server_config:
memory['num'] += parse_size(server_config['memory_limit'])
try:
memory['num'] += parse_size(server_config['memory_limit'])
except:
critical('memory_limit must be an integer')
return
elif 'memory_limit_percentage' in server_config:
memory['percentage'] += int(parse_size(server_config['memory_limit_percentage']))
try:
memory['percentage'] += int(parse_size(server_config['memory_limit_percentage']))
except:
critical('memory_limit_percentage must be an integer')
return
else:
memory['percentage'] += 80
data_path = server_config['data_dir'] if 'data_dir' in server_config else os.path.join(server_config['home_path'], 'store')
data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store')
redo_dir = server_config['redo_dir'] if server_config.get('redo_dir') else data_path
clog_dir = server_config['clog_dir'] if server_config.get('clog_dir') else os.path.join(redo_dir, 'clog')
if not client.execute_command('ls %s/sstable/block_file' % data_path):
if data_path in disk:
critical('Same Path: %s in %s and %s' % (data_path, server, disk[data_path]['server']))
continue
if clog_dir in clog_mount:
critical('Same Path: %s in %s and %s' % (clog_dir, server, clog_mount[clog_dir]['server']))
continue
disk[data_path] = {
'need': 90,
'server': server
}
clog_mount[clog_dir] = {
'threshold': server_config.get('clog_disk_utilization_threshold', 80) / 100.0,
'server': server
}
if 'datafile_size' in server_config and server_config['datafile_size']:
disk[data_path]['need'] = server_config['datafile_size']
elif 'datafile_disk_percentage' in server_config and server_config['datafile_disk_percentage']:
disk[data_path]['need'] = int(server_config['datafile_disk_percentage'])
devname = server_config.get('devname')
if devname:
if not client.execute_command("grep -e ' %s:' /proc/net/dev" % devname):
critical('%s No such net interface: %s' % (server, devname))
if devname not in inferfaces:
inferfaces[devname] = []
inferfaces[devname].append(ip)
for ip in servers_clients:
client = servers_clients[ip]
......@@ -179,9 +221,10 @@ def start_check(plugin_context, strict_check=False, *args, **kwargs):
if ret:
for total, avail, path in re.findall('(\d+)\s+(\d+)\s+(.+)', ret.stdout):
disk[path] = {
'toatl': int(total) << 10,
'total': int(total) << 10,
'avail': int(avail) << 10,
'need': 0
'need': 0,
'threshold': 2
}
for path in servers_disk[ip]:
kp = '/'
......@@ -191,16 +234,77 @@ def start_check(plugin_context, strict_check=False, *args, **kwargs):
kp = p
need = servers_disk[ip][path]['need']
if isinstance(need, int):
disk[kp]['need'] += disk[kp]['toatl'] * need / 100
disk[kp]['need'] += disk[kp]['total'] * need / 100
else:
disk[kp]['need'] += parse_size(need)
try:
disk[kp]['need'] += parse_size(need)
except:
critical('datafile_size must be an integer')
return
for path in servers_clog_mount[ip]:
kp = '/'
for p in disk:
if p in path:
if len(p) > len(kp):
kp = p
disk[kp]['threshold'] = min(disk[kp]['threshold'], servers_clog_mount[ip][path]['threshold'])
for p in disk:
total = disk[p]['total']
avail = disk[p]['avail']
need = disk[p]['need']
threshold = disk[p]['threshold']
if need > 0 and threshold < 2:
alert('(%s) clog and data use the same disk (%s)' % (ip, p))
if need > avail:
critical('(%s) %s not enough disk space. (Avail: %s, Need: %s)' % (ip, kp, formate_size(avail), formate_size(need)))
critical('(%s) %s not enough disk space. (Avail: %s, Need: %s)' % (ip, p, formate_size(avail), formate_size(need)))
elif 1.0 * (total - avail + need) / total > disk[p]['threshold']:
msg = '(%s) %s not enough disk space for clog. Use `redo_dir` to set other disk for clog' % (ip, p)
msg += ', or reduce the value of `datafile_size`' if need > 0 else '.'
critical(msg)
if success:
for ip in servers_net_inferface:
if servers_net_inferface[ip].get(None):
client = servers_clients[ip]
devinfo = client.execute_command('cat /proc/net/dev').stdout
interfaces = []
for interface in re.findall('\n\s+(\w+):', devinfo):
if interface != 'lo':
interfaces.append(interface)
if not interfaces:
interfaces = ['lo']
if len(interfaces) > 1:
servers = ','.join(str(server) for server in servers_net_inferface[ip][None])
critical('%s has more than one network interface. Please set `devname` for (%s)' % (ip, servers))
else:
servers_net_inferface[ip][interfaces[0]] = servers_net_inferface[ip][None]
del servers_net_inferface[ip][None]
if success:
for ip in servers_net_inferface:
client = servers_clients[ip]
for devname in servers_net_inferface[ip]:
if client.is_localhost() and devname != 'lo' or (not client.is_localhost() and devname == 'lo'):
critical('%s %s fails to ping %s. Please check configuration `devname`' % (ip, devname, ip))
continue
for _ip in servers_clients:
if ip == _ip:
continue
if not client.execute_command('ping -W 1 -c 1 -I %s %s' % (devname, _ip)):
critical('%s %s fails to ping %s. Please check configuration `devname`' % (ip, devname, _ip))
break
if success:
times = []
for ip in servers_disk:
client = servers_clients[ip]
times.append(time_delta(client))
if times and max(times) - min(times) > 200:
critical('Cluster NTP is out of sync')
def start_check(plugin_context, strict_check=False, *args, **kwargs):
_start_check(plugin_context, strict_check)
if success:
stdio.stop_loading('succeed')
plugin_context.return_true()
......
......@@ -139,7 +139,10 @@ class SshClient(object):
return '%s@%s:%d' % (self.config.username, self.config.host, self.config.port)
def _is_local(self):
return self.config.host in ['127.0.0.1', 'localhost'] and self.config.username == getpass.getuser()
return self.is_localhost() and self.config.username == getpass.getuser()
def is_localhost(self, stdio=None):
return self.config.host in ['127.0.0.1', 'localhost', '127.1', '127.0.1']
def _login(self, stdio=None):
if self.is_connected:
......