diff --git a/LICENSE b/LICENSE index b0eb3345b5eb03348ca8b85c9cd7807b8fcc81f9..e88cf31c124eb77e3a74c113dd49b769e7fa22c1 100644 --- a/LICENSE +++ b/LICENSE @@ -672,4 +672,3 @@ may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . - diff --git a/README-CN.md b/README-CN.md index 1946021de93a4a3cc2ac9ba06d1addb5fa90e48d..30089dae455f4e52fbe6e4bb7d7fa7f3733232c4 100644 --- a/README-CN.md +++ b/README-CN.md @@ -112,6 +112,7 @@ mysql -h127.1 -uroot -P2883 - [三节点配置样例](./example/distributed-example.yaml) - [单节点 + ODP 配置样例](./example/single-with-obproxy-example.yaml) - [三节点 + ODP 配置样例](./example/distributed-with-obproxy-example.yaml) +- [三节点 + ODP + obagent 配置样例](./example/obagent/distributed-with-obproxy-and-obagent-example.yaml) 本文以 [小规格开发模式-本地单节点](./example/mini-local-example.yaml) 为例,启动一个本地单节点的 OceanBase 数据库。 @@ -310,12 +311,19 @@ obd cluster deploy [-c ] [-f] [-U] [-A] 启动已部署的集群,成功时打印集群状态。 ```shell -obd cluster start [-s] +obd cluster start [flags] ``` 参数 `deploy name` 为部署配置名称,可以理解为配置文件名称。 -选项 `-s` 为 `--strict-check`。部分组件在启动前会做相关的检查,当检查不通过的时候会报警告,不会强制停止流程。使用该选项可开启检查失败报错直接退出。建议开启,可以避免一些资源不足导致的启动失败。非必填项。数据类型为 `bool`。默认不开启。 +选项说明见下表: + +选项名 | 是否必选 | 数据类型 | 默认值 | 说明 +--- | --- | --- |--- |--- +-s/--servers | 否 | string | 空 | 机器列表,用 `,` 间隔。用于指定启动的机器。如果组件下的机器没有全部启动,则 start 不会执行 bootstrap。 +-c/--components | 否 | string | 空 | 组件列表,用 `,` 间隔。用于指定启动的组件。如果配置下的组件没有全部启动,该配置不会进入 running 状态。 +--wop/--without-parameter | 否 | bool | false | 无参启动。启动的时候不带参数。节点第一次的启动时,不响应此选项。 +-S/--strict-check | 否 | bool | false | 部分组件在启动前会做相关的检查。检查不通过时,OBD 将发出告警,不会强制停止流程。使用该选项可开启检查失败报错直接退出。建议开启,可以避免一些资源不足导致的启动失败。 #### `obd cluster list` @@ -361,6 +369,14 @@ obd cluster restart 参数 `deploy name` 为部署配置名称,可以理解为配置文件名称。 +选项说明见下表: + +选项名 | 是否必选 | 数据类型 | 默认值 | 说明 +--- | --- | --- |--- |--- +-s/--servers | 否 | string | 空 | 机器列表,用 `,` 间隔。 +-c/--components | 否 
| string | 空 | 组件列表,用 `,` 间隔。用于指定启动的组件。如果配置下的组件没有全部启动,该配置不会进入 running 状态。 +--wop/--without-parameter | 否 | bool | false | 无参启动。启动的时候不带参数。节点第一次的启动时,不响应此选项。 + #### `obd cluster redeploy` 重启一个运行中集群。当您使用 `edit-config` 修改一个运行的集群的配置信息后,可以通过 `redeploy` 命令应用修改。 @@ -383,9 +399,16 @@ obd cluster stop 参数 `deploy name` 为部署配置名称,可以理解为配置文件名称。 +选项说明见下表: + +选项名 | 是否必选 | 数据类型 | 默认值 | 说明 +--- | --- | --- |--- |--- +-s/--servers | 否 | string | 空 | 机器列表,用 `,` 间隔。用于指定停止的机器。 +-c/--components | 否 | string | 空 | 组件列表,用 `,` 间隔。用于指定停止的组件。如果配置下的组件没有全部停止,该配置不会进入 stopped 状态。 + #### `obd cluster destroy` -销毁已部署的集群。如果集群处于运行中的状态,该命令会先尝试执行`stop`,成功后再执行`destroy`。 +销毁已部署的集群。如果集群处于运行中的状态,该命令会先尝试执行 `stop`,成功后再执行 `destroy`。 ```shell obd cluster destroy [-f] @@ -410,7 +433,7 @@ obd cluster tenant create [-n ] [flags] 选项名 | 是否必选 | 数据类型 | 默认值 | 说明 --- | --- | --- |--- | --- -n/--tenant-name | 否 | string | test | 租户名。对应的资源单元和资源池根据租户名自动生成,并且避免重名。 ---max-cpu | 否 | float | 0 | 租户可用最大 CPU 数。为 0 时使用集群剩余全部可用 CPU。实际值低于 2 时报错。 +--max-cpu | 否 | float | 0 | 租户可用最大 CPU 数。为 0 时使用集群剩余全部可用 CPU。 --min-cpu | 否 | float | 0 | 租户可用最小 CPU 数。为 0 时等于 --max-cpu。 --max-memory | 否 | int | 0 | 租户可用最大内存。为 0 时使用集群剩余全部可用内存。实际值低于 1G 时报错。 --min-memory | 否 | int | 0 | 租户可用最大内存。为 0 时等于 --max-memory。 @@ -507,6 +530,40 @@ obd test sysbench [flags] --events | 否 | int | 0 | 最大请求数量,定义数量后可以不需要 --time 选项。 --rand-type | 否 | string | 访问数据时使用的随机生成函数。取值可以为 special、uniform、gaussian 或 pareto。 默认值为 special, 早期值为 uniform。 ---skip-trx | 否 | string | 空 | 在只读测试中打开或关闭事务。 +-O/--optimization | 否 | int | 1 | 自动调优等级。为 0 时关闭。 + + +#### `obd test tpch` + +对 OcecanBase 数据库或 ODP 组件的指定节点执行 TPC-H。 TPC-H 需要 OBClient 和 obtpch,请先安装 OBClient 和 obtpch。 +TPC-H 需要指定一台OceanBase目标服务器作为执行对象。在执行TPC-H测试前,OBD会将测试需要的数据文件传输到指定机器的指定目录下,这些文件可能会比较大,请确保机器上足够的磁盘空间。 +当然你也可以提前在目标机器上准备好数据文件,再通过`--dt/--disable-transfer`选项关闭传输。 + +```shell +obd test tpch [flags] +``` + +参数 `deploy name` 为部署配置名称,可以理解为配置文件名称。 + +选项名 | 是否必选 | 数据类型 | 默认值 | 说明 +--- | --- | --- |--- | --- +--test-server | 否 | 
string | 默指定的组件下服务器中的第一个节点。 | 必须是指定的组件下的某个节点名。 +--user | 否 | string | root | 执行测试的用户名。 +--password | 否 | string | 默认为空 | 执行测试的用户密码。 +--tenant | 否 | string | test | 执行测试的租户名。 +--database | 否 | string | test | 执行测试的数据库。 +--obclient-bin | 否 | string | obclient | OBClient 二进制文件路径。 +--dbgen-bin | 否 | string | /usr/local/tpc-h-tools/bin/dbgen | dbgen 二进制文件路径。 +--dss-config | 否 | string | /usr/local/tpc-h-tools/ | dists.dss所在目录。 +-s/--scale-factor | 否 | int | 1 | 自动生成测试数据的规模,单位为G。 +-tmp-dir | 否 | string | ./tmp | 执行tpch时的临时目录。自动生成的测试数据,自动调优的sql文件,执行测试sql的日志文件等都会存在这里。 +--ddl-path | 否 | string | 默认为空 | ddl 文件路径或目录。为空时,OBD会使用自带的ddl文件。 +--tbl-path | 否 | string | 默认为空 | tbl 文件路径或目录。为空时,使用dbgen生成测试数据。 +--sql-path | 否 | string | 默认为空 | sql 文件路径或目录。为空时,OBD会使用自带的sql文件。 +--remote-tbl-dir | 否 | string | 默认为空 | 目标observer上存放tbl的目录,绝对路径,请保证observer的启动用户对该目录有读写权限。在不开启`--test-only`的情况下该选项为必填项 +--test-only | 否 | bool | false | 不执行初始化,仅执行测试sql。 +--dt/--disable-transfer | 否 | bool | false | 禁用传输。开启后将不会把本地tbl传输到远程remote-tbl-dir下,而是直接使用目标机器remote-tbl-dir下的tbl文件。 +-O/--optimization | 否 | int | 1 | 自动调优等级。为 0 时关闭。 ## Q&A diff --git a/README.md b/README.md index 4c6e17d9c250cd967722ec3a11dd3febca06c64e..bb18d0d844ee2f1e81e9be85f6308d7dcedbdcdf 100644 --- a/README.md +++ b/README.md @@ -93,9 +93,9 @@ To start an OceanBase cluster, follow these steps: Select a configuration file based on your resource configurations: -#### Small-scale development mode +#### Small-scale deployment mode -This development mode applies to personal devices with at least 8 GB of memory. +This deployment mode applies to personal devices with at least 8 GB of memory. - [Sample configuration file for local single-node deployment](./example/mini-local-example.yaml) - [Sample configuration file for single-node deployment](./example/mini-single-example.yaml) @@ -103,17 +103,18 @@ This development mode applies to personal devices with at least 8 GB of memory. 
- [Sample configuration file for single-node deployment with ODP](./example/mini-single-with-obproxy-example.yaml) - [Sample configuration file for three-node deployment with ODP](./example/mini-distributed-with-obproxy-example.yaml) -#### Professional development mode +#### Professional deployment mode -This development mode applies to advanced Elastic Compute Service (ECS) instances or physical servers with at least 16 CPU cores and 64 GB of memory. +This deployment mode applies to advanced Elastic Compute Service (ECS) instances or physical servers with at least 16 CPU cores and 64 GB of memory. -- [Sample configuration file for local single-node development](./example/local-example.yaml) -- [Sample configuration file for single-node development](./example/single-example.yaml) -- [Sample configuration file for three-node development](./example/distributed-example.yaml) -- [Sample configuration file for single-node development with ODP](./example/single-with-obproxy-example.yaml) -- [Sample configuration file for three-node development with ODP](./example/distributed-with-obproxy-example.yaml) +- [Sample configuration file for local single-node deployment](./example/local-example.yaml) +- [Sample configuration file for single-node deployment](./example/single-example.yaml) +- [Sample configuration file for three-node deployment](./example/distributed-example.yaml) +- [Sample configuration file for single-node deployment with ODP](./example/single-with-obproxy-example.yaml) +- [Sample configuration file for three-node deployment with ODP](./example/distributed-with-obproxy-example.yaml) +- [Sample configuration file for three-node deployment with ODP and obagent](./example/obagent/distributed-with-obproxy-and-obagent-example.yaml) -This section describes how to start a local single-node OceanBase cluster by using the [sample configuration file for local single-node deployment in the small-scale development mode](./example/mini-local-example.yaml). 
+This section describes how to start a local single-node OceanBase cluster by using the [sample configuration file for local single-node deployment in the small-scale deployment mode](./example/mini-local-example.yaml). ```shell # Modify the working directory of the OceanBase cluster: home_path. @@ -122,7 +123,7 @@ This section describes how to start a local single-node OceanBase cluster by usi vi ./example/mini-local-example.yaml ``` -If the target server to run the OceanBase cluster is not the logged-in server, do not use the `sample configuration file for local single-node development`. Use another configuration file. +If the target server to run the OceanBase cluster is not the logged-in server, do not use the `sample configuration file for local single-node deployment`. Use another configuration file. Do not forget to change the user password at the beginning of the configuration file. ```yaml @@ -409,7 +410,7 @@ This table describes the corresponding options. | Option | Required | Data type | Default value | Description | --- | --- | --- |--- | --- | -n/--tenant-name | No | string | test | The tenant name. OBD will automatically generate resource units and resource pools with unique names based on the tenant name. | -| --max-cpu | No | float | 0 | The maximum number of CPU cores available for the tenant. When this option is set to 0, all available CPU cores of the cluster can be used by the tenant. When the value is less than two, an error is returned. | +| --max-cpu | No | float | 0 | The maximum number of CPU cores available for the tenant. When this option is set to 0, all available CPU cores of the cluster can be used by the tenant. | | --min-cpu | No | float | 0 | The minimum number of CPU cores available for the tenant. When this option is set to 0, the minimum number of CPU cores is the same as the maximum number of CPU cores. | | --max-memory | No | int | 0 | The maximum memory capacity available for the tenant. 
When this option is set to 0, all available memory capacity of the cluster can be used by the tenant. When the actual value is less than 1 GB, an error is returned. | | --min-memory | No | int | 0 | The minimum memory capacity available for the tenant. When this option is set to 0, the minimum memory capacity is the same as the maximum memory capacity. | diff --git a/_arch.py b/_arch.py index bbff5356611bd48ffc26222f7be50f008db9d440..0dd5b3a2053f352a1ad3263f7ca08f331c038798 100644 --- a/_arch.py +++ b/_arch.py @@ -280,6 +280,7 @@ def getCanonArch(skipRpmPlatform = 0): return getCanonSPARCArch(arch) if arch == "x86_64": return getCanonX86_64Arch(arch) + return arch canonArch = getCanonArch() diff --git a/_cmd.py b/_cmd.py index f4d2888eaa3cdf0e73b9fc1fe601a69764738678..8a673eb0240106c8b9eaf074c128c91b3dc82014 100644 --- a/_cmd.py +++ b/_cmd.py @@ -1,3 +1,4 @@ + # coding: utf-8 # OceanBase Deploy. # Copyright (C) 2021 OceanBase @@ -35,10 +36,11 @@ from tool import DirectoryUtil, FileUtil ROOT_IO = IO(1) -VERSION = '1.1.0' +VERSION = '1.1.1' REVISION = '' BUILD_BRANCH = '' BUILD_TIME = '' +DEBUG = True if '' else False class BaseCommand(object): @@ -129,17 +131,19 @@ class ObdCommand(BaseCommand): ROOT_IO.trace_logger = logger obd = ObdHome(self.OBD_PATH, ROOT_IO) ROOT_IO.track_limit += 1 + ROOT_IO.verbose('cmd: %s' % self.cmds) + ROOT_IO.verbose('opts: %s' % self.opts) ret = self._do_command(obd) except NotImplementedError: ROOT_IO.exception('command \'%s\' is not implemented' % self.prev_cmd) except IOError: - ROOT_IO.exception('OBD is running') + ROOT_IO.exception('OBD is running.') except SystemExit: pass except: ROOT_IO.exception('Running Error.') - # if not ret: - # ROOT_IO.print('Trace ID: %s' % trace_id) + if DEBUG: + ROOT_IO.print('Trace ID: %s' % trace_id) return ret def _do_command(self, obd): @@ -186,7 +190,7 @@ class MajorCommand(BaseCommand): class MirrorCloneCommand(ObdCommand): def __init__(self): - super(MirrorCloneCommand, self).__init__('clone', 
'Clone a RPM package to the local mirror repository.') + super(MirrorCloneCommand, self).__init__('clone', 'Clone an RPM package to the local mirror repository.') self.parser.add_option('-f', '--force', action='store_true', help="Force clone, overwrite the mirror.") def init(self, cmd, args): @@ -370,6 +374,7 @@ class ClusterStartCommand(ClusterMirrorCommand): self.parser.add_option('-c', '--components', type='string', help="List the started components. Multiple components are separated with commas.") self.parser.add_option('-f', '--force-delete', action='store_true', help="Force delete, delete the registered cluster.") self.parser.add_option('-S', '--strict-check', action='store_true', help="Throw errors instead of warnings when check fails.") + self.parser.add_option('--without-parameter', '--wop', action='store_true', help='Start without parameters.') def _do_command(self, obd): if self.cmds: @@ -423,6 +428,7 @@ class ClusterRestartCommand(ClusterMirrorCommand): super(ClusterRestartCommand, self).__init__('restart', 'Restart a started cluster.') self.parser.add_option('-s', '--servers', type='string', help="List the started servers. Multiple servers are separated with commas.") self.parser.add_option('-c', '--components', type='string', help="List the started components. 
Multiple components are separated with commas.") + self.parser.add_option('--without-parameter', '--wop', action='store_true', help='Start without parameters.') def _do_command(self, obd): if self.cmds: @@ -471,7 +477,7 @@ class ClusterListCommand(ClusterMirrorCommand): class ClusterEditConfigCommand(ClusterMirrorCommand): def __init__(self): - super(ClusterEditConfigCommand, self).__init__('edit-config', 'Edit a deploy configuration file.') + super(ClusterEditConfigCommand, self).__init__('edit-config', 'Edit the configuration file for a specific deployment.') def _do_command(self, obd): if self.cmds: @@ -498,25 +504,25 @@ class ClusterTenantCreateCommand(ClusterMirrorCommand): def __init__(self): super(ClusterTenantCreateCommand, self).__init__('create', 'Create a tenant.') - self.parser.add_option('-n', '--tenant-name', type='string', help="The tenant name.") - self.parser.add_option('--max-cpu', type='float', help="Unit max CPU number.") - self.parser.add_option('--min-cpu', type='float', help="Unit min CPU number.") - self.parser.add_option('--max-memory', type='int', help="Unit max memory size.") - self.parser.add_option('--min-memory', type='int', help="Unit min memory size.") - self.parser.add_option('--max-disk-size', type='int', help="Unit max disk size.") - self.parser.add_option('--max-iops', type='int', help="Unit max iops number. [128]", default=128) - self.parser.add_option('--min-iops', type='int', help="Unit min iops number.") - self.parser.add_option('--max-session-num', type='int', help="Unit max session number. [64]", default=64) + self.parser.add_option('-n', '--tenant-name', type='string', help="The tenant name. 
The default tenant name is [test].", default='test') + self.parser.add_option('--max-cpu', type='float', help="Max CPU unit number.") + self.parser.add_option('--min-cpu', type='float', help="Mind CPU unit number.") + self.parser.add_option('--max-memory', type='int', help="Max memory unit size.") + self.parser.add_option('--min-memory', type='int', help="Min memory unit size.") + self.parser.add_option('--max-disk-size', type='int', help="Max disk unit size.") + self.parser.add_option('--max-iops', type='int', help="Max IOPS unit number. [128].", default=128) + self.parser.add_option('--min-iops', type='int', help="Min IOPS unit number.") + self.parser.add_option('--max-session-num', type='int', help="Max session unit number. [64].", default=64) self.parser.add_option('--unit-num', type='int', help="Pool unit number.") self.parser.add_option('-z', '--zone-list', type='string', help="Tenant zone list.") self.parser.add_option('--charset', type='string', help="Tenant charset.") self.parser.add_option('--collate', type='string', help="Tenant COLLATE.") - self.parser.add_option('--replica-num', type='int', help="tenant replica num") + self.parser.add_option('--replica-num', type='int', help="Tenant replica number.") self.parser.add_option('--logonly-replica-num', type='int', help="Tenant logonly replica number.") self.parser.add_option('--tablegroup', type='string', help="Tenant tablegroup.") - self.parser.add_option('--primary-zone', type='string', help="Tenant primary zone. [RANDOM]", default='RANDOM') + self.parser.add_option('--primary-zone', type='string', help="Tenant primary zone. [RANDOM].", default='RANDOM') self.parser.add_option('--locality', type='string', help="Tenant locality.") - self.parser.add_option('-s', '--variables', type='string', help="Set the variables for the system tenant. [ob_tcp_invited_nodes='%']", default="ob_tcp_invited_nodes='%'") + self.parser.add_option('-s', '--variables', type='string', help="Set the variables for the system tenant. 
[ob_tcp_invited_nodes='%'].", default="ob_tcp_invited_nodes='%'") def _do_command(self, obd): if self.cmds: @@ -576,8 +582,8 @@ class TestMirrorCommand(ObdCommand): class MySQLTestCommand(TestMirrorCommand): def __init__(self): - super(MySQLTestCommand, self).__init__('mysqltest', 'Run mysqltest for a deployment.') - self.parser.add_option('--component', type='string', help='The component for mysqltest.') + super(MySQLTestCommand, self).__init__('mysqltest', 'Run a mysqltest for a deployment.') + self.parser.add_option('--component', type='string', help='Components for mysqltest.') self.parser.add_option('--test-server', type='string', help='The server for mysqltest. By default, the first root server in the component is the mysqltest server.') self.parser.add_option('--user', type='string', help='Username for a test. [admin]', default='admin') self.parser.add_option('--password', type='string', help='Password for a test. [admin]', default='admin') @@ -592,11 +598,11 @@ class MySQLTestCommand(TestMirrorCommand): self.parser.add_option('--var-dir', type='string', help='Var directory to use when run mysqltest. [./var]', default='./var') self.parser.add_option('--test-set', type='string', help='test list, use `,` interval') self.parser.add_option('--test-pattern', type='string', help='Pattern for test file.') - self.parser.add_option('--suite', type='string', help='Suite list.Multiple suites are separated with commas.') + self.parser.add_option('--suite', type='string', help='Suite list. Multiple suites are separated with commas.') self.parser.add_option('--suite-dir', type='string', help='Suite case directory. [./mysql_test/test_suite]', default='./mysql_test/test_suite') self.parser.add_option('--init-sql-dir', type='string', help='Initiate sql directory. 
[../]', default='../') self.parser.add_option('--init-sql-files', type='string', help='Initiate sql file list.Multiple files are separated with commas.') - self.parser.add_option('--need-init', action='store_true', help='Execute init sql file.', default=False) + self.parser.add_option('--need-init', action='store_true', help='Execute the init SQL file.', default=False) self.parser.add_option('--auto-retry', action='store_true', help='Auto retry when fails.', default=False) self.parser.add_option('--all', action='store_true', help='Run all suite-dir cases.', default=False) self.parser.add_option('--psmall', action='store_true', help='Run psmall cases.', default=False) @@ -613,8 +619,8 @@ class SysBenchCommand(TestMirrorCommand): def __init__(self): super(SysBenchCommand, self).__init__('sysbench', 'Run sysbench for a deployment.') - self.parser.add_option('--component', type='string', help='The component for mysqltest.') - self.parser.add_option('--test-server', type='string', help='The server for mysqltest. By default, the first root server in the component is the mysqltest server.') + self.parser.add_option('--component', type='string', help='Components for test.') + self.parser.add_option('--test-server', type='string', help='The server for test. By default, the first root server in the component is the test server.') self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root') self.parser.add_option('--password', type='string', help='Password for a test.') self.parser.add_option('--tenant', type='string', help='Tenant for a test. [test]', default='test') @@ -632,7 +638,7 @@ class SysBenchCommand(TestMirrorCommand): self.parser.add_option('--rand-type', type='string', help='Random numbers distribution {uniform,gaussian,special,pareto}.') self.parser.add_option('--percentile', type='int', help='Percentile to calculate in latency statistics. Available values are 1-100. 
0 means to disable percentile calculations.') self.parser.add_option('--skip-trx', dest='{on/off}', type='string', help='Open or close a transaction in a read-only test. ') - self.parser.add_option('-O', '--optimization', dest='{0/1}', type='int', help='optimization', default=1) + self.parser.add_option('-O', '--optimization', type='int', help='optimization level {0/1}', default=1) def _do_command(self, obd): if self.cmds: @@ -641,12 +647,43 @@ class SysBenchCommand(TestMirrorCommand): return self._show_help() +class TPCHCommand(TestMirrorCommand): + + def __init__(self): + super(TPCHCommand, self).__init__('tpch', 'Run a TPC-H test for a deployment.') + self.parser.add_option('--component', type='string', help='Components for a test.') + self.parser.add_option('--test-server', type='string', help='The server for a test. By default, the first root server in the component is the test server.') + self.parser.add_option('--user', type='string', help='Username for a test. [root]', default='root') + self.parser.add_option('--password', type='string', help='Password for a test.') + self.parser.add_option('--tenant', type='string', help='Tenant for a test. [test]', default='test') + self.parser.add_option('--database', type='string', help='Database for a test. [test]', default='test') + self.parser.add_option('--obclient-bin', type='string', help='OBClient bin path. [obclient]', default='obclient') + self.parser.add_option('--dbgen-bin', type='string', help='dbgen bin path. [/usr/local/tpc-h-tools/bin/dbgen]', default='/usr/local/tpc-h-tools/bin/dbgen') + self.parser.add_option('-s', '--scale-factor', type='int', help='Set Scale Factor (SF) to . [1] ', default=1) + self.parser.add_option('--tmp-dir', type='string', help='The temporary directory for executing TPC-H. 
[./tmp]', default='./tmp') + self.parser.add_option('--ddl-path', type='string', help='Directory for DDL files.') + self.parser.add_option('--tbl-path', type='string', help='Directory for tbl files.') + self.parser.add_option('--sql-path', type='string', help='Directory for SQL files.') + self.parser.add_option('--remote-tbl-dir', type='string', help='Directory for the tbl on target observers. Make sure that you have read and write access to the directory when you start observer.') + self.parser.add_option('--disable-transfer', '--dt', action='store_true', help='Disable the transfer. When enabled, OBD will use the tbl files under remote-tbl-dir instead of transferring local tbl files to remote remote-tbl-dir.') + self.parser.add_option('--dss-config', type='string', help='Directory for dists.dss. [/usr/local/tpc-h-tools]', default='/usr/local/tpc-h-tools/') + self.parser.add_option('-O', '--optimization', type='int', help='Optimization level {0/1}. [1]', default=1) + self.parser.add_option('--test-only', action='store_true', help='Only testing SQLs are executed. 
No initialization is executed.') + + def _do_command(self, obd): + if self.cmds: + return obd.tpch(self.cmds[0], self.opts) + else: + return self._show_help() + + class TestMajorCommand(MajorCommand): def __init__(self): - super(TestMajorCommand, self).__init__('test', 'Run test for a running deploy deployment.') + super(TestMajorCommand, self).__init__('test', 'Run test for a running deployment.') self.register_command(MySQLTestCommand()) self.register_command(SysBenchCommand()) + self.register_command(TPCHCommand()) class BenchMajorCommand(MajorCommand): diff --git a/_deploy.py b/_deploy.py index a18adb1b72859e44f25faca14582d7aca1aa4141..1d888604a2ecf010b0edcefdb8d3d5f84e99df45 100644 --- a/_deploy.py +++ b/_deploy.py @@ -92,7 +92,9 @@ class ClusterConfig(object): def __init__(self, servers, name, version, tag, package_hash): self.version = version + self.origin_version = version self.tag = tag + self.origin_tag = tag self.name = name self.origin_package_hash = package_hash self.package_hash = package_hash @@ -101,11 +103,14 @@ class ClusterConfig(object): self._global_conf = {} self._server_conf = {} self._cache_server = {} + self._original_global_conf = {} self.servers = servers + self._original_servers = servers # 保证顺序 for server in servers: self._server_conf[server] = {} self._cache_server[server] = None self._deploy_config = None + self._depends = {} def __eq__(self, other): if not isinstance(other, self.__class__): @@ -117,10 +122,40 @@ class ClusterConfig(object): self._deploy_config = _deploy_config return True return False + + @property + def original_servers(self): + return self._original_servers + + @property + def depends(self): + return self._depends.keys() + + def add_depend(self, name, cluster_conf): + self._depends[name] = cluster_conf + + def del_depend(self, name, component_name): + if component_name in self._depends: + del self._depends[component_name] + + def get_depled_servers(self, name): + if name not in self._depends: + return None + 
cluster_config = self._depends[name] + return deepcopy(cluster_config.original_servers) + + def get_depled_config(self, name, server=None): + if name not in self._depends: + return None + cluster_config = self._depends[name] + config = cluster_config.get_server_conf_with_default(server) if server else cluster_config.get_global_conf() + return deepcopy(config) def update_server_conf(self, server, key, value, save=True): if self._deploy_config is None: return False + if server not in self._server_conf: + return False if not self._deploy_config.update_component_server_conf(self.name, server, key, value, save): return False self._server_conf[server][key] = value @@ -133,24 +168,31 @@ class ClusterConfig(object): return False if not self._deploy_config.update_component_global_conf(self.name, key, value, save): return False - self._global_conf[key] = value + self._update_global_conf(key, value) for server in self._cache_server: if self._cache_server[server] is not None: self._cache_server[server][key] = value return True + def _update_global_conf(self, key, value): + self._original_global_conf[key] = value + self._global_conf[key] = value + def get_unconfigured_require_item(self, server): items = [] config = self.get_server_conf(server) - for key in self._temp_conf: - if not self._temp_conf[key].require: - continue - if key in config: - continue - items.append(key) + if config is not None: + for key in self._temp_conf: + if not self._temp_conf[key].require: + continue + if key in config: + continue + items.append(key) return items def get_server_conf_with_default(self, server): + if server not in self._server_conf: + return None config = {} for key in self._temp_conf: if self._temp_conf[key].default is not None: @@ -159,6 +201,8 @@ class ClusterConfig(object): return config def get_need_redeploy_items(self, server): + if server not in self._server_conf: + return None items = {} config = self.get_server_conf(server) for key in config: @@ -167,6 +211,8 @@ class 
ClusterConfig(object): return items def get_need_restart_items(self, server): + if server not in self._server_conf: + return None items = {} config = self.get_server_conf(server) for key in config: @@ -183,14 +229,17 @@ class ClusterConfig(object): self.set_global_conf(self._global_conf) # 更新全局配置 def set_global_conf(self, conf): + self._original_global_conf = deepcopy(conf) self._global_conf = deepcopy(self._default_conf) - self._global_conf.update(conf) + self._global_conf.update(self._original_global_conf) for server in self._cache_server: self._cache_server[server] = None def add_server_conf(self, server, conf): if server not in self.servers: self.servers.append(server) + if server not in self._original_servers: + self._original_servers.append(server) self._server_conf[server] = conf self._cache_server[server] = None @@ -279,6 +328,7 @@ class DeployConfig(object): def _load(self): try: with open(self.yaml_path, 'rb') as f: + depends = {} self._src_data = self.yaml_loader.load(f) for key in self._src_data: if key == 'user': @@ -295,6 +345,14 @@ class DeployConfig(object): self.auto_create_tenant = self._src_data['auto_create_tenant'] elif issubclass(type(self._src_data[key]), dict): self._add_component(key, self._src_data[key]) + depends[key] = self._src_data[key].get('depends', []) + for comp in depends: + conf = self.components[comp] + for name in depends[comp]: + if name == comp: + continue + if name in self.components: + conf.add_depend(name, self.components[name]) except: pass if not self.user: @@ -315,6 +373,34 @@ class DeployConfig(object): def set_user_conf(self, conf): self._user = conf + def add_depend_for_component(self, component_name, depend_component_name, save=True): + if component_name not in self.components: + return False + if depend_component_name not in self.components: + return False + cluster_config = self.components[component_name] + if depend_component_name in cluster_config.depends: + return True + 
cluster_config.add_depend(depend_component_name, self.components[depend_component_name]) + component_config = self._src_data[component_name] + if 'depends' not in component_config: + component_config['depends'] = [] + component_config['depends'].append(depend_component_name) + return self.dump() if save else True + + def del_depend_for_component(self, component_name, depend_component_name, save=True): + if component_name not in self.components: + return False + if depend_component_name not in self.components: + return False + cluster_config = self.components[component_name] + if depend_component_name not in cluster_config.depends: + return True + cluster_config.del_depend(depend_component_name, depend_component_name) + component_config = self._src_data[component_name] + component_config['depends'] = cluster_config.depends + return self.dump() if save else True + def update_component_server_conf(self, component_name, server, key, value, save=True): if component_name not in self.components: return False diff --git a/_mirror.py b/_mirror.py index bc632a0886b60eacc47c16e2d9faa56ff84c0e81..fc2cce5b2879136a2f5b85e5b9495258df281467 100644 --- a/_mirror.py +++ b/_mirror.py @@ -52,6 +52,7 @@ SUP_MAP = { 'opensuse-leap': (([15], 7), ), 'sles': (([15, 2], 7), ), 'fedora': (([33], 7), ), + 'uos': (([20], 8), ), } _SERVER_VARS = { 'basearch': getBaseArch(), @@ -88,6 +89,10 @@ class MirrorRepository(object): self.stdio = stdio self.mirror_path = mirror_path self.name = os.path.split(mirror_path)[1] + self._str = '%s mirror %s' % (self.mirror_type, self.name) + + def __str__(self): + return self._str @property def mirror_type(self): @@ -774,6 +779,7 @@ class MirrorRepositoryManager(Manager): info = [None, None] for mirror in mirrors: new_one = mirror.get_exact_pkg_info(**pattern) + self.stdio.verbose('%s found pkg: %s' % (mirror, new_one)) if new_one and new_one > info[0]: info = [new_one, mirror] return info[0] if info[0] is None or only_info else 
info[1].get_rpm_pkg_by_info(info[0]) @@ -787,6 +793,7 @@ class MirrorRepositoryManager(Manager): source_mirror = None for mirror in mirrors: t_best = mirror.get_best_pkg_info_with_score(**pattern) + self.stdio.verbose('%s found pkg: %s' % (mirror, t_best)) if best is None: best = t_best source_mirror = mirror diff --git a/_plugin.py b/_plugin.py index 0ff197fa55f5a2d7e21c75a638e572883a9ad2fa..d823665381f682f6854dab494fc58f32fb301a51 100644 --- a/_plugin.py +++ b/_plugin.py @@ -417,10 +417,10 @@ class ComponentPluginLoader(object): plugins.append(plugin) if plugins: plugin = max(plugins, key=lambda x: x.version) - self.stdio and getattr(self.stdio, 'warn', print)( - '%s %s plugin version %s not found, use the best suitable version %s\n. Use `obd update` to update local plugin repository' % - (self.component_name, self.PLUGIN_TYPE.name.lower(), '.'.join(version), '.'.join(plugin.version)) - ) + # self.stdio and getattr(self.stdio, 'warn', print)( + # '%s %s plugin version %s not found, use the best suitable version %s.\n Use `obd update` to update local plugin repository' % + # (self.component_name, self.PLUGIN_TYPE.name.lower(), version, plugin.version) + # ) return plugin return None diff --git a/_repository.py b/_repository.py index 4161756cfbe710915985472d1fa699366c71c8ac..84cbca62a5927d4a7913a0ee2e1d9e634ddcea90 100644 --- a/_repository.py +++ b/_repository.py @@ -161,7 +161,11 @@ class Repository(PackageInfo): files = [] if self.version and self.hash: for file_item in plugin.file_list(): - files.append(os.path.join(self.repository_dir, file_item.target_path)) + path = os.path.join(self.repository_dir, file_item.target_path) + if file_item.type == InstallPlugin.FileItemType.DIR: + files += DirectoryUtil.list_dir(path) + else: + files.append(path) return files def file_check(self, plugin): @@ -252,7 +256,7 @@ class Repository(PackageInfo): if path not in need_files: for n_dir in need_dirs: if path.startswith(n_dir): - need_files[path] = os.path.join(n_dir, 
path[len(n_dir):]) + need_files[path] = os.path.join(need_dirs[n_dir], path[len(n_dir):]) break for src_path in need_files: if src_path not in files: @@ -278,7 +282,8 @@ class Repository(PackageInfo): self.stdio and getattr(self.stdio, 'verbose', print)('link %s to %s' % (links[link], link)) os.symlink(links[link], link) for n_dir in need_dirs: - if not os.path.isdir(n_dir): + path = os.path.join(self.repository_dir, need_dirs[n_dir]) + if not os.path.isdir(path): raise Exception('%s: No such dir: %s' % (pkg.path, n_dir)) self.set_version(pkg.version) self.set_release(pkg.release) diff --git a/_rpm.py b/_rpm.py index 33ca76d24c1646aa66b141463d4db0bd9cb3c584..1cf34c3a81de02fe4d7fb507eb45c4c80f6b1e44 100644 --- a/_rpm.py +++ b/_rpm.py @@ -45,12 +45,19 @@ class Version(str): def __cmp_value__(self): return [(int(_i), _s) for _i, _s in re.findall('(\d+)([^\.]*)', self.__str__())] + def __eq__(self, value): + if value is None: + return False + return self.__cmp_value__ == self.__class__(value).__cmp_value__ + def __gt__(self, value): if value is None: return True return self.__cmp_value__ > self.__class__(value).__cmp_value__ def __ge__(self, value): + if value is None: + return True return self.__eq__(value) or self.__gt__(value) def __lt__(self, value): @@ -59,6 +66,8 @@ class Version(str): return self.__cmp_value__ < self.__class__(value).__cmp_value__ def __le__(self, value): + if value is None: + return False return self.__eq__(value) or self.__lt__(value) class Release(Version): diff --git a/_stdio.py b/_stdio.py index bdbacd82bfd343d7752caf3a462ce263f2a241ea..a96d8e3e67fb496900c82ef5c4d15dbfa5c949ac 100644 --- a/_stdio.py +++ b/_stdio.py @@ -209,6 +209,7 @@ class IO(object): VERBOSE_LEVEL = 0 WARNING_PREV = FormtatText.warning('[WARN]') ERROR_PREV = FormtatText.error('[ERROR]') + IS_TTY = sys.stdin.isatty() def __init__(self, level, msg_lv=MsgLevel.DEBUG, trace_logger=None, track_limit=0, root_io=None, stream=sys.stdout): self.level = level @@ -363,15 +364,19 @@ 
class IO(object): self.print(table) def confirm(self, msg): - while True: - try: - ans = raw_input('%s [y/n]: ' % msg) - if ans == 'y': - return True - if ans == 'n': - return False - except: - pass + if self.IS_TTY: + while True: + try: + ans = raw_input('%s [y/n]: ' % msg) + if ans == 'y': + return True + if ans == 'n': + return False + except Exception as e: + if not e: + return False + else: + return False def _format(self, msg, *args): if args: @@ -441,11 +446,11 @@ class IO(object): if lines: exception_msg.append(''.join(lines)) if self.level <= self.VERBOSE_LEVEL: - msg = '%s\n%s' % (msg, '\n'.join(exception_msg)) - self.error(msg) + print_stack = lambda m: self._print(MsgLevel.ERROR, m) else: - msg and self.error(msg) - self._log(MsgLevel.ERROR, '\n'.join(exception_msg)) + print_stack = lambda m: self._log(MsgLevel.ERROR, m) + msg and self.error(msg) + print_stack('\n'.join(exception_msg)) else: def exception(self, msg, *args, **kwargs): ei = sys.exc_info() @@ -458,9 +463,9 @@ class IO(object): for line in traceback_e.format(chain=True): lines.append(line) if self.level <= self.VERBOSE_LEVEL: - msg = '%s\n%s' % (msg, ''.join(lines)) - self.error(msg) + print_stack = lambda m: self._print(MsgLevel.ERROR, m) else: - msg and self.error(msg) - self._log(MsgLevel.ERROR, ''.join(lines)) + print_stack = lambda m: self._log(MsgLevel.ERROR, m) + msg and self.error(msg) + print_stack(''.join(lines)) diff --git a/build.sh b/build.sh index 6c31f1bcbd2f2484900655cec46dfab3d35ff58d..69ca157776a1f9698a76b18fa73a8fa4cbc00608 100644 --- a/build.sh +++ b/build.sh @@ -28,7 +28,7 @@ rm -fr /usr/bin/obd CID=`git log |head -n1 | awk -F' ' '{print $2}'` BRANCH=`git branch | grep -e "^\*" | awk -F' ' '{print $2}'` DATE=`date '+%b %d %Y %H:%M:%S'` -cat /usr/obd/_cmd.py | sed "s//$CID/" | sed "s//$BRANCH/" | sed "s//$DATE/" > /usr/obd/obd.py +cat /usr/obd/_cmd.py | sed "s//$CID/" | sed "s//$BRANCH/" | sed "s//$DATE/" | sed "s//$OBD_DUBUG/" > /usr/obd/obd.py echo -e 
"#!/bin/bash\n$python_bin /usr/obd/obd.py \$*" > /usr/bin/obd chmod +x /usr/bin/obd echo -e 'Installation of obd finished successfully\nPlease source /etc/profile.d/obd.sh to enable it' \ No newline at end of file diff --git a/core.py b/core.py index e43dc150dc883bba877397760fc57349ff36ffbb..1065f429fdbb73c3f771a96e97b4bbe0b4d34b71 100644 --- a/core.py +++ b/core.py @@ -247,8 +247,25 @@ class ObdHome(object): errors.append('No such package %s.' % ('-'.join(pkg_name))) return pkgs, repositories, errors - def load_local_repositories(self, deploy_config, allow_shadow=True): - return self.get_local_repositories(deploy_config.components, allow_shadow) + def load_local_repositories(self, deploy_info, allow_shadow=True): + repositories = [] + if allow_shadow: + get_repository = self.repository_manager.get_repository_allow_shadow + else: + get_repository = self.repository_manager.get_repository + + components = deploy_info.components + for component_name in components: + data = components[component_name] + version = data.get('version') + pkg_hash = data.get('hash') + self._call_stdio('verbose', 'Get local repository %s-%s-%s' % (component_name, version, pkg_hash)) + repository = get_repository(component_name, version, pkg_hash) + if repository: + repositories.append(repository) + else: + self._call_stdio('critical', 'Local repository %s-%s-%s is empty.' % (component_name, version, pkg_hash)) + return repositories def get_local_repositories(self, components, allow_shadow=True): repositories = [] @@ -325,7 +342,7 @@ class ObdHome(object): deploy_config = DeployConfig(tf.name, YamlLoader(self.stdio)) self._call_stdio('verbose', 'Configure component change check') if not deploy_config.components: - if self._call_stdio('confirm', 'Empty configuration'): + if self._call_stdio('confirm', 'Empty configuration. 
Continue editing?'): continue return False self._call_stdio('verbose', 'Information check for the configuration component.') @@ -340,20 +357,21 @@ class ObdHome(object): if confirm('Modifying the server list of a deployed cluster is not permitted.'): continue return False + success = True for component_name in deploy_config.components: old_cluster_config = deploy.deploy_config.components[component_name] new_cluster_config = deploy_config.components[component_name] - if new_cluster_config.version and new_cluster_config.version != old_cluster_config.version: - success = False - break - if new_cluster_config.package_hash and new_cluster_config.package_hash != old_cluster_config.package_hash: + if new_cluster_config.version != old_cluster_config.origin_version \ + or new_cluster_config.package_hash != old_cluster_config.origin_package_hash \ + or new_cluster_config.tag != old_cluster_config.origin_tag: success = False break if not success: if confirm('Modifying the version and hash of the component is not permitted.'): continue return False + pkgs, repositories, errors = self.search_components_from_mirrors(deploy_config, update_if_need=False) # Loading the parameter plugins that are available to the application self._call_stdio('start_loading', 'Search param plugin and load') @@ -468,7 +486,9 @@ class ObdHome(object): self._call_stdio('error', 'Failed to extract file from %s' % pkg.path) return None self._call_stdio('stop_loading', 'succeed') + self._call_stdio('verbose', 'get head repository') head_repository = self.repository_manager.get_repository(pkg.name, pkg.version, pkg.name) + self._call_stdio('verbose', 'head repository: %s' % head_repository) if repository > head_repository: self.repository_manager.create_tag_for_repository(repository, pkg.name, True) repositories.append(repository) @@ -540,7 +560,6 @@ class ObdHome(object): remote_file_path = file_path.replace(self.home_path, remote_home_path) self._call_stdio('verbose', '%s %s installing' % (server, 
repository)) client.put_file(file_path, remote_file_path) - client.execute_command('chmod %s %s' % (oct(os.stat(file_path).st_mode)[-3: ], remote_file_path)) client.put_file(repository.data_file_path, remote_repository_data_path) self._call_stdio('verbose', '%s %s installed' % (server, repository.name)) self._call_stdio('stop_loading', 'succeed') @@ -653,7 +672,7 @@ class ObdHome(object): if deploy: deploy_info = deploy.deploy_info if deploy_info.status not in [DeployStatus.STATUS_CONFIGURED, DeployStatus.STATUS_DESTROYED]: - self._call_stdio('error', 'Deploy "%s" is %s. You could not reload an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value)) + self._call_stdio('error', 'Deploy "%s" is %s. You could not deploy an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value)) return False # self._call_stdio('error', 'Deploy name `%s` have been occupied.' % name) # return False @@ -727,7 +746,7 @@ class ObdHome(object): deploy_info = deploy.deploy_info self._call_stdio('verbose', 'judge deploy status') if deploy_info.status not in [DeployStatus.STATUS_CONFIGURED, DeployStatus.STATUS_DESTROYED]: - self._call_stdio('error', 'Deploy "%s" is %s. You could not reload an %s cluster.' % (name, deploy_info.status.value, deploy_info.status.value)) + self._call_stdio('error', 'Deploy "%s" is %s. You could not deploy an %s cluster.' 
% (name, deploy_info.status.value, deploy_info.status.value)) return False if deploy_info.config_status != DeployConfigStatus.UNCHNAGE: self._call_stdio('verbose', 'Apply temp deploy configuration') @@ -846,19 +865,16 @@ class ObdHome(object): return False self._call_stdio('verbose', 'Search init plugin') - init_plugins = self.search_py_script_plugin(repositories, 'init', False) + init_plugins = self.search_py_script_plugin(repositories, 'init') component_num = len(repositories) for repository in repositories: cluster_config = deploy_config.components[repository.name] - if repository in init_plugins: - init_plugin = init_plugins[repository] - self._call_stdio('verbose', 'Exec %s init plugin' % repository) - self._call_stdio('verbose', 'Apply %s for %s-%s' % (init_plugin, repository.name, repository.version)) - if init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opt, self.stdio, self.home_path, repository.repository_dir): - deploy.use_model(repository.name, repository, False) - component_num -= 1 - else: - self._call_stdio('print', 'No such init plugin for %s' % repository.name) + init_plugin = init_plugins[repository] + self._call_stdio('verbose', 'Exec %s init plugin' % repository) + self._call_stdio('verbose', 'Apply %s for %s-%s' % (init_plugin, repository.name, repository.version)) + if init_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opt, self.stdio, self.home_path, repository.repository_dir): + deploy.use_model(repository.name, repository, False) + component_num -= 1 if component_num == 0 and deploy.update_deploy_status(DeployStatus.STATUS_DEPLOYED): self._call_stdio('print', '%s deployed' % name) @@ -881,7 +897,7 @@ class ObdHome(object): if deploy_info.config_status == DeployConfigStatus.NEED_REDEPLOY: self._call_stdio('error', 'Deploy needs redeploy') return False - if deploy_info.config_status != DeployConfigStatus.UNCHNAGE: + if deploy_info.config_status != DeployConfigStatus.UNCHNAGE and not 
getattr(options, 'without_parameter', False): self._call_stdio('verbose', 'Apply temp deploy configuration') if not deploy.apply_temp_deploy_config(): self._call_stdio('error', 'Failed to apply new deploy configuration') @@ -893,15 +909,15 @@ class ObdHome(object): update_deploy_status = True components = getattr(options, 'components', '') if components: - deploy_info_components = {} - deploy_config_components = {} - for component in components.split(','): - deploy_info_components[component] = deploy_info.components[component] - deploy_config_components[component] = deploy_config.components[component] - if len(deploy_info.components) != len(deploy_info_components): + components = components.split(',') + for component in components: + if component not in deploy_info.components: + self._call_stdio('error', 'No such component: %s' % component) + return False + if len(components) != len(deploy_info.components): update_deploy_status = False - deploy_info.components = deploy_info_components - deploy_config.components = deploy_config_components + else: + components = deploy_info.components.keys() servers = getattr(options, 'servers', '') server_list = servers.split(',') if servers else [] @@ -909,7 +925,7 @@ class ObdHome(object): self._call_stdio('start_loading', 'Get local repositories and plugins') # Get the repository - repositories = self.load_local_repositories(deploy_config, False) + repositories = self.load_local_repositories(deploy_info, False) start_check_plugins = self.search_py_script_plugin(repositories, 'start_check', False) create_tenant_plugins = self.search_py_script_plugin(repositories, 'create_tenant', False) if deploy_config.auto_create_tenant else {} @@ -922,6 +938,10 @@ class ObdHome(object): # Get the client ssh_clients = self.get_clients(deploy_config, repositories) + self._call_stdio('start_loading', 'Cluster param config check') + # Check whether the components have the parameter plugins and apply the plugins + 
self.search_param_plugin_and_apply(repositories, deploy_config) + # Check the status for the deployed cluster component_status = {} if DeployStatus.STATUS_RUNNING == deploy_info.status: @@ -930,10 +950,6 @@ class ObdHome(object): self._call_stdio('print', 'Deploy "%s" is running' % name) return True - self._call_stdio('start_loading', 'Cluster param config check') - # Check whether the components have the parameter plugins and apply the plugins - self.search_param_plugin_and_apply(repositories, deploy_config) - # Parameter check errors = self.deploy_param_check(repositories, deploy_config) if errors: @@ -945,6 +961,8 @@ class ObdHome(object): strict_check = getattr(options, 'strict_check', False) success = True for repository in repositories: + if repository.name not in components: + continue if repository not in start_check_plugins: continue cluster_config = deploy_config.components[repository.name] @@ -957,12 +975,17 @@ class ObdHome(object): # self._call_stdio('verbose', 'Starting check failed. Use --skip-check to skip the starting check. 
However, this may lead to a starting failure.') return False - component_num = len(repositories) + component_num = len(components) for repository in repositories: + if repository.name not in components: + continue cluster_config = deploy_config.components[repository.name] cluster_servers = cluster_config.servers if servers: cluster_config.servers = [srv for srv in cluster_servers if srv.ip in server_list or srv.name in server_list] + if not cluster_config.servers: + component_num -= 1 + continue start_all = cluster_servers == cluster_config.servers update_deploy_status = update_deploy_status and start_all @@ -1030,7 +1053,7 @@ class ObdHome(object): self._call_stdio('start_loading', 'Get local repositories and plugins') # Get the repository - repositories = self.load_local_repositories(deploy_config) + repositories = self.load_local_repositories(deploy_info) # Check whether the components have the parameter plugins and apply the plugins self.search_param_plugin_and_apply(repositories, deploy_config) @@ -1077,7 +1100,7 @@ class ObdHome(object): self._call_stdio('start_loading', 'Get local repositories and plugins') # Get the repository - repositories = self.load_local_repositories(deploy_config) + repositories = self.load_local_repositories(deploy_info) # Check whether the components have the parameter plugins and apply the plugins self.search_param_plugin_and_apply(repositories, deploy_config) @@ -1126,12 +1149,12 @@ class ObdHome(object): self._call_stdio('verbose', 'Get deploy config') deploy_config = deploy.deploy_config - self._call_stdio('verbose', 'Apply new deploy config') + self._call_stdio('verbose', 'Get new deploy config') new_deploy_config = DeployConfig(deploy.get_temp_deploy_yaml_path(deploy.config_dir), YamlLoader(self.stdio)) self._call_stdio('start_loading', 'Get local repositories and plugins') # Get the repository - repositories = self.load_local_repositories(deploy_config) + repositories = self.load_local_repositories(deploy_info) # Check 
whether the components have the parameter plugins and apply the plugins self.search_param_plugin_and_apply(repositories, deploy_config) @@ -1173,7 +1196,9 @@ class ObdHome(object): continue self._call_stdio('verbose', 'Call %s for %s' % (reload_plugins[repository], repository)) - if not reload_plugins[repository](deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, cursor, new_cluster_config): + if not reload_plugins[repository]( + deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, + cursor=cursor, new_cluster_config=new_cluster_config, repository_dir=repository.repository_dir): continue component_num -= 1 if component_num == 0: @@ -1202,7 +1227,7 @@ class ObdHome(object): self._call_stdio('start_loading', 'Get local repositories and plugins') # Get the repository - repositories = self.load_local_repositories(deploy_config) + repositories = self.load_local_repositories(deploy_info) # Check whether the components have the parameter plugins and apply the plugins self.search_param_plugin_and_apply(repositories, deploy_config) @@ -1263,22 +1288,22 @@ class ObdHome(object): update_deploy_status = True components = getattr(options, 'components', '') if components: - deploy_info_components = {} - deploy_config_components = {} - for component in components.split(','): - deploy_info_components[component] = deploy_info.components[component] - deploy_config_components[component] = deploy_config.components[component] - if len(deploy_info.components) != len(deploy_info_components): + components = components.split(',') + for component in components: + if component not in deploy_info.components: + self._call_stdio('error', 'No such component: %s' % component) + return False + if len(components) != len(deploy_info.components): update_deploy_status = False - deploy_info.components = deploy_info_components - deploy_config.components = deploy_config_components + else: + components = deploy_info.components.keys() servers = 
getattr(options, 'servers', '') server_list = servers.split(',') if servers else [] self._call_stdio('start_loading', 'Get local repositories and plugins') # Get the repository - repositories = self.load_local_repositories(deploy_config) + repositories = self.load_local_repositories(deploy_info) # Check whether the components have the parameter plugins and apply the plugins @@ -1290,12 +1315,18 @@ class ObdHome(object): # Get the client ssh_clients = self.get_clients(deploy_config, repositories) - component_num = len(repositories) + component_num = len(components) for repository in repositories: + if repository.name not in components: + continue cluster_config = deploy_config.components[repository.name] cluster_servers = cluster_config.servers if servers: cluster_config.servers = [srv for srv in cluster_servers if srv.ip in server_list or srv.name in server_list] + if not cluster_config.servers: + component_num -= 1 + continue + start_all = cluster_servers == cluster_config.servers update_deploy_status = update_deploy_status and start_all @@ -1304,7 +1335,7 @@ class ObdHome(object): component_num -= 1 if component_num == 0: - if components or servers: + if len(components) != len(repositories) or servers: self._call_stdio('print', "succeed") return True else: @@ -1324,11 +1355,11 @@ class ObdHome(object): deploy_info = deploy.deploy_info self._call_stdio('verbose', 'Check the deploy status') if deploy_info.status == DeployStatus.STATUS_RUNNING: - if deploy_info.config_status != DeployConfigStatus.UNCHNAGE: - self.reload_cluster(name) - if not self.stop_cluster(name, opt): + # if deploy_info.config_status != DeployConfigStatus.UNCHNAGE and not getattr(opt, 'without_parameter', False): + # self.reload_cluster(name) + if not self.stop_cluster(name, options=opt): return False - return self.start_cluster(name, opt) + return self.start_cluster(name, options=opt) def redeploy_cluster(self, name, opt=Values()): return self.destroy_cluster(name, opt) and 
self.deploy_cluster(name) and self.start_cluster(name) @@ -1353,7 +1384,7 @@ class ObdHome(object): self._call_stdio('start_loading', 'Get local repositories and plugins') # Get the repository - repositories = self.load_local_repositories(deploy_config) + repositories = self.load_local_repositories(deploy_info) # Check whether the components have the parameter plugins and apply the plugins self.search_param_plugin_and_apply(repositories, deploy_config) @@ -1790,11 +1821,15 @@ class ObdHome(object): self._call_stdio('verbose', 'Get deploy configuration') deploy_config = deploy.deploy_config + allow_components = ['obproxy', 'oceanbase', 'oceanbase-ce'] if opts.component is None: - for component_name in ['obproxy', 'oceanbase', 'oceanbase-ce']: + for component_name in allow_components: if component_name in deploy_config.components: opts.component = component_name break + elif opts.component not in allow_components: + self._call_stdio('error', '%s not support. %s is allowed' % (opts.component, allow_components)) + return False if opts.component not in deploy_config.components: self._call_stdio('error', 'Can not find the component for sysbench, use `--component` to select component') return False @@ -1867,6 +1902,97 @@ class ObdHome(object): return True return False + def tpch(self, name, opts): + self._call_stdio('verbose', 'Get Deploy by name') + deploy = self.deploy_manager.get_deploy_config(name) + if not deploy: + self._call_stdio('error', 'No such deploy: %s.' 
% name) + return False + + deploy_info = deploy.deploy_info + self._call_stdio('verbose', 'Check deploy status') + if deploy_info.status != DeployStatus.STATUS_RUNNING: + self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value)) + return False + self._call_stdio('verbose', 'Get deploy configuration') + deploy_config = deploy.deploy_config + + allow_components = ['oceanbase', 'oceanbase-ce'] + if opts.component is None: + for component_name in allow_components: + if component_name in deploy_config.components: + opts.component = component_name + break + elif opts.component not in allow_components: + self._call_stdio('error', '%s not support. %s is allowed' % (opts.component, allow_components)) + return False + if opts.component not in deploy_config.components: + self._call_stdio('error', 'Can not find the component for tpch, use `--component` to select component') + return False + + cluster_config = deploy_config.components[opts.component] + if not cluster_config.servers: + self._call_stdio('error', '%s server list is empty' % opts.component) + return False + if opts.test_server is None: + opts.test_server = cluster_config.servers[0] + else: + for server in cluster_config.servers: + if server.name == opts.test_server: + opts.test_server = server + break + else: + self._call_stdio('error', '%s is not a server in %s' % (opts.test_server, opts.component)) + return False + + self._call_stdio('start_loading', 'Get local repositories and plugins') + # Get the repository + repositories = self.get_local_repositories({opts.component: deploy_config.components[opts.component]}) + repository = repositories[0] + + # Check whether the components have the parameter plugins and apply the plugins + self.search_param_plugin_and_apply(repositories, deploy_config) + self._call_stdio('stop_loading', 'succeed') + + # Get the client + ssh_clients = self.get_clients(deploy_config, repositories) + + # Check the status for the deployed cluster + component_status = {} + 
cluster_status = self.cluster_status_check(ssh_clients, deploy_config, repositories, component_status) + if cluster_status is False or cluster_status == 0: + if self.stdio: + self._call_stdio('error', 'Some of the servers in the cluster have been stopped') + for repository in component_status: + cluster_status = component_status[repository] + for server in cluster_status: + if cluster_status[server] == 0: + self._call_stdio('print', '%s %s is stopped' % (server, repository.name)) + return False + + connect_plugin = self.search_py_script_plugin(repositories, 'connect')[repository] + ret = connect_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], {}, self.stdio, target_server=opts.test_server, sys_root=False) + if not ret or not ret.get_return('connect'): + self._call_stdio('error', 'Failed to connect to the server') + return False + db = ret.get_return('connect') + cursor = ret.get_return('cursor') + + pre_test_plugin = self.plugin_manager.get_best_py_script_plugin('pre_test', 'tpch', repository.version) + run_test_plugin = self.plugin_manager.get_best_py_script_plugin('run_test', 'tpch', repository.version) + + setattr(opts, 'host', opts.test_server.ip) + setattr(opts, 'port', db.port) + + + self._call_stdio('verbose', 'Call %s for %s' % (pre_test_plugin, repository)) + if pre_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio): + self._call_stdio('verbose', 'Call %s for %s' % (run_test_plugin, repository)) + if run_test_plugin(deploy_config.components.keys(), ssh_clients, cluster_config, [], opts, self.stdio, db, cursor): + return True + return False + + def update_obd(self, version): component_name = 'ob-deploy' plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, component_name, '1.0.0') diff --git a/example/autodeploy/distributed-with-obproxy-and-obagent-example.yaml b/example/autodeploy/distributed-with-obproxy-and-obagent-example.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..c207a9ce6a91c6cca696c8a58139f332dcbe268e --- /dev/null +++ b/example/autodeploy/distributed-with-obproxy-and-obagent-example.yaml @@ -0,0 +1,148 @@ +## Only need to configure when remote login is required +# user: +# username: your username +# password: your password if need +# key_file: your ssh-key file path if need +# port: your ssh port, default 22 +# timeout: ssh connection timeout (second), default 30 +oceanbase-ce: + servers: + - name: z1 + # Please don't use hostname, only IP can be supported + ip: 172.19.33.2 + - name: z2 + ip: 172.19.33.3 + - name: z3 + ip: 172.19.33.4 + global: + # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. + home_path: /root/observer + # The directory for data storage. The default value is $home_path/store. + # data_dir: /data + # The directory for clog, ilog, and slog. The default value is the same as the data_dir value. + # redo_dir: /redo + # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. + # mysql_port: 2881 + # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + # rpc_port: 2882 + # Defines the zone for an observer. The default value is zone1. + # zone: zone1 + # The maximum running memory for an observer. When ignored, autodeploy calculates this value based on the current server available resource. + # memory_limit: 58G + # The percentage of the maximum available memory to the total memory. This value takes effect only when memory_limit is 0. The default value is 80. + # memory_limit_percentage: 80 + # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. Autodeploy calculates this value based on the current server available resource. + # system_memory: 22G + # The size of a data file. 
When ignored, autodeploy calculates this value based on the current server available resource. + # datafile_size: 200G + # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. + # datafile_disk_percentage: 90 + # System log level. The default value is INFO. + # syslog_level: INFO + # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false. + # enable_syslog_wf: false + # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on. + # enable_syslog_recycle: true + # The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4. + # max_syslog_file_count: 4 + # Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy. + # appname: obcluster + # Password for root. The default value is empty. + # root_password: + # Password for proxyro. proxyro_password must be the same as observer_sys_password. The default value is empty. + # proxyro_password: + z1: + zone: zone1 + z2: + zone: zone2 + z3: + zone: zone3 +obproxy: + servers: + - 192.168.1.5 + global: + # The working directory for obproxy. Obproxy is started under this directory. This is a required field. + home_path: /root/obproxy + # External port. The default value is 2883. + # listen_port: 2883 + # The Prometheus port. The default value is 2884. + # prometheus_listen_port: 2884 + # rs_list is the root server list for observers. The default root server is the first server in the zone. + # The format for rs_list is observer_ip:observer_mysql_port;observer_ip:observer_mysql_port. + # Ignore this value in autodeploy mode. 
+ # rs_list: 127.0.0.1:2881 + # Cluster name for the proxy OceanBase Database. The default value is obcluster. This value must be set to the same with the appname for OceanBase Database. + # cluster_name: obcluster + # Password for obproxy system tenant. The default value is empty. + # obproxy_sys_password: + # Password for proxyro. proxyro_password must be the same with proxyro_password. The default value is empty. + # observer_sys_password: +obagent: + # The list of servers to be monitored. This list is consistent with the servers in oceanbase-ce. + servers: + - name: z1 + # Please don't use hostname, only IP is supported. + ip: 172.19.33.2 + - name: z2 + ip: 172.19.33.3 + - name: z3 + ip: 172.19.33.4 + # Set dependent components for the component. + # When the associated configurations are not done, OBD will automatically get the these configurations from the dependent components. + depends: + - oceanbase-ce + global: + # The working directory for obagent. obagent is started under this directory. This is a required field. + home_path: /root/observer + # The port that pulls and manages the metrics. The default port number is 8088. + # server_port: 8088 + # Debug port for pprof. The default port number is 8089. + # pprof_port: 8089 + # Log level. The default value is INFO. + # log_level: INFO + # Log path. The default value is log/monagent.log. + # log_path: log/monagent.log + # Encryption method. OBD supports aes and plain. The default value is plain. + # crypto_method: plain + # Path to store the crypto key. The default value is conf/.config_secret.key. + # crypto_path: conf/.config_secret.key + # Size for a single log file. Log size is measured in Megabytes. The default value is 30M. + # log_size: 30 + # Expiration time for logs. The default value is 7 days. + # log_expire_day: 7 + # The maximum number for log files. The default value is 10. + # log_file_count: 10 + # Whether to use local time for log files. The default value is true. 
+ # log_use_localtime: true + # Whether to enable log compression. The default value is true. + # log_compress: true + # Username for HTTP authentication. The default value is admin. + # http_basic_auth_user: admin + # Password for HTTP authentication. The default value is root. + # http_basic_auth_password: root + # Username for debug service. The default value is admin. + # pprof_basic_auth_user: admin + # Password for debug service. The default value is root. + # pprof_basic_auth_password: root + # Monitor username for OceanBase Database. The user must have read access to OceanBase Database as a system tenant. The default value is root. + # monitor_user: root + # Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the root_password in oceanbase-ce. + # monitor_password: + # The SQL port for observer. The default value is 2881. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the mysql_port in oceanbase-ce. + # sql_port: 2881 + # The RPC port for observer. The default value is 2882. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the rpc_port in oceanbase-ce. + # rpc_port: 2882 + # Cluster name for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the appname in oceanbase-ce. + # cluster_name: obcluster + # Cluster ID for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the cluster_id in oceanbase-ce. + # cluster_id: 1 + # Zone name for your observer. The default value is zone1. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the zone name in oceanbase-ce. + # zone_name: zone1 + # Monitor status for OceanBase Database. 
Active is to enable. Inactive is to disable. The default value is active. When you deploy an cluster automatically, OBD decides whether to enable this parameter based on depends. + # ob_monitor_status: active + # Monitor status for your host. Active is to enable. Inactive is to disable. The default value is active. + # host_monitor_status: active + # Whether to disable the basic authentication for HTTP service. True is to disable. False is to enable. The default value is false. + # disable_http_basic_auth: false + # Whether to disable the basic authentication for the debug interface. True is to disable. False is to enable. The default value is false. + # disable_pprof_basic_auth: false \ No newline at end of file diff --git a/example/distributed-with-obproxy-example.yaml b/example/distributed-with-obproxy-example.yaml index 945b759d26a07c017338ce635a9bac5240ac2ebf..794ade20c749258e69a7e9ef653b06c4af18eb32 100644 --- a/example/distributed-with-obproxy-example.yaml +++ b/example/distributed-with-obproxy-example.yaml @@ -72,10 +72,10 @@ obproxy: prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884. home_path: /root/obproxy # oceanbase root server list - # format: ip:mysql_port;ip:mysql_port + # format: ip:mysql_port;ip:mysql_port. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. rs_list: 192.168.1.2:2881;192.168.1.3:2881;192.168.1.4:2881 enable_cluster_checkout: false - # observer cluster name, consistent with oceanbase-ce's appname + # observer cluster name, consistent with oceanbase-ce's appname. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. cluster_name: obcluster - # obproxy_sys_password: # obproxy sys user password, can be empty - # observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty + # obproxy_sys_password: # obproxy sys user password, can be empty. 
When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. diff --git a/example/mini-distributed-with-obproxy-example.yaml b/example/mini-distributed-with-obproxy-example.yaml index 3d8cc133240b5bc6aabf6ff2014f25ccb070bcf2..dccfc3d0242f2d30d9bd8186786aaaec4c20cf80 100644 --- a/example/mini-distributed-with-obproxy-example.yaml +++ b/example/mini-distributed-with-obproxy-example.yaml @@ -82,10 +82,10 @@ obproxy: prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884. home_path: /root/obproxy # oceanbase root server list - # format: ip:mysql_port;ip:mysql_port + # format: ip:mysql_port;ip:mysql_port. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. rs_list: 192.168.1.2:2881;192.168.1.3:2881;192.168.1.4:2881 enable_cluster_checkout: false - # observer cluster name, consistent with oceanbase-ce's appname + # observer cluster name, consistent with oceanbase-ce's appname. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. cluster_name: obcluster - # obproxy_sys_password: # obproxy sys user password, can be empty - # observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty + # obproxy_sys_password: # obproxy sys user password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. 
diff --git a/example/mini-single-with-obproxy-example.yaml b/example/mini-single-with-obproxy-example.yaml index 42c77bbff8a0cec2ebd43a71154bf9c8091dff80..d8a5f356b81cb86a3d3f32922cc2768965e2af63 100644 --- a/example/mini-single-with-obproxy-example.yaml +++ b/example/mini-single-with-obproxy-example.yaml @@ -1,17 +1,17 @@ ## Only need to configure when remote login is required -# user: -# username: your username -# password: your password if need +user: + username: rongfeng.frf + password: fRf19941116 # key_file: your ssh-key file path if need # port: your ssh port, default 22 # timeout: ssh connection timeout (second), default 30 oceanbase-ce: servers: # Please don't use hostname, only IP can be supported - - 192.168.1.3 + - 100.81.252.4 global: # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. - home_path: /root/observer + home_path: /home/rongfeng.frf/data/obd/t1/observer # The directory for data storage. The default value is $home_path/store. # data_dir: /data # The directory for clog, ilog, and slog. The default value is the same as the data_dir value. @@ -19,7 +19,7 @@ oceanbase-ce: # Please set devname as the network adaptor's name whose ip is in the setting of severs. # if set severs as "127.0.0.1", please set devname as "lo" # if current ip is 192.168.1.10, and the ip's network adaptor's name is "eth0", please use "eth0" - devname: eth0 + devname: bond0 mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. zone: zone1 @@ -50,16 +50,16 @@ oceanbase-ce: # proxyro_password: # proxyro user pasword, consistent with obproxy's observer_sys_password, can be empty obproxy: servers: - - 192.168.1.2 + - 100.81.252.4 global: listen_port: 2883 # External port. 
The default value is 2883. prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884. - home_path: /root/obproxy + home_path: /home/rongfeng.frf/data/obd/t1/odp # oceanbase root server list - # format: ip:mysql_port;ip:mysql_port + # format: ip:mysql_port;ip:mysql_port. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. rs_list: 192.168.1.3:2881 enable_cluster_checkout: false - # observer cluster name, consistent with oceanbase-ce's appname + # observer cluster name, consistent with oceanbase-ce's appname. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. cluster_name: obcluster - # obproxy_sys_password: # obproxy sys user password, can be empty - # observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty + # obproxy_sys_password: # obproxy sys user password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. 
diff --git a/example/obagent/distributed-with-obproxy-and-obagent-example.yaml b/example/obagent/distributed-with-obproxy-and-obagent-example.yaml new file mode 100644 index 0000000000000000000000000000000000000000..af8d4846cb2c07b1d5a800674708c1ef50b4fd3b --- /dev/null +++ b/example/obagent/distributed-with-obproxy-and-obagent-example.yaml @@ -0,0 +1,151 @@ +## Only need to configure when remote login is required +# user: +# username: your username +# password: your password if need +# key_file: your ssh-key file path if need +# port: your ssh port, default 22 +# timeout: ssh connection timeout (second), default 30 +oceanbase-ce: + servers: + - name: z1 + # Please don't use hostname, only IP can be supported + ip: 172.19.33.2 + - name: z2 + ip: 172.19.33.3 + - name: z3 + ip: 172.19.33.4 + global: + # Please set devname as the network adaptor's name whose ip is in the setting of severs. + # if set severs as "127.0.0.1", please set devname as "lo" + # if current ip is 192.168.1.10, and the ip's network adaptor's name is "eth0", please use "eth0" + devname: eth0 + # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. + memory_limit: 64G # The maximum running memory for an observer + # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. + # system_memory: 30G + datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. + syslog_level: INFO # System log level. The default value is INFO. + enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. + enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. + max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. 
The default value is 0. + # root_password: # root user password + # In this example , support multiple ob process in single node, so different process use different ports. + # If deploy ob cluster in multiple nodes, the port and path setting can be same. + z1: + mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. + rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. + home_path: /root/observer + # The directory for data storage. The default value is $home_path/store. + # data_dir: /data + # The directory for clog, ilog, and slog. The default value is the same as the data_dir value. + # redo_dir: /redo + zone: zone1 + z2: + mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. + rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. + home_path: /root/observer + # The directory for data storage. The default value is $home_path/store. + # data_dir: /data + # The directory for clog, ilog, and slog. The default value is the same as the data_dir value. + # redo_dir: /redo + zone: zone2 + z3: + mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. + rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + # The working directory for OceanBase Database. OceanBase Database is started under this directory. 
This is a required field. + home_path: /root/observer + # The directory for data storage. The default value is $home_path/store. + # data_dir: /data + # The directory for clog, ilog, and slog. The default value is the same as the data_dir value. + # redo_dir: /redo + zone: zone3 +obproxy: + servers: + - 192.168.1.5 + # Set dependent components for the component. + # When the associated configurations are not done, OBD will automatically get the these configurations from the dependent components. + depends: + - oceanbase-ce + global: + listen_port: 2883 # External port. The default value is 2883. + prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884. + home_path: /root/obproxy + # oceanbase root server list + # format: ip:mysql_port;ip:mysql_port. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # rs_list: 192.168.1.2:2881;192.168.1.3:2881;192.168.1.4:2881 + enable_cluster_checkout: false + # observer cluster name, consistent with oceanbase-ce's appname. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # cluster_name: obcluster + # obproxy_sys_password: # obproxy sys user password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. +obagent: + # The list of servers to be monitored. This list is consistent with the servers in oceanbase-ce. + servers: + - name: z1 + # Please don't use hostname, only IP can be supported + ip: 172.19.33.2 + - name: z2 + ip: 172.19.33.3 + - name: z3 + ip: 172.19.33.4 + # Set dependent components for the component. + # When the associated configurations are not done, OBD will automatically get the these configurations from the dependent components. 
+ depends: + - oceanbase-ce + global: + # The working directory for obagent. obagent is started under this directory. This is a required field. + home_path: /root/observer + # The port that pulls and manages the metrics. The default port number is 8088. + server_port: 8088 + # Debug port for pprof. The default port number is 8089. + pprof_port: 8089 + # Log level. The default value is INFO. + log_level: INFO + # Log path. The default value is log/monagent.log. + log_path: log/monagent.log + # Encryption method. OBD supports aes and plain. The default value is plain. + crypto_method: plain + # Path to store the crypto key. The default value is conf/.config_secret.key. + # crypto_path: conf/.config_secret.key + # Size for a single log file. Log size is measured in Megabytes. The default value is 30M. + log_size: 30 + # Expiration time for logs. The default value is 7 days. + log_expire_day: 7 + # The maximum number for log files. The default value is 10. + log_file_count: 10 + # Whether to use local time for log files. The default value is true. + # log_use_localtime: true + # Whether to enable log compression. The default value is true. + # log_compress: true + # Username for HTTP authentication. The default value is admin. + http_basic_auth_user: admin + # Password for HTTP authentication. The default value is root. + http_basic_auth_password: root + # Username for debug service. The default value is admin. + pprof_basic_auth_user: admin + # Password for debug service. The default value is root. + pprof_basic_auth_password: root + # Monitor username for OceanBase Database. The user must have read access to OceanBase Database as a system tenant. The default value is root. + # monitor_user: root + # Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the root_password in oceanbase-ce. + # monitor_password: + # The SQL port for observer. 
The default value is 2881. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the mysql_port in oceanbase-ce. + # sql_port: 2881 + # The RPC port for observer. The default value is 2882. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the rpc_port in oceanbase-ce. + # rpc_port: 2882 + # Cluster name for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the appname in oceanbase-ce. + # cluster_name: obcluster + # Cluster ID for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the cluster_id in oceanbase-ce. + # cluster_id: 1 + # Zone name for your observer. The default value is zone1. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the zone name in oceanbase-ce. + # zone_name: zone1 + # Monitor status for OceanBase Database. Active is to enable. Inactive is to disable. The default value is active. When you deploy an cluster automatically, OBD decides whether to enable this parameter based on depends. + ob_monitor_status: active + # Monitor status for your host. Active is to enable. Inactive is to disable. The default value is active. + host_monitor_status: active + # Whether to disable the basic authentication for HTTP service. True is to disable. False is to enable. The default value is false. + disable_http_basic_auth: false + # Whether to disable the basic authentication for the debug interface. True is to disable. False is to enable. The default value is false. 
+ disable_pprof_basic_auth: false \ No newline at end of file diff --git a/example/obagent/obagent-only-example.yaml b/example/obagent/obagent-only-example.yaml new file mode 100644 index 0000000000000000000000000000000000000000..85daf0eea36dccb5d8e400bb8a440ea26496454d --- /dev/null +++ b/example/obagent/obagent-only-example.yaml @@ -0,0 +1,68 @@ +## Only need to configure when remote login is required +# user: +# username: your username +# password: your password if need +# key_file: your ssh-key file path if need +# port: your ssh port, default 22 +# timeout: ssh connection timeout (second), default 30 +obagent: + servers: + # Please don't use hostname, only IP can be supported + - 192.168.1.2 + - 192.168.1.3 + - 192.168.1.4 + global: + # The working directory for obagent. obagent is started under this directory. This is a required field. + home_path: /root/observer + # The port that pulls and manages the metrics. The default port number is 8088. + server_port: 8088 + # Debug port for pprof. The default port number is 8089. + pprof_port: 8089 + # Log level. The default value is INFO. + log_level: INFO + # Log path. The default value is log/monagent.log. + log_path: log/monagent.log + # Encryption method. OBD supports aes and plain. The default value is plain. + crypto_method: plain + # Path to store the crypto key. The default value is conf/.config_secret.key. + # crypto_path: conf/.config_secret.key + # Size for a single log file. Log size is measured in Megabytes. The default value is 30M. + log_size: 30 + # Expiration time for logs. The default value is 7 days. + log_expire_day: 7 + # The maximum number for log files. The default value is 10. + log_file_count: 10 + # Whether to use local time for log files. The default value is true. + # log_use_localtime: true + # Whether to enable log compression. The default value is true. + # log_compress: true + # Username for HTTP authentication. The default value is admin. 
+ http_basic_auth_user: admin + # Password for HTTP authentication. The default value is root. + http_basic_auth_password: root + # Username for debug service. The default value is admin. + pprof_basic_auth_user: admin + # Password for debug service. The default value is root. + pprof_basic_auth_password: root + # Monitor username for OceanBase Database. The user must have read access to OceanBase Database as a system tenant. The default value is root. + monitor_user: root + # Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the root_password in oceanbase-ce. + monitor_password: + # The SQL port for observer. The default value is 2881. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the mysql_port in oceanbase-ce. + sql_port: 2881 + # The RPC port for observer. The default value is 2882. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the rpc_port in oceanbase-ce. + rpc_port: 2882 + # Cluster name for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the appname in oceanbase-ce. + cluster_name: obcluster + # Cluster ID for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the cluster_id in oceanbase-ce. + cluster_id: 1 + # Zone name for your observer. The default value is zone1. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the zone name in oceanbase-ce. + zone_name: zone1 + # Monitor status for OceanBase Database. Active is to enable. Inactive is to disable. The default value is active. When you deploy an cluster automatically, OBD decides whether to enable this parameter based on depends. 
+ ob_monitor_status: active + # Monitor status for your host. Active is to enable. Inactive is to disable. The default value is active. + host_monitor_status: active + # Whether to disable the basic authentication for HTTP service. True is to disable. False is to enable. The default value is false. + disable_http_basic_auth: false + # Whether to disable the basic authentication for the debug interface. True is to disable. False is to enable. The default value is false. + disable_pprof_basic_auth: false \ No newline at end of file diff --git a/example/obproxy/distributed-with-obproxy-example.yaml b/example/obproxy/distributed-with-obproxy-example.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a6abe79b5da1c15490f4cc535f4e9abced320b31 --- /dev/null +++ b/example/obproxy/distributed-with-obproxy-example.yaml @@ -0,0 +1,85 @@ +## Only need to configure when remote login is required +# user: +# username: your username +# password: your password if need +# key_file: your ssh-key file path if need +# port: your ssh port, default 22 +# timeout: ssh connection timeout (second), default 30 +oceanbase-ce: + servers: + - name: z1 + # Please don't use hostname, only IP can be supported + ip: 192.168.1.2 + - name: z2 + ip: 192.168.1.3 + - name: z3 + ip: 192.168.1.4 + global: + # Please set devname as the network adaptor's name whose ip is in the setting of severs. + # if set severs as "127.0.0.1", please set devname as "lo" + # if current ip is 192.168.1.10, and the ip's network adaptor's name is "eth0", please use "eth0" + devname: eth0 + # if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment. + memory_limit: 64G # The maximum running memory for an observer + # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. 
+ # system_memory: 30G + datafile_disk_percentage: 20 # The percentage of the data_dir space to the total disk space. This value takes effect only when datafile_size is 0. The default value is 90. + syslog_level: INFO # System log level. The default value is INFO. + enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. + enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false. + max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0. + # observer cluster name, consistent with obproxy's cluster_name + appname: obcluster + # root_password: # root user password, can be empty + # proxyro_password: # proxyro user pasword, consistent with obproxy's observer_sys_password, can be empty + # In this example , support multiple ob process in single node, so different process use different ports. + # If deploy ob cluster in multiple nodes, the port and path setting can be same. + z1: + mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. + rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. + home_path: /root/observer + # The directory for data storage. The default value is $home_path/store. + # data_dir: /data + # The directory for clog, ilog, and slog. The default value is the same as the data_dir value. + # redo_dir: /redo + zone: zone1 + z2: + mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. + rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. 
DO NOT change this value after the cluster is started. + # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. + home_path: /root/observer + # The directory for data storage. The default value is $home_path/store. + # data_dir: /data + # The directory for clog, ilog, and slog. The default value is the same as the data_dir value. + # redo_dir: /redo + zone: zone2 + z3: + mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started. + rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started. + # The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field. + home_path: /root/observer + # The directory for data storage. The default value is $home_path/store. + # data_dir: /data + # The directory for clog, ilog, and slog. The default value is the same as the data_dir value. + # redo_dir: /redo + zone: zone3 +obproxy: + # Set dependent components for the component. + # When the associated configurations are not done, OBD will automatically get the these configurations from the dependent components. + depends: + - oceanbase-ce + servers: + - 192.168.1.5 + global: + listen_port: 2883 # External port. The default value is 2883. + prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884. + home_path: /root/obproxy + # oceanbase root server list + # format: ip:mysql_port;ip:mysql_port. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # rs_list: 192.168.1.2:2881;192.168.1.3:2881;192.168.1.4:2881 + enable_cluster_checkout: false + # observer cluster name, consistent with oceanbase-ce's appname. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. 
+ # cluster_name: obcluster + # obproxy_sys_password: # obproxy sys user password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. diff --git a/example/obproxy/obproxy-only-example.yaml b/example/obproxy/obproxy-only-example.yaml new file mode 100644 index 0000000000000000000000000000000000000000..16a741fa32bd746df4646305c3f7e3eb08551726 --- /dev/null +++ b/example/obproxy/obproxy-only-example.yaml @@ -0,0 +1,22 @@ +## Only need to configure when remote login is required +# user: +# username: your username +# password: your password if need +# key_file: your ssh-key file path if need +# port: your ssh port, default 22 +# timeout: ssh connection timeout (second), default 30 +obproxy: + servers: + - 192.168.1.5 + global: + listen_port: 2883 # External port. The default value is 2883. + prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884. + home_path: /root/obproxy + # oceanbase root server list + # format: ip:mysql_port;ip:mysql_port. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + rs_list: 192.168.1.2:2881;192.168.1.3:2881;192.168.1.4:2881 + enable_cluster_checkout: false + # observer cluster name, consistent with oceanbase-ce's appname. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + cluster_name: obcluster + # obproxy_sys_password: # obproxy sys user password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. 
diff --git a/example/single-with-obproxy-example.yaml b/example/single-with-obproxy-example.yaml index 8e64918a51eeda38d3a3bc0370eac62eff6123db..bd97da9c7ad418264d19da54340d6295c9736b64 100644 --- a/example/single-with-obproxy-example.yaml +++ b/example/single-with-obproxy-example.yaml @@ -44,10 +44,10 @@ obproxy: prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884. home_path: /root/obproxy # oceanbase root server list - # format: ip:mysql_port;ip:mysql_port + # format: ip:mysql_port;ip:mysql_port. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. rs_list: 192.168.1.3:2881 enable_cluster_checkout: false - # observer cluster name, consistent with oceanbase-ce's appname + # observer cluster name, consistent with oceanbase-ce's appname. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. cluster_name: obcluster - # obproxy_sys_password: # obproxy sys user password, can be empty - # observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty + # obproxy_sys_password: # obproxy sys user password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. + # observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. 
diff --git a/ob-deploy.spec b/ob-deploy.spec index 4a587ecfc2f3de98a2ea71eb8e49bb3e7321bf83..6cc87aa66934ec0082abb9bf34291ba0b7171a9f 100644 --- a/ob-deploy.spec +++ b/ob-deploy.spec @@ -1,5 +1,5 @@ Name: ob-deploy -Version: 1.1.0 +Version: 1.1.1 Release: 1%{?dist} # if you want use the parameter of rpm_create on build time, # uncomment below @@ -44,7 +44,7 @@ rm -rf $BUILD_DIR build.log ${RPM_BUILD_ROOT} build dist obd.spec CID=`git log |head -n1 | awk -F' ' '{print $2}'` BRANCH=`git branch | grep -e "^\*" | awk -F' ' '{print $2}'` DATE=`date '+%b %d %Y %H:%M:%S'` -cat _cmd.py | sed "s//$CID/" | sed "s//$BRANCH/" | sed "s//$DATE/" > obd.py +cat _cmd.py | sed "s//$CID/" | sed "s//$BRANCH/" | sed "s//$DATE/" | sed "s//$OBD_DUBUG/" > obd.py mkdir -p $BUILD_DIR/SOURCES ${RPM_BUILD_ROOT} mkdir -p $BUILD_DIR/SOURCES/{site-packages} mkdir -p ${RPM_BUILD_ROOT}/usr/bin @@ -98,6 +98,13 @@ echo -e 'Installation of obd finished successfully\nPlease source /etc/profile.d #/sbin/chkconfig obd on %changelog +* Thu Sep 30 2021 obd 1.1.1 + - new features: obd test tych + - new features: new keyword "depends" for configuration file + - new features: new option "--wop/--without-parameter" for start/restart + - new features: a daemon will be started when obproxy is started + - new features: support obagent + - fix bug: fail to get devname when devname length more than 5 * Mon Aug 09 2021 obd 1.1.0 - new features: obd cluster autdeploy - new features: obd cluster tenant diff --git a/plugins-requirements.txt b/plugins-requirements.txt index d9b3a39e73a4033aea81d3197303b5c159671264..25e3f9a0bd98d5876e14c5dd4a0bcf38b49bd651 100644 --- a/plugins-requirements.txt +++ b/plugins-requirements.txt @@ -1 +1,2 @@ -MySQL-python==1.2.5 \ No newline at end of file +MySQL-python==1.2.5 +pycryptodome==3.10.1 \ No newline at end of file diff --git a/plugins-requirements3.txt b/plugins-requirements3.txt index cdcefb7fc8feba9751ec1a69c97fe500dd42969c..b680690ff291b996af2e68a84ddef79f981b792f 100644 --- 
a/plugins-requirements3.txt +++ b/plugins-requirements3.txt @@ -1 +1,2 @@ -PyMySQL==1.0.2 \ No newline at end of file +PyMySQL==1.0.2 +pycryptodome==3.10.1 \ No newline at end of file diff --git a/plugins/mysqltest/3.1.0/run_test.py b/plugins/mysqltest/3.1.0/run_test.py index 74a009322dd8587a25703e54a710b628fd0fd2a0..908d0b9e71d205adf62cc63de9f5bc480658401d 100644 --- a/plugins/mysqltest/3.1.0/run_test.py +++ b/plugins/mysqltest/3.1.0/run_test.py @@ -148,6 +148,7 @@ def run_test(plugin_context, test, env, *args, **kwargs): opt['connector'] = 'ob' opt['mysql_mode'] = True mysqltest_bin = opt['mysqltest_bin'] if 'mysqltest_bin' in opt and opt['mysqltest_bin'] else 'mysqltest' + obclient_bin = opt['obclient_bin'] if 'obclient_bin' in opt and opt['obclient_bin'] else 'obclient' soft = 3600 buffer = 0 @@ -224,8 +225,15 @@ def run_test(plugin_context, test, env, *args, **kwargs): opt['result_file'] = os.path.join(opt['result_dir'], test + suffix + '.result') - server_engine_cmd = '''obclient -h%s -P%s -uroot -Doceanbase -e "select value from __all_virtual_sys_parameter_stat where name like '_enable_static_typing_engine';"''' % (opt['host'], opt['port']) - result = LocalClient.execute_command(server_engine_cmd, env={}, timeout=3600, stdio=stdio) + if 'my_host' in opt or 'oracle_host' in opt: + # compare mode + pass + + + sys_pwd = cluster_config.get_global_conf().get('root_password', '') + exec_sql_cmd = "%s -h%s -P%s -uroot %s -A -Doceanbase -e" % (obclient_bin, opt['host'], opt['port'], ("-p'%s'" % sys_pwd) if sys_pwd else '') + server_engine_cmd = '''%s "select value from __all_virtual_sys_parameter_stat where name like '_enable_static_typing_engine';"''' % exec_sql_cmd + result = LocalClient.execute_command(server_engine_cmd, timeout=3600, stdio=stdio) if not result: stdio.error('engine failed, exit code %s. 
error msg: %s' % (result.code, result.stderr)) @@ -245,7 +253,7 @@ def run_test(plugin_context, test, env, *args, **kwargs): if 'java' in opt: opt['connector'] = 'ob' - LocalClient.execute_command('obclient -h %s -P %s -uroot -Doceanbase -e "alter system set _enable_static_typing_engine = True;select sleep(2);"' % (opt['host'], opt['port']), stdio=stdio) + LocalClient.execute_command('%s "alter system set _enable_static_typing_engine = True;select sleep(2);"' % (exec_sql_cmd), stdio=stdio) start_time = time.time() cmd = 'timeout %s %s %s' % (case_timeout, mysqltest_bin, str(Arguments(opt))) @@ -272,7 +280,7 @@ def run_test(plugin_context, test, env, *args, **kwargs): stdio.verbose(verbose_msg) cost = time.time() - start_time - LocalClient.execute_command('obclient -h %s -P %s -uroot -Doceanbase -e "alter system set _enable_static_typing_engine = False;select sleep(2);"' % (opt['host'], opt['port']), stdio=stdio) + LocalClient.execute_command('%s "alter system set _enable_static_typing_engine = False;select sleep(2);"' % (exec_sql_cmd), stdio=stdio) result = {"name" : test_ori, "ret" : retcode, "output" : output, "cmd" : cmd, "errput" : errput, 'cost': cost} stdio.stop_loading('fail' if retcode else 'succeed') return plugin_context.return_true(result=result) diff --git a/plugins/obagent/0.1/bootstrap.py b/plugins/obagent/0.1/bootstrap.py new file mode 100644 index 0000000000000000000000000000000000000000..2bcf94fdc77c3f9b177fb522c29169ad9c187883 --- /dev/null +++ b/plugins/obagent/0.1/bootstrap.py @@ -0,0 +1,25 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . + + +from __future__ import absolute_import, division, print_function + + +def bootstrap(plugin_context, cursor, *args, **kwargs): + plugin_context.return_true() diff --git a/plugins/obagent/0.1/connect.py b/plugins/obagent/0.1/connect.py new file mode 100644 index 0000000000000000000000000000000000000000..9999bfdb0e2e76d4a436c324720008b75d3c2466 --- /dev/null +++ b/plugins/obagent/0.1/connect.py @@ -0,0 +1,37 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
+ + +from __future__ import absolute_import, division, print_function + + +def connect(plugin_context, target_server=None, sys_root=True, *args, **kwargs): + stdio = plugin_context.stdio + cluster_config = plugin_context.cluster_config + servers = cluster_config.servers + result = {} + for server in servers: + config = cluster_config.get_server_conf_with_default(server) + if config.get('disable_http_basic_auth'): + auth = '' + else: + auth = '--user %s:%s' % (config['http_basic_auth_user'], config['http_basic_auth_password']) + cmd = '''curl %s -H "Content-Type:application/json" -L "http://%s:%s/metrics/stat"''' % (auth, server.ip, config['server_port']) + result[server] = cmd + return plugin_context.return_true(connect=result, cursor=result) diff --git a/plugins/obagent/0.1/destroy.py b/plugins/obagent/0.1/destroy.py new file mode 100644 index 0000000000000000000000000000000000000000..b3a4f2b6a9adcf4b91935042022ad67563b04518 --- /dev/null +++ b/plugins/obagent/0.1/destroy.py @@ -0,0 +1,46 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
+ + +from __future__ import absolute_import, division, print_function + + +def destroy(plugin_context, *args, **kwargs): + def clean(server, path): + client = clients[server] + ret = client.execute_command('rm -fr %s/*' % (path)) + if not ret: + global_ret = False + stdio.warn('fail to clean %s:%s' % (server, path)) + else: + stdio.verbose('%s:%s cleaned' % (server, path)) + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + global_ret = True + stdio.start_loading('obagent work dir cleaning') + for server in cluster_config.servers: + server_config = cluster_config.get_server_conf(server) + stdio.verbose('%s work path cleaning' % server) + clean(server, server_config['home_path']) + if global_ret: + stdio.stop_loading('succeed') + plugin_context.return_true() + else: + stdio.stop_loading('fail') \ No newline at end of file diff --git a/plugins/obagent/0.1/display.py b/plugins/obagent/0.1/display.py new file mode 100644 index 0000000000000000000000000000000000000000..377e784496e9087951335c63abea52df5e91b2ce --- /dev/null +++ b/plugins/obagent/0.1/display.py @@ -0,0 +1,47 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
+ +from __future__ import absolute_import, division, print_function + + +def display(plugin_context, cursor, *args, **kwargs): + stdio = plugin_context.stdio + clients = plugin_context.clients + cluster_config = plugin_context.cluster_config + servers = cluster_config.servers + result = [] + for server in servers: + client = clients[server] + config = cluster_config.get_server_conf_with_default(server) + if config.get('disable_http_basic_auth'): + auth = '' + else: + auth = '--user %s:%s' % (config['http_basic_auth_user'], config['http_basic_auth_password']) + cmd = '''curl %s -H "Content-Type:application/json" -L "http://%s:%s/metrics/stat"''' % (auth, server.ip, config['server_port']) + + result.append({ + 'ip': server.ip, + 'status': 'active' if client.execute_command(cmd) else 'inactive', + 'server_port': config['server_port'], + 'pprof_port': config['pprof_port'] + }) + + stdio.print_list(result, ['ip', 'server_port', 'pprof_port', 'status'], + lambda x: [x['ip'], x['server_port'], x['pprof_port'], x['status']], title='obagent') + plugin_context.return_true() diff --git a/plugins/obagent/0.1/file_map.yaml b/plugins/obagent/0.1/file_map.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ff1d31afb1378eab8b78ad7e1eb215194c9e6577 --- /dev/null +++ b/plugins/obagent/0.1/file_map.yaml @@ -0,0 +1,7 @@ +- src_path: ./home/admin/obagent/bin/monagent + target_path: bin/monagent + type: bin + mode: 755 +- src_path: ./home/admin/obagent/conf + target_path: conf + type: dir \ No newline at end of file diff --git a/plugins/obagent/0.1/generate_config.py b/plugins/obagent/0.1/generate_config.py new file mode 100644 index 0000000000000000000000000000000000000000..e5435ca1302d385e011f4db6006ebb422b55ffae --- /dev/null +++ b/plugins/obagent/0.1/generate_config.py @@ -0,0 +1,65 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. 
+# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . + + +from __future__ import absolute_import, division, print_function + + +def generate_config(plugin_context, deploy_config, *args, **kwargs): + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + success = True + have_depend = False + depends = ['oceanbase', 'oceanbase-ce'] + server_depends = {} + stdio.start_loading('Generate obagent configuration') + + for server in cluster_config.servers: + server_depends[server] = [] + server_config = cluster_config.get_server_conf(server) + if not server_config.get('home_path'): + stdio.error("obagent %s: missing configuration 'home_path' in configuration file" % server) + success = False + continue + if not success: + stdio.stop_loading('fail') + return + + for comp in cluster_config.depends: + if comp in depends: + have_depend = True + for server in cluster_config.servers: + obs_config = cluster_config.get_depled_config(comp, server) + if obs_config is not None: + server_depends[server].append(comp) + + if have_depend: + server_num = len(cluster_config.servers) + for server in cluster_config.servers: + for comp in depends: + if comp in server_depends[server]: + break + else: + cluster_config.update_server_conf(server, 'ob_monitor_status', 'inactive', False) + else: + cluster_config.update_global_conf('ob_monitor_status', 'inactive', False) + + 
stdio.stop_loading('succeed') + plugin_context.return_true() diff --git a/plugins/obagent/0.1/init.py b/plugins/obagent/0.1/init.py new file mode 100644 index 0000000000000000000000000000000000000000..a1c77a5f745f6f45a8e7d24f50f4b0602bf89c81 --- /dev/null +++ b/plugins/obagent/0.1/init.py @@ -0,0 +1,67 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
+ + +from __future__ import absolute_import, division, print_function + + +def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + global_ret = True + force = getattr(plugin_context.options, 'force', False) + stdio.start_loading('Initializes obagent work home') + for server in cluster_config.servers: + server_config = cluster_config.get_server_conf(server) + client = clients[server] + home_path = server_config['home_path'] + remote_home_path = client.execute_command('echo $HOME/.obd').stdout.strip() + remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path) + stdio.verbose('%s init cluster work home', server) + if force: + ret = client.execute_command('rm -fr %s/*' % home_path) + if not ret: + global_ret = False + stdio.error('failed to initialize %s home path: %s' % (server, ret.stderr)) + continue + else: + if client.execute_command('mkdir -p %s' % home_path): + ret = client.execute_command('ls %s' % (home_path)) + if not ret or ret.stdout.strip(): + global_ret = False + stdio.error('fail to init %s home path: %s is not empty' % (server, home_path)) + continue + else: + global_ret = False + stdio.error('fail to init %s home path: create %s failed' % (server, home_path)) + continue + + if not (client.execute_command("bash -c 'mkdir -p %s/{run,bin,lib,conf,log}'" % (home_path)) \ + and client.execute_command("cp -r %s/conf %s/" % (remote_repository_dir, home_path)) \ + and client.execute_command("if [ -d %s/bin ]; then ln -s %s/bin/* %s/bin; fi" % (remote_repository_dir, remote_repository_dir, home_path)) \ + and client.execute_command("if [ -d %s/lib ]; then ln -s %s/lib/* %s/lib; fi" % (remote_repository_dir, remote_repository_dir, home_path))): + global_ret = False + stdio.error('fail to init %s home path', server) + + if global_ret: + stdio.stop_loading('succeed') + plugin_context.return_true() + else: 
+ stdio.stop_loading('fail') \ No newline at end of file diff --git a/plugins/obagent/0.1/parameter.yaml b/plugins/obagent/0.1/parameter.yaml new file mode 100644 index 0000000000000000000000000000000000000000..99db386f6c74f875688c9cb7ce8609300e700f7e --- /dev/null +++ b/plugins/obagent/0.1/parameter.yaml @@ -0,0 +1,240 @@ +- name: home_path + require: true + type: STRING + need_restart: true + description_en: working directory for obagent + description_local: Obagent工作目录 +- name: server_port + require: true + type: INT + default: 8088 + min_value: 1025 + max_value: 65535 + need_restart: true + description_en: port number for pulling metrics and management + description_local: 提供拉取 metrics 和管理的端口 +- name: pprof_port + require: true + type: INT + default: 8089 + min_value: 1025 + max_value: 65535 + need_restart: true + description_en: port number for pprof + description_local: pprof 调试端口 +- name: log_level + require: true + type: STRING + default: INFO + min_value: NULL + max_value: NULL + need_restart: true + description_en: log level + description_local: 日志等级 +- name: log_path + require: true + type: STRING + default: log/monagent.log + min_value: NULL + max_value: NULL + need_restart: true + description_en: log path + description_local: 日志路径 +- name: crypto_method + require: true + type: STRING + default: plain + min_value: NULL + max_value: NULL + need_restart: true + description_en: crypto method {aes/plain} + description_local: 加密方式,仅支持 aes 和 plain +- name: crypto_path + require: true + type: STRING + default: conf/.config_secret.key + min_value: NULL + max_value: NULL + need_restart: true + description_en: file path for crypto key + description_local: 秘钥存放路径 +- name: log_size + require: true + type: INT + default: 30 + min_value: 1 + max_value: 256 + need_restart: true + description_en: size for a log file, measured in megabytes + description_local: 单个日志文件大小,单位为 M +- name: log_expire_day + require: true + type: INT + default: 7 + min_value: 1 + max_value: 30 
+ need_restart: true + description_en: log file expiration time, measured in days + description_local: 日志保留天数 +- name: log_file_count + require: true + type: INT + default: 10 + min_value: 1 + max_value: NULL + need_restart: true + description_en: the maximum number for log files. The default value is 10. + description_local: 最大保留日志数 +- name: log_use_localtime + require: true + type: BOOL + default: true + min_value: NULL + max_value: NULL + need_restart: true + description_en: whether to use local time for log files + description_local: 日志文件是否使用本地时间 +- name: log_compress + require: true + type: BOOL + default: true + min_value: NULL + max_value: NULL + need_restart: true + description_en: whether to enable log compression + description_local: 是否开启日志压缩 +- name: http_basic_auth_user + require: true + type: STRING + default: admin + min_value: NULL + max_value: NULL + need_restart: false + description_en: username for HTTP authentication + description_local: HTTP 服务认证用户名 +- name: http_basic_auth_password + require: false + type: STRING + default: root + min_value: NULL + max_value: NULL + need_restart: false + description_en: password for HTTP authentication + description_local: HTTP 服务认证密码 +- name: pprof_basic_auth_user + require: true + type: STRING + default: admin + min_value: NULL + max_value: NULL + need_restart: false + description_en: username for debug service + description_local: debug 接口认证用户名 +- name: pprof_basic_auth_password + require: false + type: STRING + default: root + min_value: NULL + max_value: NULL + need_restart: false + description_en: password for debug service + description_local: debug 接口认证密码 +- name: monitor_user + require: true + type: STRING + default: root + min_value: NULL + max_value: NULL + need_restart: false + description_en: monitor username for OceanBase Database. The user must have read access to OceanBase Database as a system tenant. 
+ description_local: OceanBase 数据库监控数据采集用户名, 需要该用户具有sys租户下 oceanbase 库的读权限 +- name: monitor_password + require: false + type: STRING + default: NULL + min_value: NULL + max_value: NULL + need_restart: false + description_en: monitor password for OceanBase Database + description_local: OceanBase 数据库监控数据采集用户密码 +- name: sql_port + require: false + type: INT + default: 2881 + min_value: 1025 + max_value: 65535 + need_restart: false + description_en: SQL port for observer + description_local: observer的 SQL 端口 +- name: rpc_port + require: false + type: INT + default: 2882 + min_value: 1025 + max_value: 65535 + need_restart: false + description_en: the RPC port for observer + description_local: observer 的 RPC 端口 +- name: cluster_name + require: false + type: STRING + default: obcluster + min_value: NULL + max_value: NULL + need_restart: false + description_en: cluster name for OceanBase Database + description_local: OceanBase Database 集群名 +- name: cluster_id + require: false + type: INT + default: 1 + min_value: 1 + max_value: 4294901759 + need_restart: false + description_en: cluster ID for OceanBase Database + description_local: OceanBase 集群 ID +- name: zone_name + require: false + type: STRING + default: zone1 + min_value: NULL + max_value: NULL + need_restart: false + description_en: zone name for your observer + description_local: observer 所在的 zone 名字 +- name: ob_monitor_status + require: true + type: STRING + default: active + min_value: NULL + max_value: NULL + need_restart: false + description_en: monitor status for OceanBase Database. Active is to enable. Inactive is to disable. + description_local: OceanBase 监控指标采集状态,active 表示开启,inactive 表示关闭 +- name: host_monitor_status + require: true + type: STRING + default: active + min_value: NULL + max_value: NULL + need_restart: false + description_en: monitor status for your host. Active is to enable. Inactive is to disable. 
+ description_local: 主机监控指标采集状态, active 表示开启, inactive 表示关闭 +- name: disable_http_basic_auth + require: true + type: BOOL + default: false + min_value: NULL + max_value: NULL + need_restart: false + description_en: whether to disable the basic authentication for HTTP service. True is to disable. False is to enable. + description_local: 是否禁用 HTTP 服务的basic auth 认证,true 表示禁用,false 表示不禁用 +- name: disable_pprof_basic_auth + require: true + type: BOOL + default: false + min_value: NULL + max_value: NULL + need_restart: false + description_en: whether to disable the basic authentication for the debug interface. True is to disable. False is to enable. + description_local: 是否禁用 debug 接口的basic auth 认证,true 表示禁用,false 表示不禁用 \ No newline at end of file diff --git a/plugins/obagent/0.1/reload.py b/plugins/obagent/0.1/reload.py new file mode 100644 index 0000000000000000000000000000000000000000..c4c57d384d502552c552fb89b92882bf02df1cf1 --- /dev/null +++ b/plugins/obagent/0.1/reload.py @@ -0,0 +1,96 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
+ + +from __future__ import absolute_import, division, print_function + +import os +import json +from copy import deepcopy +from glob import glob +from tool import YamlLoader + + +def reload(plugin_context, repository_dir, new_cluster_config, *args, **kwargs): + stdio = plugin_context.stdio + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + servers = cluster_config.servers + yaml = YamlLoader(stdio) + + config_map = { + "monitor_password": "root_password", + "sql_port": "mysql_port", + "rpc_port": "rpc_port", + "cluster_name": "appname", + "cluster_id": "cluster_id", + "zone_name": "zone", + } + global_change_conf = {} + for comp in ['oceanbase', 'oceanbase-ce']: + if comp in cluster_config.depends: + root_servers = {} + ob_config = cluster_config.get_depled_config(comp) + new_ob_config = new_cluster_config.get_depled_config(comp) + ob_config = {} if ob_config is None else ob_config + new_ob_config = {} if new_ob_config is None else new_ob_config + for key in config_map: + if ob_config.get(key) != new_ob_config.get(key): + global_change_conf[config_map[key]] = new_ob_config.get(key) + + config_kv = {} + stdio.verbose('load config properties') + for path in glob(os.path.join(repository_dir, 'conf/config_properties/*.yaml')): + with open(path) as f: + data = yaml.load(f)['configs'] + for config in data: + key = list(config['value'].keys())[0] + config_kv[key] = config['key'] + + global_ret = True + for server in servers: + change_conf = deepcopy(global_change_conf) + client = clients[server] + stdio.verbose('get %s old configuration' % (server)) + config = cluster_config.get_server_conf_with_default(server) + stdio.verbose('get %s new configuration' % (server)) + new_config = new_cluster_config.get_server_conf_with_default(server) + stdio.verbose('get %s cluster address' % (server)) + stdio.verbose('compare configuration of %s' % (server)) + for key in new_config: + if key not in config_kv: + continue + if key not in config or 
config[key] != new_config[key]: + change_conf[config_kv[key]] = new_config[key] + + if change_conf: + stdio.verbose('%s apply new configuration' % server) + if config.get('disable_http_basic_auth'): + auth = '' + else: + auth = '--user %s:%s' % (config['http_basic_auth_user'], config['http_basic_auth_password']) + data = [{'key': key, 'value': change_conf[key]} for key in change_conf] + cmd = '''curl %s -H "Content-Type:application/json" -d '%s' -L "http://%s:%s/api/v1/module/config/update"''' % ( + auth, json.dumps({'configs': data}), server.ip, config['server_port'] + ) + if not client.execute_command(cmd): + global_ret = False + stdio.error('fail to reload %s' % server) + + return plugin_context.return_true() if global_ret else None diff --git a/plugins/obagent/0.1/start.py b/plugins/obagent/0.1/start.py new file mode 100644 index 0000000000000000000000000000000000000000..2a9a5e11aadd2c5c2cfd24585762dcb4f6cb5f38 --- /dev/null +++ b/plugins/obagent/0.1/start.py @@ -0,0 +1,297 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
+ + +from __future__ import absolute_import, division, print_function + +import os +import re +import sys +import time +import random +import base64 +import tempfile +from glob import glob +from copy import deepcopy + +from Crypto import Random +from Crypto.Cipher import AES + +from tool import YamlLoader + + +stdio = None + + +if sys.version_info.major == 2: + + def generate_key(key): + genKey = [chr(0)] * 16 + for i in range(min(16, len(key))): + genKey[i] = key[i] + i = 16 + while i < len(key): + j = 0 + while j < 16 and i < len(key): + genKey[j] = chr(ord(genKey[j]) ^ ord(key[i])) + j, i = j+1, i+1 + return "".join(genKey) + + class AESCipher: + bs = AES.block_size + + def __init__(self, key): + self.key = generate_key(key) + + def encrypt(self, message): + message = self._pad(message) + iv = Random.new().read(AES.block_size) + cipher = AES.new(self.key, AES.MODE_CBC, iv) + return base64.b64encode(iv + cipher.encrypt(message)).decode('utf-8') + + def _pad(self, s): + return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs) + +else: + def generate_key(key): + genKey = [0] * 16 + for i in range(min(16, len(key))): + genKey[i] = key[i] + i = 16 + while i < len(key): + j = 0 + while j < 16 and i < len(key): + genKey[j] = genKey[j] ^ key[i] + j, i = j+1, i+1 + genKey = [chr(k) for k in genKey] + return bytes("".join(genKey), encoding="utf-8") + + class AESCipher: + bs = AES.block_size + + def __init__(self, key): + self.key = generate_key(key) + + def encrypt(self, message): + message = self._pad(message) + iv = Random.new().read(AES.block_size) + cipher = AES.new(self.key, AES.MODE_CBC, iv) + return str(base64.b64encode(iv + cipher.encrypt(bytes(message, encoding='utf-8'))), encoding="utf-8") + + def _pad(self, s): + return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs) + + +def encrypt(key, data): + key = base64.b64decode(key) + cipher = AESCipher(key) + return cipher.encrypt(data) + + +def get_port_socket_inode(client, port): 
+ port = hex(port)[2:].zfill(4).upper() + cmd = "bash -c 'cat /proc/net/{tcp,udp}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port + res = client.execute_command(cmd) + if not res or not res.stdout.strip(): + return False + stdio.verbose(res.stdout) + return res.stdout.strip().split('\n') + + +def confirm_port(client, pid, port): + socket_inodes = get_port_socket_inode(client, port) + if not socket_inodes: + return False + ret = client.execute_command("ls -l /proc/%s/fd/ |grep -E 'socket:\[(%s)\]'" % (pid, '|'.join(socket_inodes))) + if ret and ret.stdout.strip(): + return True + return False + + +def generate_aes_b64_key(): + n = random.randint(1, 3) * 8 + key = [] + c = 0 + while c < n: + key += chr(random.randint(33, 127)) + c += 1 + key = ''.join(key) + return base64.b64encode(key.encode('utf-8')) + + +def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): + global stdio + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + options = plugin_context.options + config_files = {} + pid_path = {} + targets = [] + yaml = YamlLoader(stdio) + need_encrypted = [] + config_map = { + "monitor_password": "root_password", + "sql_port": "mysql_port", + "rpc_port": "rpc_port", + "cluster_name": "appname", + "cluster_id": "cluster_id", + "zone_name": "zone", + } + + for server in cluster_config.servers: + client = clients[server] + server_config = cluster_config.get_server_conf(server) + targets.append('%s:%s' % (server.ip, server_config["server_port"])) + + for path in glob(os.path.join(repository_dir, 'conf/*/*.yaml')): + with open(path) as f: + text = f.read() + target = set(re.findall('\n((\s+)-\s+\{target\})', text)) + for pt in target: + text = text.replace(pt[0], ('%s- ' % pt[1]) + ('\n%s- ' % pt[1]).join(targets)) + + keys = set(re.findall('\${([\.\w]+)\}', text)) + for key in keys: + text = text.replace('${%s}' % key, '$\[[%s\]]' % key) + 
config_files[path] = text + + for path in glob(os.path.join(repository_dir, 'conf/config_properties/*.yaml')): + with open(path) as f: + data = yaml.load(f).get('configs', []) + for conf in data: + if conf.get('encrypted'): + key = conf.get('value') + if key and isinstance(key, dict): + key = list(key.keys())[0] + need_encrypted.append(key) + + for server in cluster_config.servers: + client = clients[server] + server_config = deepcopy(cluster_config.get_server_conf(server)) + default_server_config = cluster_config.get_server_conf_with_default(server) + obs_config = {} + home_path = server_config['home_path'] + remote_pid_path = '%s/run/obagent-%s-%s.pid' % (home_path, server.ip, server_config["server_port"]) + pid_path[server] = remote_pid_path + + remote_pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip() + if remote_pid and client.execute_command('ls /proc/%s' % remote_pid): + continue + + for comp in ['oceanbase', 'oceanbase-ce']: + obs_config = cluster_config.get_depled_config(comp, server) + if obs_config is not None: + break + + if obs_config is None: + obs_config = {} + + for key in config_map: + k = config_map[key] + if not server_config.get(key): + server_config[key] = obs_config.get(k, default_server_config.get(key)) + + for key in default_server_config: + if not server_config.get(key): + server_config[key] = default_server_config.get(key) + + server_config['host_ip'] = server.ip + for key in server_config: + if server_config[key] is None: + server_config[key] = '' + if isinstance(server_config[key], bool): + server_config[key] = str(server_config[key]).lower() + + if server_config.get('crypto_method', 'plain').lower() == 'aes': + secret_key = generate_aes_b64_key() + crypto_path = server_config.get('crypto_path', 'conf/.config_secret.key') + crypto_path = os.path.join(home_path, crypto_path) + client.execute_command('echo "%s" > %s' % (secret_key.decode('utf-8') if isinstance(secret_key, bytes) else secret_key, crypto_path)) + for 
key in need_encrypted: + value = server_config.get(key) + if value: + server_config[key] = encrypt(secret_key, value) + + for path in config_files: + with tempfile.NamedTemporaryFile(suffix=".yaml", mode='w') as tf: + text = config_files[path].format(**server_config) + text = text.replace('\[[', '{').replace('\]]', '}') + tf.write(text) + tf.flush() + if not client.put_file(tf.name, path.replace(repository_dir, home_path)): + stdio.error('Fail to send config file to %s' % server) + return + + config = { + 'log': { + 'level': server_config.get('log_level', 'info'), + 'filename': server_config.get('log_path', 'log/monagent.log'), + 'maxsize': int(server_config.get('log_size', 30)), + 'maxage': int(server_config.get('log_expire_day', 7)), + 'maxbackups': int(server_config.get('maxbackups', 10)), + 'localtime': True if server_config.get('log_use_localtime', True) else False, + 'compress': True if server_config.get('log_compress', True) else False + }, + 'server': { + 'address': '0.0.0.0:%d' % int(server_config.get('server_port', 8088)), + 'adminAddress': '0.0.0.0:%d' % int(server_config.get('pprof_port', 8089)), + 'runDir': 'run' + }, + 'cryptoMethod': server_config['crypto_method'] if server_config.get('crypto_method').lower() in ['aes', 'plain'] else 'plain', + 'cryptoPath': server_config.get('crypto_path'), + 'modulePath': 'conf/module_config', + 'propertiesPath': 'conf/config_properties' + } + + with tempfile.NamedTemporaryFile(suffix=".yaml") as tf: + yaml.dump(config, tf) + if not client.put_file(tf.name, os.path.join(home_path, 'conf/monagent.yaml')): + stdio.error('Fail to send config file to %s' % server) + return + + log_path = '%s/log/monagent_stdout.log' % home_path + client.execute_command('cd %s;nohup %s/bin/monagent -c conf/monagent.yaml >> %s 2>&1 & echo $! 
> %s' % (home_path, home_path, log_path, remote_pid_path)) + + stdio.start_loading('obagent program health check') + time.sleep(1) + failed = [] + fail_time = 0 + for server in cluster_config.servers: + client = clients[server] + server_config = cluster_config.get_server_conf(server) + stdio.verbose('%s program health check' % server) + pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip() + if pid: + if confirm_port(client, pid, int(server_config["server_port"])): + stdio.verbose('%s obagent[pid: %s] started', server, pid) + client.execute_command('echo %s > %s' % (pid, pid_path[server])) + else: + fail_time += 1 + else: + failed.append('failed to start %s obagent' % server) + + if failed: + stdio.stop_loading('fail') + for msg in failed: + stdio.warn(msg) + plugin_context.return_false() + else: + stdio.stop_loading('succeed') + plugin_context.return_true(need_bootstrap=False) diff --git a/plugins/obagent/0.1/start_check.py b/plugins/obagent/0.1/start_check.py new file mode 100644 index 0000000000000000000000000000000000000000..6c963704c8a91d1fa47866a211ab95e5b957672e --- /dev/null +++ b/plugins/obagent/0.1/start_check.py @@ -0,0 +1,90 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
+ + +from __future__ import absolute_import, division, print_function + + +stdio = None +success = True + + +def get_port_socket_inode(client, port): + port = hex(port)[2:].zfill(4).upper() + cmd = "bash -c 'cat /proc/net/{tcp,udp}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port + res = client.execute_command(cmd) + if not res or not res.stdout.strip(): + return False + stdio.verbose(res.stdout) + return res.stdout.strip().split('\n') + + +def start_check(plugin_context, strict_check=False, *args, **kwargs): + def alert(*arg, **kwargs): + global success + if strict_check: + success = False + stdio.error(*arg, **kwargs) + else: + stdio.warn(*arg, **kwargs) + def critical(*arg, **kwargs): + global success + success = False + stdio.error(*arg, **kwargs) + global stdio + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + servers_port = {} + stdio.start_loading('Check before start obagent') + for server in cluster_config.servers: + ip = server.ip + client = clients[server] + server_config = cluster_config.get_server_conf(server) + port = int(server_config["server_port"]) + prometheus_port = int(server_config["pprof_port"]) + remote_pid_path = "%s/run/obagent-%s-%s.pid" % (server_config['home_path'], server.ip, server_config["server_port"]) + remote_pid = client.execute_command("cat %s" % remote_pid_path).stdout.strip() + if remote_pid: + if client.execute_command('ls /proc/%s' % remote_pid): + continue + + if ip not in servers_port: + servers_port[ip] = {} + ports = servers_port[ip] + server_config = cluster_config.get_server_conf_with_default(server) + stdio.verbose('%s port check' % server) + for key in ['server_port', 'pprof_port']: + port = int(server_config[key]) + alert_f = alert if key == 'pprof_port' else critical + if port in ports: + alert_f('Configuration conflict %s: %s port is used for %s\'s %s' % (server, port, ports[port]['server'], 
ports[port]['key'])) + continue + ports[port] = { + 'server': server, + 'key': key + } + if get_port_socket_inode(client, port): + alert_f('%s:%s port is already used' % (ip, port)) + + if success: + stdio.stop_loading('succeed') + plugin_context.return_true() + else: + stdio.stop_loading('fail') \ No newline at end of file diff --git a/plugins/obagent/0.1/status.py b/plugins/obagent/0.1/status.py new file mode 100644 index 0000000000000000000000000000000000000000..4a561973397c866f48e4539b37bf41fd195c89a9 --- /dev/null +++ b/plugins/obagent/0.1/status.py @@ -0,0 +1,40 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
+ + +from __future__ import absolute_import, division, print_function + + +def status(plugin_context, *args, **kwargs): + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + cluster_status = {} + for server in cluster_config.servers: + server_config = cluster_config.get_server_conf(server) + client = clients[server] + cluster_status[server] = 0 + if 'home_path' not in server_config: + stdio.print('%s home_path is empty', server) + continue + remote_pid_path = '%s/run/obagent-%s-%s.pid' % (server_config["home_path"], server.ip, server_config["server_port"]) + remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip() + if remote_pid and client.execute_command('ls /proc/%s' % remote_pid): + cluster_status[server] = 1 + return plugin_context.return_true(cluster_status=cluster_status) diff --git a/plugins/obagent/0.1/stop.py b/plugins/obagent/0.1/stop.py new file mode 100644 index 0000000000000000000000000000000000000000..3f4fd9f85a369e2da1eefa3673d020c76197249d --- /dev/null +++ b/plugins/obagent/0.1/stop.py @@ -0,0 +1,107 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . 
+ + +from __future__ import absolute_import, division, print_function + +import time + + +stdio = None + + +def get_port_socket_inode(client, port): + port = hex(port)[2:].zfill(4).upper() + cmd = "bash -c 'cat /proc/net/{tcp,udp}' | awk -F' ' '{print $2,$10}' | grep '00000000:%s' | awk -F' ' '{print $2}' | uniq" % port + res = client.execute_command(cmd) + inode = res.stdout.strip() + if not res or not inode: + return False + stdio.verbose("inode: %s" % inode) + return inode.split('\n') + + +def confirm_port(client, pid, port): + socket_inodes = get_port_socket_inode(client, port) + if not socket_inodes: + return False + ret = client.execute_command("ls -l /proc/%s/fd/ |grep -E 'socket:\[(%s)\]'" % (pid, '|'.join(socket_inodes))) + if ret and ret.stdout.strip(): + return True + return False + + +def stop(plugin_context, *args, **kwargs): + global stdio + cluster_config = plugin_context.cluster_config + clients = plugin_context.clients + stdio = plugin_context.stdio + + servers = {} + stdio.start_loading('Stop obagent') + for server in cluster_config.servers: + server_config = cluster_config.get_server_conf(server) + client = clients[server] + if 'home_path' not in server_config: + stdio.verbose('%s home_path is empty', server) + continue + remote_pid_path = '%s/run/obagent-%s-%s.pid' % (server_config["home_path"], server.ip, server_config["server_port"]) + remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip() + if remote_pid: + if client.execute_command('ls /proc/%s' % remote_pid): + stdio.verbose('%s obagent[pid:%s] stopping ...' 
% (server, remote_pid)) + client.execute_command('kill -9 %s' % (remote_pid)) + servers[server] = { + 'client': client, + 'server_port': server_config['server_port'], + 'pprof_port': server_config['pprof_port'], + 'pid': remote_pid, + 'path': remote_pid_path + } + else: + stdio.verbose('%s obagent is not running' % server) + + count = 10 + time.sleep(1) + while count and servers: + tmp_servers = {} + for server in servers: + data = servers[server] + client = clients[server] + stdio.verbose('%s check whether the port is released' % server) + for key in ['pprof_port', 'server_port']: + if data[key] and confirm_port(data['client'], data['pid'], data[key]): + tmp_servers[server] = data + break + data[key] = '' + else: + client.execute_command('rm -f %s' % data['path']) + stdio.verbose('%s obagent is stopped', server) + servers = tmp_servers + count -= 1 + if count and servers: + time.sleep(3) + + if servers: + stdio.stop_loading('fail') + for server in servers: + stdio.warn('%s port not released', server) + else: + stdio.stop_loading('succeed') + plugin_context.return_true() \ No newline at end of file diff --git a/plugins/obproxy/3.1.0/generate_config.py b/plugins/obproxy/3.1.0/generate_config.py index 09d0cec2a8c6245a575e09487d24db1e0b4a48cc..a54b7997c2a8194794b3d512b33853634735bc00 100644 --- a/plugins/obproxy/3.1.0/generate_config.py +++ b/plugins/obproxy/3.1.0/generate_config.py @@ -34,35 +34,22 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs): stdio.error("obproxy %s: missing configuration 'home_path' in configuration file" % server) success = False continue - cluster_config.update_server_conf(server, 'enable_cluster_checkout', False) if not success: stdio.stop_loading('fail') return - ob_cluster_config = None - for comp in ['oceanbase', 'oceanbase-ce']: - if comp in deploy_config.components: - ob_cluster_config = deploy_config.components[comp] - break + global_config = cluster_config.get_global_conf() + if 
global_config.get('enable_cluster_checkout') is None: + cluster_config.update_global_conf('enable_cluster_checkout', False) - if ob_cluster_config: - root_servers = {} - cluster_name = ob_cluster_config.get_global_conf().get('appname') - for server in ob_cluster_config.servers: - config = ob_cluster_config.get_server_conf_with_default(server) - zone = config['zone'] - cluster_name = cluster_name if cluster_name else config.get('appname') - if zone not in root_servers: - root_servers[zone] = '%s:%s' % (server.ip, config['mysql_port']) - rs_list = ';'.join([root_servers[zone] for zone in root_servers]) + have_depend = False + depends = ['oceanbase', 'oceanbase-ce'] - cluster_name = cluster_name if cluster_name else 'obcluster' - for server in cluster_config.servers: - server_config = cluster_config.get_server_conf(server) - if not server_config.get('rs_list'): - cluster_config.update_server_conf(server, 'rs_list', rs_list, False) - if not server_config.get('cluster_name'): - cluster_config.update_server_conf(server, 'cluster_name', cluster_name, False) + for comp in depends: + if comp in deploy_config.components: + deploy_config.add_depend_for_component('obagent', comp, False) + have_depend = True + break stdio.stop_loading('succeed') return plugin_context.return_true() \ No newline at end of file diff --git a/plugins/obproxy/3.1.0/init.py b/plugins/obproxy/3.1.0/init.py index ad6f9cc9b3fc0a86ad71d54b07cff2282babfc8e..ac682eefe673e2e83105f78ffa036f0b708bab5b 100644 --- a/plugins/obproxy/3.1.0/init.py +++ b/plugins/obproxy/3.1.0/init.py @@ -26,7 +26,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): stdio = plugin_context.stdio global_ret = True force = getattr(plugin_context.options, 'force', False) - stdio.start_loading('Initializes cluster work home') + stdio.start_loading('Initializes obproxy work home') for server in cluster_config.servers: server_config = cluster_config.get_server_conf(server) client = clients[server] diff --git 
a/plugins/obproxy/3.1.0/obproxyd.sh b/plugins/obproxy/3.1.0/obproxyd.sh new file mode 100644 index 0000000000000000000000000000000000000000..d5dd90e8d50d5a6b94a2077e055bf494765550d3 --- /dev/null +++ b/plugins/obproxy/3.1.0/obproxyd.sh @@ -0,0 +1,44 @@ + +path=$1 +ip=$2 +port=$3 + +function start() { + obproxyd_path=$path/run/obproxyd-$ip-$port.pid + obproxy_path=$path/run/obproxy-$ip-$port.pid + + cat $obproxyd_path | xargs kill -9 + + echo $$ > $obproxyd_path + if [ $? != 0 ]; then + exit $? + fi + + pid=`cat $obproxy_path` + ls /proc/$pid > /dev/null + if [ $? != 0 ]; then + exit $? + fi + + while [ 1 ]; + do + sleep 1 + ls /proc/$pid > /dev/null + if [ $? != 0 ]; then + cd $path + $path/bin/obproxy --listen_port $port + pid=`ps -aux | egrep "$path/bin/obproxy --listen_port $port$" | grep -v grep | awk '{print $2}'` + echo $pid > $obproxy_path + if [ $? != 0 ]; then + exit $? + fi + fi + done +} + +if [ "$4" == "daemon" ] +then + start +else + nohup bash $0 $path $ip $port daemon > /dev/null 2>&1 & +fi \ No newline at end of file diff --git a/plugins/obproxy/3.1.0/reload.py b/plugins/obproxy/3.1.0/reload.py index 5900c0324c7c0f9a5f719338b01f4101b5a8fe5f..cfb593e929a11aad806c9ad4eee1df79dbdfa923 100644 --- a/plugins/obproxy/3.1.0/reload.py +++ b/plugins/obproxy/3.1.0/reload.py @@ -29,6 +29,22 @@ def reload(plugin_context, cursor, new_cluster_config, *args, **kwargs): change_conf = {} global_change_conf = {} global_ret = True + + config_map = { + 'observer_sys_password': 'proxyro_password', + 'cluster_name': 'appname' + } + for comp in ['oceanbase', 'oceanbase-ce']: + if comp in cluster_config.depends: + root_servers = {} + ob_config = cluster_config.get_depled_config(comp) + new_ob_config = new_cluster_config.get_depled_config(comp) + ob_config = {} if ob_config is None else ob_config + new_ob_config = {} if new_ob_config is None else new_ob_config + for key in config_map: + if ob_config.get(key) != new_ob_config.get(key): + global_change_conf[config_map[key]] = 
new_ob_config.get(key) + for server in servers: change_conf[server] = {} stdio.verbose('get %s old configuration' % (server)) @@ -45,6 +61,7 @@ def reload(plugin_context, cursor, new_cluster_config, *args, **kwargs): global_change_conf[key] = 1 else: global_change_conf[key] += 1 + servers_num = len(servers) stdio.verbose('apply new configuration') success_conf = {} diff --git a/plugins/obproxy/3.1.0/start.py b/plugins/obproxy/3.1.0/start.py index 3ed90cc503d503d2e145e47294a3be1ad40506c4..f54849b9b47400b335e7ece06dc0aaa67043dd0e 100644 --- a/plugins/obproxy/3.1.0/start.py +++ b/plugins/obproxy/3.1.0/start.py @@ -77,16 +77,51 @@ def is_started(client, remote_bin_path, port, home_path, command): return confirm_home_path(client, pid, home_path) and confirm_command(client, pid, command) +def obproxyd(home_path, client, ip, port): + path = os.path.join(os.path.split(__file__)[0], 'obproxyd.sh') + retmoe_path = os.path.join(home_path, 'obproxyd.sh') + if os.path.exists(path): + shell = '''bash %s %s %s %s''' % (retmoe_path, home_path, ip, port) + return client.put_file(path, retmoe_path) and client.execute_command(shell) + return False + + def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): global stdio cluster_config = plugin_context.cluster_config clients = plugin_context.clients stdio = plugin_context.stdio + options = plugin_context.options clusters_cmd = {} real_cmd = {} pid_path = {} need_bootstrap = True + for comp in ['oceanbase', 'oceanbase-ce']: + if comp in cluster_config.depends: + root_servers = {} + ob_config = cluster_config.get_depled_config(comp) + if not ob_config: + continue + odp_config = cluster_config.get_global_conf() + for server in cluster_config.get_depled_servers(comp): + config = cluster_config.get_depled_config(comp, server) + zone = config['zone'] + if zone not in root_servers: + root_servers[zone] = '%s:%s' % (server.ip, config['mysql_port']) + depend_rs_list = ';'.join([root_servers[zone] for zone in root_servers]) 
+ cluster_config.update_global_conf('rs_list', depend_rs_list, save=False) + + config_map = { + 'observer_sys_password': 'proxyro_password', + 'cluster_name': 'appname' + } + for key in config_map: + ob_key = config_map[key] + if not odp_config.get(key) and ob_config.get(ob_key): + cluster_config.update_global_conf(key, ob_config.get(ob_key), save=False) + break + error = False for server in cluster_config.servers: client = clients[server] @@ -107,28 +142,38 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): remote_home_path = client.execute_command('echo $HOME/.obd').stdout.strip() remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path) client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path)) - client.execute_command("ln -s %s/bin/* %s/bin" % (remote_repository_dir, home_path)) - client.execute_command("ln -s %s/lib/* %s/lib" % (remote_repository_dir, home_path)) + client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path)) + client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path)) pid_path[server] = "%s/run/obproxy-%s-%s.pid" % (home_path, server.ip, server_config["listen_port"]) - not_opt_str = [ - 'listen_port', - 'prometheus_listen_port', - 'rs_list', - 'cluster_name' - ] - start_unuse = ['home_path', 'observer_sys_password', 'obproxy_sys_password'] - get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] - opt_str = [] - for key in server_config: - if key not in start_unuse and key not in not_opt_str: - value = get_value(key) - opt_str.append('%s=%s' % (key, value)) - cmd = ['-o %s' % ','.join(opt_str)] - for key in not_opt_str: - if key in server_config: - value = get_value(key) - cmd.append('--%s %s' % (key, value)) + + if getattr(options, 'without_parameter', False) and client.execute_command('ls %s/etc/obproxy_config.bin' % home_path): + use_parameter = False + else: + 
use_parameter = True + + if use_parameter: + not_opt_str = [ + 'listen_port', + 'prometheus_listen_port', + 'rs_list', + 'cluster_name' + ] + start_unuse = ['home_path', 'observer_sys_password', 'obproxy_sys_password'] + get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] + opt_str = [] + for key in server_config: + if key not in start_unuse and key not in not_opt_str: + value = get_value(key) + opt_str.append('%s=%s' % (key, value)) + cmd = ['-o %s' % ','.join(opt_str)] + for key in not_opt_str: + if key in server_config: + value = get_value(key) + cmd.append('--%s %s' % (key, value)) + else: + cmd = ['--listen_port %s' % server_config.get('listen_port')] + real_cmd[server] = '%s/bin/obproxy %s' % (home_path, ' '.join(cmd)) clusters_cmd[server] = 'cd %s; %s' % (home_path, real_cmd[server]) @@ -157,7 +202,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): stdio.stop_loading('fail') stdio.error('failed to start %s obproxy: %s' % (server, ret.stderr)) return plugin_context.return_false() - client.execute_command('''ps -aux | grep '%s' | grep -v grep | awk '{print $2}' > %s''' % (cmd, pid_path[server])) + client.execute_command('''ps -aux | grep -e '%s$' | grep -v grep | awk '{print $2}' > %s''' % (cmd, pid_path[server])) stdio.stop_loading('succeed') stdio.start_loading('obproxy program health check') @@ -175,13 +220,13 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): if confirm: stdio.verbose('%s obproxy[pid: %s] started', server, pid) client.execute_command('echo %s > %s' % (pid, pid_path[server])) + obproxyd(server_config["home_path"], client, server.ip, server_config["listen_port"]) break else: fail_time += 1 if fail_time == len(remote_pid.split('\n')): failed.append('failed to start %s obproxy' % server) else: - stdio.verbose('No such file: %s' % pid_path[server]) failed.append('failed to start %s obproxy' % server) if failed: 
stdio.stop_loading('fail') diff --git a/plugins/obproxy/3.1.0/stop.py b/plugins/obproxy/3.1.0/stop.py index 2bc924392322ba6b4bb0c18a59e3ad40314c4344..b1a17922dd0afec00d760a085b6d5ab08f3ddd16 100644 --- a/plugins/obproxy/3.1.0/stop.py +++ b/plugins/obproxy/3.1.0/stop.py @@ -62,11 +62,12 @@ def stop(plugin_context, *args, **kwargs): stdio.verbose('%s home_path is empty', server) continue remote_pid_path = '%s/run/obproxy-%s-%s.pid' % (server_config["home_path"], server.ip, server_config["listen_port"]) + obproxyd_pid_path = '%s/run/obproxyd-%s-%s.pid' % (server_config["home_path"], server.ip, server_config["listen_port"]) remote_pid = client.execute_command('cat %s' % remote_pid_path).stdout.strip() if remote_pid: if client.execute_command('ls /proc/%s' % remote_pid): stdio.verbose('%s obproxy[pid:%s] stopping ...' % (server, remote_pid)) - client.execute_command('kill -9 -%s' % remote_pid) + client.execute_command('cat %s | xargs kill -9; kill -9 -%s' % (obproxyd_pid_path, remote_pid)) servers[server] = { 'client': client, 'listen_port': server_config['listen_port'], @@ -84,6 +85,7 @@ def stop(plugin_context, *args, **kwargs): tmp_servers = {} for server in servers: data = servers[server] + client = clients[server] stdio.verbose('%s check whether the port is released' % server) for key in ['prometheus_listen_port', 'listen_port']: if data[key] and check(data['client'], data['pid'], data[key]): diff --git a/plugins/obproxy/3.1.0/upgrade.py b/plugins/obproxy/3.1.0/upgrade.py index ebd52bdba7d3d271bbc3650efd2bc2b6c0c742d9..041a0638e905110c2f0abfe1848b4d1079c82449 100644 --- a/plugins/obproxy/3.1.0/upgrade.py +++ b/plugins/obproxy/3.1.0/upgrade.py @@ -30,6 +30,18 @@ def upgrade(plugin_context, stop_plugin, start_plugin, connect_plugin, display_p cmd = plugin_context.cmd options = plugin_context.options stdio = plugin_context.stdio + local_home_path = kwargs.get('local_home_path') + repository_dir = kwargs.get('repository_dir') + + for server in cluster_config.servers: + 
client = clients[server] + server_config = cluster_config.get_server_conf(server) + home_path = server_config['home_path'] + remote_home_path = client.execute_command('echo $HOME/.obd').stdout.strip() + remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path) + client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path)) + client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path)) + client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path)) if not stop_plugin(components, clients, cluster_config, cmd, options, stdio, *args, **kwargs): return diff --git a/plugins/oceanbase/3.1.0/generate_config.py b/plugins/oceanbase/3.1.0/generate_config.py index 0dce3b8c0812ceab099bdf272cce63690101c5f2..098b2685b0d547c3dc3f4d2dc01fa64f8fc99339 100644 --- a/plugins/oceanbase/3.1.0/generate_config.py +++ b/plugins/oceanbase/3.1.0/generate_config.py @@ -167,9 +167,9 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs): cluster_config.update_server_conf(server, 'system_memory', get_system_memory(memory_limit), False) # cpu - if not user_server_config.get('cpu_count'): + if not server_config.get('cpu_count'): ret = client.execute_command("grep -e 'processor\s*:' /proc/cpuinfo | wc -l") - if ret and ret.stdout.isdigit(): + if ret and ret.stdout.strip().isdigit(): cpu_num = int(ret.stdout) server_config['cpu_count'] = max(16, int(cpu_num * 0.8)) else: @@ -178,11 +178,11 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs): cluster_config.update_server_conf(server, 'cpu_count', max(16, server_config['cpu_count']), False) # disk - if not user_server_config.get('datafile_size') or not user_server_config.get('datafile_disk_percentage'): + if not server_config.get('datafile_size') and not user_server_config.get('datafile_disk_percentage'): disk = {'/': 0} - ret = client.execute_command('df --output=size,avail,target') + ret = client.execute_command('df 
--block-size=1024') if ret: - for total, avail, path in re.findall('(\d+)\s+(\d+)\s+(.+)', ret.stdout): + for total, used, avail, puse, path in re.findall('(\d+)\s+(\d+)\s+(\d+)\s+(\d+%)\s+(.+)', ret.stdout): disk[path] = { 'total': int(total) << 10, 'avail': int(avail) << 10, diff --git a/plugins/oceanbase/3.1.0/init.py b/plugins/oceanbase/3.1.0/init.py index a8d21670f0045840cdac007679c8e6ddfefec46f..532bf20a61cfc4744f8a2d4a615880c67e64c746 100644 --- a/plugins/oceanbase/3.1.0/init.py +++ b/plugins/oceanbase/3.1.0/init.py @@ -65,7 +65,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): servers_dirs = {} force = getattr(plugin_context.options, 'force', False) stdio.verbose('option `force` is %s' % force) - stdio.start_loading('Initializes cluster work home') + stdio.start_loading('Initializes observer work home') for server in cluster_config.servers: ip = server.ip if ip not in servers_dirs: @@ -101,7 +101,7 @@ def init(plugin_context, local_home_path, repository_dir, *args, **kwargs): 'key': key, } - stdio.verbose('%s initializes cluster work home' % server) + stdio.verbose('%s initializes observer work home' % server) if force: ret = client.execute_command('rm -fr %s/*' % home_path) if not ret: diff --git a/plugins/oceanbase/3.1.0/start.py b/plugins/oceanbase/3.1.0/start.py index e437880702d5ef54d5881ca6fdb844e0c3bdc2d4..182f087be2d4052fcda3cb11a847722349c7b954 100644 --- a/plugins/oceanbase/3.1.0/start.py +++ b/plugins/oceanbase/3.1.0/start.py @@ -84,7 +84,7 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): return stdio.start_loading('Start observer') - for server in cluster_config.servers: + for server in cluster_config.original_servers: config = cluster_config.get_server_conf(server) zone = config['zone'] if zone not in root_servers: @@ -116,36 +116,44 @@ def start(plugin_context, local_home_path, repository_dir, *args, **kwargs): continue stdio.verbose('%s start command construction' % server) - 
not_opt_str = { - 'zone': '-z', - 'mysql_port': '-p', - 'rpc_port': '-P', - 'nodaemon': '-N', - 'appname': '-n', - 'cluster_id': '-c', - 'data_dir': '-d', - 'devname': '-i', - 'syslog_level': '-l', - 'ipv6': '-6', - 'mode': '-m', - 'scn': '-f' - } - get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] - opt_str = [] - for key in server_config: - if key not in ['home_path', 'obconfig_url', 'proxyro_password'] and key not in not_opt_str: - value = get_value(key) - opt_str.append('%s=%s' % (key, value)) - cmd = [] - if cfg_url: - opt_str.append('obconfig_url=\'%s\'' % cfg_url) + if getattr(options, 'without_parameter', False) and client.execute_command('ls %s/etc/observer.config.bin' % home_path): + use_parameter = False else: - cmd.append(rs_list_opt) - cmd.append('-o %s' % ','.join(opt_str)) - for key in not_opt_str: - if key in server_config: - value = get_value(key) - cmd.append('%s %s' % (not_opt_str[key], value)) + use_parameter = True + + cmd = [] + if use_parameter: + not_opt_str = { + 'zone': '-z', + 'mysql_port': '-p', + 'rpc_port': '-P', + 'nodaemon': '-N', + 'appname': '-n', + 'cluster_id': '-c', + 'data_dir': '-d', + 'devname': '-i', + 'syslog_level': '-l', + 'ipv6': '-6', + 'mode': '-m', + 'scn': '-f' + } + not_cmd_opt = ['home_path', 'obconfig_url', 'proxyro_password', 'redo_dir', 'clog_dir', 'ilog_dir', 'slog_dir'] + get_value = lambda key: "'%s'" % server_config[key] if isinstance(server_config[key], str) else server_config[key] + opt_str = [] + for key in server_config: + if key not in not_cmd_opt and key not in not_opt_str: + value = get_value(key) + opt_str.append('%s=%s' % (key, value)) + if cfg_url: + opt_str.append('obconfig_url=\'%s\'' % cfg_url) + else: + cmd.append(rs_list_opt) + cmd.append('-o %s' % ','.join(opt_str)) + for key in not_opt_str: + if key in server_config: + value = get_value(key) + cmd.append('%s %s' % (not_opt_str[key], value)) + clusters_cmd[server] = 'cd %s; 
%s/bin/observer %s' % (home_path, home_path, ' '.join(cmd)) for server in clusters_cmd: diff --git a/plugins/oceanbase/3.1.0/start_check.py b/plugins/oceanbase/3.1.0/start_check.py index 03685eb6736c68b6211bc9e98e34bb055b9a07ae..8ce384a52f9e65b06605675d594ff9e29de3dcd8 100644 --- a/plugins/oceanbase/3.1.0/start_check.py +++ b/plugins/oceanbase/3.1.0/start_check.py @@ -96,6 +96,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): for server in cluster_config.servers: ip = server.ip client = clients[server] + servers_clients[ip] = client server_config = cluster_config.get_server_conf_with_default(server) home_path = server_config['home_path'] remote_pid_path = '%s/run/observer.pid' % home_path @@ -104,7 +105,6 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): if client.execute_command('ls /proc/%s' % remote_pid): continue - servers_clients[ip] = client if ip not in servers_port: servers_disk[ip] = {} servers_port[ip] = {} @@ -173,7 +173,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): inferfaces[devname] = [] inferfaces[devname].append(ip) - for ip in servers_clients: + for ip in servers_disk: client = servers_clients[ip] ret = client.execute_command('cat /proc/sys/fs/aio-max-nr /proc/sys/fs/aio-nr') if not ret: @@ -217,9 +217,9 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): critical('(%s) not enough memory. 
(Free: %s, Need: %s)' % (ip, formate_size(free_memory), formate_size(total_use))) # disk disk = {'/': 0} - ret = client.execute_command('df --output=size,avail,target') + ret = client.execute_command('df --block-size=1024') if ret: - for total, avail, path in re.findall('(\d+)\s+(\d+)\s+(.+)', ret.stdout): + for total, used, avail, puse, path in re.findall('(\d+)\s+(\d+)\s+(\d+)\s+(\d+%)\s+(.+)', ret.stdout): disk[path] = { 'total': int(total) << 10, 'avail': int(avail) << 10, @@ -296,9 +296,11 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): if success: times = [] - for ip in servers_disk: + for ip in servers_clients: client = servers_clients[ip] - times.append(time_delta(client)) + delta = time_delta(client) + stdio.verbose('%s time delta %s' % (ip, delta)) + times.append(delta) if times and max(times) - min(times) > 200: critical('Cluster NTP is out of sync') diff --git a/plugins/oceanbase/3.1.0/stop.py b/plugins/oceanbase/3.1.0/stop.py index fd0cfb85195c31b894d29ab105a88403550e6ba3..026378e36d724718472af60449f083852827f80b 100644 --- a/plugins/oceanbase/3.1.0/stop.py +++ b/plugins/oceanbase/3.1.0/stop.py @@ -103,6 +103,7 @@ def stop(plugin_context, *args, **kwargs): tmp_servers = {} for server in servers: data = servers[server] + client = clients[server] stdio.verbose('%s check whether the port is released' % server) for key in ['rpc_port', 'mysql_port']: if data[key] and check(data['client'], data['pid'], data[key]): diff --git a/plugins/oceanbase/3.1.0/upgrade.py b/plugins/oceanbase/3.1.0/upgrade.py index 4d9f7c4cfd9c200c7e6b2adc3e854210b1f18383..3face39be07b47ffb407e2d92f870da7faf816b9 100644 --- a/plugins/oceanbase/3.1.0/upgrade.py +++ b/plugins/oceanbase/3.1.0/upgrade.py @@ -30,6 +30,8 @@ def upgrade(plugin_context, stop_plugin, start_plugin, connect_plugin, display_p cmd = plugin_context.cmd options = plugin_context.options stdio = plugin_context.stdio + local_home_path = kwargs.get('local_home_path') + repository_dir = 
kwargs.get('repository_dir') zones_servers = {} for server in cluster_config.servers: @@ -41,6 +43,16 @@ def upgrade(plugin_context, stop_plugin, start_plugin, connect_plugin, display_p all_servers = cluster_config.servers for zone in zones_servers: + for server in zones_servers[zone]: + client = clients[server] + server_config = cluster_config.get_server_conf(server) + home_path = server_config['home_path'] + remote_home_path = client.execute_command('echo $HOME/.obd').stdout.strip() + remote_repository_dir = repository_dir.replace(local_home_path, remote_home_path) + client.execute_command("bash -c 'mkdir -p %s/{bin,lib}'" % (home_path)) + client.execute_command("ln -fs %s/bin/* %s/bin" % (remote_repository_dir, home_path)) + client.execute_command("ln -fs %s/lib/* %s/lib" % (remote_repository_dir, home_path)) + cluster_config.servers = zones_servers[zone] stdio.print('upgrade zone "%s"' % zone) if not stop_plugin(components, clients, cluster_config, cmd, options, stdio, *args, **kwargs): diff --git a/plugins/sysbench/3.1.0/run_test.py b/plugins/sysbench/3.1.0/run_test.py index 3e3b4cd63f589a5c4ef51698204739e517faa4e5..1cea5761ad45e768d0b6903a2326802a5406ea1f 100644 --- a/plugins/sysbench/3.1.0/run_test.py +++ b/plugins/sysbench/3.1.0/run_test.py @@ -77,7 +77,7 @@ def exec_cmd(cmd): def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwargs): def get_option(key, default=''): value = getattr(options, key, default) - if not value: + if value is None: value = default return value def execute(cursor, query, args=None): @@ -97,7 +97,7 @@ def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwarg stdio = plugin_context.stdio options = plugin_context.options - optimization = get_option('optimization', 1) > 0 + optimization = get_option('optimization') > 0 host = get_option('host', '127.0.0.1') port = get_option('port', 2881) @@ -119,6 +119,10 @@ def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwarg 
sysbench_bin = get_option('sysbench_bin', 'sysbench') sysbench_script_dir = get_option('sysbench_script_dir', '/usr/sysbench/share/sysbench') + if tenant_name == 'sys': + stdio.error('DO NOT use sys tenant for testing.') + return + ret = LocalClient.execute_command('%s --help' % obclient_bin, stdio=stdio) if not ret: stdio.error('%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % (ret.stderr, obclient_bin)) @@ -152,23 +156,24 @@ def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwarg except: return - sql = "select * from oceanbase.__all_user where user_name = %s" - try: - stdio.verbose('execute sql: %s' % (sql % user)) - cursor.execute(sql, [user]) - if not cursor.fetchone(): - stdio.error('User %s not exists.' % user) - return - except: + sql = "select * from oceanbase.__all_user where user_name = '%s'" % user + sys_pwd = cluster_config.get_global_conf().get('root_password', '') + exec_sql_cmd = "%s -h%s -P%s -uroot@%s %s -A -e" % (obclient_bin, host, port, tenant_name, ("-p'%s'" % sys_pwd) if sys_pwd else '') + ret = LocalClient.execute_command('%s "%s"' % (exec_sql_cmd, sql), stdio=stdio) + if not ret or not ret.stdout: + stdio.error('User %s not exists.' 
% user) return - exec_sql_cmd = "%s -h%s -P%s -u%s@%s %s -A -e" % (obclient_bin, host, port, user, tenant_name, "-p'%s'" if password else '') + exec_sql_cmd = "%s -h%s -P%s -u%s@%s %s -A -e" % (obclient_bin, host, port, user, tenant_name, ("-p'%s'" % password) if password else '') ret = LocalClient.execute_command('%s "%s"' % (exec_sql_cmd, 'select version();'), stdio=stdio) if not ret: stdio.error(ret.stderr) return sql = '' + odp_configs_done = [] + system_configs_done = [] + tenant_variables_done = [] odp_configs = [ # [配置名, 新值, 旧值, 替换条件: lambda n, o: n != o] ['enable_compression_protocol', False, False, lambda n, o: n != o], @@ -202,8 +207,8 @@ def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwarg ['_flush_clog_aggregation_buffer_timeout', '1ms', '1ms', lambda n, o: n != o, False], ] - if odp_cursor and optimization: - try: + try: + if odp_cursor and optimization: for config in odp_configs: sql = 'show proxyconfig like "%s"' % config[0] ret = execute(odp_cursor, sql) @@ -211,14 +216,12 @@ def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwarg config[2] = ret['value'] if config[3](config[1], config[2]): sql = 'alter proxyconfig set %s=%%s' % config[0] + odp_configs_done.append(config) execute(odp_cursor, sql, [config[1]]) - except: - return - tenant_q = ' tenant="%s"' % tenant_name - server_num = len(cluster_config.servers) - if optimization: - try: + tenant_q = ' tenant="%s"' % tenant_name + server_num = len(cluster_config.servers) + if optimization: for config in system_configs: if config[0] == 'sleep': sleep(config[1]) @@ -233,69 +236,65 @@ def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwarg sql = 'alter system set %s=%%s' % config[0] if config[4]: sql += tenant_q + system_configs_done.append(config) execute(cursor, sql, [config[1]]) sql = "select count(1) server_num from oceanbase.__all_server where status = 'active'" ret = execute(cursor, sql) if ret: server_num = 
ret.get("server_num", server_num) - except: - return - parallel_max_servers = max_cpu * 10 - parallel_servers_target = int(parallel_max_servers * server_num * 0.8) - - tenant_variables = [ - # [变量名, 新值, 旧值, 替换条件: lambda n, o: n != o] - ['ob_timestamp_service', 1, 1, lambda n, o: n != o], - ['autocommit', 1, 1, lambda n, o: n != o], - ['ob_query_timeout', 36000000000, 36000000000, lambda n, o: n != o], - ['ob_trx_timeout', 36000000000, 36000000000, lambda n, o: n != o], - ['max_allowed_packet', 67108864, 67108864, lambda n, o: n != o], - ['ob_sql_work_area_percentage', 100, 100, lambda n, o: n != o], - ['parallel_max_servers', parallel_max_servers, parallel_max_servers, lambda n, o: n != o], - ['parallel_servers_target', parallel_servers_target, parallel_servers_target, lambda n, o: n != o] - ] - select_sql_t = "select value from oceanbase.__all_virtual_sys_variable where tenant_id = %d and name = '%%s'" % tenant_meta['tenant_id'] - update_sql_t = "ALTER TENANT %s SET VARIABLES %%s = %%%%s" % tenant_name + parallel_max_servers = int(max_cpu * 10) + parallel_servers_target = int(max_cpu * server_num * 8) + + tenant_variables = [ + # [变量名, 新值, 旧值, 替换条件: lambda n, o: n != o] + ['ob_timestamp_service', 1, 1, lambda n, o: n != o], + ['autocommit', 1, 1, lambda n, o: n != o], + ['ob_query_timeout', 36000000000, 36000000000, lambda n, o: n != o], + ['ob_trx_timeout', 36000000000, 36000000000, lambda n, o: n != o], + ['max_allowed_packet', 67108864, 67108864, lambda n, o: n != o], + ['ob_sql_work_area_percentage', 100, 100, lambda n, o: n != o], + ['parallel_max_servers', parallel_max_servers, parallel_max_servers, lambda n, o: n != o], + ['parallel_servers_target', parallel_servers_target, parallel_servers_target, lambda n, o: n != o] + ] + select_sql_t = "select value from oceanbase.__all_virtual_sys_variable where tenant_id = %d and name = '%%s'" % tenant_meta['tenant_id'] + update_sql_t = "ALTER TENANT %s SET VARIABLES %%s = %%%%s" % tenant_name - try: for config in 
tenant_variables: sql = select_sql_t % config[0] ret = execute(cursor, sql) if ret: value = ret['value'] - config[2] = int(value) if isinstance(value, str) or value.isdigit() else value + config[2] = int(value) if isinstance(value, str) and value.isdigit() else value if config[3](config[1], config[2]): sql = update_sql_t % config[0] + tenant_variables_done.append(config) execute(cursor, sql, [config[1]]) - except: - return - sysbench_cmd = "cd %s; %s %s --mysql-host=%s --mysql-port=%s --mysql-user=%s@%s --mysql-db=%s" % (sysbench_script_dir, sysbench_bin, script_name, host, port, user, tenant_name, mysql_db) - - if password: - sysbench_cmd += ' --mysql-password=%s' % password - if table_size: - sysbench_cmd += ' --table_size=%s' % table_size - if tables: - sysbench_cmd += ' --tables=%s' % tables - if threads: - sysbench_cmd += ' --threads=%s' % threads - if time: - sysbench_cmd += ' --time=%s' % time - if interval: - sysbench_cmd += ' --report-interval=%s' % interval - if events: - sysbench_cmd += ' --events=%s' % events - if rand_type: - sysbench_cmd += ' --rand-type=%s' % rand_type - if skip_trx in ['on', 'off']: - sysbench_cmd += ' --skip_trx=%s' % skip_trx - if percentile: - sysbench_cmd += ' --percentile=%s' % percentile + sysbench_cmd = "cd %s; %s %s --mysql-host=%s --mysql-port=%s --mysql-user=%s@%s --mysql-db=%s" % (sysbench_script_dir, sysbench_bin, script_name, host, port, user, tenant_name, mysql_db) + + if password: + sysbench_cmd += ' --mysql-password=%s' % password + if table_size: + sysbench_cmd += ' --table_size=%s' % table_size + if tables: + sysbench_cmd += ' --tables=%s' % tables + if threads: + sysbench_cmd += ' --threads=%s' % threads + if time: + sysbench_cmd += ' --time=%s' % time + if interval: + sysbench_cmd += ' --report-interval=%s' % interval + if events: + sysbench_cmd += ' --events=%s' % events + if rand_type: + sysbench_cmd += ' --rand-type=%s' % rand_type + if skip_trx in ['on', 'off']: + sysbench_cmd += ' --skip_trx=%s' % skip_trx + 
if percentile: + sysbench_cmd += ' --percentile=%s' % percentile - try: if exec_cmd('%s cleanup' % sysbench_cmd) and exec_cmd('%s prepare' % sysbench_cmd) and exec_cmd('%s --db-ps-mode=disable run' % sysbench_cmd): return plugin_context.return_true() except KeyboardInterrupt: @@ -305,12 +304,12 @@ def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwarg finally: try: if optimization: - for config in tenant_variables[::-1]: + for config in tenant_variables_done[::-1]: if config[3](config[1], config[2]): sql = update_sql_t % config[0] execute(cursor, sql, [config[2]]) - for config in system_configs[::-1]: + for config in system_configs_done[::-1]: if config[0] == 'sleep': sleep(config[1]) continue @@ -321,7 +320,7 @@ def run_test(plugin_context, db, cursor, odp_db, odp_cursor=None, *args, **kwarg execute(cursor, sql, [config[2]]) if odp_cursor: - for config in odp_configs[::-1]: + for config in odp_configs_done[::-1]: if config[3](config[1], config[2]): sql = 'alter proxyconfig set %s=%%s' % config[0] execute(odp_cursor, sql, [config[2]]) diff --git a/plugins/tpch/3.1.0/create_tpch_mysql_table_part.ddl b/plugins/tpch/3.1.0/create_tpch_mysql_table_part.ddl new file mode 100644 index 0000000000000000000000000000000000000000..520986b0c75e15fbc59ed545c6da2475cfb075c1 --- /dev/null +++ b/plugins/tpch/3.1.0/create_tpch_mysql_table_part.ddl @@ -0,0 +1,112 @@ +create tablegroup if not exists tpch_tg_lineitem_order_group binding true partition by key 1 partitions 192; +create tablegroup if not exists tpch_tg_partsupp_part binding true partition by key 1 partitions 192; + +drop table if exists lineitem; + create table lineitem ( + l_orderkey bigint not null, + l_partkey bigint not null, + l_suppkey bigint not null, + l_linenumber bigint not null, + l_quantity bigint not null, + l_extendedprice bigint not null, + l_discount bigint not null, + l_tax bigint not null, + l_returnflag char(1) default null, + l_linestatus char(1) default null, + l_shipdate 
date not null, + l_commitdate date default null, + l_receiptdate date default null, + l_shipinstruct char(25) default null, + l_shipmode char(10) default null, + l_comment varchar(44) default null, + primary key(l_orderkey, l_linenumber)) + tablegroup = tpch_tg_lineitem_order_group + partition by key (l_orderkey) partitions 192; + create index I_L_ORDERKEY on lineitem(l_orderkey) local; + create index I_L_SHIPDATE on lineitem(l_shipdate) local; + +drop table if exists orders; + create table orders ( + o_orderkey bigint not null, + o_custkey bigint not null, + o_orderstatus char(1) default null, + o_totalprice bigint default null, + o_orderdate date not null, + o_orderpriority char(15) default null, + o_clerk char(15) default null, + o_shippriority bigint default null, + o_comment varchar(79) default null, + primary key (o_orderkey)) + tablegroup = tpch_tg_lineitem_order_group + partition by key(o_orderkey) partitions 192; + create index I_O_ORDERDATE on orders(o_orderdate) local; + + +drop table if exists partsupp; + create table partsupp ( + ps_partkey bigint not null, + ps_suppkey bigint not null, + ps_availqty bigint default null, + ps_supplycost bigint default null, + ps_comment varchar(199) default null, + primary key (ps_partkey, ps_suppkey)) + tablegroup tpch_tg_partsupp_part + partition by key(ps_partkey) partitions 192; + + +drop table if exists part; + create table part ( + p_partkey bigint not null, + p_name varchar(55) default null, + p_mfgr char(25) default null, + p_brand char(10) default null, + p_type varchar(25) default null, + p_size bigint default null, + p_container char(10) default null, + p_retailprice bigint default null, + p_comment varchar(23) default null, + primary key (p_partkey)) + tablegroup tpch_tg_partsupp_part + partition by key(p_partkey) partitions 192; + + +drop table if exists customer; + create table customer ( + c_custkey bigint not null, + c_name varchar(25) default null, + c_address varchar(40) default null, + c_nationkey 
bigint default null, + c_phone char(15) default null, + c_acctbal bigint default null, + c_mktsegment char(10) default null, + c_comment varchar(117) default null, + primary key (c_custkey)) + partition by key(c_custkey) partitions 192; + +drop table if exists supplier; + create table supplier ( + s_suppkey bigint not null, + s_name char(25) default null, + s_address varchar(40) default null, + s_nationkey bigint default null, + s_phone char(15) default null, + s_acctbal bigint default null, + s_comment varchar(101) default null, + primary key (s_suppkey) +) partition by key(s_suppkey) partitions 192; + + +drop table if exists nation; + create table nation ( + n_nationkey bigint not null, + n_name char(25) default null, + n_regionkey bigint default null, + n_comment varchar(152) default null, + primary key (n_nationkey)); + +drop table if exists region; + create table region ( + r_regionkey bigint not null, + r_name char(25) default null, + r_comment varchar(152) default null, + primary key (r_regionkey)); diff --git a/plugins/tpch/3.1.0/pre_test.py b/plugins/tpch/3.1.0/pre_test.py new file mode 100644 index 0000000000000000000000000000000000000000..b42bfa811649f94fdcf82855b72caa9cd9a4f261 --- /dev/null +++ b/plugins/tpch/3.1.0/pre_test.py @@ -0,0 +1,149 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see https://www.gnu.org/licenses/. + + +from __future__ import absolute_import, division, print_function + + +import re +import os +from glob import glob +try: + import subprocess32 as subprocess +except: + import subprocess +from ssh import LocalClient +from tool import DirectoryUtil + + +def pre_test(plugin_context, *args, **kwargs): + def get_option(key, default=''): + value = getattr(options, key, default) + if not value: + value = default + stdio.verbose('get option: %s value %s' % (key, value)) + return value + + def get_path(key, default): + path = get_option('%s_path' % key) + if path and os.path.exists(path): + if os.path.isfile(path): + path = [path] + else: + path = glob(os.path.join(path, '*.%s' % key)) + stdio.verbose('get %s_path: %s' % (key, path)) + return path if path else default + + def local_execute_command(command, env=None, timeout=None): + return LocalClient.execute_command(command, env, timeout, stdio) + + cluster_config = plugin_context.cluster_config + stdio = plugin_context.stdio + options = plugin_context.options + clients = plugin_context.clients + + local_dir, _ = os.path.split(__file__) + dbgen_bin = get_option('dbgen_bin', 'dbgen') + dss_config = get_option('dss_config', '.') + scale_factor = get_option('scale_factor', 1) + disable_transfer = get_option('disable_transfer', False) + remote_tbl_dir = get_option('remote_tbl_dir') + tenant_name = get_option('tenant', 'test') + if tenant_name == 'sys': + stdio.error('DO NOT use sys tenant for testing.') + return + + test_server = get_option('test_server') + tmp_dir = os.path.abspath(get_option('tmp_dir', './tmp')) + tbl_tmp_dir = os.path.join(tmp_dir, 's%s' % scale_factor) + ddl_path = get_path('ddl', [os.path.join(local_dir, 'create_tpch_mysql_table_part.ddl')]) + stdio.verbose('set ddl_path: %s' % ddl_path) + setattr(options, 'ddl_path', ddl_path) + tbl_path = get_path('tbl', 
glob(os.path.join(tbl_tmp_dir, '*.tbl'))) + sql_path = get_path('sql', glob(os.path.join(local_dir, 'queries/*.sql'))) + stdio.verbose('set sql_path: %s' % sql_path) + setattr(options, 'sql_path', sql_path) + obclient_bin = get_option('obclient_bin', 'obclient') + + ret = local_execute_command('%s --help' % obclient_bin) + if not ret: + stdio.error('%s\n%s is not an executable file. Please use `--obclient-bin` to set.\nYou may not have obclient installed' % (ret.stderr, obclient_bin)) + return + + if not DirectoryUtil.mkdir(tmp_dir, stdio=stdio): + return + stdio.verbose('set tmp_dir: %s' % tmp_dir) + setattr(options, 'tmp_dir', tmp_dir) + + if get_option('test_only'): + return plugin_context.return_true() + + if not remote_tbl_dir: + stdio.error('Please use --remote-tbl-dir to set a dir for remote tbl files') + return + + if disable_transfer: + ret = clients[test_server].execute_command('ls %s' % (os.path.join(remote_tbl_dir, '*.tbl'))) + tbl_path = ret.stdout.strip().split('\n') if ret else [] + if not tbl_path: + stdio.error('No tbl file in %s:%s' % (test_server, remote_tbl_dir)) + return + else: + if not tbl_path: + ret = local_execute_command('%s -h' % dbgen_bin) + if ret.code > 1: + stdio.error('%s\n%s is not an executable file. 
Please use `--dbgen-bin` to set.\nYou may not have obtpch installed' % (ret.stderr, dbgen_bin)) + return + + dss_path = os.path.join(dss_config, 'dists.dss') + if not os.path.exists(dss_path): + stdio.error('No such file: %s' % dss_path) + return + + tbl_tmp_dir = os.path.join(tmp_dir, 's%s' % scale_factor) + if not DirectoryUtil.mkdir(tbl_tmp_dir, stdio=stdio): + return + + stdio.start_loading('Generate Data (Scale Factor: %s)' % scale_factor) + ret = local_execute_command('cd %s; %s -s %s -b %s' % (tbl_tmp_dir, dbgen_bin, scale_factor, dss_path)) + if ret: + stdio.stop_loading('succeed') + tbl_path = glob(os.path.join(tbl_tmp_dir, '*.tbl')) + else: + stdio.stop_loading('fail') + return + + stdio.start_loading('Send tbl to remote (%s)' % test_server) + new_tbl_path = [] + for path in tbl_path: + _, fn = os.path.split(path) + fp = os.path.join(remote_tbl_dir, fn) + if not clients[test_server].put_file(path, fp): + stdio.stop_loading('fail') + return + + new_tbl_path.append(fp) + tbl_path = new_tbl_path + + stdio.stop_loading('succeed') + stdio.verbose('set tbl_path: %s' % tbl_path) + setattr(options, 'tbl_path', tbl_path) + return plugin_context.return_true() + + diff --git a/plugins/tpch/3.1.0/queries/db1.sql b/plugins/tpch/3.1.0/queries/db1.sql new file mode 100644 index 0000000000000000000000000000000000000000..b4ac3a4be290241be057e6f87f039963a2946cc1 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db1.sql @@ -0,0 +1,24 @@ +-- using default substitutions + + +select /*+ TPCH_Q1 parallel(cpu_num) */ + l_returnflag, + l_linestatus, + sum(l_quantity) as sum_qty, + sum(l_extendedprice) as sum_base_price, + sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, + sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, + avg(l_quantity) as avg_qty, + avg(l_extendedprice) as avg_price, + avg(l_discount) as avg_disc, + count(*) as count_order +from + lineitem +where + l_shipdate <= date '1998-12-01' - interval '90' day +group by + l_returnflag, + 
l_linestatus +order by + l_returnflag, + l_linestatus; diff --git a/plugins/tpch/3.1.0/queries/db10.sql b/plugins/tpch/3.1.0/queries/db10.sql new file mode 100644 index 0000000000000000000000000000000000000000..65924d87e3c5c5bdac3b265ec32da389f55e37c9 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db10.sql @@ -0,0 +1,33 @@ +-- using default substitutions + +select /*+ TPCH_Q10 parallel(cpu_num) */ + c_custkey, + c_name, + sum(l_extendedprice * (1 - l_discount)) as revenue, + c_acctbal, + n_name, + c_address, + c_phone, + c_comment +from + customer, + orders, + lineitem, + nation +where + c_custkey = o_custkey + and l_orderkey = o_orderkey + and o_orderdate >= date '1993-10-01' + and o_orderdate < date '1993-10-01' + interval '3' month + and l_returnflag = 'R' + and c_nationkey = n_nationkey +group by + c_custkey, + c_name, + c_acctbal, + c_phone, + n_name, + c_address, + c_comment +order by + revenue desc; diff --git a/plugins/tpch/3.1.0/queries/db11.sql b/plugins/tpch/3.1.0/queries/db11.sql new file mode 100644 index 0000000000000000000000000000000000000000..708ea4ec3a8d70a9e65e5a42c50cbe872515b07a --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db11.sql @@ -0,0 +1,30 @@ +-- using default substitutions + + +select /*+ TPCH_Q11 parallel(cpu_num) */ + ps_partkey, + sum(ps_supplycost * ps_availqty) as value +from + partsupp, + supplier, + nation +where + ps_suppkey = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' +group by + ps_partkey having + sum(ps_supplycost * ps_availqty) > ( + select + sum(ps_supplycost * ps_availqty) * 0.0000100000 + from + partsupp, + supplier, + nation + where + ps_suppkey = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' + ) +order by + value desc; diff --git a/plugins/tpch/3.1.0/queries/db12.sql b/plugins/tpch/3.1.0/queries/db12.sql new file mode 100644 index 0000000000000000000000000000000000000000..3b0a8d281a8dd4b3191e4214449a9841deb3760f --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db12.sql @@ -0,0 
+1,31 @@ +-- using default substitutions + + +select /*+ TPCH_Q12 parallel(cpu_num) */ + l_shipmode, + sum(case + when o_orderpriority = '1-URGENT' + or o_orderpriority = '2-HIGH' + then 1 + else 0 + end) as high_line_count, + sum(case + when o_orderpriority <> '1-URGENT' + and o_orderpriority <> '2-HIGH' + then 1 + else 0 + end) as low_line_count +from + orders, + lineitem +where + o_orderkey = l_orderkey + and l_shipmode in ('MAIL', 'SHIP') + and l_commitdate < l_receiptdate + and l_shipdate < l_commitdate + and l_receiptdate >= date '1994-01-01' + and l_receiptdate < date '1994-01-01' + interval '1' year +group by + l_shipmode +order by + l_shipmode; diff --git a/plugins/tpch/3.1.0/queries/db13.sql b/plugins/tpch/3.1.0/queries/db13.sql new file mode 100644 index 0000000000000000000000000000000000000000..9b0ae05055aa2fa25bda8a412ef330f0e8ed75be --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db13.sql @@ -0,0 +1,7 @@ +-- using default substitutions +SELECT /*+ TPCH_Q13 parallel(cpu_num) */ c_count, count(*) as custdist +from ( SELECT c_custkey, count(o_orderkey) as c_count + from customer left join orders on c_custkey = o_custkey and o_comment not like '%special%requests%' + group by c_custkey ) c_orders +group by c_count +order by custdist desc, c_count desc; diff --git a/plugins/tpch/3.1.0/queries/db14.sql b/plugins/tpch/3.1.0/queries/db14.sql new file mode 100644 index 0000000000000000000000000000000000000000..7eab8ace29ae5f085afcf05115921705ac341b99 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db14.sql @@ -0,0 +1,16 @@ +-- using default substitutions + + +select /*+ TPCH_Q14 parallel(cpu_num) */ + 100.00 * sum(case + when p_type like 'PROMO%' + then l_extendedprice * (1 - l_discount) + else 0 + end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue +from + lineitem, + part +where + l_partkey = p_partkey + and l_shipdate >= date '1995-09-01' + and l_shipdate < date '1995-09-01' + interval '1' month; diff --git a/plugins/tpch/3.1.0/queries/db15.sql 
b/plugins/tpch/3.1.0/queries/db15.sql new file mode 100644 index 0000000000000000000000000000000000000000..ba16e11c12415490818bb6f2381e9a70d2466291 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db15.sql @@ -0,0 +1,36 @@ +-- using default substitutions + +create view revenue0 (supplier_no, total_revenue) as + select + l_suppkey, + sum(l_extendedprice * (1 - l_discount)) + from + lineitem + where + l_shipdate >= date '1996-01-01' + and l_shipdate < date '1996-01-01' + interval '3' month + group by + l_suppkey; + + +select /*+ TPCH_Q15 parallel(cpu_num) */ + s_suppkey, + s_name, + s_address, + s_phone, + total_revenue +from + supplier, + revenue0 +where + s_suppkey = supplier_no + and total_revenue = ( + select + max(total_revenue) + from + revenue0 + ) +order by + s_suppkey; + +drop view revenue0; diff --git a/plugins/tpch/3.1.0/queries/db16.sql b/plugins/tpch/3.1.0/queries/db16.sql new file mode 100644 index 0000000000000000000000000000000000000000..61ab5c5d8e88a5810bf08ab6099d577100895cc8 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db16.sql @@ -0,0 +1,33 @@ +-- using default substitutions + + +select /*+ TPCH_Q16 parallel(cpu_num) */ + p_brand, + p_type, + p_size, + count(distinct ps_suppkey) as supplier_cnt +from + partsupp, + part +where + p_partkey = ps_partkey + and p_brand <> 'Brand#45' + and p_type not like 'MEDIUM POLISHED%' + and p_size in (49, 14, 23, 45, 19, 3, 36, 9) + and ps_suppkey not in ( + select + s_suppkey + from + supplier + where + s_comment like '%Customer%Complaints%' + ) +group by + p_brand, + p_type, + p_size +order by + supplier_cnt desc, + p_brand, + p_type, + p_size; diff --git a/plugins/tpch/3.1.0/queries/db17.sql b/plugins/tpch/3.1.0/queries/db17.sql new file mode 100644 index 0000000000000000000000000000000000000000..5748361fb23f1d08c41390a069890b34be99fd54 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db17.sql @@ -0,0 +1,20 @@ +-- using default substitutions + + +select /*+ TPCH_Q17 parallel(cpu_num) */ + sum(l_extendedprice) / 
7.0 as avg_yearly +from + lineitem, + part +where + p_partkey = l_partkey + and p_brand = 'Brand#23' + and p_container = 'MED BOX' + and l_quantity < ( + select + 0.2 * avg(l_quantity) + from + lineitem + where + l_partkey = p_partkey + ); diff --git a/plugins/tpch/3.1.0/queries/db18.sql b/plugins/tpch/3.1.0/queries/db18.sql new file mode 100644 index 0000000000000000000000000000000000000000..64be93bc982759181b9e80919c21fb3137d7b051 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db18.sql @@ -0,0 +1,35 @@ +-- using default substitutions + + +select /*+ TPCH_Q18 parallel(cpu_num) */ + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice, + sum(l_quantity) +from + customer, + orders, + lineitem +where + o_orderkey in ( + select + l_orderkey + from + lineitem + group by + l_orderkey having + sum(l_quantity) > 300 + ) + and c_custkey = o_custkey + and o_orderkey = l_orderkey +group by + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice +order by + o_totalprice desc, + o_orderdate; diff --git a/plugins/tpch/3.1.0/queries/db19.sql b/plugins/tpch/3.1.0/queries/db19.sql new file mode 100644 index 0000000000000000000000000000000000000000..851edbb87fb5fe932fb650f86d755914166ba7e4 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db19.sql @@ -0,0 +1,38 @@ +-- using default substitutions + + +select /*+ TPCH_Q19 parallel(cpu_num) */ + sum(l_extendedprice* (1 - l_discount)) as revenue +from + lineitem, + part +where + ( + p_partkey = l_partkey + and p_brand = 'Brand#12' + and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') + and l_quantity >= 1 and l_quantity <= 1 + 10 + and p_size between 1 and 5 + and l_shipmode in ('AIR', 'AIR REG') + and l_shipinstruct = 'DELIVER IN PERSON' + ) + or + ( + p_partkey = l_partkey + and p_brand = 'Brand#23' + and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') + and l_quantity >= 10 and l_quantity <= 10 + 10 + and p_size between 1 and 10 + and l_shipmode in ('AIR', 'AIR REG') + and l_shipinstruct = 
'DELIVER IN PERSON' + ) + or + ( + p_partkey = l_partkey + and p_brand = 'Brand#34' + and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') + and l_quantity >= 20 and l_quantity <= 20 + 10 + and p_size between 1 and 15 + and l_shipmode in ('AIR', 'AIR REG') + and l_shipinstruct = 'DELIVER IN PERSON' + ); diff --git a/plugins/tpch/3.1.0/queries/db2.sql b/plugins/tpch/3.1.0/queries/db2.sql new file mode 100644 index 0000000000000000000000000000000000000000..6a0b19d8feaa7a496f746c3ecd419452b91f7482 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db2.sql @@ -0,0 +1,46 @@ +-- using default substitutions + + +select /*+ TPCH_Q2 parallel(cpu_num) */ + s_acctbal, + s_name, + n_name, + p_partkey, + p_mfgr, + s_address, + s_phone, + s_comment +from + part, + supplier, + partsupp, + nation, + region +where + p_partkey = ps_partkey + and s_suppkey = ps_suppkey + and p_size = 15 + and p_type like '%BRASS' + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'EUROPE' + and ps_supplycost = ( + select + min(ps_supplycost) + from + partsupp, + supplier, + nation, + region + where + p_partkey = ps_partkey + and s_suppkey = ps_suppkey + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'EUROPE' + ) +order by + s_acctbal desc, + n_name, + s_name, + p_partkey; diff --git a/plugins/tpch/3.1.0/queries/db20.sql b/plugins/tpch/3.1.0/queries/db20.sql new file mode 100644 index 0000000000000000000000000000000000000000..e7a3142d4ae61bc3f4c423fab1954efdde80b859 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db20.sql @@ -0,0 +1,40 @@ +-- using default substitutions + + +select /*+ TPCH_Q20 parallel(cpu_num) */ + s_name, + s_address +from + supplier, + nation +where + s_suppkey in ( + select + ps_suppkey + from + partsupp + where + ps_partkey in ( + select + p_partkey + from + part + where + p_name like 'forest%' + ) + and ps_availqty > ( + select + 0.5 * sum(l_quantity) + from + lineitem + where + l_partkey = ps_partkey + and l_suppkey 
= ps_suppkey + and l_shipdate >= date '1994-01-01' + and l_shipdate < date '1994-01-01' + interval '1' year + ) + ) + and s_nationkey = n_nationkey + and n_name = 'CANADA' +order by + s_name; diff --git a/plugins/tpch/3.1.0/queries/db21.sql b/plugins/tpch/3.1.0/queries/db21.sql new file mode 100644 index 0000000000000000000000000000000000000000..da23dec1b3d6d58e989a32aece396d1755d30f7a --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db21.sql @@ -0,0 +1,42 @@ +-- using default substitutions + + +select /*+ TPCH_Q21 parallel(cpu_num) */ + s_name, + count(*) as numwait +from + supplier, + lineitem l1, + orders, + nation +where + s_suppkey = l1.l_suppkey + and o_orderkey = l1.l_orderkey + and o_orderstatus = 'F' + and l1.l_receiptdate > l1.l_commitdate + and exists ( + select + * + from + lineitem l2 + where + l2.l_orderkey = l1.l_orderkey + and l2.l_suppkey <> l1.l_suppkey + ) + and not exists ( + select + * + from + lineitem l3 + where + l3.l_orderkey = l1.l_orderkey + and l3.l_suppkey <> l1.l_suppkey + and l3.l_receiptdate > l3.l_commitdate + ) + and s_nationkey = n_nationkey + and n_name = 'SAUDI ARABIA' +group by + s_name +order by + numwait desc, + s_name; diff --git a/plugins/tpch/3.1.0/queries/db22.sql b/plugins/tpch/3.1.0/queries/db22.sql new file mode 100644 index 0000000000000000000000000000000000000000..8f3440f5f1e58ed0773bafaf89076ab826e2116f --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db22.sql @@ -0,0 +1,40 @@ +-- using default substitutions + + +select /*+ TPCH_Q22 parallel(cpu_num) */ + cntrycode, + count(*) as numcust, + sum(c_acctbal) as totacctbal +from + ( + select + substring(c_phone from 1 for 2) as cntrycode, + c_acctbal + from + customer + where + substring(c_phone from 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + and c_acctbal > ( + select + avg(c_acctbal) + from + customer + where + c_acctbal > 0.00 + and substring(c_phone from 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + ) + and not exists ( + select + * + from + 
orders + where + o_custkey = c_custkey + ) + ) as custsale +group by + cntrycode +order by + cntrycode; diff --git a/plugins/tpch/3.1.0/queries/db3.sql b/plugins/tpch/3.1.0/queries/db3.sql new file mode 100644 index 0000000000000000000000000000000000000000..941d623ac7943d2c313f6664072fa2e3cdec3c28 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db3.sql @@ -0,0 +1,25 @@ +-- using default substitutions + + +select /*+ TPCH_Q3 parallel(cpu_num) */ + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate, + o_shippriority +from + customer, + orders, + lineitem +where + c_mktsegment = 'BUILDING' + and c_custkey = o_custkey + and l_orderkey = o_orderkey + and o_orderdate < date '1995-03-15' + and l_shipdate > date '1995-03-15' +group by + l_orderkey, + o_orderdate, + o_shippriority +order by + revenue desc, + o_orderdate; diff --git a/plugins/tpch/3.1.0/queries/db4.sql b/plugins/tpch/3.1.0/queries/db4.sql new file mode 100644 index 0000000000000000000000000000000000000000..6e7bcb9119333dfa839282390ad5b5e61db532ba --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db4.sql @@ -0,0 +1,11 @@ +-- using default substitutions +SELECT /*+ TPCH_Q4 parallel(cpu_num) no_unnest */ o_orderpriority, count(*) as order_count +from orders +where o_orderdate >= DATE'1993-07-01' and + o_orderdate < DATE'1993-07-01' + interval '3' month and + exists ( SELECT * + from lineitem + where l_orderkey = o_orderkey and + l_commitdate < l_receiptdate ) + group by o_orderpriority + order by o_orderpriority; diff --git a/plugins/tpch/3.1.0/queries/db5.sql b/plugins/tpch/3.1.0/queries/db5.sql new file mode 100644 index 0000000000000000000000000000000000000000..c74dd8cdd257825b5abb19f9a24eff5b2fff7b17 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db5.sql @@ -0,0 +1,27 @@ +-- using default substitutions + + +select /*+ TPCH_Q5 parallel(cpu_num) */ + n_name, + sum(l_extendedprice * (1 - l_discount)) as revenue +from + customer, + orders, + lineitem, + supplier, + nation, + region 
+where + c_custkey = o_custkey + and l_orderkey = o_orderkey + and l_suppkey = s_suppkey + and c_nationkey = s_nationkey + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'ASIA' + and o_orderdate >= date '1994-01-01' + and o_orderdate < date '1994-01-01' + interval '1' year +group by + n_name +order by + revenue desc; diff --git a/plugins/tpch/3.1.0/queries/db6.sql b/plugins/tpch/3.1.0/queries/db6.sql new file mode 100644 index 0000000000000000000000000000000000000000..bda99bac1a00947aa32d80b7e08daa7793904ebd --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db6.sql @@ -0,0 +1,12 @@ +-- using default substitutions + + +select /*+ TPCH_Q6 parallel(cpu_num) */ + sum(l_extendedprice * l_discount) as revenue +from + lineitem +where + l_shipdate >= date '1994-01-01' + and l_shipdate < date '1994-01-01' + interval '1' year + and l_discount between .06 - 0.01 and .06 + 0.01 + and l_quantity < 24; diff --git a/plugins/tpch/3.1.0/queries/db7.sql b/plugins/tpch/3.1.0/queries/db7.sql new file mode 100644 index 0000000000000000000000000000000000000000..4f954116e37837ed04ac0bd3022b3c864abffe46 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db7.sql @@ -0,0 +1,42 @@ +-- using default substitutions + + +select /*+ TPCH_Q7 parallel(cpu_num) */ + supp_nation, + cust_nation, + l_year, + sum(volume) as revenue +from + ( + select + n1.n_name as supp_nation, + n2.n_name as cust_nation, + extract(year from l_shipdate) as l_year, + l_extendedprice * (1 - l_discount) as volume + from + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2 + where + s_suppkey = l_suppkey + and o_orderkey = l_orderkey + and c_custkey = o_custkey + and s_nationkey = n1.n_nationkey + and c_nationkey = n2.n_nationkey + and ( + (n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') + or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE') + ) + and l_shipdate between date '1995-01-01' and date '1996-12-31' + ) as shipping +group by + supp_nation, + cust_nation, + l_year +order by + 
supp_nation, + cust_nation, + l_year; diff --git a/plugins/tpch/3.1.0/queries/db8.sql b/plugins/tpch/3.1.0/queries/db8.sql new file mode 100644 index 0000000000000000000000000000000000000000..b8df462c6114270ecf26fa1f83bb605438ea0e11 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db8.sql @@ -0,0 +1,40 @@ +-- using default substitutions + + +select /*+ TPCH_Q8 parallel(cpu_num) */ + o_year, + sum(case + when nation = 'BRAZIL' then volume + else 0 + end) / sum(volume) as mkt_share +from + ( + select + extract(year from o_orderdate) as o_year, + l_extendedprice * (1 - l_discount) as volume, + n2.n_name as nation + from + part, + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2, + region + where + p_partkey = l_partkey + and s_suppkey = l_suppkey + and l_orderkey = o_orderkey + and o_custkey = c_custkey + and c_nationkey = n1.n_nationkey + and n1.n_regionkey = r_regionkey + and r_name = 'AMERICA' + and s_nationkey = n2.n_nationkey + and o_orderdate between date '1995-01-01' and date '1996-12-31' + and p_type = 'ECONOMY ANODIZED STEEL' + ) as all_nations +group by + o_year +order by + o_year; diff --git a/plugins/tpch/3.1.0/queries/db9.sql b/plugins/tpch/3.1.0/queries/db9.sql new file mode 100644 index 0000000000000000000000000000000000000000..3bbf5f0085ab952ba890dc7d95cc8b75f7513917 --- /dev/null +++ b/plugins/tpch/3.1.0/queries/db9.sql @@ -0,0 +1,35 @@ +-- using default substitutions + + +select /*+ TPCH_Q9 parallel(cpu_num) */ + nation, + o_year, + sum(amount) as sum_profit +from + ( + select + n_name as nation, + extract(year from o_orderdate) as o_year, + l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount + from + part, + supplier, + lineitem, + partsupp, + orders, + nation + where + s_suppkey = l_suppkey + and ps_suppkey = l_suppkey + and ps_partkey = l_partkey + and p_partkey = l_partkey + and o_orderkey = l_orderkey + and s_nationkey = n_nationkey + and p_name like '%green%' + ) as profit +group by + nation, + o_year 
+order by + nation, + o_year desc; diff --git a/plugins/tpch/3.1.0/run_test.py b/plugins/tpch/3.1.0/run_test.py new file mode 100644 index 0000000000000000000000000000000000000000..f4bdf39902c39e03d75f163ad35eb7d805171f11 --- /dev/null +++ b/plugins/tpch/3.1.0/run_test.py @@ -0,0 +1,365 @@ +# coding: utf-8 +# OceanBase Deploy. +# Copyright (C) 2021 OceanBase +# +# This file is part of OceanBase Deploy. +# +# OceanBase Deploy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# OceanBase Deploy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with OceanBase Deploy. If not, see . + + +from __future__ import absolute_import, division, print_function + + +import re +import os +import time +try: + import subprocess32 as subprocess +except: + import subprocess +from ssh import LocalClient + + +stdio = None + + +def parse_size(size): + _bytes = 0 + if not isinstance(size, str) or size.isdigit(): + _bytes = int(size) + else: + units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} + match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) + _bytes = int(match.group(1)) * units[match.group(2)] + return _bytes + + +def format_size(size, precision=1): + units = ['B', 'K', 'M', 'G'] + units_num = len(units) - 1 + idx = 0 + if precision: + div = 1024.0 + formate = '%.' 
+ str(precision) + 'f%s' + limit = 1024 + else: + div = 1024 + limit = 1024 + formate = '%d%s' + while idx < units_num and size >= limit: + size /= div + idx += 1 + return formate % (size, units[idx]) + + +def exec_cmd(cmd): + stdio.verbose('execute: %s' % cmd) + process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + while process.poll() is None: + line = process.stdout.readline() + line = line.strip() + if line: + stdio.print(line.decode("utf8", 'ignore')) + return process.returncode == 0 + + +def run_test(plugin_context, db, cursor, *args, **kwargs): + def get_option(key, default=''): + value = getattr(options, key, default) + if value is None: + value = default + return value + def execute(cursor, query, args=None): + msg = query % tuple(args) if args is not None else query + stdio.verbose('execute sql: %s' % msg) + stdio.verbose("query: %s. args: %s" % (query, args)) + try: + cursor.execute(query, args) + return cursor.fetchone() + except: + msg = 'execute sql exception: %s' % msg + stdio.exception(msg) + raise Exception(msg) + + def local_execute_command(command, env=None, timeout=None): + return LocalClient.execute_command(command, env, timeout, stdio) + + global stdio + cluster_config = plugin_context.cluster_config + stdio = plugin_context.stdio + clients = plugin_context.clients + options = plugin_context.options + + optimization = get_option('optimization') > 0 + not_test_only = not get_option('test_only') + + host = get_option('host', '127.0.0.1') + port = get_option('port', 2881) + mysql_db = get_option('database', 'test') + user = get_option('user', 'root') + tenant_name = get_option('tenant', 'test') + password = get_option('password', '') + ddl_path = get_option('ddl_path') + tbl_path = get_option('tbl_path') + sql_path = get_option('sql_path') + tmp_dir = get_option('tmp_dir') + obclient_bin = get_option('obclient_bin', 'obclient') + + sql_path = sorted(sql_path, key=lambda x: (len(x), 
x)) + + sql = "select * from oceanbase.gv$tenant where tenant_name = %s" + max_cpu = 2 + cpu_total = 0 + min_memory = 0 + unit_count = 0 + tenant_meta = None + tenant_unit = None + try: + stdio.verbose('execute sql: %s' % (sql % tenant_name)) + cursor.execute(sql, [tenant_name]) + tenant_meta = cursor.fetchone() + if not tenant_meta: + stdio.error('Tenant %s not exists. Use `obd cluster tenant create` to create tenant.' % tenant_name) + return + sql = "select * from oceanbase.__all_resource_pool where tenant_id = %d" % tenant_meta['tenant_id'] + pool = execute(cursor, sql) + sql = "select * from oceanbase.__all_unit_config where unit_config_id = %d" % pool['unit_config_id'] + tenant_unit = execute(cursor, sql) + max_cpu = tenant_unit['max_cpu'] + min_memory = tenant_unit['min_memory'] + unit_count = pool['unit_count'] + except: + stdio.error('fail to get tenant info') + return + + sql = "select * from oceanbase.__all_user where user_name = '%s'" % user + sys_pwd = cluster_config.get_global_conf().get('root_password', '') + exec_sql_cmd = "%s -h%s -P%s -uroot@%s %s -A -e" % (obclient_bin, host, port, tenant_name, ("-p'%s'" % sys_pwd) if sys_pwd else '') + ret = LocalClient.execute_command('%s "%s"' % (exec_sql_cmd, sql), stdio=stdio) + if not ret or not ret.stdout: + stdio.error('User %s not exists.' 
% user) + return + + if not_test_only: + sql_cmd_prefix = '%s -h%s -P%s -u%s@%s %s -A' % (obclient_bin, host, port, user, tenant_name, ("-p'%s'" % password) if password else '') + ret = local_execute_command('%s -e "%s"' % (sql_cmd_prefix, 'create database if not exists %s' % mysql_db)) + sql_cmd_prefix += ' -D %s' % mysql_db + if not ret: + stdio.error(ret.stderr) + return + else: + sql_cmd_prefix = '%s -h%s -P%s -u%s@%s %s -D %s -A' % (obclient_bin, host, port, user, tenant_name, ("-p'%s'" % password) if password else '', mysql_db) + + for server in cluster_config.servers: + client = clients[server] + ret = client.execute_command("grep -e 'processor\s*:' /proc/cpuinfo | wc -l") + if ret and ret.stdout.strip().isdigit(): + cpu_total += int(ret.stdout) + else: + server_config = cluster_config.get_server_conf(server) + cpu_total += int(server_config.get('cpu_count', 0)) + + sql = '' + system_configs_done = [] + tenant_variables_done = [] + + try: + cache_wash_threshold = format_size(int(min_memory * 0.2), 0) + system_configs = [ + # [配置名, 新值, 旧值, 替换条件: lambda n, o: n != o, 是否是租户级] + ['syslog_level', 'PERF', 'PERF', lambda n, o: n != o, False], + ['max_syslog_file_count', 100, 100, lambda n, o: n != o, False], + ['enable_syslog_recycle', True, True, lambda n, o: n != o, False], + ['enable_merge_by_turn', False, False, lambda n, o: n != o, False], + ['trace_log_slow_query_watermark', '100s', '100s', lambda n, o: n != o, False], + ['max_kept_major_version_number', 1, 1, lambda n, o: n != o, False], + ['enable_sql_operator_dump', True, True, lambda n, o: n != o, False], + ['_hash_area_size', '3g', '3g', lambda n, o: n != o, False], + ['memstore_limit_percentage', 50, 50, lambda n, o: n != o, False], + ['enable_rebalance', False, False, lambda n, o: n != o, False], + ['memory_chunk_cache_size', '1g', '1g', lambda n, o: n != o, False], + ['minor_freeze_times', 5, 5, lambda n, o: n != o, False], + ['merge_thread_count', 20, 20, lambda n, o: n != o, False], + 
['cache_wash_threshold', cache_wash_threshold, cache_wash_threshold, lambda n, o: n != o, False], + ['ob_enable_batched_multi_statement', True, True, lambda n, o: n != o, False], + ] + + tenant_q = ' tenant="%s"' % tenant_name + server_num = len(cluster_config.servers) + if optimization: + for config in system_configs: + if config[0] == 'sleep': + time.sleep(config[1]) + continue + sql = 'show parameters like "%s"' % config[0] + if config[4]: + sql += tenant_q + ret = execute(cursor, sql) + if ret: + config[2] = ret['value'] + if config[3](config[1], config[2]): + sql = 'alter system set %s=%%s' % config[0] + if config[4]: + sql += tenant_q + system_configs_done.append(config) + execute(cursor, sql, [config[1]]) + + sql = "select count(1) server_num from oceanbase.__all_server where status = 'active'" + ret = execute(cursor, sql) + if ret: + server_num = ret.get("server_num", server_num) + + parallel_max_servers = int(max_cpu * 10) + parallel_servers_target = int(max_cpu * server_num * 8) + tenant_variables = [ + # [变量名, 新值, 旧值, 替换条件: lambda n, o: n != o] + ['ob_sql_work_area_percentage', 80, 80, lambda n, o: n != o], + ['optimizer_use_sql_plan_baselines', True, True, lambda n, o: n != o], + ['optimizer_capture_sql_plan_baselines', True, True, lambda n, o: n != o], + ['ob_query_timeout', 36000000000, 36000000000, lambda n, o: n != o], + ['ob_trx_timeout', 36000000000, 36000000000, lambda n, o: n != o], + ['max_allowed_packet', 67108864, 67108864, lambda n, o: n != o], + ['secure_file_priv', "", "", lambda n, o: n != o], + ['parallel_max_servers', parallel_max_servers, parallel_max_servers, lambda n, o: n != o], + ['parallel_servers_target', parallel_servers_target, parallel_servers_target, lambda n, o: n != o] + ] + select_sql_t = "select value from oceanbase.__all_virtual_sys_variable where tenant_id = %d and name = '%%s'" % tenant_meta['tenant_id'] + update_sql_t = "ALTER TENANT %s SET VARIABLES %%s = %%%%s" % tenant_name + + for config in tenant_variables: + sql 
= select_sql_t % config[0] + ret = execute(cursor, sql) + if ret: + value = ret['value'] + config[2] = int(value) if isinstance(value, str) and value.isdigit() else value + if config[3](config[1], config[2]): + sql = update_sql_t % config[0] + tenant_variables_done.append(config) + execute(cursor, sql, [config[1]]) + else: + sql = "select value from oceanbase.__all_virtual_sys_variable where tenant_id = %d and name = 'secure_file_priv'" % tenant_meta['tenant_id'] + ret = execute(cursor, sql)['value'] + if ret is None: + stdio.error('Access denied. Please set `secure_file_priv` to "".') + return + if ret: + for path in tbl_path: + if not path.startswith(ret): + stdio.error('Access denied. Please set `secure_file_priv` to "".') + return + + parallel_num = int(max_cpu * unit_count) + + if not_test_only: + stdio.start_loading('Create table') + for path in ddl_path: + path = os.path.abspath(path) + stdio.verbose('load %s' % path) + ret = local_execute_command('%s < %s' % (sql_cmd_prefix, path)) + if not ret: + raise Exception(ret.stderr) + stdio.stop_loading('succeed') + + stdio.start_loading('Load data') + for path in tbl_path: + _, fn = os.path.split(path) + stdio.verbose('load %s' % path) + ret = local_execute_command("""%s -c -e "load data /*+ parallel(%s) */ infile '%s' into table %s fields terminated by '|';" """ % (sql_cmd_prefix, parallel_num, path, fn[:-4])) + if not ret: + raise Exception(ret.stderr) + stdio.stop_loading('succeed') + + merge_version = execute(cursor, "select value from oceanbase.__all_zone where name='frozen_version'")['value'] + stdio.start_loading('Merge') + execute(cursor, 'alter system major freeze') + sql = "select value from oceanbase.__all_zone where name='frozen_version' and value != %s" % merge_version + while True: + if execute(cursor, sql): + break + time.sleep(1) + + while True: + if not execute(cursor, """select * from oceanbase.__all_zone + where name='last_merged_version' + and value != (select value from oceanbase.__all_zone 
where name='frozen_version' limit 1) + and zone in (select zone from oceanbase.__all_zone where name='status' and info = 'ACTIVE') + """): + break + time.sleep(5) + stdio.stop_loading('succeed') + + + # 替换并发数 + stdio.start_loading('Format SQL') + n_sql_path = [] + for fp in sql_path: + _, fn = os.path.split(fp) + nfp = os.path.join(tmp_dir, fn) + ret = local_execute_command("sed %s -e 's/parallel(cpu_num)/parallel(%d)/' > %s" % (fp, cpu_total, nfp)) + if not ret: + raise Exception(ret.stderr) + n_sql_path.append(nfp) + sql_path = n_sql_path + stdio.stop_loading('succeed') + + #warmup预热 + stdio.start_loading('Warmup') + times = 2 + for path in sql_path: + _, fn = os.path.split(path) + log_path = os.path.join(tmp_dir, '%s.log' % fn) + ret = local_execute_command('source %s | %s -c > %s' % (path, sql_cmd_prefix, log_path)) + if not ret: + raise Exception(ret.stderr) + stdio.stop_loading('succeed') + + total_cost = 0 + for path in sql_path: + start_time = time.time() + _, fn = os.path.split(path) + log_path = os.path.join(tmp_dir, '%s.log' % fn) + stdio.print('[%s]: start %s' % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time)), path)) + ret = local_execute_command('echo source %s | %s -c > %s' % (path, sql_cmd_prefix, log_path)) + end_time = time.time() + cost = end_time - start_time + total_cost += cost + stdio.print('[%s]: end %s, cost %.1fs' % (time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time)), path, cost)) + if not ret: + raise Exception(ret.stderr) + stdio.print('Total Cost: %.1fs' % total_cost) + + except KeyboardInterrupt: + stdio.stop_loading('fail') + except Exception as e: + stdio.stop_loading('fail') + stdio.exception(str(e)) + finally: + try: + if optimization: + for config in tenant_variables_done[::-1]: + if config[3](config[1], config[2]): + sql = update_sql_t % config[0] + execute(cursor, sql, [config[2]]) + + for config in system_configs_done[::-1]: + if config[0] == 'sleep': + time.sleep(config[1]) + continue + if 
config[3](config[1], config[2]): + sql = 'alter system set %s=%%s' % config[0] + if config[4]: + sql += tenant_q + execute(cursor, sql, [config[2]]) + except: + pass diff --git a/profile/obd.sh b/profile/obd.sh index 62ebf62136a6f8f5a5759fbe4946c6db98c32726..44c8f94a3114aaf3fba4bdc1aa948105a637378f 100644 --- a/profile/obd.sh +++ b/profile/obd.sh @@ -15,7 +15,7 @@ function _obd_complete_func tenant_cmd="create drop" mirror_cmd="clone create list update" repo_cmd="list" - test_cmd="mysqltest sysbench" + test_cmd="mysqltest sysbench tpch" if [[ ${cur} == * ]] ; then case "${prev}" in obd);& diff --git a/requirements.txt b/requirements.txt index d63974a205035a658262e8247cc097ecf1bdb3b2..7fdf0eaca6b15b26cb242b556695fc36d835ffd7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,10 +2,12 @@ requests==2.24.0 rpmfile==1.0.8 paramiko==2.7.2 backports.lzma==0.0.14 -MySQL-python -ruamel.yaml +MySQL-python==1.2.5 +ruamel.yaml.clib==0.2.2 +ruamel.yaml==0.16.13 subprocess32==3.5.4 prettytable==1.0.1 enum34==1.1.6 progressbar==2.5 -halo==0.0.30 \ No newline at end of file +halo==0.0.30 +pycryptodome==3.10.1 diff --git a/requirements3.txt b/requirements3.txt index 3201e0ef7e39ee73d72139ee78e8a8069c696ea9..b78e60708ddba8c6d474df6570b2f0f9466812f1 100644 --- a/requirements3.txt +++ b/requirements3.txt @@ -2,8 +2,9 @@ rpmfile==1.0.8 paramiko==2.7.2 requests==2.25.1 PyMySQL==1.0.2 -ruamel.yaml +ruamel.yaml==0.17.4 subprocess32==3.5.4 prettytable==2.1.0 progressbar==2.5 -halo==0.0.31 \ No newline at end of file +halo==0.0.31 +pycryptodome==3.10.1 \ No newline at end of file diff --git a/ssh.py b/ssh.py index 0ecd634d976e9f3d7db177879e34b6b0b5dbaa41..aa6bc49051bf1b24502caab7871972ac5427e948 100644 --- a/ssh.py +++ b/ssh.py @@ -34,6 +34,9 @@ from paramiko.client import SSHClient, AutoAddPolicy from paramiko.ssh_exception import NoValidConnectionsError +__all__ = ("SshClient", "SshConfig", "LocalClient") + + class SshConfig(object): @@ -89,13 +92,13 @@ class LocalClient(object): 
@staticmethod def put_file(local_path, remote_path, stdio=None): - if LocalClient.execute_command('cp -f %s %s' % (local_path, remote_path), stdio=stdio): + if LocalClient.execute_command('mkdir -p %s && cp -f %s %s' % (os.path.dirname(remote_path), local_path, remote_path), stdio=stdio): return True return False @staticmethod - def put_dir(self, local_dir, remote_dir, stdio=None): - if LocalClient.execute_command('cp -fr %s %s' % (local_dir, remote_dir), stdio=stdio): + def put_dir(local_dir, remote_dir, stdio=None): + if LocalClient.execute_command('mkdir -p && cp -fr %s %s' % (os.path.dirname(remote_dir), local_dir, remote_dir), stdio=stdio): return True return False @@ -232,9 +235,13 @@ class SshClient(object): return False if not self._open_sftp(stdio): return False - - if self.execute_command('mkdir -p %s' % os.path.split(remote_path)[0], stdio): - return self.sftp.put(local_path, remote_path) + return self._put_file(local_path, remote_path, stdio) + + def _put_file(self, local_path, remote_path, stdio=None): + if self.execute_command('mkdir -p %s && rm -fr %s' % (os.path.dirname(remote_path), remote_path), stdio): + stdio and getattr(stdio, 'verbose', print)('send %s to %s' % (local_path, remote_path)) + if self.sftp.put(local_path, remote_path): + return self.execute_command('chmod %s %s' % (oct(os.stat(local_path).st_mode)[-3: ], remote_path)) return False def put_dir(self, local_dir, remote_dir, stdio=None): @@ -259,7 +266,7 @@ class SshClient(object): for name in files: local_path = os.path.join(root, name) remote_path = os.path.join(remote_dir, root[local_dir_path_len:].lstrip('/'), name) - if not self.sftp.put(local_path, remote_path): + if not self._put_file(local_path, remote_path, stdio): failed.append(remote_path) for name in dirs: local_path = os.path.join(root, name) diff --git a/tool.py b/tool.py index 259a784ad17fbbb0834ff11a541649b785c35c12..79be6e567ef37d56269fc7d0690ca522ed41f12a 100644 --- a/tool.py +++ b/tool.py @@ -129,6 +129,17 @@ class 
ConfigUtil(object): class DirectoryUtil(object): + @staticmethod + def list_dir(path, stdio=None): + files = [] + for fn in os.listdir(path): + fp = os.path.join(path, fn) + if os.path.isdir(fp): + files += DirectoryUtil.list_dir(fp) + else: + files.append(fp) + return files + @staticmethod def copy(src, dst, stdio=None): if not os.path.isdir(src):