未验证 提交 5a5bf043 编写于 作者: R Rongfeng Fu 提交者: GitHub

v1.6.1 (#146)

上级 1530c143
...@@ -854,6 +854,7 @@ class ClusterTenantCreateCommand(ClusterMirrorCommand): ...@@ -854,6 +854,7 @@ class ClusterTenantCreateCommand(ClusterMirrorCommand):
self.parser.add_option('--max-session-num', type='int', help="Max session unit number. Not supported after version 4.0") self.parser.add_option('--max-session-num', type='int', help="Max session unit number. Not supported after version 4.0")
self.parser.add_option('--unit-num', type='int', help="Pool unit number.") self.parser.add_option('--unit-num', type='int', help="Pool unit number.")
self.parser.add_option('-z', '--zone-list', type='string', help="Tenant zone list.") self.parser.add_option('-z', '--zone-list', type='string', help="Tenant zone list.")
self.parser.add_option('--mode', type='string', help='Tenant compatibility mode. {mysql,oracle} [mysql]', default='mysql')
self.parser.add_option('--charset', type='string', help="Tenant charset.") self.parser.add_option('--charset', type='string', help="Tenant charset.")
self.parser.add_option('--collate', type='string', help="Tenant COLLATE.") self.parser.add_option('--collate', type='string', help="Tenant COLLATE.")
self.parser.add_option('--replica-num', type='int', help="Tenant replica number.") self.parser.add_option('--replica-num', type='int', help="Tenant replica number.")
...@@ -925,6 +926,8 @@ class MySQLTestCommand(TestMirrorCommand): ...@@ -925,6 +926,8 @@ class MySQLTestCommand(TestMirrorCommand):
def __init__(self): def __init__(self):
super(MySQLTestCommand, self).__init__('mysqltest', 'Run a mysqltest for a deployment.') super(MySQLTestCommand, self).__init__('mysqltest', 'Run a mysqltest for a deployment.')
self.parser.add_option('--mode', type='string', help='Test mode. Available values are mysql, oracle, and both.', default='both')
# self.parser.add_option('--case-mode', type='string', help='case run mode [mysql,oracle]', default='mysql')
self.parser.add_option('--component', type='string', help='Components for mysqltest.') self.parser.add_option('--component', type='string', help='Components for mysqltest.')
self.parser.add_option('--test-server', type='string', help='The server for mysqltest. By default, the first root server in the component is the mysqltest server.') self.parser.add_option('--test-server', type='string', help='The server for mysqltest. By default, the first root server in the component is the mysqltest server.')
self.parser.add_option('--user', type='string', help='Username for a test. [admin]', default='admin') self.parser.add_option('--user', type='string', help='Username for a test. [admin]', default='admin')
...@@ -1042,7 +1045,7 @@ class TPCHCommand(TestMirrorCommand): ...@@ -1042,7 +1045,7 @@ class TPCHCommand(TestMirrorCommand):
self.parser.add_option('--remote-tbl-dir', type='string', help='Directory for the tbl on target observers. Make sure that you have read and write access to the directory when you start observer.') self.parser.add_option('--remote-tbl-dir', type='string', help='Directory for the tbl on target observers. Make sure that you have read and write access to the directory when you start observer.')
self.parser.add_option('--disable-transfer', '--dt', action='store_true', help='Disable the transfer. When enabled, OBD will use the tbl files under remote-tbl-dir instead of transferring local tbl files to remote remote-tbl-dir.') self.parser.add_option('--disable-transfer', '--dt', action='store_true', help='Disable the transfer. When enabled, OBD will use the tbl files under remote-tbl-dir instead of transferring local tbl files to remote remote-tbl-dir.')
self.parser.add_option('--dss-config', type='string', help='Directory for dists.dss. [/usr/tpc-h-tools/tpc-h-tools]', default='/usr/tpc-h-tools/tpc-h-tools/') self.parser.add_option('--dss-config', type='string', help='Directory for dists.dss. [/usr/tpc-h-tools/tpc-h-tools]', default='/usr/tpc-h-tools/tpc-h-tools/')
self.parser.add_option('-O', '--optimization', type='int', help='Optimization level {0/1}. [1]', default=1) self.parser.add_option('-O', '--optimization', type='int', help='Optimization level {0/1/2}. [1] 0 - No optimization. 1 - Optimize some of the parameters which do not need to restart servers. 2 - Optimize all the parameters and maybe RESTART SERVERS for better performance.', default=1)
self.parser.add_option('--test-only', action='store_true', help='Only testing SQLs are executed. No initialization is executed.') self.parser.add_option('--test-only', action='store_true', help='Only testing SQLs are executed. No initialization is executed.')
self.parser.add_option('-S', '--skip-cluster-status-check', action='store_true', help='Skip cluster status check', default=False) self.parser.add_option('-S', '--skip-cluster-status-check', action='store_true', help='Skip cluster status check', default=False)
......
...@@ -625,6 +625,8 @@ class ClusterConfig(object): ...@@ -625,6 +625,8 @@ class ClusterConfig(object):
return error return error
def set_global_conf(self, conf): def set_global_conf(self, conf):
if not isinstance(conf, dict):
raise Exception('%s global config is not a dictionary. Please check the syntax of your configuration file.\n See https://github.com/oceanbase/obdeploy/blob/master/docs/zh-CN/4.configuration-file-description.md' % self.name)
self._original_global_conf = deepcopy(conf) self._original_global_conf = deepcopy(conf)
self._global_conf = None self._global_conf = None
self._clear_cache_server() self._clear_cache_server()
......
...@@ -62,6 +62,8 @@ EC_FAIL_TO_CONNECT = OBDErrorCode(1006, 'Failed to connect to {component}') ...@@ -62,6 +62,8 @@ EC_FAIL_TO_CONNECT = OBDErrorCode(1006, 'Failed to connect to {component}')
EC_ULIMIT_CHECK = OBDErrorCode(1007, '({server}) {key} must not be less than {need} (Current value: {now})') EC_ULIMIT_CHECK = OBDErrorCode(1007, '({server}) {key} must not be less than {need} (Current value: {now})')
EC_OBSERVER_NOT_ENOUGH_MEMORY = OBDErrorCode(2000, '({ip}) not enough memory. (Free: {free}, Need: {need})') EC_OBSERVER_NOT_ENOUGH_MEMORY = OBDErrorCode(2000, '({ip}) not enough memory. (Free: {free}, Need: {need})')
EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE = OBDErrorCode(2000, '({ip}) not enough memory. (Available: {available}, Need: {need})')
EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED = OBDErrorCode(2000, '({ip}) not enough memory. (Free: {free}, Buff/Cache: {cached}, Need: {need})')
EC_OBSERVER_CAN_NOT_MIGRATE_IN = OBDErrorCode(2001, 'server can not migrate in') EC_OBSERVER_CAN_NOT_MIGRATE_IN = OBDErrorCode(2001, 'server can not migrate in')
EC_OBSERVER_FAIL_TO_START = OBDErrorCode(2002, 'Failed to start {server} observer') EC_OBSERVER_FAIL_TO_START = OBDErrorCode(2002, 'Failed to start {server} observer')
EC_OBSERVER_NOT_ENOUGH_DISK_4_CLOG = OBDErrorCode(2003, '({ip}) {path} not enough disk space for clog. Use redo_dir to set other disk for clog, or reduce the value of datafile_size') EC_OBSERVER_NOT_ENOUGH_DISK_4_CLOG = OBDErrorCode(2003, '({ip}) {path} not enough disk space for clog. Use redo_dir to set other disk for clog, or reduce the value of datafile_size')
......
...@@ -172,7 +172,9 @@ class ParallerExtractWorker(object): ...@@ -172,7 +172,9 @@ class ParallerExtractWorker(object):
class ParallerExtractor(object): class ParallerExtractor(object):
MAX_PARALLER = cpu_count() if cpu_count() else 8 MAX_PARALLER = cpu_count() * 2 if cpu_count() else 8
MAX_SIZE = 100
MIN_SIZE = 20
def __init__(self, pkg, files, stdio=None): def __init__(self, pkg, files, stdio=None):
self.pkg = pkg self.pkg = pkg
...@@ -180,11 +182,13 @@ class ParallerExtractor(object): ...@@ -180,11 +182,13 @@ class ParallerExtractor(object):
self.stdio = stdio self.stdio = stdio
def extract(self): def extract(self):
if not self.files:
return
workers = [] workers = []
file_num = len(self.files) file_num = len(self.files)
paraler = int(min(self.MAX_PARALLER, file_num)) paraller = int(min(self.MAX_PARALLER, file_num))
size = min(100, int(file_num / paraler)) size = min(self.MAX_SIZE, int(file_num / paraller)) #
size = int(max(10, size)) size = int(max(self.MIN_SIZE, size))
index = 0 index = 0
while index < file_num: while index < file_num:
p_index = index + size p_index = index + size
...@@ -195,7 +199,7 @@ class ParallerExtractor(object): ...@@ -195,7 +199,7 @@ class ParallerExtractor(object):
)) ))
index = p_index index = p_index
pool = Pool(processes=paraler) pool = Pool(processes=paraller)
try: try:
results = pool.map(ParallerExtractWorker.extract, workers) results = pool.map(ParallerExtractWorker.extract, workers)
for r in results: for r in results:
......
...@@ -890,7 +890,7 @@ class ObdHome(object): ...@@ -890,7 +890,7 @@ class ObdHome(object):
self._call_stdio('verbose', 'Get deploy configuration') self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config deploy_config = deploy.deploy_config
if not deploy_config: if not deploy_config:
self._call_stdio('error', 'Deploy configuration is empty.\nIt may be caused by a failure to resolve the configuration.\nPlease check your configuration file.') self._call_stdio('error', 'Deploy configuration is empty.\nIt may be caused by a failure to resolve the configuration.\nPlease check your configuration file.\nSee https://github.com/oceanbase/obdeploy/blob/master/docs/zh-CN/4.configuration-file-description.md')
return False return False
# Check the best suitable mirror for the components and installation plugins. Install locally # Check the best suitable mirror for the components and installation plugins. Install locally
...@@ -1061,7 +1061,7 @@ class ObdHome(object): ...@@ -1061,7 +1061,7 @@ class ObdHome(object):
return False return False
deploy_config = deploy.deploy_config deploy_config = deploy.deploy_config
if not deploy_config: if not deploy_config:
self._call_stdio('error', 'Deploy configuration is empty.\nIt may be caused by a failure to resolve the configuration.\nPlease check your configuration file.') self._call_stdio('error', 'Deploy configuration is empty.\nIt may be caused by a failure to resolve the configuration.\nPlease check your configuration file.\nSee https://github.com/oceanbase/obdeploy/blob/master/docs/zh-CN/4.configuration-file-description.md')
return False return False
style = getattr(options, 'style', '') style = getattr(options, 'style', '')
...@@ -1211,11 +1211,11 @@ class ObdHome(object): ...@@ -1211,11 +1211,11 @@ class ObdHome(object):
self._call_stdio('verbose', 'Get deploy configuration') self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config deploy_config = deploy.deploy_config
if not deploy_config: if not deploy_config:
self._call_stdio('error', 'Deploy configuration is empty.\nIt may be caused by a failure to resolve the configuration.\nPlease check your configuration file.') self._call_stdio('error', 'Deploy configuration is empty.\nIt may be caused by a failure to resolve the configuration.\nPlease check your configuration file.\nSee https://github.com/oceanbase/obdeploy/blob/master/docs/zh-CN/4.configuration-file-description.md')
return False return False
if not deploy_config.components: if not deploy_config.components:
self._call_stdio('error', 'Components not detected.\nPlease check the syntax of your configuration file.') self._call_stdio('error', 'Components not detected.\nPlease check the syntax of your configuration file.\nSee https://github.com/oceanbase/obdeploy/blob/master/docs/zh-CN/4.configuration-file-description.md')
return False return False
for component_name in deploy_config.components: for component_name in deploy_config.components:
......
# Quick deployment command
## obd demo
You can use this command to deploy and start the specified component on the local server without loading the configuration file. The fixed name of the cluster deployed is `demo`. After the deployment, you can run the `obd cluster list` command to view the cluster in the cluster list. You can also run other cluster commands, such as `obd cluster display demo`, to manage the cluster.
```bash
obd demo [-c/--components]
```
The following table describes the parameters.
| Parameter | Required | Data type | Default value | Description |
|------------------|---------|------------|----------|--------------------------------------------------------------------|
| -c/--components | No | String | oceanbase-ce,obproxy-ce,obagent,prometheus,grafana | A comma-separated (`,`) list of components. You can use this parameter to specify the components to be deployed. |
By default, this command deploys the minimum specifications in the home directory of the current user, and the latest versions are deployed by default. You can use this command to deploy OceanBase Community Edition, OBProxy Community Edition, OBAgent, Grafana, and Prometheus.
You can select the version and specify the configurations of a component to be deployed.
```bash
# Deploy components of the specified version.
obd demo -c oceanbase-ce,obproxy-ce --oceanbase-ce.version=3.1.3
# Specify the components to be deployed and the package hash of OceanBase Community Edition.
obd demo -c oceanbase-ce,obproxy-ce --oceanbase-ce.package_hash=f38723204d49057d3e062ffad778edc1552a7c114622bf2a86fea769fbd202ea
# Specify the installation path for all components to be deployed.
## Deploy OceanBase Community Edition and OBProxy Community Edition in the /data/demo directory and create corresponding working directories for them.
obd demo -c oceanbase-ce,obproxy-ce --home_path=/data/demo
# Specify the installation path for all components to be deployed.
obd demo --home_path=/path
# Specify the installation path for a specific component to be deployed.
## Deploy OceanBase Community Edition in the home directory and create a working directory for it, and deploy OBProxy Community Edition in the /data/demo directory.
obd demo -c oceanbase-ce,obproxy-ce --obproxy-ce.home_path=/data/demo/
# Specify the configurations of a component to be deployed.
## Specify the mysql_port parameter of OceanBase Community Edition.
obd demo --oceanbase-ce.mysql_port=3881
```
> **Notice**
>
> This command supports only level-1 configurations under global that are specified by using options.
test:
system_config:
- name: proxy_mem_limited
value: 2G
- name: slow_proxy_process_time_threshold
value: 500ms
- name: syslog_level
value: error
query_key: level
- name: enable_prometheus
value: false
- name: enable_compression_protocol
value: false
need_restart: true
value_type: BOOL
- name: enable_ob_protocol_v2
value: false
need_restart: true
value_type: BOOL
- name: work_thread_num
value: 128
need_restart: true
- name: enable_async_log
value: true
\ No newline at end of file
build:
system_config:
- name: proxy_mem_limited
value: 2G
- name: slow_proxy_process_time_threshold
value: 500ms
- name: syslog_level
value: error
query_key: level
- name: enable_prometheus
value: false
- name: enable_compression_protocol
value: false
need_restart: true
value_type: BOOL
- name: enable_ob_protocol_v2
value: false
need_restart: true
value_type: BOOL
- name: work_thread_num
value: 128
need_restart: true
- name: enable_async_log
value: true
\ No newline at end of file
...@@ -218,11 +218,11 @@ ...@@ -218,11 +218,11 @@
"exemplar": true, "exemplar": true,
"expr": "(sum(rate(ob_sysstat{stat_id=\"40003\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40001\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))\n/\n(sum(rate(ob_sysstat{stat_id=\"40002\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40004\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40008\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40000\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))", "expr": "(sum(rate(ob_sysstat{stat_id=\"40003\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + 
sum(rate(ob_sysstat{stat_id=\"40005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40009\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40001\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))\n/\n(sum(rate(ob_sysstat{stat_id=\"40002\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40004\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40008\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"40000\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group))",
"interval": "", "interval": "",
"legendFormat": "qps rt {{$group}}", "legendFormat": "sql latency {{$group}}",
"refId": "A" "refId": "A"
} }
], ],
"title": "QPS rt", "title": "Sql Latency",
"type": "timeseries" "type": "timeseries"
}, },
{ {
...@@ -2129,7 +2129,7 @@ ...@@ -2129,7 +2129,7 @@
"exemplar": true, "exemplar": true,
"expr": "(sum(rate(ob_sysstat{stat_id=\"10005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"10006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group)) / sum(rate(ob_sysstat{stat_id=\"10000\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group)", "expr": "(sum(rate(ob_sysstat{stat_id=\"10005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"10006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group)) / sum(rate(ob_sysstat{stat_id=\"10000\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group)",
"interval": "", "interval": "",
"legendFormat": "roc in rt {{$group}}", "legendFormat": "rpc in Latency {{$group}}",
"refId": "A" "refId": "A"
}, },
{ {
...@@ -2137,11 +2137,11 @@ ...@@ -2137,11 +2137,11 @@
"expr": "(sum(rate(ob_sysstat{stat_id=\"10005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"10006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group)) / sum(rate(ob_sysstat{stat_id=\"10002\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group)", "expr": "(sum(rate(ob_sysstat{stat_id=\"10005\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group) + sum(rate(ob_sysstat{stat_id=\"10006\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group)) / sum(rate(ob_sysstat{stat_id=\"10002\",ob_cluster_name=~\"$obcluster\",obzone=~\"$obzone\",svr_ip=~\"$observer\",tenant_name=~\"$tenant_name\"}[$__rate_interval])) by ($group)",
"hide": false, "hide": false,
"interval": "", "interval": "",
"legendFormat": "rpc out rt {{$group}}", "legendFormat": "rpc out latency {{$group}}",
"refId": "B" "refId": "B"
} }
], ],
"title": "Rpc rt", "title": "Rpc Latency",
"type": "timeseries" "type": "timeseries"
}, },
{ {
......
...@@ -36,7 +36,7 @@ def parse_size(size): ...@@ -36,7 +36,7 @@ def parse_size(size):
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
...@@ -127,6 +127,7 @@ def init(plugin_context, env, *args, **kwargs): ...@@ -127,6 +127,7 @@ def init(plugin_context, env, *args, **kwargs):
exec_init = 'init.sql' exec_init = 'init.sql'
exec_mini_init = 'init_mini.sql' exec_mini_init = 'init_mini.sql'
exec_init_user = 'init_user.sql|root@mysql|test' exec_init_user = 'init_user.sql|root@mysql|test'
exec_init_user_for_oracle = 'init_user_oracle.sql|SYS@oracle|SYS'
client = plugin_context.clients[server] client = plugin_context.clients[server]
memory_limit = get_memory_limit(cursor, client) memory_limit = get_memory_limit(cursor, client)
is_mini = memory_limit and parse_size(memory_limit) < (16<<30) is_mini = memory_limit and parse_size(memory_limit) < (16<<30)
......
...@@ -36,7 +36,7 @@ def parse_size(size): ...@@ -36,7 +36,7 @@ def parse_size(size):
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
......
...@@ -56,12 +56,12 @@ def display(plugin_context, cursor, *args, **kwargs): ...@@ -56,12 +56,12 @@ def display(plugin_context, cursor, *args, **kwargs):
continue continue
password = ob_config.get('root_password', '') password = ob_config.get('root_password', '')
with_observer = True with_observer = True
cmd = 'obclient -h%s -P%s -uroot %s-Doceanbase' % (server.ip, server_config['listen_port'], '-p%s ' % password if password else '') cmd = 'obclient -h%s -P%s -uroot %s-Doceanbase -A' % (server.ip, server_config['listen_port'], '-p%s ' % password if password else '')
break break
if not with_observer: if not with_observer:
password = server_config.get('obproxy_sys_password', '') password = server_config.get('obproxy_sys_password', '')
cmd = 'obclient -h%s -P%s -uroot@proxysys %s-Doceanbase' % (server.ip, server_config['listen_port'], '-p%s ' % password if password else '') cmd = 'obclient -h%s -P%s -uroot@proxysys %s-Doceanbase -A' % (server.ip, server_config['listen_port'], '-p%s ' % password if password else '')
stdio.print(cmd) stdio.print(cmd)
......
- src_path: ./home/admin/obproxy-$version/bin/obproxy - src_path: ./opt/taobao/install/obproxy-$version/bin/obproxy
target_path: bin/obproxy target_path: bin/obproxy
type: bin type: bin
mode: 755 mode: 755
\ No newline at end of file
...@@ -47,7 +47,7 @@ def generate_config(plugin_context, deploy_config, auto_depend=False, *args, **k ...@@ -47,7 +47,7 @@ def generate_config(plugin_context, deploy_config, auto_depend=False, *args, **k
if getattr(plugin_context.options, 'mini', False): if getattr(plugin_context.options, 'mini', False):
if 'proxy_mem_limited' not in global_config: if 'proxy_mem_limited' not in global_config:
cluster_config.update_global_conf('proxy_mem_limited', '200M', False) cluster_config.update_global_conf('proxy_mem_limited', '500M', False)
ob_comps = ['oceanbase', 'oceanbase-ce'] ob_comps = ['oceanbase', 'oceanbase-ce']
ob_cluster_config = None ob_cluster_config = None
......
...@@ -35,7 +35,7 @@ def parse_size(size): ...@@ -35,7 +35,7 @@ def parse_size(size):
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'^([1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper()) match = re.match(r'^(0|[1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
......
...@@ -36,7 +36,7 @@ def display(plugin_context, cursor, *args, **kwargs): ...@@ -36,7 +36,7 @@ def display(plugin_context, cursor, *args, **kwargs):
stdio.print_list(servers, ['ip', 'version', 'port', 'zone', 'status'], stdio.print_list(servers, ['ip', 'version', 'port', 'zone', 'status'],
lambda x: [x['svr_ip'], x['build_version'].split('_')[0], x['inner_port'], x['zone'], x['status']], title='observer') lambda x: [x['svr_ip'], x['build_version'].split('_')[0], x['inner_port'], x['zone'], x['status']], title='observer')
password = cluster_config.get_global_conf().get('root_password', '') password = cluster_config.get_global_conf().get('root_password', '')
cmd = 'obclient -h%s -P%s -uroot %s-Doceanbase' % (servers[0]['svr_ip'], servers[0]['inner_port'], '-p%s ' % password if password else '') cmd = 'obclient -h%s -P%s -uroot %s-Doceanbase -A' % (servers[0]['svr_ip'], servers[0]['inner_port'], '-p%s ' % password if password else '')
stdio.print(cmd) stdio.print(cmd)
stdio.stop_loading('succeed') stdio.stop_loading('succeed')
return plugin_context.return_true() return plugin_context.return_true()
......
...@@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function ...@@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function
import re, os import re, os
from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED
def parse_size(size): def parse_size(size):
...@@ -32,7 +32,7 @@ def parse_size(size): ...@@ -32,7 +32,7 @@ def parse_size(size):
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
...@@ -90,6 +90,7 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs): ...@@ -90,6 +90,7 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
MIN_MEMORY = 8 << 30 MIN_MEMORY = 8 << 30
MIN_CPU_COUNT = 16 MIN_CPU_COUNT = 16
START_NEED_MEMORY = 3 << 30
clog_disk_utilization_threshold_max = 95 clog_disk_utilization_threshold_max = 95
clog_disk_usage_limit_percentage_max = 98 clog_disk_usage_limit_percentage_max = 98
global_config = cluster_config.get_original_global_conf() global_config = cluster_config.get_original_global_conf()
...@@ -158,16 +159,32 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs): ...@@ -158,16 +159,32 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
if not server_config.get('memory_limit'): if not server_config.get('memory_limit'):
ret = client.execute_command('cat /proc/meminfo') ret = client.execute_command('cat /proc/meminfo')
if ret: if ret:
free_memory = 0 server_memory_stats = {}
memory_key_map = {
'MemTotal': 'total',
'MemFree': 'free',
'MemAvailable': 'available',
'Buffers': 'buffers',
'Cached': 'cached'
}
for key in memory_key_map:
server_memory_stats[memory_key_map[key]] = 0
for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
if k == 'MemAvailable': if k in memory_key_map:
free_memory = parse_size(str(v)) key = memory_key_map[k]
memory_limit = free_memory server_memory_stats[key] = parse_size(str(v))
if memory_limit < MIN_MEMORY:
stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(free_memory), need=format_size(MIN_MEMORY))) if server_memory_stats['available'] < START_NEED_MEMORY:
stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(START_NEED_MEMORY)))
success = False
continue
if server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached'] < MIN_MEMORY:
stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(MIN_MEMORY)))
success = False success = False
continue continue
memory_limit = max(MIN_MEMORY, memory_limit * 0.9)
memory_limit = max(MIN_MEMORY, server_memory_stats['available'] * 0.9)
server_config['memory_limit'] = format_size(memory_limit, 0) server_config['memory_limit'] = format_size(memory_limit, 0)
cluster_config.update_server_conf(server, 'memory_limit', server_config['memory_limit'], False) cluster_config.update_server_conf(server, 'memory_limit', server_config['memory_limit'], False)
else: else:
......
...@@ -115,10 +115,11 @@ def reload(plugin_context, cursor, new_cluster_config, *args, **kwargs): ...@@ -115,10 +115,11 @@ def reload(plugin_context, cursor, new_cluster_config, *args, **kwargs):
for server in servers: for server in servers:
if key not in change_conf[server]: if key not in change_conf[server]:
continue continue
value = change_conf[server][key]
msg = sql = 'alter system set %s = %%s server=%%s' % key msg = sql = 'alter system set %s = %%s server=%%s' % key
stdio.verbose('execute sql: %s' % msg) stdio.verbose('execute sql: %s' % msg)
cursor.execute(sql, [change_conf[server][key], cluster_server[server]]) cursor.execute(sql, [value, cluster_server[server]])
cluster_config.update_server_conf(server,key, value, False) cluster_config.update_server_conf(server, key, value, False)
except: except:
global_ret = False global_ret = False
stdio.exception('execute sql exception: %s' % msg) stdio.exception('execute sql exception: %s' % msg)
......
...@@ -24,7 +24,11 @@ import os ...@@ -24,7 +24,11 @@ import os
import re import re
import time import time
from _errno import EC_OBSERVER_NOT_ENOUGH_DISK_4_CLOG, EC_CONFIG_CONFLICT_PORT, EC_OBSERVER_NOT_ENOUGH_MEMORY, EC_ULIMIT_CHECK, WC_ULIMIT_CHECK from _errno import (
EC_OBSERVER_NOT_ENOUGH_DISK_4_CLOG, EC_CONFIG_CONFLICT_PORT,
EC_OBSERVER_NOT_ENOUGH_MEMORY, EC_ULIMIT_CHECK, WC_ULIMIT_CHECK,
EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED
)
stdio = None stdio = None
...@@ -46,8 +50,9 @@ def parse_size(size): ...@@ -46,8 +50,9 @@ def parse_size(size):
if not isinstance(size, str) or size.isdigit(): if not isinstance(size, str) or size.isdigit():
_bytes = int(size) _bytes = int(size)
else: else:
print (size)
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
...@@ -90,6 +95,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): ...@@ -90,6 +95,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs):
global success global success
success = False success = False
stdio.error(*arg, **kwargs) stdio.error(*arg, **kwargs)
global stdio global stdio
cluster_config = plugin_context.cluster_config cluster_config = plugin_context.cluster_config
clients = plugin_context.clients clients = plugin_context.clients
...@@ -101,6 +107,8 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): ...@@ -101,6 +107,8 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs):
servers_clog_mount = {} servers_clog_mount = {}
servers_net_inferface = {} servers_net_inferface = {}
server_num = len(cluster_config.servers) server_num = len(cluster_config.servers)
START_NEED_MEMORY = 3 << 30
stdio.start_loading('Check before start observer') stdio.start_loading('Check before start observer')
for server in cluster_config.servers: for server in cluster_config.servers:
ip = server.ip ip = server.ip
...@@ -119,7 +127,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): ...@@ -119,7 +127,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs):
servers_port[ip] = {} servers_port[ip] = {}
servers_clog_mount[ip] = {} servers_clog_mount[ip] = {}
servers_net_inferface[ip] = {} servers_net_inferface[ip] = {}
servers_memory[ip] = {'num': 0, 'percentage': 0} servers_memory[ip] = {'num': 0, 'percentage': 0, 'server_num': 0}
memory = servers_memory[ip] memory = servers_memory[ip]
ports = servers_port[ip] ports = servers_port[ip]
disk = servers_disk[ip] disk = servers_disk[ip]
...@@ -137,6 +145,8 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): ...@@ -137,6 +145,8 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs):
} }
if get_port_socket_inode(client, port): if get_port_socket_inode(client, port):
critical('%s:%s port is already used' % (ip, port)) critical('%s:%s port is already used' % (ip, port))
memory['server_num'] += 1
if 'memory_limit' in server_config: if 'memory_limit' in server_config:
try: try:
memory['num'] += parse_size(server_config['memory_limit']) memory['num'] += parse_size(server_config['memory_limit'])
...@@ -234,16 +244,29 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): ...@@ -234,16 +244,29 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs):
# memory # memory
ret = client.execute_command('cat /proc/meminfo') ret = client.execute_command('cat /proc/meminfo')
if ret: if ret:
total_memory = 0 server_memory_stats = {}
free_memory = 0 memory_key_map = {
'MemTotal': 'total',
'MemFree': 'free',
'MemAvailable': 'available',
'Buffers': 'buffers',
'Cached': 'cached'
}
for key in memory_key_map:
server_memory_stats[memory_key_map[key]] = 0
for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
if k == 'MemTotal': if k in memory_key_map:
total_memory = parse_size(str(v)) key = memory_key_map[k]
elif k == 'MemAvailable': server_memory_stats[key] = parse_size(str(v))
free_memory = parse_size(str(v))
total_use = servers_memory[ip]['percentage'] * total_memory / 100 + servers_memory[ip]['num'] min_start_need = servers_memory[ip]['server_num'] * START_NEED_MEMORY
if total_use > free_memory: total_use = servers_memory[ip]['percentage'] * server_memory_stats['total'] / 100 + servers_memory[ip]['num']
error(EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(free_memory), need=format_size(total_use))) if min_start_need > server_memory_stats['available']:
error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(min_start_need)))
elif total_use > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']:
error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(min_start_need)))
elif total_use > server_memory_stats['free']:
alert(EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(min_start_need)))
# disk # disk
disk = {'/': 0} disk = {'/': 0}
ret = client.execute_command('df --block-size=1024') ret = client.execute_command('df --block-size=1024')
......
...@@ -35,7 +35,7 @@ def parse_size(size): ...@@ -35,7 +35,7 @@ def parse_size(size):
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'^([1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper()) match = re.match(r'^(0|[1-9][0-9]*)\s*([B,K,M,G,T])$', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
......
...@@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function ...@@ -23,7 +23,7 @@ from __future__ import absolute_import, division, print_function
import re, os import re, os
from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED
def parse_size(size): def parse_size(size):
...@@ -32,7 +32,7 @@ def parse_size(size): ...@@ -32,7 +32,7 @@ def parse_size(size):
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
...@@ -106,6 +106,7 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs): ...@@ -106,6 +106,7 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
PRO_MEMORY_MIN = 16 << 30 PRO_MEMORY_MIN = 16 << 30
SLOG_SIZE = 10 << 30 SLOG_SIZE = 10 << 30
MIN_CPU_COUNT = 16 MIN_CPU_COUNT = 16
START_NEED_MEMORY = 3 << 30
if getattr(plugin_context.options, 'mini', False): if getattr(plugin_context.options, 'mini', False):
if not global_config.get('memory_limit_percentage') and not global_config.get('memory_limit'): if not global_config.get('memory_limit_percentage') and not global_config.get('memory_limit'):
cluster_config.update_global_conf('memory_limit', format_size(MIN_MEMORY, 0), False) cluster_config.update_global_conf('memory_limit', format_size(MIN_MEMORY, 0), False)
...@@ -161,18 +162,35 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs): ...@@ -161,18 +162,35 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
if not server_config.get('memory_limit'): if not server_config.get('memory_limit'):
ret = client.execute_command('cat /proc/meminfo') ret = client.execute_command('cat /proc/meminfo')
if ret: if ret:
free_memory = 0 server_memory_stats = {}
memory_key_map = {
'MemTotal': 'total',
'MemFree': 'free',
'MemAvailable': 'available',
'Buffers': 'buffers',
'Cached': 'cached'
}
for key in memory_key_map:
server_memory_stats[memory_key_map[key]] = 0
for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
if k == 'MemAvailable': if k in memory_key_map:
free_memory = parse_size(str(v)) key = memory_key_map[k]
memory_limit = free_memory server_memory_stats[key] = parse_size(str(v))
if memory_limit < min_memory:
stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(free_memory), need=format_size(min_memory))) if server_memory_stats['available'] < START_NEED_MEMORY:
stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(START_NEED_MEMORY)))
success = False
continue
if server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached'] < MIN_MEMORY:
stdio.error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(MIN_MEMORY)))
success = False success = False
continue continue
memory_limit = max(min_memory, memory_limit * 0.9)
memory_limit = max(MIN_MEMORY, server_memory_stats['available'] * 0.9)
server_config['memory_limit'] = format_size(memory_limit, 0) server_config['memory_limit'] = format_size(memory_limit, 0)
cluster_config.update_server_conf(server, 'memory_limit', server_config['memory_limit'], False) cluster_config.update_server_conf(server, 'memory_limit', server_config['memory_limit'], False)
auto_set_memory = True
else: else:
stdio.error("%s: fail to get memory info.\nPlease configure 'memory_limit' manually in configuration file") stdio.error("%s: fail to get memory info.\nPlease configure 'memory_limit' manually in configuration file")
success = False success = False
...@@ -180,7 +198,6 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs): ...@@ -180,7 +198,6 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
else: else:
try: try:
memory_limit = parse_size(server_config.get('memory_limit')) memory_limit = parse_size(server_config.get('memory_limit'))
auto_set_memory = True
except: except:
stdio.error('memory_limit must be an integer') stdio.error('memory_limit must be an integer')
return return
...@@ -300,14 +317,14 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs): ...@@ -300,14 +317,14 @@ def generate_config(plugin_context, deploy_config, *args, **kwargs):
continue continue
disk_free = disk_free - log_size - SLOG_SIZE disk_free = disk_free - log_size - SLOG_SIZE
memory_factor = 0 memory_factor = 6
if auto_set_datafile_size is False: if auto_set_datafile_size is False:
disk_free -= min_datafile_size disk_free -= min_datafile_size
memory_factor += 3 memory_factor -= 3
if auto_set_log_disk_size is False: if auto_set_log_disk_size is False:
disk_free -= min_log_disk_size disk_free -= min_log_disk_size
memory_factor += 3 memory_factor -= 3
memory_limit = format_size(disk_free / memory_factor, 0) memory_limit = format_size(disk_free / max(1, memory_factor), 0)
cluster_config.update_server_conf(server, 'memory_limit', memory_limit, False) cluster_config.update_server_conf(server, 'memory_limit', memory_limit, False)
memory_limit = parse_size(memory_limit) memory_limit = parse_size(memory_limit)
if auto_set_system_memory: if auto_set_system_memory:
......
...@@ -24,8 +24,11 @@ import os ...@@ -24,8 +24,11 @@ import os
import re import re
import time import time
from _errno import EC_OBSERVER_NOT_ENOUGH_DISK_4_CLOG, EC_CONFIG_CONFLICT_PORT, EC_OBSERVER_NOT_ENOUGH_MEMORY, EC_ULIMIT_CHECK, WC_ULIMIT_CHECK from _errno import (
EC_OBSERVER_NOT_ENOUGH_DISK_4_CLOG, EC_CONFIG_CONFLICT_PORT,
EC_OBSERVER_NOT_ENOUGH_MEMORY, EC_ULIMIT_CHECK, WC_ULIMIT_CHECK,
EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED
)
stdio = None stdio = None
success = True success = True
...@@ -47,7 +50,7 @@ def parse_size(size): ...@@ -47,7 +50,7 @@ def parse_size(size):
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
...@@ -113,6 +116,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): ...@@ -113,6 +116,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs):
PRO_MEMORY_MIN = 16 << 30 PRO_MEMORY_MIN = 16 << 30
PRO_POOL_MEM_MIN = 2147483648 PRO_POOL_MEM_MIN = 2147483648
START_NEED_MEMORY = 3 << 30
stdio.start_loading('Check before start observer') stdio.start_loading('Check before start observer')
for server in cluster_config.servers: for server in cluster_config.servers:
ip = server.ip ip = server.ip
...@@ -131,7 +135,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): ...@@ -131,7 +135,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs):
servers_port[ip] = {} servers_port[ip] = {}
servers_clog_mount[ip] = {} servers_clog_mount[ip] = {}
servers_net_inferface[ip] = {} servers_net_inferface[ip] = {}
servers_memory[ip] = {'num': 0, 'percentage': 0} servers_memory[ip] = {'num': 0, 'percentage': 0, 'server_num': 0}
memory = servers_memory[ip] memory = servers_memory[ip]
ports = servers_port[ip] ports = servers_port[ip]
disk = servers_disk[ip] disk = servers_disk[ip]
...@@ -154,6 +158,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): ...@@ -154,6 +158,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs):
if server_config.get('production_mode') and __min_full_resource_pool_memory < PRO_POOL_MEM_MIN: if server_config.get('production_mode') and __min_full_resource_pool_memory < PRO_POOL_MEM_MIN:
error('(%s): when production_mode is True, __min_full_resource_pool_memory can not be less then %s' % (server, PRO_POOL_MEM_MIN)) error('(%s): when production_mode is True, __min_full_resource_pool_memory can not be less then %s' % (server, PRO_POOL_MEM_MIN))
memory['server_num'] += 1
if 'memory_limit' in server_config: if 'memory_limit' in server_config:
try: try:
memory_limit = parse_size(server_config['memory_limit']) memory_limit = parse_size(server_config['memory_limit'])
...@@ -171,6 +176,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): ...@@ -171,6 +176,7 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs):
return return
else: else:
memory['percentage'] += 80 memory['percentage'] += 80
data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store') data_path = server_config['data_dir'] if server_config.get('data_dir') else os.path.join(server_config['home_path'], 'store')
redo_dir = server_config['redo_dir'] if server_config.get('redo_dir') else data_path redo_dir = server_config['redo_dir'] if server_config.get('redo_dir') else data_path
clog_dir = server_config['clog_dir'] if server_config.get('clog_dir') else os.path.join(redo_dir, 'clog') clog_dir = server_config['clog_dir'] if server_config.get('clog_dir') else os.path.join(redo_dir, 'clog')
...@@ -261,16 +267,29 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs): ...@@ -261,16 +267,29 @@ def _start_check(plugin_context, strict_check=False, *args, **kwargs):
# memory # memory
ret = client.execute_command('cat /proc/meminfo') ret = client.execute_command('cat /proc/meminfo')
if ret: if ret:
total_memory = 0 server_memory_stats = {}
free_memory = 0 memory_key_map = {
'MemTotal': 'total',
'MemFree': 'free',
'MemAvailable': 'available',
'Buffers': 'buffers',
'Cached': 'cached'
}
for key in memory_key_map:
server_memory_stats[memory_key_map[key]] = 0
for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout): for k, v in re.findall('(\w+)\s*:\s*(\d+\s*\w+)', ret.stdout):
if k == 'MemTotal': if k in memory_key_map:
total_memory = parse_size(str(v)) key = memory_key_map[k]
elif k == 'MemAvailable': server_memory_stats[key] = parse_size(str(v))
free_memory = parse_size(str(v))
total_use = servers_memory[ip]['percentage'] * total_memory / 100 + servers_memory[ip]['num'] min_start_need = servers_memory[ip]['server_num'] * START_NEED_MEMORY
if total_use > free_memory: total_use = servers_memory[ip]['percentage'] * server_memory_stats['total'] / 100 + servers_memory[ip]['num']
error(EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(free_memory), need=format_size(total_use))) if min_start_need > server_memory_stats['available']:
error(EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE.format(ip=ip, available=format_size(server_memory_stats['available']), need=format_size(min_start_need)))
elif total_use > server_memory_stats['free'] + server_memory_stats['buffers'] + server_memory_stats['cached']:
error(EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(min_start_need)))
elif total_use > server_memory_stats['free']:
alert(EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(min_start_need)))
# disk # disk
disk = {'/': 0} disk = {'/': 0}
ret = client.execute_command('df --block-size=1024') ret = client.execute_command('df --block-size=1024')
......
...@@ -35,7 +35,7 @@ def parse_size(size): ...@@ -35,7 +35,7 @@ def parse_size(size):
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40} units = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
......
...@@ -35,7 +35,7 @@ def parse_size(size): ...@@ -35,7 +35,7 @@ def parse_size(size):
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40} units = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
......
...@@ -40,7 +40,7 @@ def parse_size(size): ...@@ -40,7 +40,7 @@ def parse_size(size):
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
......
...@@ -41,7 +41,7 @@ def parse_size(size): ...@@ -41,7 +41,7 @@ def parse_size(size):
_bytes = int(size) _bytes = int(size)
else: else:
units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40} units = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40}
match = re.match(r'([1-9][0-9]*)\s*([B,K,M,G,T])', size.upper()) match = re.match(r'(0|[1-9][0-9]*)\s*([B,K,M,G,T])', size.upper())
_bytes = int(match.group(1)) * units[match.group(2)] _bytes = int(match.group(1)) * units[match.group(2)]
return _bytes return _bytes
......
...@@ -368,7 +368,7 @@ class SshClient(SafeStdio): ...@@ -368,7 +368,7 @@ class SshClient(SafeStdio):
_transporter = RemoteTransporter.CLIENT _transporter = RemoteTransporter.CLIENT
if not self._is_local() and self._remote_transporter is None: if not self._is_local() and self._remote_transporter is None:
if not self.config.password and not self.disable_rsync: if not self.config.password and not self.disable_rsync:
ret = LocalClient.execute_command('rsync -h', stdio=self.stdio) ret = LocalClient.execute_command('rsync -h', stdio=self.stdio) and self.execute_command('rsync -h', stdio=self.stdio)
if ret: if ret:
_transporter = RemoteTransporter.RSYNC _transporter = RemoteTransporter.RSYNC
self._remote_transporter = _transporter self._remote_transporter = _transporter
...@@ -418,8 +418,10 @@ class SshClient(SafeStdio): ...@@ -418,8 +418,10 @@ class SshClient(SafeStdio):
def _rsync(self, source, target, stdio=None): def _rsync(self, source, target, stdio=None):
identity_option = "" identity_option = ""
if self.config.key_filename: if self.config.key_filename:
identity_option += '-e "ssh -i {key_filename} "'.format(key_filename=self.config.key_filename) identity_option += '-i {key_filename} '.format(key_filename=self.config.key_filename)
cmd = 'rsync -a -W {identity_option} {source} {target}'.format( if self.config.port:
identity_option += '-p {}'.format(self.config.port)
cmd = 'rsync -a -W -e "ssh {identity_option}" {source} {target}'.format(
identity_option=identity_option, identity_option=identity_option,
source=source, source=source,
target=target target=target
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册