Unverified commit 5655ea2f, authored by Rongfeng Fu, committed by GitHub

V2.0.0 (#162)

Parent 97895924
@@ -6,4 +6,33 @@ dist
 .vscode
 .git
 __pycache__
-.idea/workspace.xml
+.idea
+.obd
+plugins/oceanbase-ce
+config_parser/oceanbase-ce
+tags
+.DS_store
+
+# dependencies
+/web/node_modules
+/web/npm-debug.log*
+/web/yarn-error.log
+/web/yarn.lock
+/web/package-lock.json
+/web/.mfsu-dev
+/web/.mfsu-prod
+
+# production
+/web/dist
+
+# misc
+/web/**/.DS_Store
+/web/.DS_Store
+
+# umi
+/web/src/.umi
+/web/src/.umi-production
+/web/src/.umi-test
+/web/.env.local
This diff is collapsed.
@@ -23,7 +23,6 @@ from __future__ import absolute_import, division, print_function
 import os
 import re
 import sys
-import pickle
 import getpass
 import hashlib
 from copy import deepcopy
@@ -31,9 +30,9 @@ from enum import Enum
 from ruamel.yaml.comments import CommentedMap
 
+import _errno as err
 from tool import ConfigUtil, FileUtil, YamlLoader, OrderedDict, COMMAND_ENV
 from _manager import Manager
-from _repository import Repository
 from _stdio import SafeStdio
 from _environ import ENV_BASE_DIR
@@ -360,6 +359,7 @@ class ClusterConfig(object):
         self.origin_package_hash = package_hash
         self._package_hash = package_hash
         self._temp_conf = {}
+        self._all_default_conf = {}
         self._default_conf = {}
         self._global_conf = None
         self._server_conf = {}
@@ -371,6 +371,8 @@ class ClusterConfig(object):
         self._include_file = None
         self._origin_include_file = None
         self._origin_include_config = None
+        self._unprocessed_global_conf = None
+        self._unprocessed_server_conf = {}
         self._environments = None
         self._origin_environments = {}
         self._inner_config = {}
@@ -414,7 +416,14 @@ class ClusterConfig(object):
         if not isinstance(other, self.__class__):
             return False
         # todo: check rsync, include, etc.
-        return self._global_conf == other._global_conf and self._server_conf == other._server_conf
+        if self.servers != other.servers:
+            return False
+        if self.get_global_conf() != other.get_global_conf():
+            return False
+        for server in self.servers:
+            if self.get_server_conf(server) != other.get_server_conf(server):
+                return False
+        return True
 
     def __deepcopy__(self, memo):
         cluster_config = self.__class__(deepcopy(self.servers), self.name, self.version, self.tag, self.package_hash, self.parser)
@@ -451,6 +460,8 @@ class ClusterConfig(object):
     def _clear_cache_server(self):
         for server in self._cache_server:
             self._cache_server[server] = None
+            if server in self._unprocessed_server_conf:
+                del self._unprocessed_server_conf[server]
 
     def get_inner_config(self):
         return self._inner_config
@@ -485,11 +496,14 @@ class ClusterConfig(object):
         cluster_config = self._depends[name]
         return deepcopy(cluster_config.original_servers)
 
-    def get_depend_config(self, name, server=None):
+    def get_depend_config(self, name, server=None, with_default=True):
         if name not in self._depends:
             return None
         cluster_config = self._depends[name]
-        config = cluster_config.get_server_conf_with_default(server) if server else cluster_config.get_global_conf()
+        if with_default:
+            config = cluster_config.get_server_conf_with_default(server) if server else cluster_config.get_global_conf_with_default()
+        else:
+            config = cluster_config.get_server_conf(server) if server else cluster_config.get_global_conf()
         return deepcopy(config)
 
     def update_server_conf(self, server, key, value, save=True):
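
A minimal sketch of the merge order behind the new `with_default` flag, using plain dicts with hypothetical keys and values in place of the real config objects:

    # what get_depend_config(..., with_default=True) sees: plugin defaults
    # overlaid with the user's explicit config (keys/values are made up)
    all_defaults = {'memory_limit': '6G', 'syslog_level': 'INFO'}
    user_global = {'memory_limit': '16G'}

    with_default = dict(all_defaults, **user_global)
    without_default = dict(user_global)   # with_default=False: only explicit keys

    assert with_default['syslog_level'] == 'INFO'   # filled from plugin defaults
    assert 'syslog_level' not in without_default    # absent when defaults are skipped
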
@@ -514,15 +528,13 @@ class ClusterConfig(object):
         if not self._deploy_config.update_component_global_conf(self.name, key, value, save):
             return False
         self._update_global_conf(key, value)
-        for server in self._cache_server:
-            if self._cache_server[server] is not None:
-                self._cache_server[server][key] = value
         return True
 
     def _update_global_conf(self, key, value):
         self._original_global_conf[key] = value
-        if self._global_conf:
-            self._global_conf[key] = value
+        self._global_conf = None
+        self._unprocessed_global_conf = None
+        self._clear_cache_server()
 
     def update_rsync_list(self, rsync_list, save=True):
         if self._deploy_config is None:
@@ -541,11 +553,13 @@ class ClusterConfig(object):
         self._environments = None
         return True
 
-    def get_unconfigured_require_item(self, server):
+    def get_unconfigured_require_item(self, server, skip_keys=[]):
         items = []
-        config = self.get_server_conf(server)
+        config = self._get_unprocessed_server_conf(server)
         if config is not None:
             for key in self._temp_conf:
+                if key in skip_keys:
+                    continue
                 if not self._temp_conf[key].require:
                     continue
                 if key in config:
@@ -556,11 +570,10 @@ class ClusterConfig(object):
     def get_server_conf_with_default(self, server):
         if server not in self._server_conf:
             return None
-        config = {}
-        for key in self._temp_conf:
-            if self._temp_conf[key].default is not None:
-                config[key] = self._temp_conf[key].default
-        config.update(self.get_server_conf(server))
+        config = deepcopy(self._all_default_conf)
+        server_config = self.get_server_conf(server)
+        if server_config:
+            config.update(server_config)
         return config
 
     def get_need_redeploy_items(self, server):
@@ -585,11 +598,15 @@ class ClusterConfig(object):
 
     def update_temp_conf(self, temp_conf):
         self._default_conf = {}
+        self._all_default_conf = {}
         self._temp_conf = temp_conf
         for key in self._temp_conf:
             if self._temp_conf[key].require and self._temp_conf[key].default is not None:
                 self._default_conf[key] = self._temp_conf[key].default
+            if self._temp_conf[key].default is not None:
+                self._all_default_conf[key] = self._temp_conf[key].default
         self._global_conf = None
+        self._unprocessed_global_conf = None
         self._clear_cache_server()
 
     def _apply_temp_conf(self, conf):
@@ -606,23 +623,44 @@ class ClusterConfig(object):
         return None
 
     def check_param(self):
-        error = []
+        errors = []
+        if self._temp_conf:
+            _, g_errs = self.global_check_param()
+            errors += g_errs
+            for server in self._server_conf:
+                s_errs, _ = self._check_param(self._server_conf[server])
+                errors += s_errs
+        return not errors, set(errors)
+
+    def global_check_param(self):
+        errors = []
+        if self._temp_conf:
+            errors, _ = self._check_param(self._get_unprocessed_global_conf())
+        return not errors, errors
+
+    def servers_check_param(self):
+        check_res = {}
         if self._temp_conf:
-            error += self._check_param(self.get_global_conf())
+            global_config = self._get_unprocessed_global_conf()
             for server in self._server_conf:
-                error += self._check_param(self._server_conf[server])
-        return not error, set(error)
+                config = deepcopy(self._server_conf[server])
+                config.update(global_config)
+                errors, items = self._check_param(config)
+                check_res[server] = {'errors': errors, 'items': items}
+        return check_res
 
     def _check_param(self, config):
-        error = []
+        errors = []
+        items = []
         for key in config:
             item = self._temp_conf.get(key)
             if item:
                 try:
                     item.check_value(config[key])
                 except Exception as e:
-                    error.append(str(e))
-        return error
+                    errors.append(str(e))
+                    items.append(item)
+        return errors, items
 
     def set_global_conf(self, conf):
         if not isinstance(conf, dict):
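
A self-contained sketch of the new `(errors, items)` contract of `_check_param`, with a simplified stand-in for the real config items; the failing item is returned alongside its message so callers such as `servers_check_param` can attach suggestions or auto-fixes to it:

    class FakeItem:
        """Simplified stand-in for a ParamPlugin config item."""
        def __init__(self, name):
            self.name = name
        def check_value(self, value):
            if not isinstance(value, int):
                raise Exception('%s must be an integer' % self.name)

    temp_conf = {'cpu_count': FakeItem('cpu_count')}

    def check_param(config):
        errors, items = [], []
        for key, value in config.items():
            item = temp_conf.get(key)
            if item:
                try:
                    item.check_value(value)
                except Exception as e:
                    errors.append(str(e))
                    items.append(item)   # failing item rides along with its error
        return errors, items

    print(check_param({'cpu_count': 'four'}))  # (['cpu_count must be an integer'], [...])
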
@@ -652,15 +690,24 @@ class ClusterConfig(object):
             self._server_conf[server] = conf
         self._cache_server[server] = None
 
+    def _get_unprocessed_global_conf(self):
+        if self._unprocessed_global_conf is None:
+            self._unprocessed_global_conf = deepcopy(self._default_conf)
+            self._unprocessed_global_conf.update(self._get_include_config('config', {}))
+            if self._original_global_conf:
+                self._unprocessed_global_conf.update(self._original_global_conf)
+        return self._unprocessed_global_conf
+
     def get_global_conf(self):
         if self._global_conf is None:
-            self._global_conf = deepcopy(self._default_conf)
-            self._global_conf.update(self._get_include_config('config', {}))
-            if self._original_global_conf:
-                self._global_conf.update(self._original_global_conf)
-            self._global_conf = self._apply_temp_conf(self._global_conf)
+            self._global_conf = self._apply_temp_conf(self._get_unprocessed_global_conf())
         return self._global_conf
 
+    def get_global_conf_with_default(self):
+        config = deepcopy(self._all_default_conf)
+        config.update(self.get_global_conf())
+        return config
+
     def _add_base_dir(self, path):
         if not os.path.isabs(path):
             if self._base_dir:
@@ -758,22 +805,32 @@ class ClusterConfig(object):
         self._environments.update(self._origin_environments)
         return self._environments
 
+    def _get_unprocessed_server_conf(self, server):
+        if server not in self._unprocessed_server_conf:
+            conf = deepcopy(self._inner_config.get(server.name, {}))
+            conf.update(self._get_unprocessed_global_conf())
+            conf.update(self._server_conf[server])
+            self._unprocessed_server_conf[server] = conf
+        return self._unprocessed_server_conf[server]
+
     def get_server_conf(self, server):
         if server not in self._server_conf:
             return None
         if self._cache_server[server] is None:
-            conf = self._apply_temp_conf(deepcopy(self._inner_config.get(server.name, {})))
-            conf.update(self.get_global_conf())
-            conf.update(self._apply_temp_conf(self._server_conf[server]))
-            self._cache_server[server] = conf
+            self._cache_server[server] = self._apply_temp_conf(self._get_unprocessed_server_conf(server))
         return self._cache_server[server]
 
     def get_original_global_conf(self):
-        return self._original_global_conf
+        return deepcopy(self._original_global_conf)
 
     def get_original_server_conf(self, server):
         return self._server_conf.get(server)
 
+    def get_original_server_conf_with_global(self, server):
+        config = self.get_original_global_conf()
+        config.update(self._server_conf.get(server, {}))
+        return config
+
 
 class DeployStatus(Enum):
......
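
The net effect of the `_unprocessed_*` refactor above is a fixed layering that is merged once, cached, and only then type-converted. A dict-based sketch of that layering (keys and values are hypothetical):

    inner = {'$_zone_idc': 'z1'}            # component inner config, lowest priority
    global_conf = {'memory_limit': '16G'}   # defaults + include file + user global
    server_conf = {'memory_limit': '8G'}    # per-server section, highest priority

    conf = dict(inner)
    conf.update(global_conf)
    conf.update(server_conf)
    assert conf['memory_limit'] == '8G'
    # get_server_conf() is now just _apply_temp_conf() over this cached dict,
    # so typed parsing runs once per server instead of once per layer.
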
@@ -23,6 +23,9 @@ from __future__ import absolute_import, division, print_function
 # obd dev mode. {0/1}
 ENV_DEV_MODE = "OBD_DEV_MODE"
 
+# obd lock mode. 0 - No lock mode, 1 - The deploy lock will be downgraded to shared lock, 2 - Default lock mode.
+ENV_LOCK_MODE = "OBD_LOCK_MODE"
+
 # base path which will be used by runtime dependencies sync and include config. {absolute path style}
 ENV_BASE_DIR = "OBD_DEPLOY_BASE_DIR"
@@ -31,3 +34,5 @@ ENV_REPO_INSTALL_MODE = "OBD_REPO_INSTALL_MODE"
 # disable rsync mode even if the rsync exists. {0/1}
 ENV_DISABLE_RSYNC = "OBD_DISABLE_RSYNC"
+
+ENV_DISABLE_PARALLER_EXTRACT = "OBD_DISALBE_PARALLER_EXTRACT"
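
A hedged sketch of how these switches might be consumed; OBD routes them through COMMAND_ENV in tool.py, and plain os.getenv stands in here:

    import os

    lock_mode = int(os.getenv("OBD_LOCK_MODE", "2"))            # 2 = default locking
    disable_rsync = os.getenv("OBD_DISABLE_RSYNC", "0") == "1"
    # the misspelled value below is the constant actually shipped in the source
    no_par_extract = os.getenv("OBD_DISALBE_PARALLER_EXTRACT", "0") == "1"
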
@@ -29,16 +29,76 @@ class LockError(Exception):
 
 class OBDErrorCode(object):
 
+    def __init__(self, code, msg):
+        self.code = code
+        self.msg = msg
+
+    def __str__(self):
+        return self.msg
+
+
+class OBDErrorCodeTemplate(object):
+
     def __init__(self, code, msg):
         self.code = code
         self.msg = msg
         self._str_ = ('OBD-%04d: ' % code) + msg
 
     def format(self, *args, **kwargs):
-        return self._str_.format(*args, **kwargs)
+        return OBDErrorCode(
+            self.code,
+            self._str_.format(*args, **kwargs),
+        )
 
     def __str__(self):
-        return self._str_
+        return self.msg
+
+
+class FixEval(object):
+
+    DEL = 0
+    SET = 1
+
+    def __init__(self, operation, key, value=None, is_global=False):
+        self.operation = operation
+        self.key = key
+        self.value = value
+        self.is_global = is_global
+
+
+class OBDErrorSuggestion(object):
+
+    def __init__(self, msg, auto_fix=False, fix_eval=[]):
+        self.msg = msg
+        self.auto_fix = auto_fix
+        self.fix_eval = fix_eval
+
+
+class OBDErrorSuggestionTemplate(object):
+
+    def __init__(self, msg, auto_fix=False, fix_eval=[]):
+        self._msg = msg
+        self.auto_fix = auto_fix
+        self.fix_eval = fix_eval if isinstance(fix_eval, list) else [fix_eval]
+
+    def format(self, *args, **kwargs):
+        return OBDErrorSuggestion(
+            self._msg.format(*args, **kwargs),
+            auto_fix=kwargs.get('auto_fix', self.auto_fix),
+            fix_eval=kwargs.get('fix_eval', self.fix_eval)
+        )
+
+
+class CheckStatus(object):
+
+    FAIL = "FAIL"
+    PASS = "PASS"
+    WAIT = "WAIT"
+
+    def __init__(self, status=WAIT, error=None, suggests=[]):
+        self.status = status
+        self.error = error
+        self.suggests = suggests
 
 
 class InitDirFailedErrorMessage(object):
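
The point of the template/instance split above: `format()` now returns an object that keeps the numeric code for programmatic matching while still printing a human-readable message. A runnable reduction:

    class Code:
        def __init__(self, code, msg):
            self.code, self.msg = code, msg
        def __str__(self):
            return self.msg

    class CodeTemplate:
        def __init__(self, code, msg):
            self.code = code
            self._str_ = ('OBD-%04d: ' % code) + msg
        def format(self, **kwargs):
            return Code(self.code, self._str_.format(**kwargs))

    EC_CONFLICT_PORT = CodeTemplate(1001, '{server}:{port} port is already used')
    err = EC_CONFLICT_PORT.format(server='server1', port=2881)
    print(err)       # OBD-1001: server1:2881 port is already used
    print(err.code)  # 1001 -- usable for mapping errors to suggestions
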
@@ -46,36 +106,119 @@ class InitDirFailedErrorMessage(object):
     PATH_ONLY = ': {path}.'
     NOT_EMPTY = ': {path} is not empty.'
     CREATE_FAILED = ': create {path} failed.'
+    NOT_DIR = ': {path} is not a directory .'
     PERMISSION_DENIED = ': {path} permission denied .'
 
 DOC_LINK = '<DOC_LINK>'
 DOC_LINK_MSG = 'See {}'.format(DOC_LINK if DOC_LINK else "https://www.oceanbase.com/product/ob-deployer/error-codes .")
 
-EC_CONFIG_CONFLICT_PORT = OBDErrorCode(1000, 'Configuration conflict {server1}:{port} port is used for {server2}\'s {key}')
-EC_CONFLICT_PORT = OBDErrorCode(1001, '{server}:{port} port is already used')
-EC_FAIL_TO_INIT_PATH = OBDErrorCode(1002, 'Fail to init {server} {key}{msg}')
-EC_CLEAN_PATH_FAILED = OBDErrorCode(1003, 'Fail to clean {server}:{path}')
-EC_CONFIG_CONFLICT_DIR = OBDErrorCode(1004, 'Configuration conflict {server1}: {path} is used for {server2}\'s {key}')
-EC_SOME_SERVER_STOPED = OBDErrorCode(1005, 'Some of the servers in the cluster have been stopped')
-EC_FAIL_TO_CONNECT = OBDErrorCode(1006, 'Failed to connect to {component}')
-EC_ULIMIT_CHECK = OBDErrorCode(1007, '({server}) {key} must not be less than {need} (Current value: {now})')
-
-EC_OBSERVER_NOT_ENOUGH_MEMORY = OBDErrorCode(2000, '({ip}) not enough memory. (Free: {free}, Need: {need})')
-EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE = OBDErrorCode(2000, '({ip}) not enough memory. (Available: {available}, Need: {need})')
-EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED = OBDErrorCode(2000, '({ip}) not enough memory. (Free: {free}, Buff/Cache: {cached}, Need: {need})')
-EC_OBSERVER_CAN_NOT_MIGRATE_IN = OBDErrorCode(2001, 'server can not migrate in')
-EC_OBSERVER_FAIL_TO_START = OBDErrorCode(2002, 'Failed to start {server} observer')
-EC_OBSERVER_NOT_ENOUGH_DISK_4_CLOG = OBDErrorCode(2003, '({ip}) {path} not enough disk space for clog. Use redo_dir to set other disk for clog, or reduce the value of datafile_size')
-EC_OBSERVER_INVALID_MODFILY_GLOBAL_KEY = OBDErrorCode(2004, 'Invalid: {key} is not a single server configuration item')
-
-EC_MYSQLTEST_PARSE_CMD_FAILED = OBDErrorCode(3000, 'parse cmd failed: {path}')
-EC_MYSQLTEST_FAILE_NOT_FOUND = OBDErrorCode(3001, '{file} not found in {path}')
-EC_TPCC_LOAD_DATA_FAILED = OBDErrorCode(3002, 'Failed to load data.')
-EC_TPCC_RUN_TEST_FAILED = OBDErrorCode(3003, 'Failed to run TPC-C benchmark.')
-
-EC_OBAGENT_RELOAD_FAILED = OBDErrorCode(4000, 'Fail to reload {server}')
-EC_OBAGENT_SEND_CONFIG_FAILED = OBDErrorCode(4001, 'Fail to send config file to {server}')
+EC_CONFIG_CONFLICT_PORT = OBDErrorCodeTemplate(1000, 'Configuration conflict {server1}:{port} port is used for {server2}\'s {key}')
+EC_CONFLICT_PORT = OBDErrorCodeTemplate(1001, '{server}:{port} port is already used')
+EC_FAIL_TO_INIT_PATH = OBDErrorCodeTemplate(1002, 'Fail to init {server} {key}{msg}')
+EC_CLEAN_PATH_FAILED = OBDErrorCodeTemplate(1003, 'Fail to clean {server}:{path}')
+EC_CONFIG_CONFLICT_DIR = OBDErrorCodeTemplate(1004, 'Configuration conflict {server1}: {path} is used for {server2}\'s {key}')
+EC_SOME_SERVER_STOPED = OBDErrorCodeTemplate(1005, 'Some of the servers in the cluster have been stopped')
+EC_FAIL_TO_CONNECT = OBDErrorCodeTemplate(1006, 'Failed to connect to {component}')
+EC_ULIMIT_CHECK = OBDErrorCodeTemplate(1007, '({server}) {key} must not be less than {need} (Current value: {now})')
+EC_FAILED_TO_GET_AIO_NR = OBDErrorCodeTemplate(1008, '({ip}) failed to get fs.aio-max-nr and fs.aio-nr')
+EC_NEED_CONFIG = OBDErrorCodeTemplate(1009, '{server} {component} need config: {miss_keys}')
+EC_NO_SUCH_NET_DEVICE = OBDErrorCodeTemplate(1010, '{server} No such net interface: {devname}')
+EC_AIO_NOT_ENOUGH = OBDErrorCodeTemplate(1011, '({ip}) Insufficient AIO remaining (Avail: {avail}, Need: {need}), The recommended value of fs.aio-max-nr is 1048576')
+EC_PARAM_CHECK = OBDErrorCodeTemplate(1012, '{errors}')
+EC_SSH_CONNECT = OBDErrorCodeTemplate(1013, '{user}@{ip} connect failed: {message}')
+
+# error code for observer
+EC_OBSERVER_NOT_ENOUGH_MEMORY = OBDErrorCodeTemplate(2000, '({ip}) not enough memory. (Free: {free}, Need: {need})')
+EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE = OBDErrorCodeTemplate(2000, '({ip}) not enough memory. (Available: {available}, Need: {need})')
+EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED = OBDErrorCodeTemplate(2000, '({ip}) not enough memory. (Free: {free}, Buff/Cache: {cached}, Need: {need})')
+EC_OBSERVER_CAN_NOT_MIGRATE_IN = OBDErrorCodeTemplate(2001, 'server can not migrate in')
+EC_OBSERVER_FAIL_TO_START = OBDErrorCodeTemplate(2002, 'Failed to start {server} observer')
+EC_OBSERVER_FAIL_TO_START_WITH_ERR = OBDErrorCodeTemplate(2002, 'Failed to start {server} observer: {stderr}')
+EC_OBSERVER_NOT_ENOUGH_DISK = OBDErrorCodeTemplate(2003, '({ip}) {disk} not enough disk space. (Avail: {avail}, Need: {need})')
+EC_OBSERVER_NOT_ENOUGH_DISK_4_CLOG = OBDErrorCodeTemplate(2003, '({ip}) {path} not enough disk space for clog. Use redo_dir to set other disk for clog, or reduce the value of datafile_size')
+EC_OBSERVER_INVALID_MODFILY_GLOBAL_KEY = OBDErrorCodeTemplate(2004, 'Invalid: {key} is not a single server configuration item')
+EC_OBSERVER_FAILED_TO_REGISTER = OBDErrorCodeTemplate(2005, 'Failed to register cluster.')
+EC_OBSERVER_FAILED_TO_REGISTER_WITH_DETAILS = OBDErrorCodeTemplate(2005, 'Failed to register cluster. {appname} may have been registered in {obconfig_url}.')
+EC_OBSERVER_MULTI_NET_DEVICE = OBDErrorCodeTemplate(2006, '{ip} has more than one network interface. Please set `devname` for ({server})')
+EC_OBSERVER_PING_FAILED = OBDErrorCodeTemplate(2007, '{ip1} {devname} fail to ping {ip2}. Please check configuration `devname`')
+EC_OBSERVER_TIME_OUT_OF_SYNC = OBDErrorCodeTemplate(2008, 'Cluster clocks are out of sync')
+EC_OBSERVER_PRODUCTION_MODE_LIMIT = OBDErrorCodeTemplate(2009, '({server}): when production_mode is True, {key} can not be less then {limit}')
+EC_OBSERVER_SYS_MEM_TOO_LARGE = OBDErrorCodeTemplate(2010, '({server}): system_memory too large. system_memory must be less than memory_limit/memory_limit_percentage.')
+EC_OBSERVER_GET_MEMINFO_FAIL = OBDErrorCodeTemplate(2011, "{server}: fail to get memory info.\nPlease configure 'memory_limit' manually in configuration file")
+
+# error code for test commands
+EC_MYSQLTEST_PARSE_CMD_FAILED = OBDErrorCodeTemplate(3000, 'parse cmd failed: {path}')
+EC_MYSQLTEST_FAILE_NOT_FOUND = OBDErrorCodeTemplate(3001, '{file} not found in {path}')
+EC_TPCC_LOAD_DATA_FAILED = OBDErrorCodeTemplate(3002, 'Failed to load data.')
+EC_TPCC_RUN_TEST_FAILED = OBDErrorCodeTemplate(3003, 'Failed to run TPC-C benchmark.')
+
+# error code for other components.
+# obagent
+EC_OBAGENT_RELOAD_FAILED = OBDErrorCodeTemplate(4000, 'Fail to reload {server}')
+EC_OBAGENT_SEND_CONFIG_FAILED = OBDErrorCodeTemplate(4001, 'Fail to send config file to {server}')
+# obproxy
+EC_OBPROXY_NEED_CONFIG = OBDErrorCodeTemplate(4100, '{server} need config "rs_list" or "obproxy_config_server_url"')
+EC_OBPROXY_START_FAILED = OBDErrorCodeTemplate(4101, 'failed to start {server} obproxy: {stderr}')
+# grafana
+EC_GRAFANA_DEFAULT_PWD = OBDErrorCodeTemplate(4200, "{server} grafana admin password should not be 'admin'")
+EC_GRAFANA_PWD_LESS_5 = OBDErrorCodeTemplate(4201, "{server} grafana admin password length should not be less than 5")
+# ocp express
+EC_OCP_EXPRESS_JAVA_NOT_FOUND = OBDErrorCodeTemplate(4300, "{server}: failed to query java version, you may not have java installed")
+EC_OCP_EXPRESS_JAVA_VERSION_ERROR = OBDErrorCodeTemplate(4301, "{server}: ocp-express need java with version {version}")
+EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY = OBDErrorCodeTemplate(4302, '({ip}) not enough memory. (Free: {free}, Need: {need})')
+EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_AVAILABLE = OBDErrorCodeTemplate(4302, '({ip}) not enough memory. (Available: {available}, Need: {need})')
+EC_OCP_EXPRESS_NOT_ENOUGH_MEMORY_CACHED = OBDErrorCodeTemplate(4302, '({ip}) not enough memory. (Free: {free}, Buff/Cache: {cached}, Need: {need})')
+EC_OCP_EXPRESS_NOT_ENOUGH_DISK = OBDErrorCodeTemplate(4303, '({ip}) {disk} not enough disk space. (Avail: {avail}, Need: {need})')
+EC_OCP_EXPRESS_DEPENDS_COMP_VERSION = OBDErrorCodeTemplate(4304, 'OCP express {ocp_express_version} needs to use {comp} with version {comp_version} or above')
+EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK_AVAILABLE = OBDErrorCodeTemplate(4305, 'There is not enough log disk for ocp meta tenant. (Avail: {avail}, Need: {need})')
+EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK = OBDErrorCodeTemplate(4305, 'There is not enough log disk for ocp meta tenant.')
+EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_MEM = OBDErrorCodeTemplate(4305, 'There is not enough memory for ocp meta tenant')
+
+# sql
+EC_SQL_EXECUTE_FAILED = OBDErrorCodeTemplate(5000, "{sql} execute failed")
 
 # WARN CODE
-WC_ULIMIT_CHECK = OBDErrorCode(1007, '({server}) The recommended number of {key} is {need} (Current value: {now})')
\ No newline at end of file
+WC_ULIMIT_CHECK = OBDErrorCodeTemplate(1007, '({server}) The recommended number of {key} is {need} (Current value: {now})')
+WC_AIO_NOT_ENOUGH = OBDErrorCodeTemplate(1011, '({ip}) The recommended value of fs.aio-max-nr is 1048576 (Current value: {current})')
+WC_OBSERVER_SAME_DISK = OBDErrorCodeTemplate(1012, '({ip}) clog and data use the same disk ({disk})')
+WC_OBSERVER_SYS_MEM_TOO_LARGE = OBDErrorCodeTemplate(2010, '({server}): system_memory too large. system_memory should be less than {factor} * memory_limit/memory_limit_percentage.')
+WC_OCP_EXPRESS_FAILED_TO_GET_DISK_INFO = OBDErrorCodeTemplate(4303, '({ip}) failed to get disk information, skip disk space check')
+
+# SUGGESTION for ERROR
+SUG_SET_CONFIG = OBDErrorSuggestionTemplate('Please set config {key} correctly')
+SUG_INCREASE_CONFIG = OBDErrorSuggestionTemplate('Please increase the {key} in configuration')
+SUG_DECREASE_CONFIG = OBDErrorSuggestionTemplate('Please decrease the {key} in configuration')
+SUG_PORT_CONFLICTS = OBDErrorSuggestionTemplate('Please adjust the configuration to avoid port conflicts')
+SUG_USE_OTHER_PORT = OBDErrorSuggestionTemplate('Please choose another unoccupied port or terminate the process occupying the port')
+SUG_NO_SUCH_NET_DEVIC = OBDErrorSuggestionTemplate('Please set the network interface corresponding to {ip} to `devname`', fix_eval=[FixEval(FixEval.DEL, 'devname')])
+SUG_CONFIG_CONFLICT_DIR = OBDErrorSuggestionTemplate('Please specify a new `{key}` for the {server}')
+SUG_CONFIRM_OS = OBDErrorSuggestionTemplate('Please confirm whether the deployment node is a compatible operating system')
+SUG_SPECIFY_PATH = OBDErrorSuggestionTemplate('Please specify the path again')
+SUG_SET_DEVICE = OBDErrorSuggestionTemplate('Please set the correct network device name to devname')
+SUG_USE_SEPARATE_DISKS = OBDErrorSuggestionTemplate('Please use separate disks for redo_dir and data_dir')
+SUG_USE_ANOTHER_DEVICE = OBDErrorSuggestionTemplate('Please specify {dir} to another disk with enough space')
+SUB_SET_NO_PRODUCTION_MODE = OBDErrorSuggestionTemplate('Please set production_mode to false', True, [FixEval(FixEval.SET, 'production_mode', False)])
+SUG_CONFIRM_CONFIG_SERVER = OBDErrorSuggestionTemplate('Please confirm that the ob config service is running normally and that obproxy_config_server_url can be connected correctly'),
+SUG_USE_RS_LIST = OBDErrorSuggestionTemplate('Instead of using ob config service, please use rs_list configuration in obproxy to proxy observer')
+SUG_GRAFANA_PWD = OBDErrorSuggestionTemplate('Grafana password length must be greater than 4 and not "admin"', True, [FixEval(FixEval.DEL, 'login_password', is_global=True)])
+SUG_PARAM_CHECK = OBDErrorSuggestionTemplate('Please check your config')
+SUG_SSH_FAILED = OBDErrorSuggestionTemplate('Please check user config and network')
+SUG_SYSCTL = OBDErrorSuggestionTemplate('Please execute `echo ‘{var}={value}’ >> /etc/sysctl.conf; sysctl -p` as root in {ip}.')
+SUG_ULIMIT = OBDErrorSuggestionTemplate('Please execute `echo -e "* soft {name} {value}\\n* hard {name} {value}" >> /etc/security/limits.d/{name}.conf` as root in {ip}. if it dosen\'t work, please check whether UsePAM is yes in /etc/ssh/sshd_config.')
+SUG_CONNECT_EXCEPT = OBDErrorSuggestionTemplate('Connection exception or unsupported OS. Please retry or contact us.')
+SUG_UNSUPPORT_OS = OBDErrorSuggestionTemplate('It may be an unsupported OS, please contact us for assistance')
+SUG_OBSERVER_SYS_MEM_TOO_LARGE = OBDErrorSuggestionTemplate('`system_memory` should be less than {factor} * memory_limit/memory_limit_percentage.', fix_eval=[FixEval(FixEval.DEL, 'system_memory')])
+SUG_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE = OBDErrorSuggestionTemplate('Please execute `echo 1 > /proc/sys/vm/drop_caches` as root in {ip} to rlease cached.')
+SUG_OBSERVER_REDUCE_MEM = OBDErrorSuggestionTemplate('Please reduce the `memory_limit` or `memory_limit_percentage`', fix_eval=[FixEval(FixEval.DEL, 'memory_limit'), FixEval(FixEval.DEL, 'system_memory'), FixEval(FixEval.DEL, 'memory_limit_percentage')])
+SUG_OBSERVER_SAME_DISK = OBDErrorSuggestionTemplate('Configure `redo_dir` and `data_dir` to different disks')
+SUG_OBSERVER_NOT_ENOUGH_DISK = OBDErrorSuggestionTemplate('Please reduce the `datafile_size` or `datafile_disk_percentage`', fix_eval=[FixEval(FixEval.DEL, 'datafile_size'), FixEval(FixEval.DEL, 'datafile_disk_percentage')])
+SUG_OBSERVER_REDUCE_REDO = OBDErrorSuggestionTemplate('Please reduce the `log_disk_size` or `log_disk_percentage`', fix_eval=[FixEval(FixEval.DEL, 'log_disk_size'), FixEval(FixEval.DEL, 'log_disk_percentage')])
+SUG_OBSERVER_NOT_ENOUGH_DISK_4_CLOG = OBDErrorSuggestionTemplate('Please increase the `clog_disk_utilization_threshold` and `clog_disk_usage_limit_percentage`', fix_eval=[FixEval(FixEval.DEL, 'clog_disk_utilization_threshold'), FixEval(FixEval.DEL, 'clog_disk_usage_limit_percentage')])
+SUG_OBSERVER_TIME_OUT_OF_SYNC = OBDErrorSuggestionTemplate('Please enable clock synchronization service')
+SUG_OCP_EXPRESS_INSTALL_JAVA_WITH_VERSION = OBDErrorSuggestionTemplate('Please install java with version {version}. If java is already installed, please set `java_bin` to the expected java binary path')
+SUG_OCP_EXPRESS_NOT_ENOUGH_MEMORY_AVALIABLE = OBDErrorSuggestionTemplate('Please execute `echo 1 > /proc/sys/vm/drop_caches` as root in {ip} to rlease cached.')
+SUG_OCP_EXPRESS_REDUCE_MEM = OBDErrorSuggestionTemplate('Please reduce the `memory_size`', fix_eval=[FixEval(FixEval.DEL, 'memory_size')])
+SUG_OCP_EXPRESS_REDUCE_DISK = OBDErrorSuggestionTemplate('Please reduce the `logging_file_total_size_cap`', fix_eval=[FixEval(FixEval.DEL, 'logging_file_total_size_cap')])
+SUG_OCP_EXPRESS_COMP_VERSION = OBDErrorSuggestionTemplate('Please use {comp} with version {version} or above')
+SUG_OCP_EXPRESS_REDUCE_META_DB_MEM = OBDErrorSuggestionTemplate('Please reduce the `ocp_meta_tenant_memory_size`', fix_eval=[FixEval(FixEval.DEL, 'ocp_meta_tenant_memory_size')])
+SUG_OCP_EXPRESS_REDUCE_META_DB_LOG_DISK = OBDErrorSuggestionTemplate('Please reduce the `ocp_meta_tenant_log_disk_size`', fix_eval=[FixEval(FixEval.DEL, 'ocp_meta_tenant_log_disk_size')])
\ No newline at end of file
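
A sketch of how a suggestion's `fix_eval` list could be applied during an auto-fix pass; the apply loop below is illustrative, not the OBD implementation, and assumes the `FixEval` and `SUB_SET_NO_PRODUCTION_MODE` definitions above:

    def apply_fix_eval(config, fix_evals):
        for fe in fix_evals:
            if fe.operation == FixEval.DEL:
                config.pop(fe.key, None)      # drop the key, fall back to the default
            elif fe.operation == FixEval.SET:
                config[fe.key] = fe.value

    conf = {'production_mode': True, 'memory_limit': '4G'}
    suggestion = SUB_SET_NO_PRODUCTION_MODE.format()
    if suggestion.auto_fix:
        apply_fix_eval(conf, suggestion.fix_eval)
    assert conf['production_mode'] is False
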
@@ -188,6 +188,12 @@ class EXLock(Lock):
         self.mix_lock.ex_unlock()
 
 
+class LockMode(Enum):
+
+    NO_LOCK = 0
+    DEPLOY_SHARED_LOCK = 1
+    DEFAULT = 2
+
 
 class LockManager(Manager):
 
     TRY_TIMES = 6000
@@ -198,12 +204,13 @@ class LockManager(Manager):
     MIR_REPO_FN = LockType.MIR_REPO.value
     DEPLOY_FN_PERFIX = LockType.DEPLOY.value
     LOCKS = {}
 
     def __init__(self, home_path, stdio=None):
         super(LockManager, self).__init__(home_path, stdio)
         self.locks = []
         self.global_path = os.path.join(self.path, self.GLOBAL_FN)
         self.mir_repo_path = os.path.join(self.path, self.MIR_REPO_FN)
+        self.mode = LockMode.DEFAULT
 
     @staticmethod
     def set_try_times(try_times):
@@ -226,14 +233,26 @@ class LockManager(Manager):
     @classmethod
     def shutdown(cls):
         for path in cls.LOCKS:
-            cls.LOCKS[path] = None
+            cls.LOCKS[path]._unlock()
         cls.LOCKS = None
 
+    def set_lock_mode(self, mode):
+        for key in LockMode:
+            if key.value == mode:
+                mode = key
+                break
+        if not isinstance(mode, LockMode) or mode not in LockMode:
+            getattr(self.stdio, 'verbose', print)('unknown lock mode {}'.format(mode))
+            return
+        self.stdio and getattr(self.stdio, 'verbose', print)('set lock mode to {}({})'.format(mode.name, mode.value))
+        self.mode = mode
+
     def _lock(self, path, clz):
-        mix_lock = self._get_mix_lock(path)
-        lock = clz(mix_lock)
-        lock.lock()
-        self.locks.append(lock)
+        if self.mode != LockMode.NO_LOCK:
+            mix_lock = self._get_mix_lock(path)
+            lock = clz(mix_lock)
+            lock.lock()
+            self.locks.append(lock)
         return True
 
     def _sh_lock(self, path):
@@ -258,7 +277,10 @@ class LockManager(Manager):
         return os.path.join(self.path, '%s_%s' % (self.DEPLOY_FN_PERFIX, deploy_name))
 
     def deploy_ex_lock(self, deploy_name):
-        return self._ex_lock(self._deploy_lock_fp(deploy_name))
+        if self.mode == LockMode.DEPLOY_SHARED_LOCK:
+            return self._sh_lock(self._deploy_lock_fp(deploy_name))
+        else:
+            return self._ex_lock(self._deploy_lock_fp(deploy_name))
 
     def deploy_sh_lock(self, deploy_name):
         return self._sh_lock(self._deploy_lock_fp(deploy_name))
......
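
A self-contained reduction of the mode resolution in `set_lock_mode` (simplified: the real method keeps the current mode on unknown input instead of falling back to DEFAULT):

    from enum import Enum

    class LockMode(Enum):
        NO_LOCK = 0
        DEPLOY_SHARED_LOCK = 1
        DEFAULT = 2

    def resolve_mode(raw):
        for key in LockMode:
            if key.value == raw:
                return key
        return LockMode.DEFAULT

    assert resolve_mode(1) is LockMode.DEPLOY_SHARED_LOCK
    # Under DEPLOY_SHARED_LOCK, deploy_ex_lock() degrades to a shared lock, so
    # read-only commands can run while another obd process holds the deploy.
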
@@ -24,6 +24,7 @@ from __future__ import absolute_import, division, print_function
 import re
 import os
 import sys
+import tempfile
 import time
 import pickle
 import string
@@ -33,6 +34,7 @@ from glob import glob
 from enum import Enum
 from copy import deepcopy
 from xml.etree import cElementTree
+from ssh import LocalClient
 
 try:
     from ConfigParser import ConfigParser
 except:
@@ -257,6 +259,7 @@ class RemoteMirrorRepository(MirrorRepository):
         self.gpgcheck = False
         self._db = None
         self._repomds = None
+        self._available = None
         super(RemoteMirrorRepository, self).__init__(mirror_path, stdio=stdio)
         self.section_name = meta_data['section_name']
         self.baseurl = meta_data['baseurl']
@@ -270,6 +273,17 @@ class RemoteMirrorRepository(MirrorRepository):
             if repo_age > self.repo_age or int(time.time()) - 86400 > self.repo_age:
                 self.repo_age = repo_age
                 self.update_mirror()
 
+    @property
+    def available(self):
+        if self._available is None:
+            try:
+                req = requests.request('get', self.baseurl)
+                self._available = req.status_code < 400
+            except Exception:
+                self.stdio and getattr(self.stdio, 'exception', print)('')
+                self._available = False
+        return self._available
+
     @property
     def db(self):
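
The probe above counts any HTTP status below 400 as available. A standalone equivalent, with an illustrative mirror URL:

    import requests

    def mirror_available(baseurl):
        try:
            return requests.request('get', baseurl).status_code < 400
        except Exception:
            return False   # unreachable hosts are simply marked unavailable

    # URL is illustrative only
    print(mirror_available('https://mirrors.aliyun.com/oceanbase/community/stable/'))
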
@@ -384,16 +398,19 @@ class RemoteMirrorRepository(MirrorRepository):
         self.get_repomds(True)
         primary_repomd = self._get_repomd_by_type(self.PRIMARY_REPOMD_TYPE)
         if not primary_repomd:
+            self._available = False
             self.stdio and getattr(self.stdio, 'stop_loading')('fail')
             return False
         file_path = self._get_repomd_data_file(primary_repomd)
         if not file_path:
+            self._available = False
             self.stdio and getattr(self.stdio, 'stop_loading')('fail')
             return False
         self._db = None
         self.repo_age = int(time.time())
         self._dump_repo_age_data()
         self.stdio and getattr(self.stdio, 'stop_loading')('succeed')
+        self._available = True
         return True
 
     def get_repomds(self, update=False):
@@ -573,7 +590,8 @@ class RemoteMirrorRepository(MirrorRepository):
             return True
         except:
             FileUtil.rm(save_path)
-            stdio and getattr(stdio, 'exception', print)('Failed to download %s to %s' % (url, save_path))
+            stdio and getattr(stdio, 'warn', print)('Failed to download %s to %s' % (url, save_path))
+            stdio and getattr(stdio, 'exception', print)('')
         return False
 
 class LocalMirrorRepository(MirrorRepository):
@@ -586,6 +604,7 @@ class LocalMirrorRepository(MirrorRepository):
         self.db = {}
         self.db_path = os.path.join(mirror_path, self._DB_FILE)
         self.enabled = '-'
+        self.available = True
         self._load_db()
 
     @property
@@ -1050,3 +1069,46 @@ class MirrorRepositoryManager(Manager):
             mirror_section.meta_data['repo_age'] = repo_age
         self.stdio and getattr(self.stdio, 'stop_loading')('succeed')
         return True
+
+    def add_repo(self, url):
+        self._lock()
+        download_file_save_name = url.split('/')[-1]
+        if not download_file_save_name.endswith(".repo"):
+            self.stdio.error("Can't download. Please use a file in .repo format.")
+            return False
+        download_file_save_path = os.path.join(self.remote_path, download_file_save_name)
+        if os.path.exists(download_file_save_path):
+            if not self.stdio.confirm("the repo file you want to add already exists, overwrite it?"):
+                self.stdio.print("exit without any changes")
+                return True
+        try:
+            download_file_res = requests.get(url, timeout=(5, 5))
+        except Exception as e:
+            self.stdio.exception("Failed to download repository file")
+            return False
+        download_status_code = download_file_res.status_code
+        if download_status_code != 200:
+            self.stdio.verbose("http code: {}, http body: {}".format(download_status_code, download_file_res.text))
+            self.stdio.error("Failed to download repository file")
+            return False
+        try:
+            with tempfile.NamedTemporaryFile(mode='w+', suffix='.repo') as tf:
+                tf.write(download_file_res.content.decode(encoding='utf8'))
+                tf.seek(0)
+                ConfigParser().readfp(tf)
+                tf.seek(0)
+                if LocalClient.put_file(tf.name, download_file_save_path, stdio=self.stdio):
+                    self.stdio.print("repo file saved to {}".format(download_file_save_path))
+                    return True
+                else:
+                    self.stdio.error("Failed to save repository file")
+                    return False
+        except Exception as e:
+            self.stdio.exception("Failed to save repository file")
+            return False
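
The guard order in `add_repo` is: `.repo` suffix, overwrite confirmation, HTTP 200, then a ConfigParser parse before the file reaches the mirror directory. A self-contained sketch of that validation gate (py3 ConfigParser names; the code above keeps py2 compatibility via `readfp`):

    from configparser import ConfigParser

    def looks_like_repo(name, body):
        if not name.endswith('.repo'):
            return False
        try:
            ConfigParser().read_string(body)   # counterpart of readfp() above
            return True
        except Exception:
            return False

    print(looks_like_repo('oceanbase.repo', '[oceanbase]\nbaseurl=https://example.com\n'))  # True
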
@@ -25,7 +25,7 @@ import re
 import sys
 from enum import Enum
 from glob import glob
-from copy import deepcopy
+from copy import deepcopy, copy
 
 from _manager import Manager
 from _rpm import Version
@@ -47,7 +47,7 @@ class PluginType(Enum):
 
 class Plugin(object):
 
     PLUGIN_TYPE = None
     FLAG_FILE = None
@@ -67,6 +67,33 @@ class Plugin(object):
         return self.PLUGIN_TYPE
 
 
+class PluginContextNamespace:
+
+    def __init__(self, spacename):
+        self.spacename = spacename
+        self._variables = {}
+        self._return = {}
+
+    @property
+    def variables(self):
+        return self._variables
+
+    def get_variable(self, name):
+        return self._variables.get(name)
+
+    def set_variable(self, name, value):
+        self._variables[name] = value
+
+    def get_return(self, plugin_name):
+        ret = self._return.get(plugin_name)
+        if isinstance(ret, PluginReturn):
+            return ret
+        return None
+
+    def set_return(self, plugin_name, plugin_return):
+        self._return[plugin_name] = plugin_return
+
+
 class PluginReturn(object):
 
     def __init__(self, value=False, *arg, **kwargs):
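
A runnable sketch of cross-plugin sharing through one namespace (one namespace per component per run; the plugin names here are illustrative), assuming the `PluginContextNamespace` and `PluginReturn` definitions in this file:

    ns = PluginContextNamespace('demo-deploy')
    ns.set_variable('root_password', '******')      # stashed by an early plugin
    print(ns.get_variable('root_password'))         # read by a later plugin

    ret = PluginReturn()
    ret.return_true(cursor='<db-cursor>')
    ns.set_return('connect', ret)                   # publish a plugin's result
    print(ns.get_return('connect').get_return('cursor'))
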
@@ -83,7 +110,7 @@ class PluginReturn(object):
     @property
     def value(self):
         return self._return_value
 
     @property
     def args(self):
         return self._return_args
@@ -91,11 +118,9 @@ class PluginReturn(object):
     @property
     def kwargs(self):
         return self._return_kwargs
 
-    def get_return(self, key):
-        if key in self.kwargs:
-            return self.kwargs[key]
-        return None
+    def get_return(self, key, default=None):
+        return self.kwargs.get(key, default)
 
     def set_args(self, *args):
         self._return_args = args
@@ -105,12 +130,12 @@ class PluginReturn(object):
     def set_return(self, value):
         self._return_value = value
 
     def return_true(self, *args, **kwargs):
         self.set_return(True)
         self.set_args(*args)
         self.set_kwargs(**kwargs)
 
     def return_false(self, *args, **kwargs):
         self.set_return(False)
         self.set_args(*args)
@@ -119,25 +144,48 @@ class PluginContext(object):
 
 class PluginContext(object):
 
-    def __init__(self, components, clients, cluster_config, cmd, options, dev_mode, stdio):
+    def __init__(self, plugin_name, namespace, namespaces, deploy_name, repositories, components, clients, cluster_config, cmd, options, dev_mode, stdio):
+        self.namespace = namespace
+        self.namespaces = namespaces
+        self.deploy_name = deploy_name
+        self.repositories = repositories
+        self.plugin_name = plugin_name
         self.components = components
         self.clients = clients
         self.cluster_config = cluster_config
-        self.cmd = cmd
+        self.cmds = cmd
         self.options = options
         self.dev_mode = dev_mode
         self.stdio = stdio
         self.concurrent_executor = ConcurrentExecutor(32)
         self._return = PluginReturn()
 
-    def get_return(self):
-        return self._return
+    def get_return(self, plugin_name=None, spacename=None):
+        if spacename:
+            namespace = self.namespaces.get(spacename)
+        else:
+            namespace = self.namespace
+        if plugin_name is None:
+            plugin_name = self.plugin_name
+        return namespace.get_return(plugin_name) if namespace else None
 
     def return_true(self, *args, **kwargs):
         self._return.return_true(*args, **kwargs)
+        self.namespace.set_return(self.plugin_name, self._return)
 
     def return_false(self, *args, **kwargs):
         self._return.return_false(*args, **kwargs)
+        self.namespace.set_return(self.plugin_name, self._return)
+
+    def get_variable(self, name, spacename=None):
+        if spacename:
+            namespace = self.namespaces.get(spacename)
+        else:
+            namespace = self.namespace
+        return namespace.get_variable(name) if namespace else None
+
+    def set_variable(self, name, value):
+        self.namespace.set_variable(name, value)
class SubIO(object): class SubIO(object):
...@@ -148,7 +196,7 @@ class SubIO(object): ...@@ -148,7 +196,7 @@ class SubIO(object):
def __del__(self): def __del__(self):
self.before_close() self.before_close()
def _temp_function(self, *arg, **kwargs): def _temp_function(self, *arg, **kwargs):
pass pass
@@ -192,13 +240,21 @@ class ScriptPlugin(Plugin):
     def __del__(self):
         self._export()
 
-    def before_do(self, components, clients, cluster_config, cmd, options, stdio, *arg, **kwargs):
+    def before_do(
+            self, plugin_name, namespace, namespaces, deploy_name,
+            repositories, components, clients, cluster_config, cmd,
+            options, stdio, *arg, **kwargs
+    ):
         self._import(stdio)
         sub_stdio = SubIO(stdio)
         sub_clients = {}
         for server in clients:
             sub_clients[server] = ScriptPlugin.ClientForScriptPlugin(clients[server], sub_stdio)
-        self.context = PluginContext(components, sub_clients, cluster_config, cmd, options, self.dev_mode, sub_stdio)
+        self.context = PluginContext(
+            plugin_name, namespace, namespaces, deploy_name, repositories, components,
+            sub_clients, cluster_config, cmd, options, self.dev_mode, sub_stdio
+        )
+        namespace.set_return(plugin_name, None)
 
     def after_do(self, stdio, *arg, **kwargs):
         self._export(stdio)
@@ -206,17 +262,28 @@ class ScriptPlugin(Plugin):
 
 def pyScriptPluginExec(func):
 
-    def _new_func(self, components, clients, cluster_config, cmd, options, stdio, *arg, **kwargs):
-        self.before_do(components, clients, cluster_config, cmd, options, stdio, *arg, **kwargs)
+    def _new_func(
+            self, namespace, namespaces, deploy_name,
+            repositories, components, clients, cluster_config, cmd,
+            options, stdio, *arg, **kwargs
+    ):
+        self.before_do(self.name, namespace, namespaces, deploy_name,
+                       repositories, components, clients, cluster_config, cmd,
+                       options, stdio, *arg, **kwargs)
         if self.module:
             method_name = func.__name__
             method = getattr(self.module, method_name, False)
+            namespace_vars = copy(self.context.namespace.variables)
+            namespace_vars.update(kwargs)
+            kwargs = namespace_vars
             if method:
                 try:
-                    method(self.context, *arg, **kwargs)
+                    ret = method(self.context, *arg, **kwargs)
+                    if ret is None and self.context and self.context.get_return() is None:
+                        self.context.return_false()
                 except Exception as e:
+                    self.context.return_false(exception=e)
                     stdio and getattr(stdio, 'exception', print)('%s RuntimeError: %s' % (self, e))
-                    pass
         ret = self.context.get_return() if self.context else PluginReturn()
         self.after_do(stdio, *arg, **kwargs)
         return ret
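
The kwargs injection above hands plugin functions the namespace variables as implicit keyword arguments, with explicit call kwargs winning on conflict. A minimal reduction:

    from copy import copy

    namespace_vars = {'cursor': '<shared-cursor>', 'retries': 3}
    call_kwargs = {'retries': 5}

    kwargs = copy(namespace_vars)
    kwargs.update(call_kwargs)
    assert kwargs == {'cursor': '<shared-cursor>', 'retries': 5}
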
@@ -226,45 +293,57 @@ def pyScriptPluginExec(func):
 
 class PyScriptPlugin(ScriptPlugin):
 
     LIBS_PATH = []
-    PLUGIN_COMPONENT_NAME = None
+    PLUGIN_NAME = None
 
     def __init__(self, component_name, plugin_path, version, dev_mode):
-        if not self.PLUGIN_COMPONENT_NAME:
+        if not self.PLUGIN_NAME:
             raise NotImplementedError
         super(PyScriptPlugin, self).__init__(component_name, plugin_path, version, dev_mode)
         self.module = None
+        self.name = self.PLUGIN_NAME
         self.libs_path = deepcopy(self.LIBS_PATH)
         self.libs_path.append(self.plugin_path)
 
-    def __call__(self, clients, cluster_config, cmd, options, stdio, *arg, **kwargs):
-        method = getattr(self, self.PLUGIN_COMPONENT_NAME, False)
+    def __call__(
+            self, namespace, namespaces, deploy_name,
+            repositories, components, clients, cluster_config, cmd,
+            options, stdio, *arg, **kwargs
+    ):
+        method = getattr(self, self.PLUGIN_NAME, False)
         if method:
-            return method(clients, cluster_config, cmd, options, stdio, *arg, **kwargs)
+            return method(
+                namespace, namespaces, deploy_name,
+                repositories, components, clients, cluster_config, cmd,
+                options, stdio, *arg, **kwargs
+            )
         else:
             raise NotImplementedError
 
     def _import(self, stdio=None):
         if self.module is None:
             DynamicLoading.add_libs_path(self.libs_path)
-            self.module = DynamicLoading.import_module(self.PLUGIN_COMPONENT_NAME, stdio)
+            self.module = DynamicLoading.import_module(self.PLUGIN_NAME, stdio)
 
     def _export(self, stdio=None):
         if self.module:
             DynamicLoading.remove_libs_path(self.libs_path)
-            DynamicLoading.export_module(self.PLUGIN_COMPONENT_NAME, stdio)
+            DynamicLoading.export_module(self.PLUGIN_NAME, stdio)
 
 
 # this is PyScriptPlugin demo
 # class InitPlugin(PyScriptPlugin):
 #     FLAG_FILE = 'init.py'
-#     PLUGIN_COMPONENT_NAME = 'init'
+#     PLUGIN_NAME = 'init'
 #     PLUGIN_TYPE = PluginType.INIT
 
 #     def __init__(self, component_name, plugin_path, version):
 #         super(InitPlugin, self).__init__(component_name, plugin_path, version)
 
 #     @pyScriptPluginExec
-#     def init(self, components, ssh_clients, cluster_config, cmd, options, stdio, *arg, **kwargs):
+#     def init(
+#             self, namespace, namespaces, deploy_name,
+#             repositories, components, clients, cluster_config, cmd,
+#             options, stdio, *arg, **kwargs):
 #         pass
 
 
 class Null(object):
@@ -353,7 +432,7 @@ class ParamPlugin(Plugin):
                     raise Exception('Invalid Value')
             else:
                 self._value = 0
 
     class Time(ConfigItemType):
 
         UNITS = {
@@ -384,7 +463,7 @@ class ParamPlugin(Plugin):
                 self._value = 0
 
     class Capacity(ConfigItemType):
 
         UNITS = {"B": 1, "K": 1<<10, "M": 1<<20, "G": 1<<30, "T": 1<<40, 'P': 1 << 50}
 
         def _format(self):

@@ -396,7 +475,7 @@ class ParamPlugin(Plugin):
             else:
                 r = re.match('^(\d+)(\w)B?$', self._origin.upper())
                 n, u = r.groups()
                 unit = self.UNITS.get(u.upper())
                 if unit:
                     self._value = int(n) * unit
                 else:
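
A self-contained sketch of the unit-suffixed branch of the capacity parser shown above (digit-only values take the other branch and are not covered here):

    import re

    UNITS = {"B": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, "P": 1 << 50}

    def parse_capacity(origin):
        r = re.match(r'^(\d+)(\w)B?$', origin.upper())   # accepts '16G' or '16GB'
        n, u = r.groups()
        return int(n) * UNITS[u]

    assert parse_capacity('16G') == 16 * (1 << 30)
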
@@ -496,18 +575,27 @@ class ParamPlugin(Plugin):
         def __init__(
                 self,
                 name,
                 param_type=str,
                 default=None,
                 min_value=None,
                 max_value=None,
                 require=False,
-                need_restart=False,
+                essential=False,
+                section="",
+                need_reload=False,
+                need_restart=False,
                 need_redeploy=False,
-                modify_limit=None
+                modify_limit=None,
+                name_local=None,
+                description_en=None,
+                description_local=None
         ):
             self.name = name
             self.default = default
             self.require = require
+            self.essential = essential
+            self.section = section
+            self.need_reload = need_reload
             self.need_restart = need_restart
             self.need_redeploy = need_redeploy
             self._param_type = param_type

@@ -515,6 +603,9 @@ class ParamPlugin(Plugin):
             self.max_value = param_type(max_value) if max_value is not None else None
             self.modify_limit = getattr(self, ('_%s_limit' % modify_limit).lower(), self._none_limit)
             self.had_modify_limit = self.modify_limit != self._none_limit
+            self.name_local = name_local if name_local is not None else self.name
+            self.description_en = description_en
+            self.description_local = description_local if description_local is not None else self.description_en
 
         def param_type(self, value):
             try:
...@@ -535,12 +626,12 @@ class ParamPlugin(Plugin): ...@@ -535,12 +626,12 @@ class ParamPlugin(Plugin):
if old_value == new_value: if old_value == new_value:
return True return True
raise Exception('DO NOT modify %s after startup' % self.name) raise Exception('DO NOT modify %s after startup' % self.name)
def _increase_limit(self, old_value, new_value): def _increase_limit(self, old_value, new_value):
if self.param_type(new_value) > self.param_type(old_value): if self.param_type(new_value) > self.param_type(old_value):
raise Exception('DO NOT increase %s after startup' % self.name) raise Exception('DO NOT increase %s after startup' % self.name)
return True return True
def _decrease_limit(self, old_value, new_value): def _decrease_limit(self, old_value, new_value):
if self.param_type(new_value) < self.param_type(old_value): if self.param_type(new_value) < self.param_type(old_value):
raise Exception('DO NOT decrease %s after startup' % self.name) raise Exception('DO NOT decrease %s after startup' % self.name)
...@@ -548,7 +639,7 @@ class ParamPlugin(Plugin): ...@@ -548,7 +639,7 @@ class ParamPlugin(Plugin):
def _none_limit(self, old_value, new_value): def _none_limit(self, old_value, new_value):
return True return True
PLUGIN_TYPE = PluginType.PARAM PLUGIN_TYPE = PluginType.PARAM
DEF_PARAM_YAML = 'parameter.yaml' DEF_PARAM_YAML = 'parameter.yaml'
FLAG_FILE = DEF_PARAM_YAML FLAG_FILE = DEF_PARAM_YAML
@@ -598,8 +689,13 @@ class ParamPlugin(Plugin):
                     max_value=ConfigUtil.get_value_from_dict(conf, 'max_value', None),
                     modify_limit=ConfigUtil.get_value_from_dict(conf, 'modify_limit', None),
                     require=ConfigUtil.get_value_from_dict(conf, 'require', False),
+                    section=ConfigUtil.get_value_from_dict(conf, 'section', ""),
+                    essential=ConfigUtil.get_value_from_dict(conf, 'essential', False),
+                    need_reload=ConfigUtil.get_value_from_dict(conf, 'need_reload', False),
                     need_restart=ConfigUtil.get_value_from_dict(conf, 'need_restart', False),
-                    need_redeploy=ConfigUtil.get_value_from_dict(conf, 'need_redeploy', False)
+                    need_redeploy=ConfigUtil.get_value_from_dict(conf, 'need_redeploy', False),
+                    description_en=ConfigUtil.get_value_from_dict(conf, 'description_en', None),
+                    description_local=ConfigUtil.get_value_from_dict(conf, 'description_local', None),
                 )
             except:
                 pass
@@ -647,7 +743,7 @@ class ParamPlugin(Plugin):
             params = self.params
             for name in params:
                 conf = params[name]
-                temp[conf.name] = conf.default
+                self._params_default[conf.name] = conf.default
         return self._params_default
...@@ -722,7 +818,7 @@ class InstallPlugin(Plugin): ...@@ -722,7 +818,7 @@ class InstallPlugin(Plugin):
def var_replace(cls, string, var): def var_replace(cls, string, var):
if not var: if not var:
return string return string
done = [] done = []
while string: while string:
m = cls._KEYCRE.search(string) m = cls._KEYCRE.search(string)
...@@ -830,7 +926,7 @@ class ComponentPluginLoader(object): ...@@ -830,7 +926,7 @@ class ComponentPluginLoader(object):
if plugins: if plugins:
plugin = max(plugins, key=lambda x: x.version) plugin = max(plugins, key=lambda x: x.version)
# self.stdio and getattr(self.stdio, 'warn', print)( # self.stdio and getattr(self.stdio, 'warn', print)(
# '%s %s plugin version %s not found, use the best suitable version %s.\n Use `obd update` to update local plugin repository' % # '%s %s plugin version %s not found, use the best suitable version %s.\n Use `obd update` to update local plugin repository' %
# (self.component_name, self.PLUGIN_TYPE.name.lower(), version, plugin.version) # (self.component_name, self.PLUGIN_TYPE.name.lower(), version, plugin.version)
# ) # )
return plugin return plugin
...@@ -862,7 +958,7 @@ class PyScriptPluginLoader(ComponentPluginLoader): ...@@ -862,7 +958,7 @@ class PyScriptPluginLoader(ComponentPluginLoader):
class %s(PyScriptPlugin): class %s(PyScriptPlugin):
FLAG_FILE = '%s.py' FLAG_FILE = '%s.py'
PLUGIN_COMPONENT_NAME = '%s' PLUGIN_NAME = '%s'
def __init__(self, component_name, plugin_path, version, dev_mode): def __init__(self, component_name, plugin_path, version, dev_mode):
super(%s, self).__init__(component_name, plugin_path, version, dev_mode) super(%s, self).__init__(component_name, plugin_path, version, dev_mode)
...@@ -872,7 +968,10 @@ class %s(PyScriptPlugin): ...@@ -872,7 +968,10 @@ class %s(PyScriptPlugin):
%s.PLUGIN_TYPE = plugin_type %s.PLUGIN_TYPE = plugin_type
@pyScriptPluginExec @pyScriptPluginExec
def %s(self, components, ssh_clients, cluster_config, cmd, options, stdio, *arg, **kwargs): def %s(
self, namespace, namespaces, deploy_name,
repositories, components, clients, cluster_config, cmd,
options, stdio, *arg, **kwargs):
pass pass
''' % (self.PLUGIN_TYPE.value, script_name, script_name, self.PLUGIN_TYPE.value, self.PLUGIN_TYPE.value, script_name)) ''' % (self.PLUGIN_TYPE.value, script_name, script_name, self.PLUGIN_TYPE.value, self.PLUGIN_TYPE.value, script_name))
clz = locals()[self.PLUGIN_TYPE.value] clz = locals()[self.PLUGIN_TYPE.value]
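The generated entry point now receives deployment-wide context: `namespace`/`namespaces` presumably scope values shared between plugin runs, `deploy_name` and `repositories` identify the deployment, and `clients` replaces the old `ssh_clients` argument. Filling the `%s` placeholders for a hypothetical `start` script gives roughly:

```python
# Rough expansion of the generated entry point for script_name='start'
# (class and decorator context omitted; everything besides the signature
# itself is illustrative).
@pyScriptPluginExec
def start(self, namespace, namespaces, deploy_name,
          repositories, components, clients, cluster_config, cmd,
          options, stdio, *arg, **kwargs):
    pass
```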
......
@@ -30,7 +30,8 @@ from multiprocessing.pool import Pool
 from _rpm import Package, PackageInfo, Version
 from _arch import getBaseArch
-from tool import DirectoryUtil, FileUtil, YamlLoader
+from _environ import ENV_DISABLE_PARALLER_EXTRACT
+from tool import DirectoryUtil, FileUtil, YamlLoader, COMMAND_ENV
 from _manager import Manager
 from _plugin import InstallPlugin
@@ -150,24 +151,24 @@ class ExtractFileInfo(object):
         self.mode = mode


-class ParallerExtractWorker(object):
+class Extractor(object):

     def __init__(self, pkg, files, stdio=None):
         self.pkg = pkg
         self.files = files
         self.stdio = stdio

-    @staticmethod
-    def extract(worker):
-        with worker.pkg.open() as rpm:
-            for info in worker.files:
+    def extract(self):
+        with self.pkg.open() as rpm:
+            for info in self.files:
                 if os.path.exists(info.target_path):
                     continue
                 fd = rpm.extractfile(info.src_path)
-                with FileUtil.open(info.target_path, 'wb', stdio=worker.stdio) as f:
+                with FileUtil.open(info.target_path, 'wb', stdio=self.stdio) as f:
                     FileUtil.copy_fileobj(fd, f)
                 if info.mode != 0o744:
                     os.chmod(info.target_path, info.mode)
+        return True
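`extract()` is now a bound method that reports success, so the same `Extractor` can run inline or inside a pool worker. A hedged usage sketch (`pkg` and `files` stand in for a real `Package` and a list of `ExtractFileInfo` entries):

```python
# Hypothetical serial use of the new Extractor:
extractor = Extractor(pkg, files, stdio=stdio)
if extractor.extract():
    # every target_path now exists with the mode recorded in its file info
    stdio.verbose('extract done')
```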
 class ParallerExtractor(object):
@@ -180,10 +181,30 @@ class ParallerExtractor(object):
         self.pkg = pkg
         self.files = files
         self.stdio = stdio

+    @staticmethod
+    def _extract(worker):
+        return worker.extract()
+
     def extract(self):
         if not self.files:
             return
+        if sys.version_info.major == 2 or COMMAND_ENV.get(ENV_DISABLE_PARALLER_EXTRACT, False):
+            return self._single()
+        else:
+            return self._paraller()
+
+    def _single(self):
+        self.stdio and getattr(self.stdio, 'verbose', print)('extract mode: single')
+        return Extractor(
+            self.pkg,
+            self.files,
+            stdio=self.stdio
+        ).extract()
+
+    def _paraller(self):
+        self.stdio and getattr(self.stdio, 'verbose', print)('extract mode: paraller')
         workers = []
         file_num = len(self.files)
         paraller = int(min(self.MAX_PARALLER, file_num))
@@ -192,7 +213,7 @@ class ParallerExtractor(object):
         index = 0
         while index < file_num:
             p_index = index + size
-            workers.append(ParallerExtractWorker(
+            workers.append(Extractor(
                 self.pkg,
                 self.files[index:p_index],
                 stdio=self.stdio
@@ -201,16 +222,20 @@ class ParallerExtractor(object):
         pool = Pool(processes=paraller)
         try:
-            results = pool.map(ParallerExtractWorker.extract, workers)
+            results = pool.map(ParallerExtractor._extract, workers)
             for r in results:
                 if not r:
                     return False
+            return True
         except KeyboardInterrupt:
             if pool:
                 pool.close()
                 pool = None
+        except:
+            self.stdio and getattr(self.stdio, 'exception', print)()
         finally:
             pool and pool.close()
+        return False
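Extraction now picks its mode up front: Python 2 (presumably because of its stricter `multiprocessing` pickling rules) and the new `ENV_DISABLE_PARALLER_EXTRACT` switch both force the single-process path; otherwise the file list is sliced across a `Pool`, and any falsy worker result or exception turns into `False`. A sketch of forcing the fallback (the setter call is an assumption about `COMMAND_ENV`'s interface):

```python
# Hypothetical: disable parallel extraction via the new environment switch.
COMMAND_ENV.set(ENV_DISABLE_PARALLER_EXTRACT, '1')         # assumed setter API
ok = ParallerExtractor(pkg, files, stdio=stdio).extract()  # now runs _single()
```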
 class Repository(PackageInfo):
@@ -309,14 +334,14 @@ class Repository(PackageInfo):
         except:
             self.stdio and getattr(self.stdio, 'exception', print)('dump %s to %s failed' % (data, self.data_file_path))
         return False

+    def need_load(self, pkg, plugin):
+        return self.hash != pkg.md5 or not self.install_time > plugin.check_value or not self.file_check(plugin)
+
     def load_pkg(self, pkg, plugin):
         if self.is_shadow_repository():
             self.stdio and getattr(self.stdio, 'print', '%s is a shadow repository' % self)
             return False
-        hash_path = os.path.join(self.repository_dir, '.hash')
-        if self.hash == pkg.md5 and self.file_check(plugin) and self.install_time > plugin.check_value:
-            return True
         self.clear()
         try:
             with pkg.open() as rpm:
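The hash/plugin-time/file check that used to sit inline in `load_pkg` is extracted into `need_load` as its exact negation, so callers can test first and only then pay for `clear()` plus re-extraction:

```python
# Hypothetical caller-side pattern enabled by the new method:
if repository.need_load(pkg, install_plugin):
    # stale or incomplete: clear the repository dir and re-extract the rpm
    repository.load_pkg(pkg, install_plugin)
```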
@@ -385,7 +410,7 @@ class Repository(PackageInfo):
                     if not os.path.exists(path) and n_dir[:-1] in dirnames:
                         DirectoryUtil.mkdir(path)
                     if not os.path.isdir(path):
-                        raise Exception('%s in %s is not dir.' % (pkg.path, n_dir))
+                        raise Exception('%s in %s is not dir.' % (n_dir, pkg.path))
             self.set_version(pkg.version)
             self.set_release(pkg.release)
             self.md5 = pkg.md5
......
@@ -48,24 +48,61 @@ if sys.version_info.major == 3:

 class BufferIO(object):

-    def __init__(self):
+    def __init__(self, auto_clear=True):
         self._buffer = []
+        self.auto_clear = auto_clear
+        self.closed = False
+
+    def isatty(self):
+        return False
+
+    def writable(self):
+        return not self.closed
+
+    def close(self):
+        self.closed = True
+        return self
+
+    def open(self):
+        self.closed = False
+        self._buffer = []
+        return self
+
+    def __enter__(self):
+        return self.open()
+
+    def __exit__(self, *args, **kwargs):
+        return self.close()

     def write(self, s):
         self._buffer.append(s)

-    def read(self):
+    def read(self, *args, **kwargs):
         s = ''.join(self._buffer)
-        self._buffer = []
+        self.auto_clear and self.clear()
         return s
+
+    def clear(self):
+        self._buffer = []
+
+    def flush(self):
+        self.auto_clear and self.clear()
+        return True
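`BufferIO` grows into a file-like object: `closed`/`writable()` state, context-manager support, and an `auto_clear` flag that makes `read()` drain the buffer (the old behavior) or keep it. A self-contained illustration:

```python
# Illustration of the new BufferIO semantics (behavior per the diff above).
buf = BufferIO()                 # auto_clear=True, matches the old read()
buf.write('hello ')
buf.write('world')
assert buf.read() == 'hello world'
assert buf.read() == ''          # auto_clear drained the buffer

keep = BufferIO(auto_clear=False)
keep.write('x')
assert keep.read() == 'x'
assert keep.read() == 'x'        # read() no longer clears

with BufferIO() as b:            # __enter__ re-opens with an empty buffer
    b.write('scoped')
    assert b.read() == 'scoped'
assert b.closed                  # __exit__ closes; writable() is now False
```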
 class SysStdin(object):

     NONBLOCK = False
     STATS = None
     FD = None
+    IS_TTY = None
+
+    @classmethod
+    def isatty(cls):
+        if cls.IS_TTY is None:
+            cls.IS_TTY = sys.stdin.isatty()
+        return cls.IS_TTY

     @classmethod
     def fileno(cls):
@@ -140,29 +177,33 @@ class SysStdin(object):
         return sys.stdin.readlines()


 class FormtatText(object):

+    def __init__(self, text, color):
+        self.text = text
+        self.color_text = color + text + Fore.RESET
+
+    def format(self, istty=True):
+        return self.color_text if istty else self.text
+
+    def __str__(self):
+        return self.format()
+
-    @staticmethod
-    def format(text, color):
-        return color + text + Fore.RESET

     @staticmethod
     def info(text):
-        return FormtatText.format(text, Fore.BLUE)
+        return FormtatText(text, Fore.BLUE)

     @staticmethod
     def success(text):
-        return FormtatText.format(text, Fore.GREEN)
+        return FormtatText(text, Fore.GREEN)

     @staticmethod
     def warning(text):
-        return FormtatText.format(text, Fore.YELLOW)
+        return FormtatText(text, Fore.YELLOW)

     @staticmethod
     def error(text):
-        return FormtatText.format(text, Fore.RED)
+        return FormtatText(text, Fore.RED)
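`FormtatText` (sic, the identifier is as in the source) turns from a static formatter into a value object that renders lazily: colored for terminals, plain for pipes and logs, with `__str__` keeping the colored form for old call sites. Illustration:

```python
# Illustration of the tty-aware rendering added above.
warn = FormtatText.warning('[WARN]')
colored = warn.format(istty=True)    # Fore.YELLOW + '[WARN]' + Fore.RESET
plain = warn.format(istty=False)     # bare '[WARN]' for non-tty streams
assert str(warn) == colored          # __str__ defaults to istty=True
```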
 class LogSymbols(Enum):
@@ -220,7 +261,7 @@ class IOHalo(Halo):
         if getattr(self._stream, 'isatty', lambda : False)():
             return super(IOHalo, self).stop_and_persist(symbol=symbol, text=text)
         else:
-            self._stream.write(' %s\n' % symbol)
+            self._stream.write(' %s\n' % symbol.format(istty=False))

     def succeed(self, text=None):
         return self.stop_and_persist(symbol=LogSymbols.SUCCESS.value, text=text)
@@ -238,9 +279,11 @@ class IOHalo(Halo):

 class IOProgressBar(ProgressBar):

     @staticmethod
-    def _get_widgets(widget_type, text):
-        if widget_type == 'download':
+    def _get_widgets(widget_type, text, istty=True):
+        if istty is False:
+            return [text]
+        elif widget_type == 'download':
             return ['%s: ' % text, Percentage(), ' ', Bar(marker='#', left='[', right=']'), ' ', ETA(), ' ', FileTransferSpeed()]
         elif widget_type == 'timer':
             return ['%s: ' % text, Percentage(), ' ', Bar(marker='#', left='[', right=']'), ' ', AdaptiveETA()]
         elif widget_type == 'simple_progress':
@@ -249,7 +292,8 @@ class IOProgressBar(ProgressBar):
             return ['%s: ' % text, Percentage(), ' ', Bar(marker='#', left='[', right=']')]

     def __init__(self, maxval=None, text='', term_width=None, poll=1, left_justify=True, stream=None, widget_type='download'):
-        super(IOProgressBar, self).__init__(maxval=maxval, widgets=self._get_widgets(widget_type, text), term_width=term_width, poll=poll, left_justify=left_justify, fd=stream)
+        self.stream_isatty = getattr(stream, 'isatty', lambda : False)()
+        super(IOProgressBar, self).__init__(maxval=maxval, widgets=self._get_widgets(widget_type, text, self.stream_isatty), term_width=term_width, poll=poll, left_justify=left_justify, fd=stream)

     def start(self):
         self._hide_cursor()
@@ -261,21 +305,23 @@ class IOProgressBar(ProgressBar):
     def finish(self):
         if self.finished:
             return
-        self._show_cursor()
-        return super(IOProgressBar, self).finish()
+        self.update(self.maxval)
+        self._finish()

     def interrupt(self):
         if self.finished:
             return
-        self._show_cursor()
+        self._finish()
+
+    def _finish(self):
         self.finished = True
         self.fd.write('\n')
+        self._show_cursor()
         if self.signal_set:
             signal.signal(signal.SIGWINCH, signal.SIG_DFL)

     def _need_update(self):
-        return (self.currval == self.maxval or self.currval == 0 or getattr(self.fd, 'isatty', lambda : False)()) \
-            and super(IOProgressBar, self)._need_update()
+        return (self.currval == self.maxval or self.currval == 0 or self.stream_isatty) and super(IOProgressBar, self)._need_update()

     def _check_stream(self):
         if self.fd.closed:
@@ -291,13 +337,13 @@ class IOProgressBar(ProgressBar):
     def _hide_cursor(self):
         """Disable the user's blinking cursor
         """
-        if self._check_stream() and self.fd.isatty():
+        if self._check_stream() and self.stream_isatty:
             cursor.hide(stream=self.fd)

     def _show_cursor(self):
         """Re-enable the user's blinking cursor
         """
-        if self._check_stream() and self.fd.isatty():
+        if self._check_stream() and self.stream_isatty:
             cursor.show(stream=self.fd)
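With `stream_isatty` captured once at construction, a bar writing to a non-terminal degrades gracefully: `_get_widgets()` collapses to the bare caption and `_need_update()` suppresses every redraw between 0% and 100%, so logs get two clean lines instead of a redraw storm. A hedged sketch:

```python
# Hypothetical non-tty use: a log file as the stream.
with open('/tmp/obd-progress.log', 'w') as log:
    bar = IOProgressBar(maxval=3, text='extract', stream=log)
    bar.start()              # renders just the caption
    for i in range(3):
        bar.update(i + 1)    # intermediate updates are skipped on non-tty
    bar.finish()             # forces the final update, then writes '\n'
```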
@@ -320,8 +366,6 @@ class IO(object):

     VERBOSE_LEVEL = 0
     WARNING_PREV = FormtatText.warning('[WARN]')
     ERROR_PREV = FormtatText.error('[ERROR]')
-    IS_TTY = sys.stdin.isatty()
-    INPUT = SysStdin

     def __init__(self,
                  level,
@@ -329,7 +373,8 @@ class IO(object):
                  use_cache=False,
                  track_limit=0,
                  root_io=None,
-                 stream=sys.stdout
+                 input_stream=SysStdin,
+                 output_stream=sys.stdout
                  ):
         self.level = level
         self.msg_lv = msg_lv
@@ -344,9 +389,34 @@ class IO(object):
         self._verbose_prefix = '-' * self.level
         self.sub_ios = {}
         self.sync_obj = None
-        self._out_obj = None if self._root_io else stream
-        self._cur_out_obj = self._out_obj
+        self.input_stream = None
+        self._out_obj = None
+        self._cur_out_obj = None
         self._before_critical = None
+        self._output_is_tty = False
+        self._input_is_tty = False
+        self.set_input_stream(input_stream)
+        self.set_output_stream(output_stream)
+
+    def isatty(self):
+        if self._root_io:
+            return self._root_io.isatty()
+        return self._output_is_tty and self._input_is_tty
+
+    def set_input_stream(self, input_stream):
+        if self._root_io:
+            return False
+        self.input_stream = input_stream
+        self._input_is_tty = input_stream.isatty()
+
+    def set_output_stream(self, output_stream):
+        if self._root_io:
+            return False
+        if self._cur_out_obj == self._out_obj:
+            self._cur_out_obj = output_stream
+        self._out_obj = output_stream
+        self._output_is_tty = output_stream.isatty()
+        return True
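Streams are now injected and owned by the root `IO`: `set_output_stream` only swaps `_cur_out_obj` when nothing has redirected it, and both setters refuse to act on sub-IOs. Combined with the reworked `BufferIO`, output capture becomes (a sketch; `IO`'s remaining constructor defaults are assumed unchanged):

```python
# Hypothetical capture of printed output through the new setters.
io = IO(1)                        # a root IO (no root_io argument)
buf = BufferIO()
io.set_output_stream(buf)         # returns True on a root IO
io.print('hello')
assert buf.read().strip() == 'hello'
io.set_output_stream(sys.stdout)  # restore
```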
     def init_trace_logger(self, log_path, log_name=None, trace_id=None):
         if self._trace_logger is None:
@@ -360,7 +430,7 @@ class IO(object):
         state = {}
         for key in self.__dict__:
             state[key] = self.__dict__[key]
-        for key in ['_trace_logger', 'sync_obj', '_out_obj', '_cur_out_obj', '_before_critical']:
+        for key in ['_trace_logger', 'input_stream', 'sync_obj', '_out_obj', '_cur_out_obj', '_before_critical']:
             state[key] = None
         return state
@@ -418,6 +488,11 @@ class IO(object):
             self._flush_log()
         self._log_cache = None
         return True

+    def get_input_stream(self):
+        if self._root_io:
+            return self._root_io.get_input_stream()
+        return self.input_stream
+
     def get_cur_out_obj(self):
         if self._root_io:
@@ -571,15 +646,15 @@ class IO(object):
     def read(self, msg='', blocked=False):
         if msg:
             self._print(MsgLevel.INFO, msg)
-        return self.INPUT.read(blocked)
+        return self.get_input_stream().read(blocked)

     def confirm(self, msg):
         msg = '%s [y/n]: ' % msg
         self.print(msg, end='')
-        if self.IS_TTY:
+        if self._input_is_tty:
             while True:
                 try:
-                    ans = raw_input()
+                    ans = self.get_input_stream().readline(blocked=True).strip().lower()
                     if ans == 'y':
                         return True
                     if ans == 'n':
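`confirm()` now consults the injected input stream (`_input_is_tty` plus `get_input_stream().readline(...)`) instead of the module-level `IS_TTY`/`raw_input`, which makes it scriptable. A sketch with a fake stdin that mimics the `SysStdin` surface used here (`isatty`/`readline`):

```python
# Hypothetical scripted answer for confirm().
class FakeStdin(object):

    def __init__(self, lines):
        self._lines = list(lines)

    def isatty(self):
        return True              # pretend to be interactive

    def readline(self, blocked=False):
        return self._lines.pop(0)

io = IO(1)
io.set_input_stream(FakeStdin(['y\n']))
assert io.confirm('continue?') is True   # reads, strips, lowers -> 'y'
```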
@@ -598,8 +673,13 @@ class IO(object):
     def _print(self, msg_lv, msg, *args, **kwargs):
         if msg_lv < self.msg_lv:
             return
+        if 'prev_msg' in kwargs:
+            print_msg = '%s %s' % (kwargs['prev_msg'], msg)
+            del kwargs['prev_msg']
+        else:
+            print_msg = msg
         kwargs['file'] = self.get_cur_out_obj()
-        kwargs['file'] and print(self._format(msg, *args), **kwargs)
+        kwargs['file'] and print(self._format(print_msg, *args), **kwargs)
         del kwargs['file']
         self.log(msg_lv, msg, *args, **kwargs)
@@ -621,6 +701,7 @@ class IO(object):
         for levelno, line, args, kwargs in self._log_cache:
             self.trace_logger.log(levelno, line, *args, **kwargs)
         self._log_cache = []
+
     def _log(self, levelno, msg, *args, **kwargs):
         if self.trace_logger:
             self.trace_logger.log(levelno, msg, *args, **kwargs)
@@ -629,16 +710,15 @@ class IO(object):
         self._print(MsgLevel.INFO, msg, *args, **kwargs)

     def warn(self, msg, *args, **kwargs):
-        self._print(MsgLevel.WARN, '%s %s' % (self.WARNING_PREV, msg), *args, **kwargs)
+        self._print(MsgLevel.WARN, msg, prev_msg=self.WARNING_PREV.format(self.isatty()), *args, **kwargs)

     def error(self, msg, *args, **kwargs):
-        self._print(MsgLevel.ERROR, '%s %s' % (self.ERROR_PREV, msg), *args, **kwargs)
+        self._print(MsgLevel.ERROR, msg, prev_msg=self.ERROR_PREV.format(self.isatty()), *args, **kwargs)

     def critical(self, msg, *args, **kwargs):
-        if self._root_io:
-            return self._root_io.critical(msg, *args, **kwargs)
         self._print(MsgLevel.CRITICAL, '%s %s' % (self.ERROR_PREV, msg), *args, **kwargs)
-        self.exit(kwargs['code'] if 'code' in kwargs else 255)
+        if not self._root_io:
+            self.exit(kwargs['code'] if 'code' in kwargs else 255)
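Routing the `[WARN]`/`[ERROR]` prefixes through `prev_msg` has two effects: the prefix is rendered tty-aware via `FormtatText.format(self.isatty())`, and `self.log()` still receives the bare message, so trace logs stay free of ANSI codes and prefixes:

```python
# Net effect (illustrative):
io = IO(1)
io.warn('clog disk is nearly full')
# console: '[WARN] clog disk is nearly full' (colored only when isatty())
# trace log record: 'clog disk is nearly full'
```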
     def verbose(self, msg, *args, **kwargs):
         if self.level > self.VERBOSE_LEVEL:
@@ -728,13 +808,17 @@ class StdIO(object):
         if item.startswith('__'):
             return super(StdIO, self).__getattribute__(item)
         if self.io is None:
-            return FAKE_RETURN
+            if item == 'sub_io':
+                return self
+            else:
+                return FAKE_RETURN
         if item not in self._attrs:
             attr = getattr(self.io, item, EMPTY)
             if attr is not EMPTY:
                 self._attrs[item] = attr
             else:
-                self._warn_func(FormtatText.warning("WARNING: {} has no attribute '{}'".format(self.io, item)))
+                is_tty = getattr(self._stream, 'isatty', lambda : False)()
+                self._warn_func(FormtatText.warning("WARNING: {} has no attribute '{}'".format(self.io, item)).format(is_tty))
             self._attrs[item] = FAKE_RETURN
         return self._attrs[item]
......
@@ -56,6 +56,12 @@ class ClusterConfigParser(ConfigParser):
             zone_config[server.name] = {}
         return zone_config[server.name]

+    @classmethod
+    def get_global_src_conf(cls, cluster_config, component_config):
+        if 'config' not in component_config:
+            component_config['config'] = {}
+        return component_config['config']
+
     @classmethod
     def _to_cluster_config(cls, component_name, conf):
         servers = OrderedDict()
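The new `get_global_src_conf` gives callers a guaranteed-present `config` mapping on a component's source configuration, so global keys can be written back without key-existence checks. Quick illustration (`component_config` mirrors one component's YAML mapping; the unused `cluster_config` slot is passed as `None` here):

```python
# Illustration of the new helper.
component_config = {'servers': ['172.19.33.2']}
conf = ClusterConfigParser.get_global_src_conf(None, component_config)
conf['memory_limit'] = '6G'
assert component_config['config'] == {'memory_limit': '6G'}
```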
@@ -242,8 +248,8 @@ class ClusterConfigParser(ConfigParser):
             conf['name'] = global_config['appname']
             del global_config['appname']
-        conf['zones'] = zones
         if global_config:
             conf['config'] = global_config
+        conf['zones'] = zones
         return conf
\ No newline at end of file
@@ -24,14 +24,22 @@ oceanbase-ce:
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
     datafile_size: 20G # Size of the data file.
-    log_disk_size: 24G # The size of disk space used by the clog files.
+    log_disk_size: 15G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
-    syslog_level: INFO # System log level. The default value is INFO.
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
     enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
     max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
     # root_password: # root user password, can be empty
+    # ocp_meta_db: ocp_express # The database name of ocp express meta
+    # ocp_meta_username: meta # The username of ocp express meta
+    # ocp_meta_password: '' # The password of ocp express meta
+    # ocp_agent_monitor_password: '' # The password for obagent monitor user
+    ocp_meta_tenant: # The config for ocp express meta tenant
+      tenant_name: ocp
+      max_cpu: 1
+      memory_size: 2G
+      log_disk_size: 7680M # The recommended value is (4608 + (expect node num + expect tenant num) * 512) M.
   server1:
     mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started.
     rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started.
@@ -68,7 +76,7 @@ obproxy-ce:
   depends:
     - oceanbase-ce
   servers:
-    - 192.168.1.5
+    - 172.19.33.6
   global:
     listen_port: 2883 # External port. The default value is 2883.
     prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884.
@@ -96,19 +104,17 @@ obagent:
       ip: 172.19.33.4
   global:
     home_path: /root/obagent
-    ob_monitor_status: active
-prometheus:
+ocp-express:
   depends:
-    - obproxy-ce
+    - oceanbase-ce
     - obagent
   servers:
-    - 192.168.1.5
-  global:
-    home_path: /root/prometheus
-grafana:
-  depends:
-    - prometheus
-  servers:
-    - 192.168.1.5
+    - 172.19.33.5
   global:
-    home_path: /root/grafana # The working directory for prometheus. prometheus is started under this directory. This is a required field.
-    login_password: oceanbase
\ No newline at end of file
+    home_path: /root/ocp-server
+    # log_dir: /home/oceanbase/ocp-server/log # The log directory of ocp express server. The default value is {home_path}/log.
+    memory_size: 1G # The memory size of ocp-express server. The recommended value is 512MB + (expect node num + expect tenant num) * 60MB.
+    # logging_file_total_size_cap: 10G # The total log file size of ocp-express server.
+    # logging_file_max_history: 1 # The maximum number of days to keep archived log files. The default value is unlimited.
\ No newline at end of file
@@ -25,13 +25,21 @@ oceanbase-ce:
     system_memory: 30G
     datafile_size: 192G # Size of the data file.
     log_disk_size: 192G # The size of disk space used by the clog files.
-    syslog_level: INFO # System log level. The default value is INFO.
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
     enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
     max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
     skip_proxy_sys_private_check: true
     enable_strict_kernel_release: false
     # root_password: # root user password
+    # ocp_meta_db: ocp_express # The database name of ocp express meta
+    # ocp_meta_username: meta # The username of ocp express meta
+    # ocp_meta_password: '' # The password of ocp express meta
+    # ocp_agent_monitor_password: '' # The password for obagent monitor user
+    ocp_meta_tenant: # The config for ocp express meta tenant
+      tenant_name: ocp
+      max_cpu: 1
+      memory_size: 2G
+      log_disk_size: 7680M # The recommended value is (4608 + (expect node num + expect tenant num) * 512) M.
     # In this example, multiple observer processes are deployed on a single node, so each process uses different ports.
     # If you deploy the cluster across multiple nodes, the port and path settings can be the same.
   server1:
@@ -70,7 +78,7 @@ obproxy-ce:
   depends:
     - oceanbase-ce
   servers:
-    - 192.168.1.5
+    - 172.19.33.6
   global:
     listen_port: 2883 # External port. The default value is 2883.
     prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884.
@@ -98,19 +106,17 @@ obagent:
       ip: 172.19.33.4
   global:
     home_path: /root/obagent
-    ob_monitor_status: active
-prometheus:
+ocp-express:
   depends:
-    - obproxy-ce
+    - oceanbase-ce
     - obagent
   servers:
-    - 192.168.1.5
-  global:
-    home_path: /root/prometheus
-grafana:
-  depends:
-    - prometheus
-  servers:
-    - 192.168.1.5
+    - 172.19.33.5
   global:
-    home_path: /root/grafana # The working directory for prometheus. prometheus is started under this directory. This is a required field.
-    login_password: oceanbase
\ No newline at end of file
+    home_path: /root/ocp-server
+    # log_dir: /home/oceanbase/ocp-server/log # The log directory of ocp express server. The default value is {home_path}/log.
+    memory_size: 1G # The memory size of ocp-express server. The recommended value is 512MB + (expect node num + expect tenant num) * 60MB.
+    # logging_file_total_size_cap: 10G # The total log file size of ocp-express server.
+    # logging_file_max_history: 1 # The maximum number of days to keep archived log files. The default value is unlimited.
\ No newline at end of file
@@ -41,8 +41,8 @@ oceanbase-ce:
     # datafile_size: 200G
     # The size of disk space used by the clog files. When ignored, autodeploy calculates this value based on the current server available resource.
     # log_disk_size: 66G
-    # System log level. The default value is INFO.
-    # syslog_level: INFO
+    # System log level. The default value is WDIAG.
+    # syslog_level: WDIAG
     # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false.
     # enable_syslog_wf: false
     # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on.
......
@@ -41,8 +41,8 @@ oceanbase-ce:
     # datafile_size: 200G
     # The size of disk space used by the clog files. When ignored, autodeploy calculates this value based on the current server available resource.
     # log_disk_size: 66G
-    # System log level. The default value is INFO.
-    # syslog_level: INFO
+    # System log level. The default value is WDIAG.
+    # syslog_level: WDIAG
     # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false.
     # enable_syslog_wf: false
     # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on.
@@ -103,40 +103,34 @@ obagent:
   global:
     # The working directory for obagent. obagent is started under this directory. This is a required field.
     home_path: /root/obagent
-    # The port that pulls and manages the metrics. The default port number is 8088.
-    # server_port: 8088
-    # Debug port for pprof. The default port number is 8089.
-    # pprof_port: 8089
-    # Log level. The default value is INFO.
-    # log_level: INFO
+    # The port of monitor agent. The default port number is 8088.
+    # monagent_http_port: 8088
+    # The port of manager agent. The default port number is 8089.
+    # mgragent_http_port: 8089
     # Log path. The default value is log/monagent.log.
     # log_path: log/monagent.log
-    # Encryption method. OBD supports aes and plain. The default value is plain.
-    # crypto_method: plain
-    # Path to store the crypto key. The default value is conf/.config_secret.key.
-    # crypto_path: conf/.config_secret.key
-    # Size for a single log file. Log size is measured in Megabytes. The default value is 30M.
-    # log_size: 30
-    # Expiration time for logs. The default value is 7 days.
-    # log_expire_day: 7
-    # The maximum number for log files. The default value is 10.
-    # log_file_count: 10
-    # Whether to use local time for log files. The default value is true.
-    # log_use_localtime: true
-    # Whether to enable log compression. The default value is true.
-    # log_compress: true
+    # The log level of manager agent.
+    # mgragent_log_level: info
+    # The total log size of manager agent. Log size is measured in Megabytes. The default value is 30M.
+    # mgragent_log_max_size: 30
+    # Expiration time for manager agent logs. The default value is 30 days.
+    # mgragent_log_max_days: 30
+    # The maximum number for manager agent log files. The default value is 15.
+    # mgragent_log_max_backups: 15
+    # The log level of monitor agent.
+    # monagent_log_level: info
+    # The total log size of monitor agent. Log size is measured in Megabytes. The default value is 200M.
+    # monagent_log_max_size: 200
+    # Expiration time for monitor agent logs. The default value is 30 days.
+    # monagent_log_max_days: 30
+    # The maximum number for monitor agent log files. The default value is 15.
+    # monagent_log_max_backups: 15
     # Username for HTTP authentication. The default value is admin.
     # http_basic_auth_user: admin
     # Password for HTTP authentication. The default value is root.
     # http_basic_auth_password: root
-    # Username for debug service. The default value is admin.
-    # pprof_basic_auth_user: admin
-    # Password for debug service. The default value is root.
-    # pprof_basic_auth_password: root
-    # Monitor username for OceanBase Database. The user must have read access to OceanBase Database as a system tenant. The default value is root.
-    # monitor_user: root
-    # Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the root_password in oceanbase-ce.
+    # Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the ocp_agent_monitor_password in oceanbase-ce.
     # monitor_password:
     # The SQL port for observer. The default value is 2881. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the mysql_port in oceanbase-ce.
     # sql_port: 2881
     # The RPC port for observer. The default value is 2882. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the rpc_port in oceanbase-ce.
@@ -145,13 +139,12 @@ obagent:
     # cluster_name: obcluster
     # Cluster ID for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the cluster_id in oceanbase-ce.
     # cluster_id: 1
-    # Zone name for your observer. The default value is zone1. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the zone name in oceanbase-ce.
-    # zone_name: zone1
-    # Monitor status for OceanBase Database. Active is to enable. Inactive is to disable. The default value is active. When you deploy a cluster automatically, OBD decides whether to enable this parameter based on depends.
-    # ob_monitor_status: active
-    # Monitor status for your host. Active is to enable. Inactive is to disable. The default value is active.
-    # host_monitor_status: active
-    # Whether to disable the basic authentication for HTTP service. True is to disable. False is to enable. The default value is false.
-    # disable_http_basic_auth: false
-    # Whether to disable the basic authentication for the debug interface. True is to disable. False is to enable. The default value is false.
-    # disable_pprof_basic_auth: false
+    # The redo dir for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the redo_dir in oceanbase-ce.
+    # ob_log_path: /root/observer/store
+    # The data dir for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the data_dir in oceanbase-ce.
+    # ob_data_path: /root/observer/store
+    # The work directory for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the home_path in oceanbase-ce.
+    # ob_install_path: /root/observer
+    # The log path for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the {home_path}/log in oceanbase-ce.
+    # observer_log_path: /root/observer/log
+    # Monitor status for OceanBase Database. Active is to enable. Inactive is to disable. The default value is active. When you deploy a cluster automatically, OBD decides whether to enable this parameter based on depends.
\ No newline at end of file
@@ -41,8 +41,8 @@ oceanbase-ce:
     # datafile_size: 200G
     # The size of disk space used by the clog files. When ignored, autodeploy calculates this value based on the current server available resource.
     # log_disk_size: 66G
-    # System log level. The default value is INFO.
-    # syslog_level: INFO
+    # System log level. The default value is WDIAG.
+    # syslog_level: WDIAG
     # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false.
     # enable_syslog_wf: false
     # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on.
......
@@ -36,8 +36,8 @@ oceanbase-ce:
     # datafile_size: 200G
     # The size of disk space used by the clog files. When ignored, autodeploy calculates this value based on the current server available resource.
     # log_disk_size: 66G
-    # System log level. The default value is INFO.
-    # syslog_level: INFO
+    # System log level. The default value is WDIAG.
+    # syslog_level: WDIAG
     # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false.
     # enable_syslog_wf: false
     # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on.
......
@@ -36,8 +36,8 @@ oceanbase-ce:
     # datafile_size: 200G
     # The size of disk space used by the clog files. When ignored, autodeploy calculates this value based on the current server available resource.
     # log_disk_size: 66G
-    # System log level. The default value is INFO.
-    # syslog_level: INFO
+    # System log level. The default value is WDIAG.
+    # syslog_level: WDIAG
     # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false.
     # enable_syslog_wf: false
     # Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on.
......
@@ -25,7 +25,6 @@ oceanbase-ce:
     system_memory: 30G
     datafile_size: 192G # Size of the data file.
     log_disk_size: 192G # The size of disk space used by the clog files.
-    syslog_level: INFO # System log level. The default value is INFO.
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
     enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
     max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
......
@@ -25,7 +25,6 @@ oceanbase-ce:
     system_memory: 30G
     datafile_size: 192G # Size of the data file.
     log_disk_size: 192G # The size of disk space used by the clog files.
-    syslog_level: INFO # System log level. The default value is INFO.
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
     enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
     max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
......
@@ -22,7 +22,6 @@ oceanbase-ce:
     system_memory: 30G
     datafile_size: 192G # Size of the data file.
     log_disk_size: 192G # The size of disk space used by the clog files.
-    syslog_level: INFO # System log level. The default value is INFO.
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
     enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
     max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
......
@@ -24,10 +24,9 @@ oceanbase-ce:
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
     datafile_size: 20G # Size of the data file.
-    log_disk_size: 24G # The size of disk space used by the clog files.
+    log_disk_size: 15G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
-    syslog_level: INFO # System log level. The default value is INFO.
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
     enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
     max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
......
@@ -24,10 +24,9 @@ oceanbase-ce:
     memory_limit: 6G # The maximum running memory for an observer
     system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
     datafile_size: 20G # Size of the data file.
-    log_disk_size: 24G # The size of disk space used by the clog files.
+    log_disk_size: 15G # The size of disk space used by the clog files.
     cpu_count: 16
     production_mode: false
-    syslog_level: INFO # System log level. The default value is INFO.
     enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
     enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
     max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
......