Unverified commit ae0f3bca, authored by Rongfeng Fu, committed by GitHub

V2.1.0 (#170)

* V2.1.0

* V2.1.0 #163
Parent 97ce6598
......@@ -36,3 +36,4 @@ tags
/web/src/.umi-production
/web/src/.umi-test
/web/.env.local
/web/.must.config.js
This diff is collapsed.
......@@ -36,3 +36,9 @@ ENV_REPO_INSTALL_MODE = "OBD_REPO_INSTALL_MODE"
ENV_DISABLE_RSYNC = "OBD_DISABLE_RSYNC"
ENV_DISABLE_PARALLER_EXTRACT = "OBD_DISALBE_PARALLER_EXTRACT"
# telemetry mode. 0 - disable, 1 - enable.
TELEMETRY_MODE = "TELEMETRY_MODE"
# telemetry log mode. 0 - disable, 1 - enable.
TELEMETRY_LOG_MODE = "TELEMETRY_LOG_MODE"
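The two constants above only name the environment switches. As an illustration, here is a minimal, hypothetical reader for them (the helper functions and the default value are assumptions, not OBD's actual environment accessor):
```python
# Illustrative only: a hypothetical reader for the telemetry switches named
# above; OBD's real environment handling may differ.
import os

TELEMETRY_MODE = "TELEMETRY_MODE"
TELEMETRY_LOG_MODE = "TELEMETRY_LOG_MODE"

def telemetry_enabled(default="1"):
    # Per the comments above: "0" disables telemetry, "1" enables it.
    return os.getenv(TELEMETRY_MODE, default) == "1"

def telemetry_log_enabled(default="1"):
    return os.getenv(TELEMETRY_LOG_MODE, default) == "1"
```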
......@@ -174,9 +174,15 @@ EC_OCP_EXPRESS_DEPENDS_COMP_VERSION = OBDErrorCodeTemplate(4304, 'OCP express {o
EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK_AVAILABLE = OBDErrorCodeTemplate(4305, 'There is not enough log disk for ocp meta tenant. (Avail: {avail}, Need: {need})')
EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_LOG_DISK = OBDErrorCodeTemplate(4305, 'There is not enough log disk for ocp meta tenant.')
EC_OCP_EXPRESS_META_DB_NOT_ENOUGH_MEM = OBDErrorCodeTemplate(4305, 'There is not enough memory for ocp meta tenant')
EC_OCP_EXPRESS_ADMIN_PASSWD_ERROR = OBDErrorCodeTemplate(4306, '({ip}) ocp-express admin_passwd invalid. (Current: {current})')
# sql
EC_SQL_EXECUTE_FAILED = OBDErrorCodeTemplate(5000, "{sql} execute failed")
# obdiag
EC_OBDIAG_NOT_FOUND = OBDErrorCodeTemplate(6000, 'Failed to execute the obdiag command, you may not have obdiag installed')
EC_OBDIAG_NOT_CONTAIN_DEPEND_COMPONENT = OBDErrorCodeTemplate(6001, 'obdiag must contain depend components {components}')
EC_OBDIAG_OPTIONS_FORMAT_ERROR = OBDErrorCodeTemplate(6002, 'obdiag options {option} format error, please check the value : {value}')
# WARN CODE
WC_ULIMIT_CHECK = OBDErrorCodeTemplate(1007, '({server}) The recommended number of {key} is {need} (Current value: {now})')
WC_AIO_NOT_ENOUGH = OBDErrorCodeTemplate(1011, '({ip}) The recommended value of fs.aio-max-nr is 1048576 (Current value: {current})')
......@@ -221,4 +227,5 @@ SUG_OCP_EXPRESS_REDUCE_MEM = OBDErrorSuggestionTemplate('Please reduce the `memo
SUG_OCP_EXPRESS_REDUCE_DISK = OBDErrorSuggestionTemplate('Please reduce the `logging_file_total_size_cap`', fix_eval=[FixEval(FixEval.DEL, 'logging_file_total_size_cap')])
SUG_OCP_EXPRESS_COMP_VERSION = OBDErrorSuggestionTemplate('Please use {comp} with version {version} or above')
SUG_OCP_EXPRESS_REDUCE_META_DB_MEM = OBDErrorSuggestionTemplate('Please reduce the `ocp_meta_tenant_memory_size`', fix_eval=[FixEval(FixEval.DEL, 'ocp_meta_tenant_memory_size')])
SUG_OCP_EXPRESS_REDUCE_META_DB_LOG_DISK = OBDErrorSuggestionTemplate('Please reduce the `ocp_meta_tenant_log_disk_size`', fix_eval=[FixEval(FixEval.DEL, 'ocp_meta_tenant_log_disk_size')])
\ No newline at end of file
SUG_OCP_EXPRESS_REDUCE_META_DB_LOG_DISK = OBDErrorSuggestionTemplate('Please reduce the `ocp_meta_tenant_log_disk_size`', fix_eval=[FixEval(FixEval.DEL, 'ocp_meta_tenant_log_disk_size')])
SUG_OCP_EXPRESS_EDIT_ADMIN_PASSWD_ERROR = OBDErrorSuggestionTemplate('Please edit the `admin_passwd`, must be 8 to 32 characters in length, and must contain at least two digits, two uppercase letters, two lowercase letters, and two of the following special characters:~!@#%^&*_-+=|(){{}}[]:;,.?/)', fix_eval=[FixEval(FixEval.DEL, 'admin_passwd')], auto_fix=True)
\ No newline at end of file
......@@ -23,6 +23,7 @@ from __future__ import absolute_import, division, print_function
import os
import re
import sys
import time
from enum import Enum
from glob import glob
from copy import deepcopy, copy
......@@ -78,8 +79,8 @@ class PluginContextNamespace:
def variables(self):
return self._variables
def get_variable(self, name):
return self._variables.get(name)
def get_variable(self, name, default=None):
return self._variables.get(name, default)
def set_variable(self, name, value):
self._variables[name] = value
......@@ -177,12 +178,12 @@ class PluginContext(object):
self._return.return_false(*args, **kwargs)
self.namespace.set_return(self.plugin_name, self._return)
def get_variable(self, name, spacename=None):
def get_variable(self, name, spacename=None, default=None):
if spacename:
namespace = self.namespaces.get(spacename)
else:
namespace = self.namespace
return namespace.get_variable(name) if namespace else None
return namespace.get_variable(name, default) if namespace else None
def set_variable(self, name, value):
self.namespace.set_variable(name, value)
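With the `default` parameter now threaded through both the namespace and the context, plugin code can fall back to a value without an explicit `None` check. A minimal sketch of a simplified plugin body using it (the plugin name and the `return_true()` call are illustrative):
```python
def my_plugin(plugin_context, *args, **kwargs):
    # Falls back to {} when the variable was never set, instead of returning None.
    stats = plugin_context.get_variable('run_result', default={})
    stats['my_step'] = {'result': True}
    plugin_context.set_variable('run_result', stats)
    return plugin_context.return_true()
```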
......@@ -270,8 +271,11 @@ def pyScriptPluginExec(func):
self.before_do(self.name, namespace, namespaces, deploy_name,
repositories, components, clients, cluster_config, cmd,
options, stdio, *arg, **kwargs)
method_name = self.PLUGIN_NAME
run_result = self.context.get_variable('run_result', default={})
run_result[method_name] = {'result': True}
start_time = time.time()
if self.module:
method_name = func.__name__
method = getattr(self.module, method_name, False)
namespace_vars = copy(self.context.namespace.variables)
namespace_vars.update(kwargs)
......@@ -280,10 +284,15 @@ def pyScriptPluginExec(func):
try:
ret = method(self.context, *arg, **kwargs)
if ret is None and self.context and self.context.get_return() is None:
run_result[method_name]['result'] = False
self.context.return_false()
except Exception as e:
run_result[method_name]['result'] = False
self.context.return_false(exception=e)
stdio and getattr(stdio, 'exception', print)('%s RuntimeError: %s' % (self, e))
end_time = time.time()
run_result[method_name]['time'] = end_time - start_time
self.context.set_variable('run_result', run_result)
ret = self.context.get_return() if self.context else PluginReturn()
self.after_do(stdio, *arg, **kwargs)
return ret
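For reference, the `run_result` variable accumulated by this decorator ends up keyed by plugin method name, each entry carrying the success flag and the elapsed wall-clock seconds; a sketch with illustrative values:
```python
# Shape of the 'run_result' variable built up by pyScriptPluginExec
# (values here are illustrative): one entry per executed plugin method.
run_result = {
    'start_check': {'result': True, 'time': 0.42},
    'connect': {'result': False, 'time': 3.10},  # flipped to False on exception or empty return
}
# Later code can read it back through the plugin context, e.g.:
#   context.get_variable('run_result', default={})
```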
......
......@@ -379,10 +379,9 @@ class IO(object):
):
self.level = level
self.msg_lv = msg_lv
self.log_path = None
self.trace_id = None
self.log_name = 'default'
self.log_path = None
self._log_path = None
self._trace_id = None
self._log_name = 'default'
self._trace_logger = None
self._log_cache = [] if use_cache else None
self._root_io = root_io
......@@ -419,13 +418,18 @@ class IO(object):
self._output_is_tty = output_stream.isatty()
return True
def init_trace_logger(self, log_path, log_name=None, trace_id=None):
if self._trace_logger is None:
self.log_path = log_path
if trace_id:
self.trace_id = trace_id
def init_trace_logger(self, log_path, log_name=None, trace_id=None, recreate=False):
if self._root_io:
return False
if self._trace_logger is None or recreate:
self._log_path = log_path
if log_name:
self.log_name = log_name
self._log_name = log_name
if trace_id:
self._trace_id = trace_id
self._trace_logger = None
return True
return False
def __getstate__(self):
state = {}
......@@ -437,6 +441,8 @@ class IO(object):
@property
def trace_logger(self):
if self._root_io:
return self._root_io.trace_logger
if self.log_path and self._trace_logger is None:
self._trace_logger = Logger(self.log_name)
handler = handlers.TimedRotatingFileHandler(self.log_path, when='midnight', interval=1, backupCount=30)
......@@ -447,6 +453,24 @@ class IO(object):
self._trace_logger.addHandler(handler)
return self._trace_logger
@property
def trace_id(self):
if self._root_io:
return self._root_io.trace_id
return self._trace_id
@property
def log_path(self):
if self._root_io:
return self._root_io.log_path
return self._log_path
@property
def log_name(self):
if self._root_io:
return self._root_io.log_name
return self._log_name
@property
def log_cache(self):
if self._root_io:
......@@ -618,10 +642,6 @@ class IO(object):
track_limit=self.track_limit,
root_io=self._root_io if self._root_io else self
)
sub_io.log_name = self.log_name
sub_io.log_path = self.log_path
sub_io.trace_id = self.trace_id
sub_io._trace_logger = self.trace_logger
self.sub_ios[key] = sub_io
return self.sub_ios[key]
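With the new read-through properties, `log_path`, `log_name`, `trace_id`, and `trace_logger` are resolved via `_root_io` on every access, which is why the copies into `sub_io` are removed above. A behavioral sketch with a stripped-down stand-in class (not the real `IO`):
```python
# Behavioral sketch of the delegation added above, not a full IO reimplementation:
# a child IO forwards log_path lookups to its root instead of holding a copy,
# so re-initializing the root's trace logger is visible everywhere.
class RootedIO:
    def __init__(self, root=None):
        self._root_io = root
        self._log_path = None

    @property
    def log_path(self):
        if self._root_io:
            return self._root_io.log_path   # read-through, never a stale copy
        return self._log_path

root = RootedIO()
child = RootedIO(root=root)
root._log_path = '/tmp/obd.log'
assert child.log_path == '/tmp/obd.log'    # the child sees the root's value
```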
......
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
# OceanBase official website
OB_OFFICIAL_WEBSITE = 'https://www.oceanbase.com/'
# post telemetry data to OceanBase official
TELEMETRY_URL = 'http://openwebapi.dev.alipay.net/api/web/oceanbase/report'
# obdeploy version
VERSION = '<VERSION>'
# obdeploy build commit
REVISION = '<CID>'
# obdeploy build branch
BUILD_BRANCH = '<B_BRANCH>'
# obdeploy build time
BUILD_TIME = '<B_TIME>'
# obdeploy home path
CONST_OBD_HOME = "OBD_HOME"
# obdeploy install pre path
CONST_OBD_INSTALL_PRE = "OBD_INSTALL_PRE"
# obdeploy install path
CONST_OBD_INSTALL_PATH = "OBD_INSTALL_PATH"
# obdeploy forbidden variable
FORBIDDEN_VARS = (CONST_OBD_HOME, CONST_OBD_INSTALL_PRE, CONST_OBD_INSTALL_PATH)
\ No newline at end of file
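`FORBIDDEN_VARS` lists the environment keys reserved for OBD itself. A hypothetical guard showing how such a list is typically applied (the real enforcement inside OBD may differ):
```python
# Hypothetical guard; OBD's actual check lives elsewhere and may differ.
CONST_OBD_HOME = "OBD_HOME"
CONST_OBD_INSTALL_PRE = "OBD_INSTALL_PRE"
CONST_OBD_INSTALL_PATH = "OBD_INSTALL_PATH"
FORBIDDEN_VARS = (CONST_OBD_HOME, CONST_OBD_INSTALL_PRE, CONST_OBD_INSTALL_PATH)

def set_user_env(env, key, value):
    # Reject keys that OBD reserves for its own bookkeeping.
    if key in FORBIDDEN_VARS:
        raise ValueError('%s is reserved by obd and cannot be overridden' % key)
    env[key] = value
```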
......@@ -25,6 +25,7 @@ import os
import time
from optparse import Values
from copy import deepcopy, copy
import requests
import tempfile
from subprocess import call as subprocess_call
......@@ -41,6 +42,7 @@ import _errno as err
from _lock import LockManager, LockMode
from _optimize import OptimizeManager
from _environ import ENV_REPO_INSTALL_MODE, ENV_BASE_DIR
from const import OB_OFFICIAL_WEBSITE
class ObdHome(object):
......@@ -108,7 +110,7 @@ class ObdHome(object):
self._optimize_manager = OptimizeManager(self.home_path, stdio=self.stdio)
return self._optimize_manager
def _obd_update_lock(self):
def _global_ex_lock(self):
self.lock_manager.global_ex_lock()
def fork(self, deploy=None, repositories=None, cmds=None, options=None, stdio=None):
......@@ -936,6 +938,7 @@ class ObdHome(object):
continue
if component_status[repository] != cluster_status[server]:
self._call_stdio('verbose', '%s cluster status is inconsistent' % repository)
component_status[repository] = False
break
else:
continue
......@@ -1252,8 +1255,9 @@ class ObdHome(object):
if component_name:
components.add(component_name)
self.get_namespace(component_name).set_variable('generate_config_mini', True)
self.get_namespace(component_name).set_variable('generate_password', False)
self.get_namespace(component_name).set_variable('auto_depend', True)
if not components:
self._call_stdio('error', 'Use `-c/--components` to set in the components to be deployed')
return
......@@ -1404,6 +1408,12 @@ class ObdHome(object):
# Check whether the components have the parameter plugins and apply the plugins
self.search_param_plugin_and_apply(repositories, deploy_config)
# Generate password when password is None
gen_config_plugins = self.search_py_script_plugin(repositories, 'generate_config')
for repository in repositories:
if repository in gen_config_plugins:
self.call_plugin(gen_config_plugins[repository], repository, only_generate_password=True)
# Parameter check
self._call_stdio('verbose', 'Cluster param configuration check')
errors = self.deploy_param_check(repositories, deploy_config)
......@@ -2605,6 +2615,19 @@ class ObdHome(object):
ssh_clients = self.get_clients(deploy_config, [current_repository])
cluster_config = deploy_config.components[current_repository.name]
# Check the status for the deployed cluster
component_status = {}
cluster_status = self.cluster_status_check(repositories, component_status)
if cluster_status is False or cluster_status == 0:
if self.stdio:
self._call_stdio('error', err.EC_SOME_SERVER_STOPED)
for repository in component_status:
cluster_status = component_status[repository]
for server in cluster_status:
if cluster_status[server] == 0:
self._call_stdio('print', '%s %s is stopped' % (server, repository.name))
return False
route = []
use_images = []
upgrade_route_plugins = self.search_py_script_plugin([current_repository], 'upgrade_route', no_found_act='warn')
......@@ -2722,6 +2745,7 @@ class ObdHome(object):
if not self.install_repositories_to_servers(deploy_config, upgrade_repositories[1:], install_plugins, ssh_clients, self.options):
return False
script_query_timeout = getattr(self.options, 'script_query_timeout', '')
n = len(upgrade_repositories)
while upgrade_ctx['index'] < n:
repository = upgrade_repositories[upgrade_ctx['index']]
......@@ -2737,7 +2761,8 @@ class ObdHome(object):
apply_param_plugin=lambda repository: self.search_param_plugin_and_apply([repository], deploy_config),
upgrade_ctx=upgrade_ctx,
install_repository_to_servers=self.install_repository_to_servers,
unuse_lib_repository=deploy_config.unuse_lib_repository
unuse_lib_repository=deploy_config.unuse_lib_repository,
script_query_timeout=script_query_timeout
)
deploy.update_upgrade_ctx(**upgrade_ctx)
if not ret:
......@@ -3466,7 +3491,7 @@ class ObdHome(object):
connect_plugin=connect_plugin, optimize_envs=kwargs, operation='recover')
def update_obd(self, version, install_prefix='/'):
self._obd_update_lock()
self._global_ex_lock()
component_name = 'ob-deploy'
plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, component_name, '1.0.0')
if not plugin:
......@@ -3920,3 +3945,219 @@ class ObdHome(object):
self.call_plugin(sync_config_plugin, target_repository)
dooba_plugin = self.plugin_manager.get_best_py_script_plugin('run', 'dooba', plugin_version)
return self.call_plugin(dooba_plugin, target_repository)
def telemetry_post(self, name):
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name)
self.set_deploy(deploy)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
deploy_info = deploy.deploy_info
if deploy_info.status in (DeployStatus.STATUS_DESTROYED, DeployStatus.STATUS_CONFIGURED):
self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
return False
repositories = self.load_local_repositories(deploy_info)
if repositories == []:
return
self.set_repositories(repositories)
target_repository = None
for repository in repositories:
if repository.name in ['oceanbase', 'oceanbase-ce']:
target_repository = repository
break
else:
target_repository = repository
telemetry_info_collect_plugin = self.plugin_manager.get_best_py_script_plugin('telemetry_info_collect', 'general', '0.1')
ret = self.call_plugin(telemetry_info_collect_plugin, target_repository, target_repository=target_repository)
if ret:
post_data = ret.get_return('post_data')
self._call_stdio('verbose', 'telemetry_data: %s' % post_data)
telemetry_post_plugin = self.plugin_manager.get_best_py_script_plugin('telemetry_post', 'general', '0.1')
return self.call_plugin(telemetry_post_plugin, target_repository, data=post_data)
def obdiag_gather(self, name, gather_type, opts):
self._global_ex_lock()
self._call_stdio('verbose', 'Get Deploy by name')
deploy = self.deploy_manager.get_deploy_config(name, read_only=True)
if not deploy:
self._call_stdio('error', 'No such deploy: %s.' % name)
return False
self.set_deploy(deploy)
self._call_stdio('verbose', 'Get deploy configuration')
deploy_config = deploy.deploy_config
deploy_info = deploy.deploy_info
if deploy_info.status in (DeployStatus.STATUS_DESTROYED, DeployStatus.STATUS_CONFIGURED):
self._call_stdio('print', 'Deploy "%s" is %s' % (name, deploy_info.status.value))
return False
allow_components = []
if gather_type.startswith("gather_obproxy"):
allow_components = ['obproxy-ce', 'obproxy']
else:
allow_components = ['oceanbase-ce', 'oceanbase']
component_name = ""
for component in deploy_config.components:
if component in allow_components:
component_name = component
break
if component_name == "":
self._call_stdio('error', err.EC_OBDIAG_NOT_CONTAIN_DEPEND_COMPONENT.format(components=allow_components))
return False
cluster_config = deploy_config.components[component_name]
if not cluster_config.servers:
self._call_stdio('error', '%s server list is empty' % allow_components[0])
return False
self._call_stdio('start_loading', 'Get local repositories and plugins')
# Get the repository
repositories = self.load_local_repositories(deploy_info)
self.set_repositories(repositories)
self._call_stdio('stop_loading', 'succeed')
target_repository = None
for repository in repositories:
if repository.name == allow_components[0]:
target_repository = repository
if gather_type in ['gather_plan_monitor']:
setattr(opts, 'connect_cluster', True)
obdiag_path = getattr(opts, 'obdiag_dir', None)
diagnostic_component_name = 'oceanbase-diagnostic-tool'
obdiag_version = '1.0'
pre_check_plugin = self.plugin_manager.get_best_py_script_plugin('pre_check', diagnostic_component_name, obdiag_version)
check_pass = self.call_plugin(pre_check_plugin,
target_repository,
gather_type = gather_type,
obdiag_path = obdiag_path,
version_check = True,
utils_work_dir_check = True)
if not check_pass:
# obdiag checker return False
if not check_pass.get_return('obdiag_found'):
if not self._call_stdio('confirm', 'Could not find the obdiag, please confirm whether to install it' ):
return False
self.obdiag_deploy(auto_deploy=True, install_prefix=obdiag_path)
# utils checker return False
if not check_pass.get_return('utils_status'):
repositories_utils_map = self.get_repositories_utils(repositories)
if repositories_utils_map is False:
self._call_stdio('error', 'Failed to get utils package')
else:
if not self._call_stdio('confirm', 'obdiag gather clog/slog need to install ob_admin\nDo you want to install ob_admin?'):
if not check_pass.get_return('skip'):
return False
else:
self._call_stdio('warn', 'Just skip gather clog/slog')
else:
if not self.install_utils_to_servers(repositories, repositories_utils_map):
self._call_stdio('error', 'Failed to install utils to servers')
obdiag_version = check_pass.get_return('obdiag_version')
generate_config_plugin = self.plugin_manager.get_best_py_script_plugin('generate_config', diagnostic_component_name, obdiag_version)
self.call_plugin(generate_config_plugin, target_repository, deploy_config=deploy_config)
self._call_stdio('generate_config', 'succeed')
obdiag_plugin = self.plugin_manager.get_best_py_script_plugin(gather_type, diagnostic_component_name, obdiag_version)
return self.call_plugin(obdiag_plugin, target_repository)
def obdiag_deploy(self, auto_deploy=False, install_prefix=None):
self._global_ex_lock()
component_name = 'oceanbase-diagnostic-tool'
if install_prefix is None:
install_prefix = os.path.join(os.getenv('HOME'), component_name)
pkg = self.mirror_manager.get_best_pkg(name=component_name)
if not pkg:
self._call_stdio('critical', '%s package not found' % component_name)
return False
plugin = self.plugin_manager.get_best_plugin(PluginType.INSTALL, component_name, pkg.version)
self._call_stdio('print', 'obdiag plugin : %s' % plugin)
repository = self.repository_manager.create_instance_repository(pkg.name, pkg.version, pkg.md5)
check_plugin = self.plugin_manager.get_best_py_script_plugin('pre_check', component_name, pkg.version)
if not auto_deploy:
ret = self.call_plugin(check_plugin,
repository,
clients={},
obdiag_path = install_prefix,
obdiag_new_version = pkg.version,
version_check = True)
if not ret and ret.get_return('obdiag_found'):
self._call_stdio('print', 'No updates detected. obdiag is already up to date.')
return False
if not self._call_stdio('confirm', 'Found a higher version\n%s\nDo you want to use it?' % pkg):
return False
self._call_stdio('start_loading', 'Get local repositories and plugins')
repository.load_pkg(pkg, plugin)
src_path = os.path.join(repository.repository_dir, component_name)
if FileUtil.symlink(src_path, install_prefix, self.stdio):
self._call_stdio('stop_loading', 'succeed')
self._call_stdio('print', 'Deploy obdiag successful.\nCurrent version : %s. \nPath of obdiag : %s' % (pkg.version, install_prefix))
return True
def get_repositories_utils(self, repositories):
all_data = []
data = {}
temp_map = {}
need_install_repositories = ['oceanbase-ce']
for repository in repositories:
utils_name = '%s-utils' % repository.name
if (utils_name in data) or (repository.name not in need_install_repositories):
continue
data[utils_name] = {'version': repository.version}
temp_map[utils_name] = repository
all_data.append((data, temp_map))
try:
repositories_utils_map = {}
for data, temp_map in all_data:
with tempfile.NamedTemporaryFile(suffix=".yaml", mode='w') as tf:
yaml_loader = YamlLoader(self.stdio)
yaml_loader.dump(data, tf)
deploy_config = DeployConfig(tf.name, yaml_loader=yaml_loader, config_parser_manager=self.deploy_manager.config_parser_manager)
self._call_stdio('verbose', 'Search best suitable repository utils')
pkgs, utils_repositories, errors = self.search_components_from_mirrors(deploy_config, only_info=False)
if errors:
self._call_stdio('error', '\n'.join(errors))
return False
# Get the installation plugin and install
install_plugins = self.get_install_plugin_and_install(utils_repositories, pkgs)
if not install_plugins:
return False
for utils_repository in utils_repositories:
repository = temp_map[utils_repository.name]
install_plugin = install_plugins[utils_repository]
repositories_utils_map[repository] = {
'repositories': utils_repository,
'install_plugin': install_plugin
}
return repositories_utils_map
except:
self._call_stdio('exception', 'Failed to create utils-repo config file')
pass
return False
def install_utils_to_servers(self, repositories, repositories_utils_map, unuse_utils_repository=True):
install_repo_plugin = self.plugin_manager.get_best_py_script_plugin('install_repo', 'general', '0.1')
check_file_maps = {}
need_install_repositories = ['oceanbase-ce']
for repository in repositories:
if (repository.name not in need_install_repositories):
continue
temp_repository = deepcopy(repository)
temp_repository.name = '%s-utils' % repository.name
utils_repository = repositories_utils_map[temp_repository]['repositories']
install_plugin = repositories_utils_map[temp_repository]['install_plugin']
check_file_map = check_file_maps[repository] = install_plugin.file_map(repository)
ret = self.call_plugin(install_repo_plugin, repository, obd_home=self.home_path, install_repository=utils_repository,
install_plugin=install_plugin, check_repository=repository, check_file_map=check_file_map,
msg_lv='error' if unuse_utils_repository else 'warn')
if not ret:
return False
return True
\ No newline at end of file
......@@ -10,8 +10,6 @@ After you deploy OceanBase Deployer (OBD), you can run the `obd demo` command to
- At least 54 GB of disk space is available on the server.
- Your server can be connected to the network, or there are installation packages required for deployment.
> **Note**
>
> If the foregoing prerequisites are not met, see [Use OBD to start an OceanBase cluster](../3.user-guide/2.start-the-oceanbase-cluster-by-using-obd.md).
......
......@@ -36,8 +36,6 @@ obd demo -c oceanbase-ce,obproxy-ce --obproxy-ce.home_path=/data/demo/
obd demo --oceanbase-ce.mysql_port=3881
```
For more information about the relevant configuration items in the configuration file, refer to [Configuration file description](../../4.configuration-file-description.md).
> **Notice**
>
> This command supports only level-1 configurations under global that are specified by using options.
# Mirror and repository commands
OBD provides multiple-level commands. You can use the `-h/--help` option to view the help information of sub-commands. Similarly, you can also use `-v/--verbose` to view the detailed execution process of commands when the execution of sub commands reports an error.
OBD provides multiple-level commands. You can use the `-h/--help` option to view the help information of sub-commands.
## obd mirror clone
## `obd mirror clone`
Copy an RPM package to the local mirror repository. You can run the corresponding OBD cluster command to start the mirror.
......@@ -14,7 +14,7 @@ obd mirror clone <path> [-f]
The `-f` option is `--force`. `-f` is optional. This option is disabled by default. If it is enabled and a mirror of the same name exists in the repository, the copied mirror will forcibly overwrite the existing one.
## obd mirror create
## `obd mirror create`
Creates a mirror based on the local directory. When OBD starts user-compiled open-source OceanBase software, you can run this command to add the compilation output to the local repository. Then, you can run the corresponding `obd cluster` command to start the mirror.
......@@ -22,19 +22,19 @@ Creates a mirror based on the local directory. When OBD starts a user-compiled o
obd mirror create -n <component name> -p <your compile dir> -V <component version> [-t <tag>] [-f]
```
For example, you can [compile an OceanBase cluster based on the source code](https://en.oceanbase.com/docs/community-observer-en-10000000000209369). Then, you can run the `make DESTDIR=./ install && obd mirror create -n oceanbase-ce -V 3.1.0 -p ./usr/local` command to add the compilation output to the local repository of OBD.
For example, you can [compile an OceanBase cluster based on the source code](https://www.oceanbase.com/en/docs/community-observer-en-10000000000209369). Then, you can run the `make DESTDIR=./ install && obd mirror create -n oceanbase-ce -V 3.1.0 -p ./usr/local` command to add the compilation output to the local repository of OBD.
This table describes the corresponding options.
| Option | Required | Data type | Description |
|----|-----|-----|----|
| -n/--name | Yes | string | The component name. If you want to compile an OceanBase cluster, set this option to oceanbase-ce. If you want to compile ODP, set this option to obproxy-ce. |
--- | --- | --- |---
| -n/--name | Yes | string | The component name. If you want to compile an OceanBase cluster, set this option to oceanbase-ce. If you want to compile ODP, set this option to obproxy. |
| -p/--path | Yes | string | The directory that stores the compilation output. OBD will automatically retrieve files required by the component from this directory. |
| -V/--version | Yes | string | The component version. |
| -t/--tag | No | string | The mirror tags. You can define one or more tags for the created mirror. Separate multiple tags with commas (,). |
| -f/--force | No | bool | Specifies whether to forcibly overwrite an existing mirror or tag. This option is disabled by default. |
## obd mirror list
## `obd mirror list`
Shows the mirror repository or mirror list.
......@@ -44,7 +44,7 @@ obd mirror list [mirror repo name]
`mirror repo name` specifies the mirror repository name. This parameter is optional. When it is not specified, all mirror repositories will be returned. When it is specified, only the specified mirror repository will be returned.
## obd mirror update
## `obd mirror update`
Synchronizes the information of all remote mirror repositories.
......@@ -52,7 +52,7 @@ Synchronizes the information of all remote mirror repositories.
obd mirror update
```
## obd mirror disable
## `obd mirror disable`
Disable remote mirror repositories. To disable all the remote mirror repositories, run the `obd mirror disable remote` command.
......@@ -62,7 +62,7 @@ obd mirror disable <mirror_repo_name>
Parameter `mirror repo name` specifies the mirror repository name. When you specify `remote`, all the remote mirror repositories are disabled.
## obd mirror enable
## `obd mirror enable`
Enable remote mirror repositories.
......
......@@ -2,8 +2,6 @@
OceanBase Deployer (OBD) provides a series of tool commands, including general commands that deliver a better experience for developers.
You can use the `-h/--help` option to view the help information of sub-commands. Similarly, you can also use `-v/--verbose` to view the detailed execution process of commands when the execution of sub commands reports an error.
## obd devmode enable
You can run this command to enable the developer mode, which is a prerequisite for using other tool commands. After you enable the developer mode, OBD will downgrade the level of some exceptions and ignore some parameter exceptions. If you are not a kernel developer, use this command with caution.
......@@ -104,3 +102,13 @@ The following table describes the options of the command.
| -t or --tenant | No | String | sys | The tenant for connecting to the database. |
| -D or --database | No | String | Empty | The name of the database to be connected. |
| --obclient-bin | No | String | obclient | The path to the directory where the binary files of OBClient are stored. |
## obd display-trace
You can run this command to display the OBD logs for a specified trace_id.
```shell
obd display-trace <trace_id>
```
The `trace_id` parameter is the UUID of the command execution.
\ No newline at end of file
......@@ -12,7 +12,7 @@ user: # The SSH login configuration.
timeout: ssh connection timeout (second), default 30
oceanbase-ce: # The name of the component that is configured as follows.
# version: 3.1.3 # Specify the version of the component, which is usually not required.
# package_hash: 589c4f8ed2662835148a95d5c1b46a07e36c2d346804791364a757aef4f7b60d # Specify the hash of the component, which is usually not required.
# pacakge_hash: 589c4f8ed2662835148a95d5c1b46a07e36c2d346804791364a757aef4f7b60d # Specify the hash of the component, which is usually not required.
# tag: dev # Specify the tag of the component, which is usually not required.
servers: # The list of nodes.
- name: z1 # The node name, which can be left blank. The default node name is the same as the IP address if this name is left blank. The node name is z1 in this example.
......@@ -24,7 +24,7 @@ oceanbase-ce: # The name of the component that is configured as follows.
ip: 192.168.1.4
global: # The global configuration. The identical configuration in the same component can be written here.
# The node configuration is used if it has the same configuration item as the global configuration.
# Please set devname as the network adaptor's name whose IP is in the setting of servers.
# Please set devname as the network adaptor's name whose IP is in the setting of servers.
# if set servers as "127.0.0.1", please set devname as "lo"
# if current ip is 192.168.1.10, and the ip's network adaptor's name is "eth0", please use "eth0"
devname: eth0
......@@ -62,7 +62,7 @@ oceanbase-ce: # The name of the component that is configured as follows.
zone: zone3
obproxy-ce: # The name of the component that is configured as follows.
# version: 3.2.3 # Specify the version of the component, which is usually not required.
# package_hash: 73cccf4d05508de0950ad1164aec03003c4ddbe1415530e031ac8b6469815fea # Specify the hash of the component, which is usually not required.
# pacakge_hash: 73cccf4d05508de0950ad1164aec03003c4ddbe1415530e031ac8b6469815fea # Specify the hash of the component, which is usually not required.
# tag: dev # Specify the tag of the component, which is usually not required.
servers:
- 192.168.1.5
......
# What is OBD
OBD (OceanBase Deployer) is the installation and deployment tool for OceanBase clusters. It standardizes the complex configuration process through either command-line or GUI-based deployment, lowering the difficulty of cluster deployment. For detailed instructions, see [Deploy OceanBase Database on a single server](4.user-guide/2.start-the-oceanbase-cluster-by-using-obd.md) and [Deploy an OceanBase cluster through the GUI](2.quick-start/3.use-ui-deploy-oceanbase.md).
The command line supports editing configuration files and allows more flexible configuration adjustments; it suits users who want an in-depth understanding of OceanBase and has a certain learning curve. The GUI is simple to configure: you can complete cluster deployment by following the guided configuration pages, which suits users who want a quick experience in a standard environment.
Beyond cluster deployment, OBD also provides common O&M capabilities such as a package manager, stress-testing tools, and cluster management, to better support users of the OceanBase distributed database.
OBD (OceanBase Deployer) is the installation and deployment tool for OceanBase open-source software. OBD is also a package manager that can manage all OceanBase open-source software.
......@@ -260,6 +260,16 @@ obd cluster tenant drop <deploy name> [-n <tenant name>]
The `-n` option, or `--tenant-name`, is required and specifies the name of the tenant to drop.
## obd cluster tenant list
You can run this command to list all tenants. This command is valid only for OceanBase Database.
```shell
obd cluster tenant list <deploy name>
```
The `deploy name` parameter is the name of the deployed cluster, which can be considered an alias of the configuration file.
## obd cluster chst
You can run this command to convert the configuration style.
......
......@@ -102,3 +102,13 @@ obd tool db_connect <deploy name> [options]
| -t/--tenant | No | string | sys | The tenant used for the database connection. |
| -D/--database | No | string | Empty by default | The name of the database used for the connection. |
| --obclient-bin | No | string | obclient | The path of the OBClient binary file. |
## obd display-trace
You can run this command to display the OBD logs for a specified trace_id.
```shell
obd display-trace <trace_id>
```
The `trace_id` parameter is the UUID of the command execution.
\ No newline at end of file
# Diagnostic tool command group
OBD integrates OceanBase Diagnostic Tool (obdiag). obdiag currently supports scanning and collecting information such as OceanBase logs, SQL Audit records, and OceanBase process stacks, and enables one-click collection of diagnostic information through OBD.
## obd obdiag deploy
You can run this command to install and deploy obdiag on the local machine.
```shell
obd obdiag deploy
```
## obd obdiag gather
You can run this command to invoke obdiag to collect OceanBase-related diagnostic information.
```shell
obd obdiag gather <gather type> <deploy name> [options]
```
The gather types include:
* log: collects the logs of the target OceanBase cluster with one click
* sysstat: collects the host information of the target OceanBase cluster with one click
* clog: collects the clog logs of the target OceanBase cluster with one click
* slog: collects the slog logs of the target OceanBase cluster with one click
* plan_monitor: collects the execution details of parallel SQL with a specified trace_id in the target OceanBase cluster with one click
* stack: collects the stack information of the target OceanBase cluster with one click
* perf: collects the perf information of the target OceanBase cluster (sample chart, perf flame graph, and pstack flame graph) with one click
* obproxy_log: collects the logs of the obproxy component that the target OceanBase cluster depends on with one click
* all: collects all diagnostic information of the target OceanBase cluster with one click, including OceanBase logs, host information, OceanBase stack information, OceanBase clog and slog logs, and OceanBase perf information (sample chart, perf flame graph, and pstack flame graph)
## obd obdiag gather log
You can run this command to collect the logs of the target OceanBase cluster with one click.
```shell
obd obdiag gather log <deploy name> [options]
```
The `deploy name` parameter is the name of the deployed cluster, which can be considered an alias of the configuration file.
The following table describes the options.
| Option | Required | Data type | Default value | Description |
|---------------------|------|--------|---------------------------|-------------------------------------------------------------------|
| --from | No | string | / | The start time for log collection, in the format "yyyy-mm-dd hh:mm:ss" (quotes required), for example "1970-01-01 12:00:00". |
| --to | No | string | / | The end time for log collection, in the format "yyyy-mm-dd hh:mm:ss" (quotes required), for example "1970-01-01 13:00:00". |
| --since | No | string | / | Collects logs of the most recent period, in the format n m/h/d, where m is minutes, h is hours, and d is days; for example, 30m collects the logs of the last 30 minutes. |
| --scope | No | string | all | The type of OceanBase logs to collect. choices=[observer, election, rootservice, all], default=all. |
| --grep | No | string | / | The keyword to search for. |
| --encrypt | No | string | false | Whether to encrypt the returned files. Options: [true, false]. |
| --store_dir | No | string | The current path where the command is executed | The local path for storing the results. |
| --obdiag_dir | No | string | $HOME/oceanbase-diagnostic-tool | The installation directory of obdiag. |
## obd obdiag gather sysstat
You can run this command to collect the host information of the target OceanBase cluster (host dmesg, CPU, and memory information) with one click.
```shell
obd obdiag gather sysstat <deploy name> [options]
```
The `deploy name` parameter is the name of the deployed cluster, which can be considered an alias of the configuration file.
The following table describes the options.
| Option | Required | Data type | Default value | Description |
|---------------------|------|--------|---------------------------|-------------------------------------------------------------------|
| --store_dir | No | string | The current path where the command is executed | The local path for storing the results. |
| --obdiag_dir | No | string | $HOME/oceanbase-diagnostic-tool | The installation directory of obdiag. |
## obd obdiag gather clog
You can run this command to collect the clog logs of the target OceanBase cluster with one click. The ob_admin tool must have been deployed on the cluster nodes to be collected.
```shell
obd obdiag gather clog <deploy name> [options]
```
The `deploy name` parameter is the name of the deployed cluster, which can be considered an alias of the configuration file.
The following table describes the options.
| Option | Required | Data type | Default value | Description |
|---------------------|------|--------|---------------------------|-------------------------------------------------------------------|
| --from | No | string | / | The start time for log collection, in the format "yyyy-mm-dd hh:mm:ss" (quotes required), for example "1970-01-01 12:00:00". |
| --to | No | string | / | The end time for log collection, in the format "yyyy-mm-dd hh:mm:ss" (quotes required), for example "1970-01-01 13:00:00". |
| --since | No | string | / | Collects logs of the most recent period, in the format n m/h/d, where m is minutes, h is hours, and d is days; for example, 30m collects the logs of the last 30 minutes. |
| --encrypt | No | string | false | Whether to encrypt the returned files. Options: [true, false]. |
| --store_dir | No | string | The current path where the command is executed | The local path for storing the results. |
| --obdiag_dir | No | string | $HOME/oceanbase-diagnostic-tool | The installation directory of obdiag. |
## obd obdiag gather slog
You can run this command to collect the slog logs of the target OceanBase cluster with one click. The ob_admin tool must have been deployed on the cluster nodes to be collected.
```shell
obd obdiag gather slog <deploy name> [options]
```
The `deploy name` parameter is the name of the deployed cluster, which can be considered an alias of the configuration file.
The following table describes the options.
| Option | Required | Data type | Default value | Description |
|---------------------|------|--------|---------------------------|-------------------------------------------------------------------|
| --from | No | string | / | The start time for log collection, in the format "yyyy-mm-dd hh:mm:ss" (quotes required), for example "1970-01-01 12:00:00". |
| --to | No | string | / | The end time for log collection, in the format "yyyy-mm-dd hh:mm:ss" (quotes required), for example "1970-01-01 13:00:00". |
| --since | No | string | / | Collects logs of the most recent period, in the format n m/h/d, where m is minutes, h is hours, and d is days; for example, 30m collects the logs of the last 30 minutes. |
| --encrypt | No | string | false | Whether to encrypt the returned files. Options: [true, false]. |
| --store_dir | No | string | The current path where the command is executed | The local path for storing the results. |
| --obdiag_dir | No | string | $HOME/oceanbase-diagnostic-tool | The installation directory of obdiag. |
## obd obdiag gather plan_monitor
This command collects, with one click, the execution details of parallel SQL with a specified trace_id in the target OceanBase cluster, which helps analyze operator-level causes of slow SQL execution in AP scenarios. Note: this feature is supported only on OceanBase versions later than 3.0.
```shell
obd obdiag gather plan_monitor <deploy name> [options]
```
The `deploy name` parameter is the name of the deployed cluster, which can be considered an alias of the configuration file.
The following table describes the options.
| Option | Required | Data type | Default value | Description |
|---------------------|------|--------|---------------------------|-------------------------------------------------------------------|
| -c/--components | No | string | oceanbase-ce | The component used to access the oceanbase database. |
| --trace_id | Yes | string |  | The trace_id found in gv$sql_audit for OceanBase versions earlier than 4.0, or in gv$ob_sql_audit for OceanBase 4.0 and later. |
| -u/--user | No | string | root | The user who can access the oceanbase database. |
| -p/--password | No | string |  | The password of the user who can access the oceanbase database. |
| --store_dir | No | string | The current path where the command is executed | The local path for storing the results. |
| --obdiag_dir | No | string | $HOME/oceanbase-diagnostic-tool | The installation directory of obdiag. |
## obd obdiag gather stack
This command collects the stack information of the target OceanBase cluster with one click. This collection item is not yet supported on ARM.
```shell
obd obdiag gather stack <deploy name> [options]
```
The `deploy name` parameter is the name of the deployed cluster, which can be considered an alias of the configuration file.
The following table describes the options.
| Option | Required | Data type | Default value | Description |
|---------------------|------|--------|---------------------------|-------------------------------------------------------------------|
| --store_dir | No | string | The current path where the command is executed | The local path for storing the results. |
| --obdiag_dir | No | string | $HOME/oceanbase-diagnostic-tool | The installation directory of obdiag. |
## obd obdiag gather perf
This command collects the perf information of the target OceanBase cluster (sample chart, perf flame graph, and pstack flame graph) with one click.
```shell
obd obdiag gather perf <deploy name> [options]
```
The `deploy name` parameter is the name of the deployed cluster, which can be considered an alias of the configuration file.
The following table describes the options.
| Option | Required | Data type | Default value | Description |
|---------------------|------|--------|---------------------------|-------------------------------------------------------------------|
| --scope | No | string | all | Selectively collects the specified perf information. Options: [sample, flame, pstack, all]. |
| --store_dir | No | string | The current path where the command is executed | The local path for storing the results. |
| --obdiag_dir | No | string | $HOME/oceanbase-diagnostic-tool | The installation directory of obdiag. |
## obd obdiag gather obproxy_log
You can run this command to collect the logs of the obproxy cluster that the target OceanBase cluster depends on with one click.
```shell
obd obdiag gather obproxy_log <deploy name> [options]
```
The `deploy name` parameter is the name of the deployed cluster, which can be considered an alias of the configuration file.
The following table describes the options.
| Option | Required | Data type | Default value | Description |
|---------------------|------|--------|---------------------------|-------------------------------------------------------------------|
| --from | No | string | / | The start time for log collection, in the format "yyyy-mm-dd hh:mm:ss" (quotes required), for example "1970-01-01 12:00:00". |
| --to | No | string | / | The end time for log collection, in the format "yyyy-mm-dd hh:mm:ss" (quotes required), for example "1970-01-01 13:00:00". |
| --since | No | string | / | Collects logs of the most recent period, in the format n m/h/d, where m is minutes, h is hours, and d is days; for example, 30m collects the logs of the last 30 minutes. |
| --scope | No | string | all | The type of obproxy logs to query. Options: [obproxy, obproxy_digest, obproxy_stat, obproxy_slow, obproxy_limit, all]. |
| --grep | No | string | / | The keyword to search for. |
| --encrypt | No | string | false | Whether to encrypt the returned files. Options: [true, false]. |
| --store_dir | No | string | The current path where the command is executed | The local path for storing the results. |
| --obdiag_dir | No | string | $HOME/oceanbase-diagnostic-tool | The installation directory of obdiag. |
## obd obdiag gather all
You can run this command to collect all diagnostic information of the target OceanBase cluster with one click, including OceanBase logs, host information, OceanBase stack information, OceanBase clog and slog logs, and OceanBase perf information (sample chart, perf flame graph, and pstack flame graph).
```shell
obd obdiag gather all <deploy name> [options]
```
The `deploy name` parameter is the name of the deployed cluster, which can be considered an alias of the configuration file.
The following table describes the options.
| Option | Required | Data type | Default value | Description |
|---------------------|------|--------|---------------------------|-------------------------------------------------------------------|
| --from | No | string | / | The start time for log collection, in the format "yyyy-mm-dd hh:mm:ss" (quotes required), for example "1970-01-01 12:00:00". |
| --to | No | string | / | The end time for log collection, in the format "yyyy-mm-dd hh:mm:ss" (quotes required), for example "1970-01-01 13:00:00". |
| --since | No | string | / | Collects logs of the most recent period, in the format n m/h/d, where m is minutes, h is hours, and d is days; for example, 30m collects the logs of the last 30 minutes. |
| --scope | No | string | all | The type of OceanBase logs to collect. choices=[observer, election, rootservice, all], default=all. |
| --grep | No | string | / | The keyword to search for. |
| --encrypt | No | string | false | Whether to encrypt the returned files. Options: [true, false]. |
| --store_dir | No | string | The current path where the command is executed | The local path for storing the results. |
| --obdiag_dir | No | string | $HOME/oceanbase-diagnostic-tool | The installation directory of obdiag. |
......@@ -114,29 +114,23 @@ obd cluster upgrade <deploy name> -c oceanbase-ce -V 3.1.2 --usable 7fafba0fac1e
## How do I upgrade obproxy to obproxy-ce 3.2.3?
Because the open-source obproxy component was officially renamed obproxy-ce after V3.2.3, you need to [run the script](2.how-to-upgrade-obproxy-to-obproxy-ce-3.2.3.md) as the OBD execution user to modify the meta information, and then run the following command to upgrade.
Because the open-source OBProxy component was officially renamed obproxy-ce after V3.2.3, you need to [run the script](2.how-to-upgrade-obproxy-to-obproxy-ce-3.2.3.md) as the OBD execution user to modify the meta information, and then run the following command to upgrade.
```shell
obd cluster upgrade <deploy name> -c obproxy-ce -V 3.2.3
```
Starting from V1.3.0, OBD supports deploying ODP V3.2.3 and later only under the component name obproxy-ce. However, if you upgraded OBD from an earlier version to V1.3.0 or later with the `obd update` command, you can still install ODP versions earlier than V3.2.3 under the component name obproxy (that is, OBD no longer ships the obproxy plugin library as of V1.3.0, but an existing obproxy plugin library in the local plugin library is preserved).
Starting from V1.3.0, OBD supports deploying OBProxy V3.2.3 and later only under the component name obproxy-ce. However, if you upgraded OBD from an earlier version to V1.3.0 or later with the `obd update` command, you can still install OBProxy versions earlier than V3.2.3 under the component name obproxy (that is, OBD no longer ships the obproxy plugin library as of V1.3.0, but an existing obproxy plugin library in the local plugin library is preserved).
<main id="notice" type='explain'>
  <h4>Note</h4>
  <ul>
  <li>
  <p>If the old plugins no longer work after OBD is upgraded, you can install an older version of OBD directly from the RPM package to overwrite it.</p>
  </li>
  <li>
  <p>If you have installed the new version of OBD but want to use obproxy, you can also install an OBD version earlier than V1.3.0, and after deploying obproxy, run the <code>obd update</code> command to upgrade OBD, or install the new version of OBD to overwrite it.</p>
  </li>
  </ul>
</main>
> **Note**
>
> * If the old plugins no longer work after OBD is upgraded, you can install an older version of OBD directly from the RPM package to overwrite it.
>
> * If you have installed the new version of OBD but want to use obproxy, you can also install an OBD version earlier than V1.3.0, and after deploying obproxy, run the `obd update` command to upgrade OBD, or install the new version of OBD to overwrite it.
## How do I resolve exceptions when upgrading ODP with OBD?
## How do I resolve exceptions when upgrading OBProxy with OBD?
If the following problem occurs while you upgrade ODP:
If the following problem occurs while you upgrade OBProxy:
```bash
Stop obproxy ok
......@@ -145,7 +139,7 @@ obproxy program health check ok
Connect to obproxy x
```
That is, the machine where OBD resides cannot connect to ODP. The possible causes are as follows:
That is, the machine where OBD resides cannot connect to OBProxy. The possible causes are as follows:
1. proxysys denies access from IP addresses other than 127.0.0.1, so the machine where OBD resides cannot establish a connection. In this case, first run the following command to connect to proxysys:
......@@ -153,10 +147,9 @@ Connect to obproxy x
obclient -h<obproxy_ip> -uroot@proxysys -P<obproxy_post> -p<obproxy_pwd>
```
<main id="notice" type='explain'>
  <h4>Note</h4>
  <p>If you cannot connect to proxysys with the proxysys password you set, try connecting with an empty password or with <code>proxysys</code> as the password.</p>
</main>
> **Note**
>
> If you cannot connect to proxysys with the proxysys password you set, try connecting with an empty password or with `proxysys` as the password.
Then run the `alter proxyconfig set skip_proxy_sys_private_check = true` command.
......@@ -166,51 +159,16 @@ Connect to obproxy x
If the exception is not caused by either of the above reasons, you can ask in the [Q&A forum](https://open.oceanbase.com/answer) on the official website, where professionals will answer your questions.
## How do I resolve the failure to start the ODP service after OBD is upgraded?
## How do I resolve the failure to start the OBProxy service after OBD is upgraded?
After OBD is upgraded, it initializes the ODP password. If you have set `obproxy_sys_password`, run the following command to connect to proxysys:
After OBD is upgraded, it initializes the OBProxy password. If you have set `obproxy_sys_password`, run the following command to connect to proxysys:
```bash
obclient -h<obproxy_ip> -uroot@proxysys -P<obproxy_post> -p<obproxy_pwd>
```
<main id="notice" type='explain'>
  <h4>Note</h4>
  <p>If you cannot connect to proxysys with the proxysys password you set, try connecting with an empty password or with <code>proxysys</code> as the password.</p>
</main>
> **Note**
>
> If you cannot connect to proxysys with the proxysys password you set, try connecting with an empty password or with `proxysys` as the password.
Then run the `alter proxyconfig set obproxy_sys_password = ''` command to set the proxysys password to empty, or set it to be consistent with the `obproxy_sys_password` value in the configuration file.
## How do I configure the Java environment before deploying OCP Express?
You can log in to the machine where OCP Express is to be deployed and, depending on the machine's network access, install the Java environment for OCP Express in one of the following ways.
### Online installation
On CentOS or RedHat systems, run the following command to install it:
```bash
sudo yum install java-1.8.0-openjdk
```
On Ubuntu or Debian systems, run the following commands to install it:
```bash
sudo apt-get update
sudo apt-get install openjdk-8-jre
```
### Offline installation
1. Click to download the installation package required for deployment: [x86_64 architecture](https://github.com/dragonwell-project/dragonwell8/releases/download/dragonwell-extended-8.14.15_jdk8u362-ga/Alibaba_Dragonwell_Extended_8.14.15_x64_linux.tar.gz) or [ARM architecture](https://github.com/alibaba/dragonwell8/releases/download/dragonwell-extended-8.14.15_jdk8u362-ga/Alibaba_Dragonwell_Extended_8.14.15_aarch64_linux.tar.gz).
2. Upload the downloaded package to the target machine and run the following commands in the directory where the package resides to decompress and install it.
```bash
# Decompress the package
tar -zxvf Alibaba_Dragonwell_Extended_8*.tar.gz
# Enter the decompressed directory
cd dragonwell*
# Create a symbolic link
ln -s `pwd`/bin/java /usr/bin/java
```
# How to upgrade obproxy to obproxy-ce 3.2.3
Because the open-source obproxy component was officially renamed obproxy-ce, upgrading with the following command reports the `No such package obproxy-3.2.3` error.
Because the open-source OBProxy component was officially renamed obproxy-ce, upgrading with the following command reports the `No such package obproxy-3.2.3` error.
```shell
obd cluster upgrade <deploy name> -c obproxy -V 3.2.3
```
You need to run the following **script** as the OBD execution user to modify the meta information, and then upgrade ODP with the following command.
You need to run the following **script** as the OBD execution user to modify the meta information, and then upgrade OBProxy with the following command.
```shell
obd cluster upgrade <deploy name> -c obproxy-ce -V 3.2.3
......@@ -14,7 +14,7 @@ obd cluster upgrade <deploy name> -c obproxy-ce -V 3.2.3
## Script
```shell
```bash
OBD_HOME=${OBD_HOME:-${HOME}}/.obd
obproxy_repository=${OBD_HOME}/repository/obproxy
obproxy_ce_repository=${OBD_HOME}/repository/obproxy-ce
......
......@@ -8,33 +8,13 @@
Cause: there is a port conflict in the configuration file.
Solution: you can run the obd cluster edit-config command to open the configuration file, check the port configuration, and modify it.
Solution: check the configuration and modify it.
### OBD-1001: x.x.x.x:xxx port is already used
Cause: the port is already in use.
Solution: check the configuration and change the port. You can choose any of the following methods based on your situation.
- Method 1: if you deploy with a configuration file, run the `obd cluster edit-config` command to modify the corresponding port configuration in the configuration file. After the modification, continue the deployment by running the `obd cluster start` command.
<main id="notice" type='explain'>
  <h4>Note</h4>
  <p>For details about the commands mentioned in Method 1, see <a href='3.obd-command/1.cluster-command-groups.md'>Cluster command group</a>.</p>
</main>
- Method 2: if you deploy with the `obd demo` command, you can specify the port with the following command. Specifying the mysql_port of the oceanbase-ce component is used as an example here.
```shell
obd demo --oceanbase-ce.mysql_port=3881
```
<main id="notice" type='explain'>
  <h4>Note</h4>
  <p>For details about the command mentioned in Method 2, see <a href='3.obd-command/0.obd-demo.md'>Quick deployment command</a>.</p>
</main>
- Method 3: if you deploy through the OBD GUI, you can modify the corresponding port on the **Cluster Configuration** page.
Solution: check the configuration and change the port.
### OBD-1002: Fail to init x.x.x.x path
......@@ -116,93 +96,7 @@
Cause: the ulimits configuration does not meet the requirements.
Solution: modify the corresponding files in the `/etc/security/limits.d/` directory and `/etc/security/limits.conf` so that the requirements are met.
### OBD-1008: (x.x.x.x) failed to get fs.aio-max-nr and fs.aio-nr
Cause: OBD cannot obtain the AIO configuration from the server.
Solution: check whether the current user has permission to view fs.aio-max-nr/fs.aio-nr.
```bash
cat /proc/sys/fs/aio-max-nr /proc/sys/fs/aio-nr
```
### OBD-1009: x.x.x.x xxx need config: xxx
Cause: the related component of the service is missing the corresponding configuration.
Solution: run the following command to open the configuration file, add the prompted configuration items, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy_name>
```
### OBD-1010: x.x.x.x No such net interface: xxx
Causes:
1. devname cannot be obtained on the CLI side.
2. devname cannot be obtained on the GUI side.
Solutions:
For case 1, run the following command to open the configuration file, add or modify devname, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy_name>
```
For case 2, set devname in **Cluster Configuration** -> **More Configurations** on the GUI.
### OBD-1011: (x.x.x.x) Insufficient AIO remaining (Avail: xxx, Need: xxx), The recommended value of fs.aio-max-nr is 1048576
Cause: the number of available AIOs in the system is less than the number the database requires.
Solution: run the following command to modify the Linux aio-max-nr value.
```bash
echo 1048576 > /proc/sys/fs/aio-max-nr
```
### OBD-1012: xxx
Causes:
1. Type conversion error, for example, a string is passed to an int parameter.
2. A parameter value is out of range, for example, the valid range of `rpc_port` is 1025 to 65535, and an error is reported if the configured `rpc_port` is outside this range.
3. A parameter is missing, for example, a key parameter such as `home_path` is not configured.
Solutions:
For case 1, check the parameter type and modify it.
For case 2, check the parameter value and modify it.
For case 3, check the parameter configuration and configure any missing parameters.
### OBD-1013: xxx@x.x.x.x connect failed: xxx
Causes: there are many reasons for this error. The two most common ones are as follows.
1. The username or password is incorrect.
2. The connection timed out.
Solutions:
For case 1, run the following command to open the configuration file, add or modify the username and password, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy_name>
```
For case 2, check the corresponding server configuration, for example, whether the port is correct and whether the firewall is enabled.
If neither of the above causes applies after troubleshooting, you can ask in the [Q&A forum](https://ask.oceanbase.com/) on the official website, where professionals will answer your questions.
Solution: modify the corresponding files in the /etc/security/limits.d/ directory and /etc/security/limits.conf so that the requirements are met.
## OceanBase deployment errors
......@@ -218,16 +112,15 @@ echo 3 > /proc/sys/vm/drop_caches
If memory is still insufficient, adjust `memory_limit` and `system_memory` through `edit-config`. Normally, `memory_limit/3 ≤ system_memory ≤ memory_limit/2`.
<main id="notice" type='notice'>
  <h4>Notice</h4>
  <p><code>memory_limit</code> cannot be less than 8G; that is, your available memory must be greater than or equal to 8G.</p>
</main>
> **Notice**
>
> `memory_limit` cannot be less than 8G; that is, your available memory must be greater than or equal to 8G.
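For example, with `memory_limit` set to 30G, `system_memory` would normally be set between 10G (30G/3) and 15G (30G/2).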
### OBD-2001: server can not migrate in
Cause: the number of available Units is less than `--unit-num`.
Cause: the number of available units is less than `--unit-num`.
Solution: modify the `--unit-num` value that you pass in. You can run the following command to check the number of currently available Units.
Solution: modify the `--unit-num` value that you pass in. You can run the following command to check the number of currently available units.
```sql
select count(*) num from oceanbase.__all_server where status = 'active' and start_service_time > 0
......@@ -257,10 +150,9 @@ select count(*) num from oceanbase.__all_server where status = 'active' and star
- If you deploy manually without changing the configuration, the disk usage must not exceed 64%.
<main id="notice" type='notice'>
  <h4>Notice</h4>
  <p>When redo_dir and data_dir are on the same disk, the space to be occupied by the datafile is included when the disk usage is calculated.</p>
</main>
> **Notice**
>
> When redo_dir and data_dir are on the same disk, the space to be occupied by the datafile is included when the disk usage is calculated.
### OBD-2004: Invalid: xxx is not a single server configuration item
......@@ -268,98 +160,6 @@ select count(*) num from oceanbase.__all_server where status = 'active' and star
Solution: move the configuration items that you need to modify under global.
### OBD-2005: Failed to register cluster. xxx may have been registered in xxx
Cause: failed to register the cluster, or the cluster has already been registered.
Solutions: there are three solutions.
- Run the `obd cluster edit-config` command to open the configuration file and set the `obconfig_url` configuration item to the correct Config Server.
- If you confirm that the Config Server is correct and want to force an overwrite, add the `-f` option when running the `obd cluster start` command to overwrite the registered cluster.
- If you confirm that the Config Server is correct, you can also run the `obd cluster edit-config` command to open the configuration file and change the `appname` or `cluster_id` configuration item to deploy under a different cluster name.
### OBD-2006: x.x.x.x has more than one network interface. Please set `devname` for x.x.x.x
Causes:
1. devname cannot be obtained on the CLI side.
2. devname cannot be obtained on the GUI side.
Solutions:
For case 1, run the following command to open the configuration file, add or modify devname, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy_name>
```
For case 2, set devname in **Cluster Configuration** -> **More Configurations** on the GUI.
### OBD-2007: x.x.x.x xxx fail to ping x.x.x.x. Please check configuration `devname`
Cause: the machines cannot ping each other.
Solutions:
1. Check whether the NIC configuration matches the actual environment.
2. Check whether the network between the nodes is unobstructed.
### OBD-2008: Cluster clocks are out of sync
Cause: the clocks of the servers in the cluster are out of sync.
Solution: synchronize the clocks of all servers.
### OBD-2009: x.x.x.x: when production_mode is True, xxx can not be less then xxx
Cause: when production mode is enabled, configuration items such as `__min_full_resource_pool_mem` and `memory_limit` cannot be less than their fixed minimum values.
Solutions:
- For non-production deployments, run the following command to open the configuration file, set the `production_mode` configuration item to `False`, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy_name>
```
- For production deployments, run the following command to open the configuration file, increase the `__min_full_resource_pool_mem` and `memory_limit` configuration items above the fixed minimum values, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy_name>
```
### OBD-2010: x.x.x.x: system_memory too large. system_memory must be less than memory_limit/memory_limit_percentage
Cause: the `system_memory` configuration item is too large. Its value must be less than `memory_limit` (or `memory_limit_percentage` * `total_memory`).
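For example, assuming a server with 256 GB of total memory, `memory_limit_percentage` set to 80, and no explicit `memory_limit`, `system_memory` must stay below 0.8 × 256 GB ≈ 204 GB.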
Solutions:
1. CLI: run the following command to open the configuration file, modify the `system_memory` configuration item, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy_name>
```
2. GUI: set `system_memory` in **Cluster Configuration** -> **More Configurations** on the GUI.
### OBD-2011: x.x.x.x: fail to get memory info.\nPlease configure 'memory_limit' manually in configuration file
Cause: the memory information cannot be obtained from the server.
Solutions:
1. CLI: run the following command to open the configuration file, configure `memory_limit`, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy_name>
```
2. GUI: set `memory_limit` in **Cluster Configuration** -> **More Configurations** on the GUI.
## Test-related errors
### OBD-3000: parse cmd failed
......@@ -404,7 +204,7 @@ obd cluster edit-config <deploy_name>
If none of the above methods solves the problem, ask in the [Q&A forum](https://ask.oceanbase.com/) on the official website, where professionals will answer your questions.
## OBAgent errors
## obagent errors
### OBD-4000: Fail to reload x.x.x.x
......@@ -429,109 +229,3 @@ obd cluster edit-config <deploy_name>
```
- Log in to the target machine and grant the current account write permission on the corresponding directory.
## ODP errors
### OBD-4100: x.x.x.x need config "rs_list" or "obproxy_config_server_url"
Cause: the rs_list/obproxy_config_server_url information cannot be obtained for the server.
Solution: run the following command to open the configuration file, add or modify the rs_list/obproxy_config_server_url configuration item, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy name>
```
### OBD-4101: failed to start x.x.x.x obproxy: xxx
Cause: failed to start ODP.
Solution: further analysis is required based on the error message.
## Grafana errors
### OBD-4200: x.x.x.x grafana admin password should not be 'admin'
Cause: the password of the admin user of the grafana component must not be admin.
Solution: run the following command to open the configuration file, add or modify the password, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy name>
```
### OBD-4201: x.x.x.x grafana admin password length should not be less than 5
Cause: the password of the admin user of the grafana component must be at least 5 characters long.
Solution: run the following command to open the configuration file, add or modify the password, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy name>
```
## OCP Express errors
### OBD-4300: x.x.x.x: failed to query java version, you may not have java installed
Cause: OBD cannot find Java on the server.
Solutions:
1. Install Java. For detailed steps, see **How do I configure the Java environment before deploying OCP Express** in the [FAQ](5.faq/1.faq.md).
2. If Java is already installed, you can configure `java_bin` to specify the path of the Java executable.
### OBD-4301: x.x.x.x: ocp-express need java with version xxx
Cause: the Java version on the server is too low.
Solution: install the Java version indicated in the message. If the target Java version is already installed, you can configure `java_bin` to specify the path of the Java executable.
### OBD-4302: x.x.x.x not enough memory. (Free: xxx, Need: xxx)
Cause: there is not enough memory on the server.
Solutions: there are several solutions.
- If the machine itself does not have enough memory, run the `obd cluster edit-config` command to open the configuration file and reduce the `memory_limit` configuration, or switch to another machine with sufficient memory.
- If the machine's remaining memory is insufficient and there is cache that can be released, you can first try to release it with the following command.
```shell
echo 3 > /proc/sys/vm/drop_caches
```
### OBD-4303: x.x.x.x xxx not enough disk space. (Avail: xxx, Need: xxx)
Cause: there is not enough disk space on the server.
Solution: check and clean up the disk yourself.
### OBD-4304: OCP express xxx needs to use xxx with version xxx or above
Cause: deploying the ocp-express component requires the corresponding versions of its dependent components.
Solution: run the following command to open the configuration file, change the indicated component to the required version, and then run the corresponding restart command based on the output.
```bash
obd cluster edit-config <deploy_name>
```
### OBD-4305: There is not enough xxx for ocp meta tenant
Cause: there is not enough log disk space or memory to create the OCP meta tenant.
Solutions:
- If you deployed in **Maximum Occupancy** mode on the GUI, or with the `obd cluster autodeploy` command on the CLI, it is recommended to clean up the disk and memory and try again.
- If you configured the cluster specifications, increase the corresponding configuration items of the `oceanbase-ce` component based on the error message, for example, the memory-related items `memory_limit`/`memory_limit_percentage` and the log-disk-related items `log_disk_size`/`log_disk_percentage`.
## SQL errors
OBD-5000: sql execute failed
Cause: SQL execution failed.
Solution: determine the solution based on the specific situation.
......@@ -30,7 +30,10 @@ oceanbase-ce:
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
# root_password: # root user password, can be empty
# Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy.
# appname: obcluster
# root_password: # root user password
# proxyro_password: # proxyro user password, consistent with obproxy's observer_sys_password, can be empty
# ocp_meta_db: ocp_express # The database name of ocp express meta
# ocp_meta_username: meta # The username of ocp express meta
# ocp_meta_password: '' # The password of ocp express meta
......
......@@ -30,7 +30,10 @@ oceanbase-ce:
max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
skip_proxy_sys_private_check: true
enable_strict_kernel_release: false
# Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy.
# appname: obcluster
# root_password: # root user password
# proxyro_password: # proxyro user password, consistent with obproxy's observer_sys_password, can be empty
# ocp_meta_db: ocp_express # The database name of ocp express meta
# ocp_meta_username: meta # The username of ocp express meta
# ocp_meta_password: '' # The password of ocp express meta
......
......@@ -150,6 +150,10 @@ obagent:
# Monitor status for OceanBase Database. Active is to enable. Inactive is to disable. The default value is active. When you deploy a cluster automatically, OBD decides whether to enable this parameter based on depends.
# ob_monitor_status: active
ocp-express:
depends:
- oceanbase-ce
- obproxy-ce
- obagent
servers:
- name: server1
ip: 192.168.1.1
......
......@@ -150,6 +150,10 @@ obagent:
# Monitor status for OceanBase Database. Active is to enable. Inactive is to disable. The default value is active. When you deploy a cluster automatically, OBD decides whether to enable this parameter based on depends.
# ob_monitor_status: active
ocp-express:
depends:
- oceanbase-ce
- obproxy-ce
- obagent
servers:
- name: server1
ip: 192.168.1.1
......
......@@ -82,30 +82,6 @@ obproxy-ce:
enable_strict_kernel_release: false
# obproxy_sys_password: # obproxy sys user password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends.
# observer_sys_password: # proxyro user password, consistent with oceanbase-ce's proxyro_password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends.
obproxy-ce:
depends:
- oceanbase-ce
servers:
- 192.168.1.5
global:
# The working directory for obproxy. Obproxy is started under this directory. This is a required field.
home_path: /root/obproxy
skip_proxy_sys_private_check: true
enable_strict_kernel_release: false
# External port. The default value is 2883.
# listen_port: 2883
# The Prometheus port. The default value is 2884.
# prometheus_listen_port: 2884
# rs_list is the root server list for observers. The default root server is the first server in the zone.
# The format for rs_list is observer_ip:observer_mysql_port;observer_ip:observer_mysql_port.
# Ignore this value in autodeploy mode.
# rs_list: 127.0.0.1:2881
# Cluster name for the proxy OceanBase Database. The default value is obcluster. This value must be set to the same with the appname for OceanBase Database.
# cluster_name: obcluster
# Password for obproxy system tenant. The default value is empty.
# obproxy_sys_password:
# Password for proxyro. proxyro_password must be the same with proxyro_password. The default value is empty.
# observer_sys_password:
obagent:
# Set dependent components for the component.
# When the associated configurations are not done, OBD will automatically get the these configurations from the dependent components.
......
......@@ -98,62 +98,52 @@ obagent:
depends:
- oceanbase-ce
global:
# The working directory for obagent. obagent is started under this directory. This is a required field.
home_path: /root/obagent
# The port of monitor agent. The default port number is 8088.
monagent_http_port: 8088
# The port of manager agent. The default port number is 8089.
mgragent_http_port: 8089
# The port that pulls and manages the metrics. The default port number is 8088.
server_port: 8088
# Debug port for pprof. The default port number is 8089.
pprof_port: 8089
# Log path. The default value is log/monagent.log.
log_path: log/monagent.log
# The log level of manager agent.
mgragent_log_level: info
    # The total log size of the manager agent. Log size is measured in Megabytes. The default value is 30M.
mgragent_log_max_size: 30
# Expiration time for manager agent logs. The default value is 30 days.
mgragent_log_max_days: 30
# The maximum number for manager agent log files. The default value is 15.
mgragent_log_max_backups: 15
# The log level of monitor agent.
monagent_log_level: info
    # The total log size of the monitor agent. Log size is measured in Megabytes. The default value is 200M.
monagent_log_max_size: 200
# Expiration time for monitor agent logs. The default value is 30 days.
monagent_log_max_days: 30
# The maximum number for monitor agent log files. The default value is 15.
monagent_log_max_backups: 15
# Encryption method. OBD supports aes and plain. The default value is plain.
crypto_method: plain
# Path to store the crypto key. The default value is conf/.config_secret.key.
# crypto_path: conf/.config_secret.key
# Size for a single log file. Log size is measured in Megabytes. The default value is 30M.
log_size: 30
# Expiration time for logs. The default value is 7 days.
log_expire_day: 7
# The maximum number for log files. The default value is 10.
log_file_count: 10
# Whether to use local time for log files. The default value is true.
# log_use_localtime: true
# Whether to enable log compression. The default value is true.
# log_compress: true
# Username for HTTP authentication. The default value is admin.
http_basic_auth_user: admin
# Password for HTTP authentication. The default value is root.
http_basic_auth_password: root
# Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the ocp_agent_monitor_password in oceanbase-ce.
# monitor_password:
# Username for debug service. The default value is admin.
pprof_basic_auth_user: admin
# Password for debug service. The default value is root.
pprof_basic_auth_password: root
# Monitor username for OceanBase Database. The user must have read access to OceanBase Database as a system tenant. The default value is root.
monitor_user: root
# Monitor password for OceanBase Database. The default value is empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the root_password in oceanbase-ce.
monitor_password:
# The SQL port for observer. The default value is 2881. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the mysql_port in oceanbase-ce.
# sql_port: 2881
sql_port: 2881
# The RPC port for observer. The default value is 2882. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the rpc_port in oceanbase-ce.
# rpc_port: 2882
rpc_port: 2882
# Cluster name for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the appname in oceanbase-ce.
# cluster_name: obcluster
cluster_name: obcluster
# Cluster ID for OceanBase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the cluster_id in oceanbase-ce.
# cluster_id: 1
# The redo dir for Oceanbase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the redo_dir in oceanbase-ce.
# ob_log_path: /root/observer/store
# The data dir for Oceanbase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the data_dir in oceanbase-ce.
# ob_data_path: /root/observer/store
# The work directory for Oceanbase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the home_path in oceanbase-ce.
# ob_install_path: /root/observer
# The log path for Oceanbase Database. When a depends exists, OBD gets this value from the oceanbase-ce of the depends. The value is the same as the {home_path}/log in oceanbase-ce.
# observer_log_path: /root/observer/log
cluster_id: 1
    # Monitor status for OceanBase Database. Active is to enable. Inactive is to disable. The default value is active. When you deploy a cluster automatically, OBD decides whether to enable this parameter based on depends.
ob_monitor_status: active
# Synchronize the obagent-related information to the specified path of the remote host, as the targets specified by `file_sd_config` in the Prometheus configuration.
# For prometheus that depends on obagent, it can be specified to $home_path/targets of prometheus.
# For independently deployed prometheus, specify the files to be collected by setting `config` -> `scrape_configs` -> `file_sd_configs` -> `files`. For details, please refer to prometheus-only-example.yaml.
# target_sync_configs:
# - host: 192.168.1.1
# target_dir: /root/prometheus/targets
# username: your username
    #     password: your password if needed
    #     key_file: your ssh-key file path if needed
    #     port: your ssh port, default 22
    #     timeout: ssh connection timeout (seconds), default 30
\ No newline at end of file
# Monitor status for your host. Active is to enable. Inactive is to disable. The default value is active.
host_monitor_status: active
# Whether to disable the basic authentication for HTTP service. True is to disable. False is to enable. The default value is false.
disable_http_basic_auth: false
# Whether to disable the basic authentication for the debug interface. True is to disable. False is to enable. The default value is false.
disable_pprof_basic_auth: false
\ No newline at end of file
......@@ -64,6 +64,7 @@ def install_repo(plugin_context, obd_home, install_repository, install_plugin, c
clients = plugin_context.clients
servers = cluster_config.servers
is_lib_repo = install_repository.name.endswith("-libs")
is_utils_repo = install_repository.name.endswith("-utils")
home_path_map = {}
for server in servers:
server_config = cluster_config.get_server_conf(server)
......@@ -85,6 +86,8 @@ def install_repo(plugin_context, obd_home, install_repository, install_plugin, c
else:
if is_lib_repo:
install_path = os.path.join(remote_home_path, 'lib')
elif is_utils_repo:
install_path = os.path.join(remote_home_path, 'bin')
else:
install_path = remote_home_path
client.execute_command('mkdir -p {}'.format(install_path))
......@@ -131,34 +134,64 @@ def install_repo(plugin_context, obd_home, install_repository, install_plugin, c
stdio.stop_loading('succeed')
# check lib
lib_check = True
stdio.start_loading('Remote %s repository lib check' % check_repository)
for server in servers:
stdio.verbose('%s %s repository lib check' % (server, check_repository))
client = clients[server]
remote_home_path = home_path_map[server]
need_libs = set()
client.add_env('LD_LIBRARY_PATH', '%s/lib:' % remote_home_path, True)
for file_item in check_file_map.values():
if file_item.type == InstallPlugin.FileItemType.BIN:
remote_file_path = os.path.join(remote_home_path, file_item.target_path)
ret = client.execute_command('ldd %s' % remote_file_path)
libs = re.findall('(/?[\w+\-/]+\.\w+[\.\w]+)[\s\\n]*\=\>[\s\\n]*not found', ret.stdout)
if not libs:
libs = re.findall('(/?[\w+\-/]+\.\w+[\.\w]+)[\s\\n]*\=\>[\s\\n]*not found', ret.stderr)
if not libs and not ret:
stdio.error('Failed to execute repository lib check.')
return
need_libs.update(libs)
if need_libs:
for lib in need_libs:
getattr(stdio, msg_lv, '%s %s require: %s' % (server, check_repository, lib))
lib_check = False
client.add_env('LD_LIBRARY_PATH', '', True)
if msg_lv == 'error':
stdio.stop_loading('succeed' if lib_check else 'fail')
elif msg_lv == 'warn':
stdio.stop_loading('succeed' if lib_check else 'warn')
return plugin_context.return_true(checked=lib_check)
def check_lib():
lib_check = True
stdio.start_loading('Remote %s repository lib check' % check_repository)
for server in servers:
stdio.verbose('%s %s repository lib check' % (server, check_repository))
client = clients[server]
remote_home_path = home_path_map[server]
need_libs = set()
client.add_env('LD_LIBRARY_PATH', '%s/lib:' % remote_home_path, True)
for file_item in check_file_map.values():
if file_item.type == InstallPlugin.FileItemType.BIN:
remote_file_path = os.path.join(remote_home_path, file_item.target_path)
ret = client.execute_command('ldd %s' % remote_file_path)
libs = re.findall('(/?[\w+\-/]+\.\w+[\.\w]+)[\s\\n]*\=\>[\s\\n]*not found', ret.stdout)
if not libs:
libs = re.findall('(/?[\w+\-/]+\.\w+[\.\w]+)[\s\\n]*\=\>[\s\\n]*not found', ret.stderr)
if not libs and not ret:
stdio.error('Failed to execute repository lib check.')
return
need_libs.update(libs)
if need_libs:
for lib in need_libs:
                    getattr(stdio, msg_lv)('%s %s require: %s' % (server, check_repository, lib))
lib_check = False
client.add_env('LD_LIBRARY_PATH', '', True)
if msg_lv == 'error':
stdio.stop_loading('succeed' if lib_check else 'fail')
elif msg_lv == 'warn':
stdio.stop_loading('succeed' if lib_check else 'warn')
return plugin_context.return_true(checked=lib_check)
# check utils
def check_utils():
utils_check = True
for server in servers:
client = clients[server]
remote_home_path = home_path_map[server]
need_utils = set()
for file_item in check_file_map.values():
if file_item.type == InstallPlugin.FileItemType.BIN:
utils_file_path = os.path.join(remote_home_path, 'bin')
remote_file_path = os.path.join(utils_file_path, file_item.target_path)
                    ret = client.execute_command('ls -1 %s' % remote_file_path)
                    if not ret:
                        # `ls` failing means the util binary is missing on the remote host.
                        need_utils.add(file_item.target_path)
if need_utils:
for util in need_utils:
                    getattr(stdio, msg_lv)('%s %s require: %s' % (server, check_repository, util))
utils_check = False
return plugin_context.return_true(checked=utils_check)
if is_utils_repo:
return check_utils()
else:
return check_lib()
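The library check above relies on `ldd` reporting unresolved shared objects as "xxx => not found", while the new utils check simply probes for the binary with `ls -1`. A minimal standalone sketch of the ldd-parsing technique, outside OBD's plugin machinery (the binary path is only an illustrative assumption):

import re
from subprocess import Popen, PIPE

def missing_libs(binary_path):
    # Run ldd and collect every shared object the dynamic loader cannot resolve.
    p = Popen('ldd %s' % binary_path, shell=True, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()
    output = (stdout + stderr).decode(errors='ignore')
    return re.findall(r'(/?[\w+\-/]+\.\w+[\.\w]+)[\s\n]*=>[\s\n]*not found', output)

# Example with a hypothetical path; an empty list means no missing libraries.
print(missing_libs('/root/observer/bin/observer'))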
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import json
import sys
import time
import uuid
import resource
import hashlib
from tool import NetUtil
from ssh import LocalClient
from const import VERSION, REVISION
shell_command_map = {
"host_type": 'systemd-detect-virt',
"_cpu_physical_core_num": 'cat /proc/cpuinfo | grep "physical id" | sort | uniq | wc -l',
"_per_physical_core_num": 'cat /proc/cpuinfo | grep "cpu cores" | cut -f2 -d: | uniq',
"cpu_logical_cores": 'cat /proc/cpuinfo | grep "processor" | wc -l',
"cpu_model_name": 'cat /proc/cpuinfo | grep name | cut -f2 -d: | uniq',
"cpu_frequency": 'cat /proc/cpuinfo | grep MHz | cut -f2 -d: | uniq',
"cpu_flags": 'cat /proc/cpuinfo | grep flags | cut -f2 -d: | uniq',
"memory_total": 'cat /proc/meminfo | grep MemTotal | cut -f2 -d: | uniq',
"memory_free": 'cat /proc/meminfo | grep MemFree | cut -f2 -d: | uniq',
"memory_avaiable": 'cat /proc/meminfo | grep MemAvailable | cut -f2 -d: | uniq',
"os_name": 'cat /etc/os-release | grep "^ID=" | cut -f2 -d=',
"os_release": 'cat /etc/os-release | grep "^VERSION_ID=" | cut -f2 -d='
}
def shell_command(func):
def wrapper(*args, **kwargs):
name = func.__name__
command = shell_command_map.get(name)
        assert command, f"{name} is not in shell_command_map"
res = LocalClient.execute_command(command)
kwargs["bash_result"] = res.stdout.strip() if res.code == 0 else None
return func(*args, **kwargs)
return wrapper
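# Illustrative only (not part of the original module): the decorator above resolves the
# shell command by the wrapped function's name and injects its trimmed stdout as
# kwargs['bash_result'], so adding a new probe only needs a map entry plus a method.
# The 'kernel_release' key and the 'uname -r' command are assumptions for this sketch.
shell_command_map["kernel_release"] = 'uname -r'
class KernelInfo:
    @staticmethod
    @shell_command
    def kernel_release(*args, **kwargs):
        return kwargs["bash_result"]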
class BaseInfo:
@staticmethod
def reporter():
return 'obd'
@staticmethod
def report_time():
return time.time()
@staticmethod
def event_id():
return str(uuid.uuid4())
class HostInfo:
@staticmethod
def host_ip_hash():
sha1 = hashlib.sha1()
sha1.update(NetUtil.get_host_ip().encode())
return sha1.hexdigest()
@staticmethod
@shell_command
def host_type(*args, **kwargs):
return kwargs["bash_result"]
class CpuInfo:
@staticmethod
@shell_command
def _cpu_physical_core_num(*args, **kwargs):
return int(kwargs['bash_result'])
@staticmethod
@shell_command
def _per_physical_core_num(*args, **kwargs):
return int(kwargs['bash_result'])
@staticmethod
def cpu_physical_cores(*args, **kwargs):
return CpuInfo._cpu_physical_core_num() * CpuInfo._per_physical_core_num()
@staticmethod
@shell_command
def cpu_logical_cores(*args, **kwargs):
return kwargs["bash_result"]
@staticmethod
@shell_command
def cpu_model_name(*args, **kwargs):
return kwargs["bash_result"]
@staticmethod
@shell_command
def cpu_frequency(*args, **kwargs):
return kwargs["bash_result"]
@staticmethod
@shell_command
def cpu_flags(*args, **kwargs):
return kwargs["bash_result"]
class MemInfo:
@staticmethod
@shell_command
def memory_total(*args, **kwargs):
return kwargs["bash_result"]
@staticmethod
@shell_command
def memory_free(*args, **kwargs):
return kwargs["bash_result"]
@staticmethod
@shell_command
def memory_avaiable(*args, **kwargs):
return kwargs["bash_result"]
class DiskInfo:
@staticmethod
def get_disks_info():
        # Parse `df -h` output into device name, total size, used size and a hash of the mount point.
        data = []
        for line in LocalClient.execute_command("df -h | awk '{if(NR>1)print}'").stdout.strip().split('\n'):
            fields = [field for field in line.split(' ') if field != '']
            disk_info = {}
            disk_info['deviceName'] = fields[0]
            disk_info['total'] = fields[1]
            disk_info['used'] = fields[2]
            # Hash each mount point separately so the value only reflects this disk.
            sha1 = hashlib.sha1()
            sha1.update(fields[5].encode())
            disk_info['mountHash'] = sha1.hexdigest()
            data.append(disk_info)
        return data
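# For reference, each data line of `df -h` (e.g. '/dev/vda1 100G 23G 73G 24% /') yields one
# entry shaped like the sketch below; the concrete values here are invented.
_sample_disk_entry = {'deviceName': '/dev/vda1', 'total': '100G', 'used': '23G',
                      'mountHash': hashlib.sha1('/'.encode()).hexdigest()}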
class OsInfo:
@staticmethod
@shell_command
def os_name(*args, **kwargs):
return kwargs["bash_result"].replace('\"', '')
@staticmethod
@shell_command
def os_release(*args, **kwargs):
return kwargs["bash_result"].replace('\"', '')
class MachineInfo:
@staticmethod
def get_nofile():
res = resource.getrlimit(resource.RLIMIT_NOFILE)
return {'nofileSoft': res[0], 'nofileHard': res[1]}
class ObdInfo:
@staticmethod
def obd_type():
return sys.argv[0]
@staticmethod
def obd_version(*args, **kwargs):
return VERSION
@staticmethod
def obd_revision(*args, **kwargs):
return REVISION
def telemetry_machine_data():
data = {}
data['reporter'] = BaseInfo.reporter()
data['reportTime'] = BaseInfo.report_time()
data['eventId'] = BaseInfo.event_id()
data['hosts'] = []
_hosts = dict(basic={}, cpu={}, memory={}, disks=[], os={}, ulimit={})
_hosts['basic']['hostHash'] = HostInfo.host_ip_hash()
_hosts['basic']['hostType'] = HostInfo.host_type()
_hosts['cpu']['physicalCores'] = CpuInfo.cpu_physical_cores()
_hosts['cpu']['logicalCores'] = CpuInfo.cpu_logical_cores()
_hosts['cpu']['modelName'] = CpuInfo.cpu_model_name()
_hosts['cpu']['frequency'] = CpuInfo.cpu_frequency()
_hosts['cpu']['flags'] = CpuInfo.cpu_flags()
_hosts['memory']['total'] = MemInfo.memory_total()
_hosts['memory']['free'] = MemInfo.memory_free()
_hosts['memory']['avaiable'] = MemInfo.memory_avaiable()
_hosts['disks'] = DiskInfo.get_disks_info()
_hosts['os']['os'] = OsInfo.os_name()
_hosts['os']['version'] = OsInfo.os_release()
_hosts['ulimit'] = MachineInfo.get_nofile()
data['hosts'].append(_hosts)
data['instances'] = []
obd_info = {}
obd_info['type'] = ObdInfo.obd_type()
obd_info['version'] = ObdInfo.obd_version()
obd_info['revision'] = ObdInfo.obd_revision()
data['instances'].append(obd_info)
return data
def telemetry_info_collect(plugin_context, *args, **kwargs):
repositories = plugin_context.repositories
repository = kwargs.get('target_repository')
options = plugin_context.options
stdio = plugin_context.stdio
cluster_config = plugin_context.cluster_config
post_data = telemetry_machine_data()
for repository in repositories:
data = {}
data['type'] = repository.name
data['version'] = repository.version
data['revision'] = repository.hash
post_data['instances'].append(data)
    for component, _ in json.loads(getattr(options, 'data', '{}')).items():
for plugin_name, _ in _.items():
data = {}
data['type'] = 'plugins'
data['component'] = component
data['name'] = plugin_name
data['runTime'] = _['time']
data['runResult'] = _['result']
post_data['instances'].append(data)
if repository.name in ['oceanbase', 'oceanbase-ce']:
_ = cluster_config.get_global_conf()
data = {}
data['type'] = 'config'
data['name'] = repository.name
data['memoryLimit'] = _.get('memory_limit', '0') if _ else '0'
data['cpuCount'] = _.get('cpu_count', '0') if _ else '0'
data['syslogLevel'] = _.get('syslog_level', 'INFO') if _ else 'INFO'
post_data['instances'].append(data)
return plugin_context.return_true(post_data=json.dumps(post_data, indent=4))
\ No newline at end of file
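For orientation, the assembled post_data is a single JSON document shaped roughly as below, shown here as a trimmed Python dict with invented values, assuming one oceanbase-ce repository; key names follow the code above (including the 'avaiable' spelling used in MemInfo):

post_data_example = {
    'reporter': 'obd',
    'reportTime': 1690000000.0,
    'eventId': 'a-uuid4-string',
    'hosts': [{
        'basic': {'hostHash': '<sha1 of host ip>', 'hostType': 'kvm'},
        'cpu': {'physicalCores': 16, 'logicalCores': '32', 'modelName': '...', 'frequency': '...', 'flags': '...'},
        'memory': {'total': '...', 'free': '...', 'avaiable': '...'},
        'disks': [{'deviceName': '/dev/vda1', 'total': '100G', 'used': '23G', 'mountHash': '...'}],
        'os': {'os': 'centos', 'version': '7'},
        'ulimit': {'nofileSoft': 655350, 'nofileHard': 655350},
    }],
    'instances': [
        {'type': '/usr/bin/obd', 'version': '<obd version>', 'revision': '<obd revision>'},
        {'type': 'oceanbase-ce', 'version': '<repo version>', 'revision': '<repo hash>'},
        {'type': 'plugins', 'component': 'oceanbase-ce', 'name': 'start', 'runTime': 3.2, 'runResult': True},
        {'type': 'config', 'name': 'oceanbase-ce', 'memoryLimit': '6G', 'cpuCount': '4', 'syslogLevel': 'INFO'},
    ],
}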
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import requests
from const import TELEMETRY_URL
from tool import timeout
def telemetry_post(plugin_context, *args, **kwargs):
stdio = plugin_context.stdio
data = kwargs.get('data', {})
if data:
stdio.verbose('post data: %s' % data)
with timeout(30):
requests.post(url=TELEMETRY_URL, data=data)
return plugin_context.return_true()
else:
        stdio.verbose('nothing to post')
return plugin_context.return_false()
\ No newline at end of file
......@@ -20,12 +20,19 @@
from __future__ import absolute_import, division, print_function
from tool import ConfigUtil
def generate_config(plugin_context, auto_depend=False, generate_check=True, return_generate_keys=False, *args, **kwargs):
def generate_config(plugin_context, auto_depend=False, generate_check=True, return_generate_keys=False, only_generate_password=False, *args, **kwargs):
if return_generate_keys:
return plugin_context.return_true(generate_keys=['login_password'])
generate_keys = ['login_password']
return plugin_context.return_true(generate_keys=generate_keys)
cluster_config = plugin_context.cluster_config
generate_random_password(cluster_config)
if only_generate_password:
return plugin_context.return_true()
stdio = plugin_context.stdio
success = True
have_depend = False
......@@ -45,9 +52,6 @@ def generate_config(plugin_context, auto_depend=False, generate_check=True, retu
elif login_password == "admin":
stdio.error("Grafana : configuration 'login_password' in configuration file should not be 'admin'")
success = False
else:
generate_configs['global']['login_password'] = 'oceanbase'
cluster_config.update_global_conf('login_password', 'oceanbase', False)
if not success:
stdio.stop_loading('fail')
......@@ -61,4 +65,10 @@ def generate_config(plugin_context, auto_depend=False, generate_check=True, retu
cluster_config.add_depend_component(depend)
stdio.stop_loading('succeed')
plugin_context.return_true()
\ No newline at end of file
plugin_context.return_true()
def generate_random_password(cluster_config):
global_config = cluster_config.get_original_global_conf()
if 'login_password' not in global_config:
cluster_config.update_global_conf('login_password', ConfigUtil.get_random_pwd_by_total_length())
\ No newline at end of file
......@@ -20,12 +20,24 @@
from __future__ import absolute_import, division, print_function
from tool import ConfigUtil
def generate_config(plugin_context, auto_depend=False, return_generate_keys=False, *args, **kwargs):
def generate_config(plugin_context, auto_depend=False, return_generate_keys=False, only_generate_password=False, generate_password=True, *args, **kwargs):
if return_generate_keys:
return plugin_context.return_true(generate_keys=['ob_monitor_status'])
generate_keys = []
if generate_password:
generate_keys += ['http_basic_auth_password']
if not only_generate_password:
generate_keys += ['ob_monitor_status']
return plugin_context.return_true(generate_keys=generate_keys)
cluster_config = plugin_context.cluster_config
if generate_password:
generate_random_password(cluster_config)
if only_generate_password:
return plugin_context.return_true()
stdio = plugin_context.stdio
have_depend = False
depends = ['oceanbase', 'oceanbase-ce']
......@@ -63,3 +75,9 @@ def generate_config(plugin_context, auto_depend=False, return_generate_keys=Fals
stdio.stop_loading('succeed')
plugin_context.return_true()
def generate_random_password(cluster_config):
global_config = cluster_config.get_original_global_conf()
if 'http_basic_auth_password' not in global_config:
cluster_config.update_global_conf('http_basic_auth_password', ConfigUtil.get_random_pwd_by_total_length())
\ No newline at end of file
......@@ -20,12 +20,26 @@
from __future__ import absolute_import, division, print_function
import hashlib
def generate_config(plugin_context, generate_config_mini=False, auto_depend=False, return_generate_keys=False, *args, **kwargs):
from tool import ConfigUtil
def generate_config(plugin_context, generate_config_mini=False, auto_depend=False, return_generate_keys=False, only_generate_password=False, generate_password=True, *args, **kwargs):
if return_generate_keys:
return plugin_context.return_true(generate_keys=['skip_proxy_sys_private_check', 'enable_strict_kernel_release', 'enable_cluster_checkout', 'proxy_mem_limited'])
generate_keys = []
if generate_password:
generate_keys += ['obproxy_sys_password']
if not only_generate_password:
generate_keys += ['skip_proxy_sys_private_check', 'enable_strict_kernel_release', 'enable_cluster_checkout', 'proxy_mem_limited']
return plugin_context.return_true(generate_keys=generate_keys)
cluster_config = plugin_context.cluster_config
if generate_password:
generate_random_password(cluster_config)
if only_generate_password:
return plugin_context.return_true()
stdio = plugin_context.stdio
generate_configs = {'global': {}}
plugin_context.set_variable('generate_configs', generate_configs)
......@@ -43,7 +57,7 @@ def generate_config(plugin_context, generate_config_mini=False, auto_depend=Fals
if 'enable_cluster_checkout' not in global_config:
generate_configs['global']['enable_cluster_checkout'] = False
cluster_config.update_global_conf('enable_cluster_checkout', False, False)
if generate_config_mini:
if 'proxy_mem_limited' not in global_config:
generate_configs['global']['proxy_mem_limited'] = '500M'
......@@ -56,4 +70,10 @@ def generate_config(plugin_context, generate_config_mini=False, auto_depend=Fals
return plugin_context.return_true()
stdio.stop_loading('succeed')
return plugin_context.return_true()
\ No newline at end of file
return plugin_context.return_true()
def generate_random_password(cluster_config):
global_config = cluster_config.get_original_global_conf()
if 'obproxy_sys_password' not in global_config:
cluster_config.update_global_conf('obproxy_sys_password', ConfigUtil.get_random_pwd_by_total_length())
\ No newline at end of file
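The grafana, obagent and obproxy generate_config plugins above now share one convention: seed a random credential via generate_random_password only when the user has not set it, and optionally stop right after. A sketch of how the new flags combine for the obagent and obproxy variants (direct calls are illustrative; OBD normally drives these through its plugin runner):

# Seed any missing random passwords, then return without generating other settings.
generate_config(plugin_context, only_generate_password=True)
# Generate the full configuration but leave password fields exactly as configured.
generate_config(plugin_context, generate_password=False)
# Default behaviour: generate everything, filling unset credentials with random values.
generate_config(plugin_context)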
......@@ -126,8 +126,8 @@ class Restart(object):
if self.connect(cluster_config):
if self.bootstrap_plugin:
self.call_plugin(self.bootstrap_plugin, cursor=self.cursors)
return self.call_plugin(self.display_plugin, cursor=self.cursors)
self.call_plugin(self.bootstrap_plugin, cluster_config=cluster_config, cursor=self.cursors)
return self.call_plugin(self.display_plugin, cluster_config=cluster_config, cursor=self.cursors)
return False
def rollback(self):
......
- src_path: ./usr/bin
target_path: ''
type: dir
\ No newline at end of file
- src_path: ./usr/local/
target_path: ''
type: dir
\ No newline at end of file
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from ssh import LocalClient
import datetime
from tool import TimeUtils
from subprocess import call, Popen, PIPE
import _errno as err
def gather_all(plugin_context, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
def local_execute_command(command, env=None, timeout=None):
command = r"cd {install_dir} && sh ".format(install_dir=obdiag_install_dir) + command
return LocalClient.execute_command(command, env, timeout, stdio)
def get_obdiag_cmd():
base_commond=r"cd {install_dir} && sh obdiag gather all".format(install_dir=obdiag_install_dir)
cmd = r"{base} --cluster_name {cluster_name} --from {from_option} --to {to_option} --scope {scope_option} --encrypt {encrypt_option}".format(
base=base_commond,
cluster_name=cluster_name,
from_option=from_option,
to_option=to_option,
scope_option=scope_option,
encrypt_option=encrypt_option,
)
if grep_option:
cmd = cmd + r" --grep {grep_option}".format(grep_option=grep_option)
if ob_install_dir_option:
cmd = cmd + r" --ob_install_dir {ob_install_dir_option}".format(ob_install_dir_option=ob_install_dir_option)
if store_dir_option:
cmd = cmd + r" --store_dir {store_dir_option}".format(store_dir_option=store_dir_option)
if clog_dir:
cmd = cmd + r" --clog_dir {clog_dir}".format(clog_dir=clog_dir)
if slog_dir:
cmd = cmd + r" --slog_dir {slog_dir}".format(slog_dir=slog_dir)
return cmd
def run():
obdiag_cmd = get_obdiag_cmd()
stdio.verbose('execute cmd: {}'.format(obdiag_cmd))
p = None
return_code = 255
try:
p = Popen(obdiag_cmd, shell=True)
return_code = p.wait()
except:
stdio.exception("")
if p:
p.kill()
stdio.verbose('exit code: {}'.format(return_code))
return return_code == 0
options = plugin_context.options
obdiag_bin = "obdiag"
cluster_config = plugin_context.cluster_config
cluster_name = cluster_config.name
stdio = plugin_context.stdio
global_conf = cluster_config.get_global_conf()
from_option = get_option('from')
to_option = get_option('to')
scope_option = get_option('scope')
since_option = get_option('since')
grep_option = get_option('grep')
encrypt_option = get_option('encrypt')
store_dir_option = get_option('store_dir')
ob_install_dir_option = global_conf.get('home_path')
obdiag_install_dir = get_option('obdiag_dir')
clog_dir = ob_install_dir_option + "/store"
slog_dir = ob_install_dir_option + "/store"
if len(cluster_config.servers) > 0:
server_config = cluster_config.get_server_conf(cluster_config.servers[0])
if not server_config.get('data_dir'):
server_config['data_dir'] = '%s/store' % ob_install_dir_option
if not server_config.get('redo_dir'):
server_config['redo_dir'] = server_config['data_dir']
if not server_config.get('slog_dir'):
server_config['slog_dir'] = '%s/slog' % server_config['redo_dir']
if not server_config.get('clog_dir'):
server_config['clog_dir'] = '%s/clog' % server_config['redo_dir']
clog_dir = server_config['clog_dir']
slog_dir = server_config['slog_dir']
try:
if (not from_option) and (not to_option) and since_option:
now_time = datetime.datetime.now()
to_option = (now_time + datetime.timedelta(minutes=10)).strftime('%Y-%m-%d %H:%M:%S')
from_option = (now_time - datetime.timedelta(seconds=TimeUtils.parse_time_sec(since_option))).strftime('%Y-%m-%d %H:%M:%S')
except:
stdio.error(err.EC_OBDIAG_OPTIONS_FORMAT_ERROR.format(option="since", value=since_option))
return plugin_context.return_false()
ret = local_execute_command('%s --help' % obdiag_bin)
if not ret:
stdio.error(err.EC_OBDIAG_NOT_FOUND.format())
return plugin_context.return_false()
try:
if run():
plugin_context.return_true()
except KeyboardInterrupt:
stdio.exception("obdiag gather all failded")
return plugin_context.return_false()
\ No newline at end of file
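When only --since is given, the plugin derives an explicit time window before building the obdiag command: the upper bound is padded by ten minutes and the lower bound reaches back by the parsed duration. A standalone sketch of that conversion (1800 seconds stands in for TimeUtils.parse_time_sec('30m'), whose parsing rules are not shown here):

import datetime

since_seconds = 1800  # assumed result of parsing a value such as '30m'
now_time = datetime.datetime.now()
to_option = (now_time + datetime.timedelta(minutes=10)).strftime('%Y-%m-%d %H:%M:%S')
from_option = (now_time - datetime.timedelta(seconds=since_seconds)).strftime('%Y-%m-%d %H:%M:%S')
print(from_option, to_option)  # e.g. '2023-06-01 09:30:00' '2023-06-01 10:10:00' for a 10:00 run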
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from ssh import LocalClient
import datetime
import os
from tool import TimeUtils
from subprocess import call, Popen, PIPE
import _errno as err
def gather_clog(plugin_context, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
def local_execute_command(command, env=None, timeout=None):
command = r"cd {install_dir} && sh ".format(install_dir=obdiag_install_dir) + command
return LocalClient.execute_command(command, env, timeout, stdio)
def get_obdiag_cmd():
base_commond = r"cd {install_dir} && sh obdiag gather clog".format(install_dir=obdiag_install_dir)
cmd = r"{base} --clog_dir {data_dir} --from {from_option} --to {to_option} --encrypt {encrypt_option}".format(
base = base_commond,
data_dir = data_dir,
from_option = from_option,
to_option = to_option,
encrypt_option = encrypt_option
)
if ob_install_dir_option:
cmd = cmd + r" --ob_install_dir {ob_install_dir_option}".format(ob_install_dir_option=ob_install_dir_option)
if store_dir_option:
cmd = cmd + r" --store_dir {store_dir_option}".format(store_dir_option=store_dir_option)
return cmd
def run():
obdiag_cmd = get_obdiag_cmd()
stdio.verbose('execute cmd: {}'.format(obdiag_cmd))
p = None
return_code = 255
try:
p = Popen(obdiag_cmd, shell=True)
return_code = p.wait()
except:
stdio.exception("")
if p:
p.kill()
stdio.verbose('exit code: {}'.format(return_code))
return return_code == 0
options = plugin_context.options
obdiag_bin = "obdiag"
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
global_conf = cluster_config.get_global_conf()
from_option = get_option('from')
to_option = get_option('to')
since_option = get_option('since')
encrypt_option = get_option('encrypt')
store_dir_option = get_option('store_dir')
ob_install_dir_option = global_conf.get('home_path')
data_dir = ob_install_dir_option + "/store"
obdiag_install_dir = get_option('obdiag_dir')
if len(cluster_config.servers) > 0:
server_config = cluster_config.get_server_conf(cluster_config.servers[0])
if not server_config.get('data_dir'):
server_config['data_dir'] = '%s/store' % ob_install_dir_option
if not server_config.get('redo_dir'):
server_config['redo_dir'] = server_config['data_dir']
if not server_config.get('clog_dir'):
server_config['clog_dir'] = '%s/clog' % server_config['redo_dir']
data_dir = server_config['clog_dir']
try:
if (not from_option) and (not to_option) and since_option:
now_time = datetime.datetime.now()
to_option = (now_time + datetime.timedelta(minutes=10)).strftime('%Y-%m-%d %H:%M:%S')
from_option = (now_time - datetime.timedelta(seconds=TimeUtils.parse_time_sec(since_option))).strftime('%Y-%m-%d %H:%M:%S')
except:
stdio.error(err.EC_OBDIAG_OPTIONS_FORMAT_ERROR.format(option="since", value=since_option))
return plugin_context.return_false()
ret = local_execute_command('%s --help' % obdiag_bin)
if not ret:
stdio.error(err.EC_OBDIAG_NOT_FOUND.format())
return plugin_context.return_false()
try:
if run():
plugin_context.return_true()
except KeyboardInterrupt:
stdio.exception("obdiag gather clog failded")
return plugin_context.return_false()
\ No newline at end of file
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from ssh import LocalClient
import datetime
from tool import TimeUtils
from subprocess import call, Popen, PIPE
import _errno as err
def gather_log(plugin_context, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
def local_execute_command(command, env=None, timeout=None):
command = r"cd {install_dir} && sh ".format(install_dir=obdiag_install_dir) + command
return LocalClient.execute_command(command, env, timeout, stdio)
def get_obdiag_cmd():
base_commond=r"cd {install_dir} && sh obdiag gather log".format(install_dir=obdiag_install_dir)
cmd = r"{base} --from {from_option} --to {to_option} --scope {scope_option} --encrypt {encrypt_option}".format(
base = base_commond,
from_option = from_option,
to_option = to_option,
scope_option = scope_option,
encrypt_option = encrypt_option,
)
if ob_install_dir_option:
cmd = cmd + r" --ob_install_dir {ob_install_dir_option}".format(ob_install_dir_option=ob_install_dir_option)
if store_dir_option:
cmd = cmd + r" --store_dir {store_dir_option}".format(store_dir_option=store_dir_option)
if grep_option:
cmd = cmd + r" --grep '{grep_option}'".format(grep_option=grep_option)
return cmd
def run():
obdiag_cmd = get_obdiag_cmd()
stdio.verbose('execute cmd: {}'.format(obdiag_cmd))
p = None
return_code = 255
try:
p = Popen(obdiag_cmd, shell=True)
return_code = p.wait()
except:
stdio.exception("")
if p:
p.kill()
stdio.verbose('exit code: {}'.format(return_code))
return return_code == 0
options = plugin_context.options
obdiag_bin = "obdiag"
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
global_conf = cluster_config.get_global_conf()
from_option = get_option('from')
to_option = get_option('to')
scope_option = get_option('scope')
since_option = get_option('since')
grep_option = get_option('grep')
encrypt_option = get_option('encrypt')
store_dir_option = get_option('store_dir')
ob_install_dir_option = global_conf.get('home_path')
obdiag_install_dir = get_option('obdiag_dir')
try:
if (not from_option) and (not to_option) and since_option:
now_time = datetime.datetime.now()
to_option = (now_time + datetime.timedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
from_option = (now_time - datetime.timedelta(seconds=TimeUtils.parse_time_sec(since_option))).strftime('%Y-%m-%d %H:%M:%S')
except:
stdio.error(err.EC_OBDIAG_OPTIONS_FORMAT_ERROR.format(option="since", value=since_option))
return plugin_context.return_false()
ret = local_execute_command('%s --help' % obdiag_bin)
if not ret:
stdio.error(err.EC_OBDIAG_NOT_FOUND.format())
return plugin_context.return_false()
try:
if run():
plugin_context.return_true()
except KeyboardInterrupt:
stdio.exception("obdiag gather log failded")
return plugin_context.return_false()
\ No newline at end of file
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from ssh import LocalClient
import datetime
from tool import TimeUtils
from subprocess import call, Popen, PIPE
import _errno as err
def gather_obproxy_log(plugin_context, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
def local_execute_command(command, env=None, timeout=None):
command = r"cd {install_dir} && sh ".format(install_dir=obdiag_install_dir) + command
return LocalClient.execute_command(command, env, timeout, stdio)
def get_obdiag_cmd():
base_commond=r"cd {install_dir} && sh obdiag gather obproxy_log".format(install_dir=obdiag_install_dir)
cmd = r"{base} --from {from_option} --to {to_option} --scope {scope_option} --encrypt {encrypt_option}".format(
base = base_commond,
from_option = from_option,
to_option = to_option,
scope_option = scope_option,
encrypt_option = encrypt_option,
)
if obproxy_install_dir_option:
cmd = cmd + r" --obproxy_install_dir {obproxy_install_dir_option}".format(obproxy_install_dir_option=obproxy_install_dir_option)
if store_dir_option:
cmd = cmd + r" --store_dir {store_dir_option}".format(store_dir_option=store_dir_option)
if grep_option:
cmd = cmd + r" --grep '{grep_option}'".format(grep_option=grep_option)
return cmd
def run():
obdiag_cmd = get_obdiag_cmd()
stdio.verbose('execute cmd: {}'.format(obdiag_cmd))
p = None
return_code = 255
try:
p = Popen(obdiag_cmd, shell=True)
return_code = p.wait()
except:
stdio.exception("")
if p:
p.kill()
stdio.verbose('exit code: {}'.format(return_code))
return return_code == 0
options = plugin_context.options
obdiag_bin = "obdiag"
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
global_conf = cluster_config.get_global_conf()
from_option = get_option('from')
to_option = get_option('to')
scope_option = get_option('scope')
since_option = get_option('since')
grep_option = get_option('grep')
encrypt_option = get_option('encrypt')
store_dir_option = get_option('store_dir')
obproxy_install_dir_option=global_conf.get('home_path')
obdiag_install_dir = get_option('obdiag_dir')
try:
if (not from_option) and (not to_option) and since_option:
now_time = datetime.datetime.now()
to_option = (now_time + datetime.timedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:%S')
from_option = (now_time - datetime.timedelta(seconds=TimeUtils.parse_time_sec(since_option))).strftime('%Y-%m-%d %H:%M:%S')
except:
stdio.error(err.EC_OBDIAG_OPTIONS_FORMAT_ERROR.format(option="since", value=since_option))
return plugin_context.return_false()
ret = local_execute_command('%s --help' % obdiag_bin)
if not ret:
stdio.error(err.EC_OBDIAG_NOT_FOUND.format())
return plugin_context.return_false()
try:
if run():
plugin_context.return_true()
except KeyboardInterrupt:
stdio.exception("obdiag gather obproxy_log failded")
return plugin_context.return_false()
\ No newline at end of file
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from ssh import LocalClient
from subprocess import call, Popen, PIPE
import _errno as err
def gather_perf(plugin_context, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
def local_execute_command(command, env=None, timeout=None):
command = r"cd {install_dir} && sh ".format(install_dir=obdiag_install_dir) + command
return LocalClient.execute_command(command, env, timeout, stdio)
def get_obdiag_cmd():
base_commond=r"cd {install_dir} && sh obdiag gather perf".format(install_dir=obdiag_install_dir)
cmd = r"{base} --scope {scope_option} ".format(
base = base_commond,
scope_option = scope_option
)
if store_dir_option:
cmd = cmd + r" --store_dir {store_dir_option}".format(store_dir_option=store_dir_option)
if ob_install_dir_option:
cmd = cmd + r" --ob_install_dir {ob_install_dir_option}".format(ob_install_dir_option=ob_install_dir_option)
return cmd
def run():
obdiag_cmd = get_obdiag_cmd()
stdio.verbose('execute cmd: {}'.format(obdiag_cmd))
p = None
return_code = 255
try:
p = Popen(obdiag_cmd, shell=True)
return_code = p.wait()
except:
stdio.exception("")
if p:
p.kill()
stdio.verbose('exit code: {}'.format(return_code))
return return_code == 0
options = plugin_context.options
obdiag_bin = "obdiag"
stdio = plugin_context.stdio
cluster_config = plugin_context.cluster_config
global_conf = cluster_config.get_global_conf()
ob_install_dir_option=global_conf.get('home_path')
scope_option = get_option('scope')
store_dir_option = get_option('store_dir')
obdiag_install_dir = get_option('obdiag_dir')
ret = local_execute_command('%s --help' % obdiag_bin)
if not ret:
stdio.error(err.EC_OBDIAG_NOT_FOUND.format())
return plugin_context.return_false()
try:
if run():
plugin_context.return_true()
except KeyboardInterrupt:
stdio.exception("obdiag gather perf failded")
return plugin_context.return_false()
\ No newline at end of file
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from ssh import LocalClient
from subprocess import call, Popen, PIPE
import _errno as err
def gather_plan_monitor(plugin_context, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
def local_execute_command(command, env=None, timeout=None):
command = r"cd {install_dir} && sh ".format(install_dir=obdiag_install_dir) + command
return LocalClient.execute_command(command, env, timeout, stdio)
def get_obdiag_cmd():
base_commond=r"cd {install_dir} && sh obdiag gather plan_monitor".format(install_dir=obdiag_install_dir)
cmd = r"{base} --trace_id {trace_id}".format(
base=base_commond,
trace_id=trace_id,
)
if store_dir_option:
cmd = cmd + r" --store_dir {store_dir_option}".format(store_dir_option=store_dir_option)
return cmd
def run():
obdiag_cmd = get_obdiag_cmd()
stdio.verbose('execute cmd: {}'.format(obdiag_cmd))
p = None
return_code = 255
try:
p = Popen(obdiag_cmd, shell=True)
return_code = p.wait()
except:
stdio.exception("")
if p:
p.kill()
stdio.verbose('exit code: {}'.format(return_code))
return return_code == 0
options = plugin_context.options
obdiag_bin = "obdiag"
stdio = plugin_context.stdio
store_dir_option = get_option('store_dir')
obdiag_install_dir = get_option('obdiag_dir')
trace_id = get_option('trace_id')
ret = local_execute_command('%s --help' % obdiag_bin)
if not ret:
stdio.error(err.EC_OBDIAG_NOT_FOUND.format())
return plugin_context.return_false()
try:
if run():
plugin_context.return_true()
except KeyboardInterrupt:
stdio.exception("obdiag gather plan_monitor failded")
return plugin_context.return_false()
\ No newline at end of file
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from ssh import LocalClient
import datetime
from tool import TimeUtils
from subprocess import call, Popen, PIPE
import _errno as err
def gather_slog(plugin_context, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
def local_execute_command(command, env=None, timeout=None):
command = r"cd {install_dir} && sh ".format(install_dir=obdiag_install_dir) + command
return LocalClient.execute_command(command, env, timeout, stdio)
def get_obdiag_cmd():
base_commond = r"cd {install_dir} && sh obdiag gather slog".format(install_dir=obdiag_install_dir)
cmd = r"{base} --slog_dir {data_dir} --from {from_option} --to {to_option} --encrypt {encrypt_option}".format(
base = base_commond,
data_dir = data_dir,
from_option = from_option,
to_option = to_option,
encrypt_option = encrypt_option
)
if ob_install_dir_option:
cmd = cmd + r" --ob_install_dir {ob_install_dir_option}".format(ob_install_dir_option=ob_install_dir_option)
if store_dir_option:
cmd = cmd + r" --store_dir {store_dir_option}".format(store_dir_option=store_dir_option)
return cmd
def run():
obdiag_cmd = get_obdiag_cmd()
stdio.verbose('execute cmd: {}'.format(obdiag_cmd))
p = None
return_code = 255
try:
p = Popen(obdiag_cmd, shell=True)
return_code = p.wait()
except:
stdio.exception("")
if p:
p.kill()
stdio.verbose('exit code: {}'.format(return_code))
return return_code == 0
options = plugin_context.options
obdiag_bin = "obdiag"
cluster_config = plugin_context.cluster_config
cluster_name = cluster_config.name
stdio = plugin_context.stdio
global_conf = cluster_config.get_global_conf()
from_option = get_option('from')
to_option = get_option('to')
since_option = get_option('since')
encrypt_option = get_option('encrypt')
store_dir_option = get_option('store_dir')
ob_install_dir_option=global_conf.get('home_path')
data_dir = ob_install_dir_option + "/store"
obdiag_install_dir = get_option('obdiag_dir')
if len(cluster_config.servers) > 0:
server_config = cluster_config.get_server_conf(cluster_config.servers[0])
if not server_config.get('data_dir'):
server_config['data_dir'] = '%s/store' % ob_install_dir_option
if not server_config.get('redo_dir'):
server_config['redo_dir'] = server_config['data_dir']
if not server_config.get('slog_dir'):
server_config['slog_dir'] = '%s/slog' % server_config['redo_dir']
data_dir = server_config['slog_dir']
try:
if (not from_option) and (not to_option) and since_option:
now_time = datetime.datetime.now()
to_option = (now_time + datetime.timedelta(minutes=10)).strftime('%Y-%m-%d %H:%M:%S')
from_option = (now_time - datetime.timedelta(seconds=TimeUtils.parse_time_sec(since_option))).strftime('%Y-%m-%d %H:%M:%S')
except:
stdio.error(err.EC_OBDIAG_OPTIONS_FORMAT_ERROR.format(option="since", value=since_option))
return plugin_context.return_false()
ret = local_execute_command('%s --help' % obdiag_bin)
if not ret:
stdio.error(err.EC_OBDIAG_NOT_FOUND.format())
return plugin_context.return_false()
try:
if run():
plugin_context.return_true()
except KeyboardInterrupt:
stdio.exception("obdiag gather slog failded")
return plugin_context.return_false()
\ No newline at end of file
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from ssh import LocalClient
from subprocess import call, Popen, PIPE
import _errno as err
def gather_stack(plugin_context, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
def local_execute_command(command, env=None, timeout=None):
command = r"cd {install_dir} && sh ".format(install_dir=obdiag_install_dir) + command
return LocalClient.execute_command(command, env, timeout, stdio)
def get_obdiag_cmd():
base_commond = r"cd {install_dir} && sh obdiag gather stack".format(install_dir=obdiag_install_dir)
cmd = r"{base} ".format(
base = base_commond
)
if ob_install_dir_option:
cmd = cmd + r" --ob_install_dir {ob_install_dir_option}".format(ob_install_dir_option=ob_install_dir_option)
if store_dir_option:
cmd = cmd + r" --store_dir {store_dir_option}".format(store_dir_option=store_dir_option)
return cmd
def run():
obdiag_cmd = get_obdiag_cmd()
stdio.verbose('execute cmd: {}'.format(obdiag_cmd))
p = None
return_code = 255
try:
p = Popen(obdiag_cmd, shell=True)
return_code = p.wait()
except:
stdio.exception("")
if p:
p.kill()
stdio.verbose('exit code: {}'.format(return_code))
return return_code == 0
options = plugin_context.options
obdiag_bin = "obdiag"
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
global_conf = cluster_config.get_global_conf()
store_dir_option = get_option('store_dir')
obdiag_install_dir = get_option('obdiag_dir')
ob_install_dir_option=global_conf.get('home_path')
ret = local_execute_command('%s --help' % obdiag_bin)
if not ret:
stdio.error(err.EC_OBDIAG_NOT_FOUND.format())
return plugin_context.return_false()
try:
if run():
plugin_context.return_true()
except KeyboardInterrupt:
stdio.exception("obdiag gather stack failded")
return plugin_context.return_false()
\ No newline at end of file
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from ssh import LocalClient
from subprocess import call, Popen, PIPE
import _errno as err
def gather_sysstat(plugin_context, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
def local_execute_command(command, env=None, timeout=None):
command = r"cd {install_dir} && sh ".format(install_dir=obdiag_install_dir) + command
return LocalClient.execute_command(command, env, timeout, stdio)
def get_obdiag_cmd():
base_commond=r"cd {install_dir} && sh obdiag gather sysstat".format(install_dir=obdiag_install_dir)
cmd = r"{base}".format(
base=base_commond,
)
if store_dir_option:
cmd = cmd + r" --store_dir {store_dir_option}".format(store_dir_option=store_dir_option)
return cmd
def run():
obdiag_cmd = get_obdiag_cmd()
stdio.verbose('execute cmd: {}'.format(obdiag_cmd))
p = None
return_code = 255
try:
p = Popen(obdiag_cmd, shell=True)
return_code = p.wait()
except:
stdio.exception("")
if p:
p.kill()
stdio.verbose('exit code: {}'.format(return_code))
return return_code == 0
options = plugin_context.options
obdiag_bin = "obdiag"
stdio = plugin_context.stdio
store_dir_option = get_option('store_dir')
obdiag_install_dir = get_option('obdiag_dir')
ret = local_execute_command('%s --help' % obdiag_bin)
if not ret:
stdio.error(err.EC_OBDIAG_NOT_FOUND.format())
return plugin_context.return_false()
try:
if run():
plugin_context.return_true()
except KeyboardInterrupt:
stdio.exception("obdiag gather sysstat failded")
return plugin_context.return_false()
\ No newline at end of file
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
import json
from tool import YamlLoader, FileUtil
from subprocess import call, Popen, PIPE
OBAG_BASE_DEFAULT_CONFIG = {
"OBDIAG": {
"BASIC": {
"config_backup_dir": "/tmp/oceanbase-diagnostic-tool/conf",
"file_number_limit": 20,
"file_size_limit": "2G"
},
"LOGGER": {
"file_handler_log_level": "DEBUG",
"log_dir": "/tmp/oceanbase-diagnostic-tool/log",
"log_filename": "obdiag.log",
"log_level": "INFO",
"mode": "obdiag",
"stdout_handler_log_level": "DEBUG"
}
}
}
def generate_config(plugin_context, deploy_config, *args, **kwargs):
def get_option(key, default=''):
value = getattr(options, key)
if value is None:
value = default
stdio.verbose('get option: %s value %s' % (key, value))
return value
def parse_empty(value,default=''):
if value is None:
value = default
return value
yaml = YamlLoader()
options = plugin_context.options
stdio = plugin_context.stdio
cluster_config = plugin_context.cluster_config
global_conf = cluster_config.get_global_conf()
deploy_name = plugin_context.deploy_name
user_config = deploy_config.user
obdiag_install_dir = get_option('obdiag_dir')
config_path = os.path.join(obdiag_install_dir, 'conf/config.yml')
def get_obdiag_config():
with FileUtil.open(config_path) as f:
data = YamlLoader(stdio=stdio).load(f)
base_config = data["OBDIAG"]
if base_config is None:
base_config = OBAG_BASE_DEFAULT_CONFIG
ocp_config = data["OCP"]
obcluster_config = data["OBCLUSTER"]
ob_services = cluster_config.servers
nodes = []
for server in ob_services:
nodeItem = {}
nodeItem["ip"] = server.ip
nodeItem["port"] = parse_empty(user_config.port)
nodeItem["user"] = parse_empty(user_config.username)
nodeItem["password"] = parse_empty(user_config.password)
nodeItem["private_key"] = parse_empty(user_config.key_file)
nodes.append(nodeItem)
nodes_config = nodes
try:
component = get_option('component')
except:
component = "oceanbase-ce"
if len(ob_services) > 0:
# use the first server's config so the port matches obcluster_config["host"] below
server_config = cluster_config.get_server_conf(ob_services[0])
port = 2881
if component in ["oceanbase", "oceanbase-ce"]:
port = server_config.get("mysql_port")
elif component in ["obproxy", "obproxy-ce"]:
port = server_config.get("listen_port")
obcluster_config["cluster_name"] = deploy_name
obcluster_config["host"] = ob_services[0].ip
obcluster_config["port"] = port
try:
obcluster_config["user"] = get_option('user')
except:
obcluster_config["user"] = 'root'
try:
obcluster_config["port"] = get_option('port')
except:
obcluster_config["port"] = 2881
try:
obcluster_config["password"] = get_option('password')
except:
obcluster_config["password"] = ""
if global_conf.get('root_password') is not None:
obcluster_config["password"] = global_conf.get('root_password')
if global_conf.get('mysql_port') is not None:
obcluster_config["port"] = global_conf.get('mysql_port')
config={
"OBDIAG": base_config,
"OCP": ocp_config,
"OBCLUSTER": obcluster_config,
"NODES":nodes_config
}
return config
def dump_obdiag_config(data):
with open(config_path, 'wb') as f:
try:
yaml.dump(data, f)
except:
stdio.error('path %s dump obdiag config %s failed.\n' % (config_path, data))
def run():
config_data = get_obdiag_config()
dump_obdiag_config(config_data)
# writing conf/config.yml is the whole job of this plugin, so a successfully
# written config file counts as success
return os.path.exists(config_path)
try:
if run():
plugin_context.return_true()
except KeyboardInterrupt:
stdio.exception("obdiag gather log failded")
return plugin_context.return_false()
\ No newline at end of file
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
from copy import deepcopy
import re
from ssh import LocalClient
from _rpm import Version
import _errno as err
from tool import YamlLoader, FileUtil
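# pre_check validates optional preconditions before an obdiag task runs:
# - utils_work_dir_check: for clog/slog/all gathering, every server's home_path/bin must contain ob_admin
# - version_check: the installed obdiag version (parsed from `sh obdiag version`) is compared with obdiag_new_version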
def pre_check(plugin_context, gather_type=None, obdiag_path='', obdiag_new_version='1.0', utils_work_dir_check=False, version_check=False, *args, **kwargs):
def utils_work_dir_checker(util_name):
clients = plugin_context.clients
cluster_config = plugin_context.cluster_config
if util_name is None:
stdio.verbose('util name not provided')
return False
for server in cluster_config.servers:
home_path = cluster_config.get_server_conf(server).get('home_path')
remote_path = os.path.join(home_path, 'bin')
software_path = os.path.join(remote_path, util_name)
client = clients[server]
stdio.verbose('%s pre check' % (server))
if not client.execute_command('[ -f %s ]' % software_path):
stdio.verbose('%s util not exist: %s' % (server, software_path))
return False
stdio.stop_loading('succeed')
return True
def version_checker():
client = LocalClient
check_status = {}
ret = client.execute_command('cd {} && sh obdiag version'.format(obdiag_path))
if not ret:
check_status = {'version_checker_status': False, 'obdiag_version': obdiag_new_version, 'obdiag_found': False}
return check_status
version_pattern = r'OceanBase\sDiagnostic\sTool:\s+(\d+\.\d+\.\d+)'
found = re.search(version_pattern, ret.stdout) or re.search(version_pattern, ret.stderr)
if not found:
check_status = {'version_checker_status': False, 'obdiag_version': obdiag_new_version, 'obdiag_found': False}
return check_status
else:
major_version = found.group(1)
if Version(major_version) < Version(obdiag_new_version):
check_status = {'version_checker_status': True, 'obdiag_version': major_version, 'obdiag_found': True}
return check_status
else:
check_status = {'version_checker_status': False, 'obdiag_version': major_version, 'obdiag_found': True}
return check_status
stdio = plugin_context.stdio
utils_work_dir_check_status = True
version_check_status = True
obdiag_version = obdiag_new_version
obdiag_found = True
skip = True
if utils_work_dir_check:
if gather_type in ['gather_clog', 'gather_slog', 'gather_all']:
utils_work_dir_check_status = utils_work_dir_checker('ob_admin')
if gather_type != 'gather_all':
skip = False
if version_check:
res = version_checker()
version_check_status = res['version_checker_status']
obdiag_version = res['obdiag_version']
obdiag_found = res['obdiag_found']
status = utils_work_dir_check_status and version_check_status
if status:
return plugin_context.return_true(version_status = version_check_status, utils_status = utils_work_dir_check_status, obdiag_version = obdiag_version, obdiag_found = obdiag_found, skip = skip)
else:
return plugin_context.return_false(version_status = version_check_status, utils_status = utils_work_dir_check_status, obdiag_version = obdiag_version, obdiag_found = obdiag_found, skip = skip)
......@@ -24,6 +24,7 @@ from __future__ import absolute_import, division, print_function
import re, os
from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED
from tool import ConfigUtil
def parse_size(size):
......@@ -66,13 +67,24 @@ def get_system_memory(memory_limit):
return format_size(system_memory, 0)
def generate_config(plugin_context, generate_config_mini=False, generate_check=True, return_generate_keys=False, generate_consistent_config=False, *args, **kwargs):
def generate_config(plugin_context, generate_config_mini=False, generate_check=True, return_generate_keys=False, generate_consistent_config=False, only_generate_password=False, generate_password=True, *args, **kwargs):
if return_generate_keys:
return plugin_context.return_true(generate_keys=[
'memory_limit', 'datafile_size', 'clog_disk_utilization_threshold', 'clog_disk_usage_limit_percentage',
'syslog_level', 'enable_syslog_recycle', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id',
'devname', 'system_memory', 'cpu_count',
])
generate_keys = []
if not only_generate_password:
generate_keys += [
'memory_limit', 'datafile_size', 'clog_disk_utilization_threshold', 'clog_disk_usage_limit_percentage',
'syslog_level', 'enable_syslog_recycle', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id',
'devname', 'system_memory', 'cpu_count'
]
if generate_password:
generate_keys += ['root_password', 'proxyro_password']
return plugin_context.return_true(generate_keys=generate_keys)
cluster_config = plugin_context.cluster_config
if generate_password:
generate_random_password(plugin_context, cluster_config)
if only_generate_password:
return plugin_context.return_true()
def update_server_conf(server, key, value):
if server not in generate_configs:
......@@ -92,7 +104,6 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
for key in generate_server_config:
cluster_config.update_server_conf(server, key, generate_server_config[key], False)
cluster_config = plugin_context.cluster_config
clients = plugin_context.clients
stdio = plugin_context.stdio
success = True
......@@ -321,6 +332,8 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
else:
datafile_size = max(5 << 30, data_dir_disk['avail'] * 0.8, 0)
update_server_conf(server, 'datafile_size', format_size(datafile_size, 0))
if generate_password:
generate_random_password(plugin_context, cluster_config)
if generate_consistent_config:
generate_global_config = generate_configs['global']
......@@ -395,4 +408,13 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
stdio.stop_loading('succeed')
return plugin_context.return_true()
stdio.stop_loading('fail')
\ No newline at end of file
stdio.stop_loading('fail')
def generate_random_password(plugin_context, cluster_config):
global_config = cluster_config.get_original_global_conf()
if 'root_password' not in global_config:
cluster_config.update_global_conf('root_password', ConfigUtil.get_random_pwd_by_total_length(20))
components_name_list = [repo.name for repo in plugin_context.repositories]
# `and` binds tighter than `or`, so the component check must be parenthesized to avoid overwriting an existing proxyro_password
if ('obproxy' in components_name_list or 'obproxy-ce' in components_name_list) and 'proxyro_password' not in global_config:
cluster_config.update_global_conf('proxyro_password', ConfigUtil.get_random_pwd_by_total_length())
......@@ -64,9 +64,13 @@ def list_tenant(plugin_context, cursor, *args, **kwargs):
stdio.stop_loading('fail')
return
for tenant in tenants:
unit_name = '%s_unit' % tenant['tenant_name'] if tenant['tenant_name'] != 'sys' else 'sys_unit_config'
sql = "select * from oceanbase.__all_unit_config where name = '%s'"
res = cursor.fetchone(sql % unit_name)
select_resource_pools_sql = "select unit_config_id from oceanbase.__all_resource_pool where tenant_id = {};"
res = cursor.fetchone(select_resource_pools_sql.format(tenant['tenant_id']))
if res is False:
stdio.stop_loading('fail')
return
select_unit_configs_sql = "select * from oceanbase.__all_unit_config where unit_config_id = {};"
res = cursor.fetchone(select_unit_configs_sql.format(res['unit_config_id']))
if res is False:
stdio.stop_loading('fail')
return
......
......@@ -173,7 +173,7 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, **
stdio = plugin_context.stdio
repository_dir = dest_repository.repository_dir
if dest_repository.version >= Version("4.1"):
if dest_repository.version > Version("4.1.0.0"):
stdio.error('upgrade observer to version {} is not support, please upgrade obd first.'.format(dest_repository.version))
return
......
......@@ -26,6 +26,7 @@ from math import sqrt
from _errno import EC_OBSERVER_NOT_ENOUGH_MEMORY_ALAILABLE, EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED, EC_OBSERVER_GET_MEMINFO_FAIL
import _errno as err
from tool import ConfigUtil
def parse_size(size):
......@@ -76,12 +77,23 @@ def get_system_memory(memory_limit, min_pool_memory, generate_config_mini):
return max(system_memory, min_pool_memory)
def generate_config(plugin_context, generate_config_mini=False, generate_check=True, return_generate_keys=False, generate_consistent_config=False, *args, **kwargs):
def generate_config(plugin_context, generate_config_mini=False, generate_check=True, return_generate_keys=False, generate_consistent_config=False, only_generate_password=False, generate_password=True, *args, **kwargs):
if return_generate_keys:
return plugin_context.return_true(generate_keys=[
'memory_limit', 'datafile_size', 'log_disk_size', 'devname', 'system_memory', 'cpu_count', 'production_mode',
'syslog_level', 'enable_syslog_recycle', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'ocp_meta_tenant_log_disk_size'
])
generate_keys = []
if not only_generate_password:
generate_keys += [
'memory_limit', 'datafile_size', 'log_disk_size', 'devname', 'system_memory', 'cpu_count', 'production_mode',
'syslog_level', 'enable_syslog_recycle', 'enable_syslog_wf', 'max_syslog_file_count', 'cluster_id', 'ocp_meta_tenant_log_disk_size'
]
if generate_password:
generate_keys += ['root_password', 'proxyro_password', 'ocp_meta_password', 'ocp_agent_monitor_password']
return plugin_context.return_true(generate_keys=generate_keys)
cluster_config = plugin_context.cluster_config
if generate_password:
generate_random_password(plugin_context, cluster_config)
if only_generate_password:
return plugin_context.return_true()
def update_server_conf(server, key, value):
if server not in generate_configs:
......@@ -100,7 +112,6 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
for key in generate_server_config:
cluster_config.update_server_conf(server, key, generate_server_config[key], False)
cluster_config = plugin_context.cluster_config
clients = plugin_context.clients
stdio = plugin_context.stdio
success = True
......@@ -482,6 +493,9 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
if generate_config_mini and 'ocp_meta_tenant_memory_size' not in global_config and 'memory_size' not in global_config.get('ocp_meta_tenant', {}):
update_global_conf('ocp_meta_tenant_memory_size', '1536M')
if generate_password:
generate_random_password(plugin_context, cluster_config)
if generate_consistent_config:
generate_global_config = generate_configs['global']
server_num = len(cluster_config.servers)
......@@ -552,3 +566,16 @@ def generate_config(plugin_context, generate_config_mini=False, generate_check=T
return plugin_context.return_true()
stdio.stop_loading('fail')
def generate_random_password(plugin_context, cluster_config):
global_config = cluster_config.get_original_global_conf()
if 'root_password' not in global_config:
cluster_config.update_global_conf('root_password', ConfigUtil.get_random_pwd_by_total_length(20))
components_name_list = [repo.name for repo in plugin_context.repositories]
if 'obagent' in components_name_list and 'ocp_agent_monitor_password' not in global_config:
cluster_config.update_global_conf('ocp_agent_monitor_password', ConfigUtil.get_random_pwd_by_total_length())
if ('obproxy' in components_name_list or 'obproxy-ce' in components_name_list) and 'proxyro_password' not in global_config:
cluster_config.update_global_conf('proxyro_password', ConfigUtil.get_random_pwd_by_total_length())
if 'ocp-express' in components_name_list and 'ocp_meta_password' not in global_config:
cluster_config.update_global_conf('ocp_meta_password', ConfigUtil.get_random_pwd_by_total_length())
\ No newline at end of file
......@@ -52,7 +52,6 @@ def format_size(size, precision=1):
def list_tenant(plugin_context, cursor, *args, **kwargs):
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
......@@ -65,11 +64,15 @@ def list_tenant(plugin_context, cursor, *args, **kwargs):
return
for tenant in tenants:
sql = "select * from oceanbase.__all_unit_config where name = '%s'"
select_resource_pools_sql = "select UNIT_CONFIG_ID from oceanbase.DBA_OB_RESOURCE_POOLS where TENANT_ID = {};"
if tenant['TENANT_TYPE'] == 'META':
continue
unit_name = '%s_unit' % tenant['TENANT_NAME'] if tenant['TENANT_NAME'] != 'sys' else 'sys_unit_config'
res = cursor.fetchone(sql % unit_name)
res = cursor.fetchone(select_resource_pools_sql.format(tenant['TENANT_ID']))
if res is False:
stdio.stop_loading('fail')
return
select_unit_configs_sql = "select * from oceanbase.DBA_OB_UNIT_CONFIGS where UNIT_CONFIG_ID = {};"
res = cursor.fetchone(select_unit_configs_sql.format(res['UNIT_CONFIG_ID']))
if res is False:
stdio.stop_loading('fail')
return
......@@ -78,10 +81,10 @@ def list_tenant(plugin_context, cursor, *args, **kwargs):
stdio.print_list(tenant_infos, ['tenant_name', 'tenant_type', 'compatibility_mode', 'primary_zone', 'max_cpu',
'min_cpu', 'memory_size', 'max_iops', 'min_iops', 'log_disk_size',
'iops_weight'],
lambda x: [x['TENANT_NAME'], x['TENANT_TYPE'], x['COMPATIBILITY_MODE'], x['PRIMARY_ZONE'],
x['max_cpu'], x['min_cpu'], format_size(x['memory_size']), x['max_iops'], x['min_iops'],
format_size(x['log_disk_size']), x['iops_weight']],
title='tenant')
lambda x: [x['TENANT_NAME'], x['TENANT_TYPE'], x['COMPATIBILITY_MODE'], x['PRIMARY_ZONE'],
x['MAX_CPU'], x['MIN_CPU'], format_size(x['MEMORY_SIZE']), x['MAX_IOPS'], x['MIN_IOPS'],
format_size(x['LOG_DISK_SIZE']), x['IOPS_WEIGHT']],
title='tenant')
stdio.stop_loading('succeed')
return plugin_context.return_true()
......
......@@ -22,8 +22,9 @@ from __future__ import absolute_import, division, print_function
import os
import time
import tool
import datetime
from _rpm import Version
from ssh import LocalClient
......@@ -153,6 +154,7 @@ class Upgrader(object):
self.process_index = upgrade_ctx.get('process_index', 0)
self.process_route_index = upgrade_ctx.get('process_route_index', self.route_index)
self.process = [
self.disable_ddl_and_check,
self.exec_upgrade_checker,
self.upgrade_mode_on,
self.exec_upgrade_pre,
......@@ -359,6 +361,56 @@ class Upgrader(object):
self.broken_sql("select * from GV$OB_LOG_STAT where in_sync = 'NO'")
return True
def disable_ddl_and_check(self):
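# Runs only when the repository at the previous route step is 4.0.0.0: wait for in-flight
# DDL tasks to finish, disable DDL, wait for replay to catch up on every log stream, then
# trigger a major freeze and wait for the merge to complete before continuing the upgrade.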
if self.repositories[self.route_index - 1].version == Version('4.0.0.0'):
self.stdio.start_loading('Disable DDL')
while True:
# check ddl end
while self.execute_sql("select task_id from __all_virtual_ddl_task_status", error=True):
time.sleep(3)
# close ddl
if self.execute_sql('alter system set enable_ddl = false') is False:
self.stdio.stop_loading('fail')
return False
while self.execute_sql("select * from __all_virtual_sys_parameter_stat where name = 'enable_ddl' and value != 'false'"):
time.sleep(3)
# check ddl end
if self.execute_sql("select task_id from __all_virtual_ddl_task_status", error=True):
if not self.execute_sql('alter system set enable_ddl = true'):
self.stdio.stop_loading('fail')
continue
break
# check clog
rets = self.execute_sql("select tenant_id, ls_id, max(max_scn) as max_scn from gv$ob_log_stat group by tenant_id, ls_id", one=False, error=True)
if rets is not None:
for ret in rets:
while self.execute_sql("select unsubmitted_log_scn from __all_virtual_replay_stat where tenant_id = %s and ls_id = %s and role != 'leader' and unsubmitted_log_scn <= %s" % (ret['tenant_id'], ret['ls_id'], ret['max_scn']), error=True):
time.sleep(3)
# major freeze
# 1. check merge status
pre_global_broadcast_scn = 0
while True:
merge_status = self.execute_sql("select max(global_broadcast_scn) as global_broadcast_scn, max(global_broadcast_scn > last_scn) as is_merging from CDB_OB_MAJOR_COMPACTION")
if merge_status['is_merging'] == 0:
pre_global_broadcast_scn = merge_status['global_broadcast_scn']
break
time.sleep(3)
# 2. begin merge
self.execute_sql("alter system major freeze tenant = all", error=False)
# 3. wait merge start
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn <= %s", [pre_global_broadcast_scn]):
time.sleep(3)
# 4. wait merge finish
while self.execute_sql("select * from CDB_OB_MAJOR_COMPACTION where global_broadcast_scn > last_scn"):
time.sleep(3)
self.stdio.stop_loading('succeed')
return True
def start_zone(self, zone=None):
if not self.connect():
return False
......
This diff is collapsed.
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
def bootstrap(plugin_context, *args, **kwargs):
return plugin_context.return_true()
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import json
import requests
import _errno as err
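# Minimal HTTP cursor for the ocp-express REST API: status() polls GET /api/v1/status and
# init() posts the bootstrap payload to /api/v1/init; _request retries a failed connection
# up to 5 times before reporting the error.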
class OcpExpressCursor(object):
class Response(object):
def __init__(self, code, content):
self.code = code
self.content = content
def __bool__(self):
return self.code == 200
def __init__(self, ip, port):
self.ip = ip
self.port = port
self.url_prefix = "http://{ip}:{port}/".format(ip=self.ip, port=self.port)
self.auth = None
def status(self, stdio=None):
resp = self._request('GET', 'api/v1/status', stdio=stdio)
if resp:
return resp.content.get("status") == "ok"
return False
def init(self, data, stdio=None):
return self._request("POST", 'api/v1/init', data=data, stdio=stdio)
def _request(self, method, api, data=None, retry=5, stdio=None):
url = self.url_prefix + api
headers = {"Content-Type": "application/json"}
try:
if data is not None:
data = json.dumps(data)
stdio.verbose('send http request method: {}, url: {}, data: {}'.format(method, url, data))
resp = requests.request(method, url, auth=self.auth, data=data, verify=False, headers=headers)
return_code = resp.status_code
content = resp.content
except Exception as e:
if retry:
retry -= 1
return self._request(method=method, api=api, data=data, retry=retry, stdio=stdio)
stdio.exception("")
return_code = 500
content = str(e)
if return_code != 200:
stdio.verbose("request ocp-express failed: %s" % content)
try:
content = json.loads(content.decode())
except:
pass
return self.Response(code=return_code, content=content)
def connect(plugin_context, target_server=None, *args, **kwargs):
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
if target_server:
servers = [target_server]
stdio.start_loading('Connect to ocp-express ({})'.format(target_server))
else:
servers = cluster_config.servers
stdio.start_loading('Connect to ocp-express')
cursors = {}
for server in servers:
config = cluster_config.get_server_conf(server)
username = 'system'
stdio.verbose('connect ocp-express ({}:{} by user {})'.format(server.ip, config['port'], username))
cursor = OcpExpressCursor(ip=server.ip, port=config['port'])
if cursor.status(stdio=stdio):
cursors[server] = cursor
if not cursors:
stdio.error(err.EC_FAIL_TO_CONNECT.format(component=cluster_config.name))
stdio.stop_loading('fail')
return plugin_context.return_false()
stdio.stop_loading('succeed')
return plugin_context.return_true(connect=cursors, cursor=cursors)
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from tool import NetUtil
def display(plugin_context, cursor, *args, **kwargs):
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
servers = cluster_config.servers
results = []
for server in servers:
api_cursor = cursor.get(server)
ip = server.ip
if ip == '127.0.0.1':
ip = NetUtil.get_host_ip()
url = 'http://{}:{}'.format(ip, api_cursor.port)
results.append({
'ip': ip,
'port': api_cursor.port,
'user': "admin",
'password': cluster_config.get_global_conf_with_default().get("admin_passwd", ''),
'url': url,
'status': 'active' if api_cursor and api_cursor.status(stdio) else 'inactive'
})
stdio.print_list(results, ['url', 'username', 'initial password', 'status'], lambda x: [x['url'], 'admin', x['password'], x['status']], title='ocp-express')
active_result = [r for r in results if r['status'] == 'active']
info_dict = active_result[0] if len(active_result) > 0 else None
if info_dict is not None:
info_dict['type'] = 'web'
return plugin_context.return_true(info=info_dict)
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import re
from tool import ConfigUtil
def generate_config(plugin_context, auto_depend=False, generate_config_mini=False, return_generate_keys=False, only_generate_password=False, *args, **kwargs):
if return_generate_keys:
generate_keys = ['admin_passwd']
if not only_generate_password:
generate_keys += ['memory_size', 'log_dir', 'logging_file_max_history']
return plugin_context.return_true(generate_keys=generate_keys)
cluster_config = plugin_context.cluster_config
generate_random_password(cluster_config)
if only_generate_password:
return plugin_context.return_true()
stdio = plugin_context.stdio
depend_comps = [['obagent'], ['oceanbase', 'oceanbase-ce'], ['obproxy', 'obproxy-ce']]
generate_configs = {'global': {}}
plugin_context.set_variable('generate_configs', generate_configs)
stdio.start_loading('Generate ocp express configuration')
min_memory_size = '752M'
if auto_depend:
for comps in depend_comps:
for comp in comps:
if cluster_config.add_depend_component(comp):
break
global_config = cluster_config.get_global_conf()
if generate_config_mini:
if 'memory_size' not in global_config:
cluster_config.update_global_conf('memory_size', min_memory_size)
auto_set_memory = False
if 'memory_size' not in global_config:
for server in cluster_config.servers:
server_config = cluster_config.get_server_conf(server)
if 'memory_size' not in server_config:
auto_set_memory = True
if auto_set_memory:
observer_num = 0
for comp in ['oceanbase', 'oceanbase-ce']:
if comp in cluster_config.depends:
observer_num = len(cluster_config.get_depend_servers(comp))
if not observer_num:
stdio.warn('The component oceanbase/oceanbase-ce is not in the depends, the memory size cannot be calculated, and a fixed value of {} is used'.format(min_memory_size))
cluster_config.update_global_conf('memory_size', min_memory_size)
else:
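# heuristic: roughly 512M base plus 60M per monitored node; the (observer_num + 3)
# padding presumably leaves headroom for a few non-observer targets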
cluster_config.update_global_conf('memory_size', '%dM' % (512 + (observer_num + 3) * 60))
stdio.stop_loading('succeed')
plugin_context.return_true()
def generate_random_password(cluster_config):
global_config = cluster_config.get_original_global_conf()
if 'admin_passwd' not in global_config:
cluster_config.update_global_conf('admin_passwd', ConfigUtil.get_random_pwd_by_rule())
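# get_random_pwd_by_rule is assumed to produce a password that already satisfies the
# admin_passwd complexity rule enforced elsewhere, so no extra validation is done here.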
This diff is collapsed. (4 more collapsed file diffs)
......@@ -38,11 +38,11 @@ def display(plugin_context, cursor, *args, **kwargs):
'ip': ip,
'port': api_cursor.port,
'user': "admin",
'password': "oceanbase",
'password': cluster_config.get_global_conf_with_default().get('_admin_password_', 'oceanbase'),
'url': url,
'status': 'active' if api_cursor and api_cursor.status(stdio) else 'inactive'
})
stdio.print_list(results, ['url', 'username', 'default_password', 'status'], lambda x: [x['url'], 'admin', 'oceanbase', x['status']], title='ocp-express')
stdio.print_list(results, ['url', 'username', 'initial password', 'status'], lambda x: [x['url'], 'admin', x['password'], x['status']], title='ocp-express')
active_result = [r for r in results if r['status'] == 'active']
info_dict = active_result[0] if len(active_result) > 0 else None
if info_dict is not None:
......
......@@ -52,7 +52,7 @@ function _obd_complete_func
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
all_cmds["obd"]="mirror cluster test update repo demo web"
all_cmds["obd"]="mirror cluster test update repo demo web obdiag display-trace"
all_cmds["obd cluster"]="autodeploy tenant start deploy redeploy restart reload destroy stop edit-config list display upgrade chst check4ocp reinstall"
all_cmds["obd cluster *"]="_obd_reply_deploy_names"
all_cmds["obd cluster tenant"]="create drop show"
......@@ -72,6 +72,9 @@ function _obd_complete_func
all_cmds["obd tool command"]="_obd_reply_deploy_names"
all_cmds["obd tool command *"]="_obd_reply_tool_commands"
all_cmds["obd env"]="set unset show clear"
all_cmds["obd obdiag"]="gather deploy"
all_cmds["obd obdiag gather"]="all log clog slog obproxy_log perf plan_monitor stack sysstat"
all_cmds["obd obdiag gather *"]="_obd_reply_deploy_names"
# fi
case $prev in
list)
......
......@@ -129,7 +129,8 @@ function build()
mkdir -p $BUILD_DIR/lib/site-packages
mkdir -p $BUILD_DIR/mirror/remote
wget https://mirrors.aliyun.com/oceanbase/OceanBase.repo -O $BUILD_DIR/mirror/remote/OceanBase.repo
cat _cmd.py | sed "s/<CID>/$CID/" | sed "s/<B_BRANCH>/$BRANCH/" | sed "s/<B_TIME>/$DATE/" | sed "s/<DEBUG>/$OBD_DUBUG/" | sed "s/<VERSION>/$VERSION/" > obd.py
sed -i "s/<CID>/$CID/" const.py && sed -i "s/<B_BRANCH>/$BRANCH/" const.py && sed -i "s/<B_TIME>/$DATE/" const.py && sed -i "s/<DEBUG>/$OBD_DUBUG/" const.py && sed -i "s/<VERSION>/$VERSION/" const.py
cp -f _cmd.py obd.py
sed -i "s|<DOC_LINK>|$OBD_DOC_LINK|" _errno.py
pip install -r $req_fn.txt || exit 1
pip install -r plugins-$req_fn.txt --target=$BUILD_DIR/lib/site-packages || exit 1
......
This diff is collapsed. (the diffs of the remaining 49 changed files are collapsed)