Unverified commit fd3c00ad, authored by Rongfeng Fu, committed by GitHub

V2.1.1 (#175)

Parent ae0f3bca
......@@ -422,7 +422,7 @@ class EnvironmentMajorCommand(HiddenMajorCommand):
class TelemetryPostCommand(HiddenObdCommand):
def __init__(self):
super(TelemetryPostCommand, self).__init__("post", "Post telemetry data to OceanBase.By default, OBD telemetry is enabled. To disable OBD telemetry, run the `obd env set TELEMETRY_MODE 0` command. To enable OBD telemetry data printing, run `obd env set TELEMETRY_LOG_MODE 1`.")
super(TelemetryPostCommand, self).__init__('post', "Post telemetry data to OceanBase.By default, OBD telemetry is enabled. To disable OBD telemetry, run the `obd env set TELEMETRY_MODE 0` command. To enable OBD telemetry data printing, run `obd env set TELEMETRY_LOG_MODE 1`.")
self.parser.add_option('-d', '--data', type='string', help="post obd data")
@property
......@@ -431,9 +431,7 @@ class TelemetryPostCommand(HiddenObdCommand):
@property
def enable_log(self):
if COMMAND_ENV.get(ENV.TELEMETRY_LOG_MODE, default='1') == '0':
return False
return True
return COMMAND_ENV.get(ENV.TELEMETRY_LOG_MODE, default='0') == '1'
def init(self, cmd, args):
super(TelemetryPostCommand, self).init(cmd, args)
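With this change the default for TELEMETRY_LOG_MODE flips from '1' to '0', so printing of telemetry data becomes opt-in. A minimal sketch of the new check, using a plain dict in place of COMMAND_ENV (whose real get() takes a default= keyword), behaves as follows:

    # Illustrative only: a plain dict stands in for COMMAND_ENV.
    def enable_log(env):
        return env.get('TELEMETRY_LOG_MODE', '0') == '1'

    assert enable_log({}) is False                          # unset: printing stays off
    assert enable_log({'TELEMETRY_LOG_MODE': '1'}) is True  # explicit opt-in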
......@@ -447,7 +445,7 @@ class TelemetryPostCommand(HiddenObdCommand):
class TelemetryMajorCommand(HiddenMajorCommand):
def __init__(self):
super(TelemetryMajorCommand, self).__init__("telemetry", "Telemetry for OB-Deploy.By default, OBD telemetry is enabled. To disable OBD telemetry, run the `obd env set TELEMETRY_MODE 0` command. To enable OBD telemetry data printing, run `obd env set TELEMETRY_LOG_MODE 1`.")
super(TelemetryMajorCommand, self).__init__('telemetry', "Telemetry for OB-Deploy.By default, OBD telemetry is enabled. To disable OBD telemetry, run the `obd env set TELEMETRY_MODE 0` command. To enable OBD telemetry data printing, run `obd env set TELEMETRY_LOG_MODE 1`.")
self.register_command(TelemetryPostCommand())
def do_command(self):
......@@ -654,9 +652,11 @@ class ClusterMirrorCommand(ObdCommand):
data[component] = _.get_variable('run_result')
return data
def background_telemetry_task(self, obd):
def background_telemetry_task(self, obd, demploy_name=None):
if demploy_name is None:
demploy_name = self.cmds[0]
data = json.dumps(self.get_obd_namespaces_data(obd))
LocalClient.execute_command_background(f"nohup obd telemetry post {self.cmds[0]} --data='{data}' >/dev/null 2>&1 &")
LocalClient.execute_command_background(f"nohup obd telemetry post {demploy_name} --data='{data}' >/dev/null 2>&1 &")
class ClusterConfigStyleChange(ClusterMirrorCommand):
......@@ -704,7 +704,10 @@ class DemoCommand(ClusterMirrorCommand):
setattr(self.opts, 'force', True)
setattr(self.opts, 'force_delete', True)
obd.set_options(self.opts)
return obd.demo()
res = obd.demo()
self.background_telemetry_task(obd, 'demo')
return res
class WebCommand(ObdCommand):
......@@ -1479,7 +1482,7 @@ class ObdiagGatherAllCommand(ObdiagGatherMirrorCommand):
self.parser.add_option('--scope', type='string', help="log type constrains, choices=[observer, election, rootservice, all]",default='all')
self.parser.add_option('--grep', type='string', help="specify keywords constrain")
self.parser.add_option('--encrypt', type='string', help="Whether the returned results need to be encrypted, choices=[true, false]", default="false")
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default=os.getcwd())
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
class ObdiagGatherLogCommand(ObdiagGatherMirrorCommand):
......@@ -1500,7 +1503,7 @@ class ObdiagGatherLogCommand(ObdiagGatherMirrorCommand):
self.parser.add_option('--scope', type='string', help="log type constrains, choices=[observer, election, rootservice, all]",default='all')
self.parser.add_option('--grep', type='string', help="specify keywords constrain")
self.parser.add_option('--encrypt', type='string', help="Whether the returned results need to be encrypted, choices=[true, false]", default="false")
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default=os.getcwd())
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
......@@ -1516,7 +1519,7 @@ class ObdiagGatherSysStatCommand(ObdiagGatherMirrorCommand):
def __init__(self):
super(ObdiagGatherSysStatCommand, self).__init__('sysstat', 'Gather Host information')
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default=os.getcwd())
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
......@@ -1532,7 +1535,7 @@ class ObdiagGatherStackCommand(ObdiagGatherMirrorCommand):
def __init__(self):
super(ObdiagGatherStackCommand, self).__init__('stack', 'Gather stack')
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default=os.getcwd())
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
......@@ -1548,7 +1551,7 @@ class ObdiagGatherPerfCommand(ObdiagGatherMirrorCommand):
def __init__(self):
super(ObdiagGatherPerfCommand, self).__init__('perf', 'Gather perf')
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default=os.getcwd())
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
self.parser.add_option('--scope', type='string', help="perf type constrains, choices=[sample, flame, pstack, all]",default='all')
self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
......@@ -1569,7 +1572,7 @@ class ObdiagGatherSlogCommand(ObdiagGatherMirrorCommand):
self.parser.add_option('--to', type='string', help="specify the end of the time range. format: yyyy-mm-dd hh:mm:ss")
self.parser.add_option('--since', type='string', help="Specify time range that from 'n' [d]ays, 'n' [h]ours or 'n' [m]inutes. before to now. format: <n> <m|h|d>. example: 1h.",default='30m')
self.parser.add_option('--encrypt', type='string', help="Whether the returned results need to be encrypted, choices=[true, false]", default="false")
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default=os.getcwd())
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
......@@ -1589,7 +1592,7 @@ class ObdiagGatherClogCommand(ObdiagGatherMirrorCommand):
self.parser.add_option('--to', type='string', help="specify the end of the time range. format: yyyy-mm-dd hh:mm:ss")
self.parser.add_option('--since', type='string', help="Specify time range that from 'n' [d]ays, 'n' [h]ours or 'n' [m]inutes. before to now. format: <n> <m|h|d>. example: 1h.",default='30m')
self.parser.add_option('--encrypt', type='string', help="Whether the returned results need to be encrypted, choices=[true, false]", default="false")
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default=os.getcwd())
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
......@@ -1607,7 +1610,7 @@ class ObdiagGatherPlanMonitorCommand(ObdiagGatherMirrorCommand):
super(ObdiagGatherPlanMonitorCommand, self).__init__('plan_monitor', 'Gather ParalleSQL information')
self.parser.add_option('-c', '--component', type='string', help="Component name to connect.", default='oceanbase-ce')
self.parser.add_option('--trace_id', type='string', help='sql trace id')
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default=os.getcwd())
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
self.parser.add_option('-u', '--user', type='string', help='The username used by database connection. [root]',default='root')
self.parser.add_option('-p', '--password', type='string', help='The password used by database connection.',default='')
self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
......@@ -1631,7 +1634,7 @@ class ObdiagGatherObproxyLogCommand(ObdiagGatherMirrorCommand):
self.parser.add_option('--scope', type='string', help="log type constrains, choices=[observer, election, rootservice, all]",default='all')
self.parser.add_option('--grep', type='string', help="specify keywords constrain")
self.parser.add_option('--encrypt', type='string', help="Whether the returned results need to be encrypted, choices=[true, false]", default="false")
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default=os.getcwd())
self.parser.add_option('--store_dir', type='string', help='the dir to store gather result, current dir by default.', default='./')
self.parser.add_option('--obdiag_dir', type='string', help="obdiag install dir",default=OBDIAG_HOME_PATH)
......@@ -1673,7 +1676,7 @@ if __name__ == '__main__':
sys.setdefaultencoding(defaultencoding)
sys.path.append(os.path.join(ObdCommand.OBD_INSTALL_PATH, 'lib/site-packages'))
ROOT_IO.track_limit += 2
if MainCommand().init('obd', sys.argv[1:]).do_command():
if MainCommand().init(sys.argv[0], sys.argv[1:]).do_command():
ROOT_IO.exit(0)
ROOT_IO.exit(1)
......@@ -44,6 +44,7 @@ from _arch import getArchList, getBaseArch
from _rpm import Package, PackageInfo
from tool import ConfigUtil, FileUtil, var_replace
from _manager import Manager
from tool import timeout
_ARCH = getArchList()
......@@ -276,10 +277,13 @@ class RemoteMirrorRepository(MirrorRepository):
@property
def available(self):
if not self.enabled:
return False
if self._available is None:
try:
req = requests.request('get', self.baseurl)
self._available = req.status_code < 400
with timeout(5):
req = requests.request('get', self.baseurl)
self._available = req.status_code < 400
except Exception:
self.stdio and getattr(self.stdio, 'exception', print)('')
self._available = False
......
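The availability probe above is now wrapped in a five-second `timeout` context manager imported from `tool`. The actual helper lives in OBD's tool module; a minimal signal-based sketch of such a context manager (an illustration, not the project's implementation) is:

    import signal
    from contextlib import contextmanager

    @contextmanager
    def timeout(seconds):
        # Interrupt the wrapped block if it runs longer than `seconds`.
        # SIGALRM-based, so it only works on the main thread of Unix systems.
        def _handler(signum, frame):
            raise TimeoutError('timed out after %s seconds' % seconds)
        previous = signal.signal(signal.SIGALRM, _handler)
        signal.alarm(seconds)
        try:
            yield
        finally:
            signal.alarm(0)
            signal.signal(signal.SIGALRM, previous)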
......@@ -21,7 +21,8 @@
OB_OFFICIAL_WEBSITE = 'https://www.oceanbase.com/'
# post telemetry data to OceanBase official
TELEMETRY_URL = 'http://openwebapi.dev.alipay.net/api/web/oceanbase/report'
TELEMETRY_WEBSITE = '<TELEMETRY_WEBSITE>'
TELEMETRY_URL = '{}/api/web/oceanbase/report'.format(TELEMETRY_WEBSITE if TELEMETRY_WEBSITE else 'https://openwebapi.oceanbase.com')
# obdeploy version
VERSION = '<VERSION>'
......
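The hard-coded internal endpoint is replaced by a build-time `<TELEMETRY_WEBSITE>` placeholder with a public fallback. A quick illustration of how the fallback resolves when the packaging step substitutes an empty value (an assumed scenario):

    TELEMETRY_WEBSITE = ''   # assumed: placeholder replaced with an empty string at build time
    TELEMETRY_URL = '{}/api/web/oceanbase/report'.format(
        TELEMETRY_WEBSITE if TELEMETRY_WEBSITE else 'https://openwebapi.oceanbase.com')
    # -> 'https://openwebapi.oceanbase.com/api/web/oceanbase/report'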
......@@ -3963,21 +3963,14 @@ class ObdHome(object):
if repositories == []:
return
self.set_repositories(repositories)
target_repository = None
for repository in repositories:
if repository.name in ['oceanbase', 'oceanbase-ce']:
target_repository = repository
break
else:
target_repository = repository
telemetry_info_collect_plugin = self.plugin_manager.get_best_py_script_plugin('telemetry_info_collect', 'general', '0.1')
ret = self.call_plugin(telemetry_info_collect_plugin, target_repository, target_repository=target_repository)
if ret:
post_data = ret.get_return('post_data')
self._call_stdio('verbose', 'telemetry_data: %s' % post_data)
telemetry_post_plugin = self.plugin_manager.get_best_py_script_plugin('telemetry_post', 'general', '0.1')
return self.call_plugin(telemetry_post_plugin, target_repository, data=post_data)
telemetry_info_collect_plugin = self.plugin_manager.get_best_py_script_plugin('telemetry_info_collect', 'general', '0.1')
for repository in repositories:
if not self.call_plugin(telemetry_info_collect_plugin, repository, spacename='telemetry'):
return False
telemetry_post_plugin = self.plugin_manager.get_best_py_script_plugin('telemetry_post', 'general', '0.1')
return self.call_plugin(telemetry_post_plugin, repository, spacename='telemetry')
def obdiag_gather(self, name, gather_type, opts):
......
......@@ -107,6 +107,129 @@ obagent:
ip: 172.19.33.4
global:
home_path: /root/obagent
prometheus:
servers:
- 192.168.1.5
depends:
- obagent
global:
# The working directory for prometheus. prometheus is started under this directory. This is a required field.
home_path: /root/prometheus
# address: 0.0.0.0 # The ip address to bind to. Along with port, corresponds to the `web.listen-address` parameter.
# port: 9090 # The http port to use. Along with address, corresponds to the `web.listen-address` parameter.
# enable_lifecycle: true # Enable shutdown and reload via HTTP request. Corresponds to the `web.enable-lifecycle` parameter.
# data_dir: /root/prometheus/data # Base path for metrics storage. Corresponds to the `storage.tsdb.path` parameter.
# basic_auth_users: # Usernames and passwords that have full access to the web server via basic authentication. Corresponds to the `basic_auth_users` parameter.
# <username>: <password> # The format of `basic_auth_users` : the key is the user name and the value is the password.
# web_config: # Content of Prometheus web service config file. The format is consistent with the file. However, `basic_auth_users` cannot be set in it. Please set `basic_auth_users` above if needed. Corresponds to the `web.config.file` parameter.
# tls_server_config:
# # Certificate and key files for server to use to authenticate to client.
# cert_file: <filename>
# key_file: <filename>
# config: # Configuration of the Prometheus service. The format is consistent with the Prometheus config file. Corresponds to the `config.file` parameter.
# rule_files:
# - rules/*rules.yaml
# scrape_configs:
# - job_name: prometheus
# metrics_path: /metrics
# scheme: http
# static_configs:
# - targets:
# - localhost:9090
# - job_name: node
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/node/host
# scheme: http
# file_sd_configs: # Set the targets to be collected by reading local files. The example is to collect targets corresponding to all yaml files in the 'targets' directory under $home_path.
# - files:
# - 'targets/*.yaml'
# - job_name: ob_basic
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/ob/basic
# scheme: http
# file_sd_configs:
# - files:
# - 'targets/*.yaml'
# - job_name: ob_extra
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/ob/extra
# scheme: http
# file_sd_configs:
# - files:
# - 'targets/*.yaml'
# - job_name: agent
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/stat
# scheme: http
# file_sd_configs:
# - files:
# - 'targets/*.yaml'
# additional_parameters: # Additional parameters for Prometheus service, among which `web.listen-address`, `web.enable-lifecycle`, `storage.tsdb.path`, `config.file` and `web.config.file` cannot be set. Please set them in the corresponding configuration above if needed.
# - log.level: debug
grafana:
servers:
- 192.168.1.5
depends:
- prometheus
global:
home_path: /root/grafana
login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
# data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
# logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
# plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
# provisioning_dir: # folder that contains provisioning config files that grafana will apply on startup and while running, can be empty. The default value is $home_path/conf/provisioning.
# temp_data_lifetime: # How long temporary images in data directory should be kept. Supported modifiers h (hours), m (minutes), Use 0 to never clean up temporary files, can be empty. The default value is 24h.
# log_max_days: # Expired days of log file(delete after max days), can be empty. The default value is 7.
# domian: # The public facing domain name used to access grafana from a browser, can be empty. The default value is $server.ip.
# port: # The http port to use, can be empty. The default value is 3000.
# # list of datasources to insert/update depending on what's available in the database, can be empty.
# # For more parameter settings, please refer to https://grafana.com/docs/grafana/latest/administration/provisioning/#datasources
# datasources:
# name: # name of the datasource. Required and should not be 'OB-Prometheus'
# type: # datasource type. Required
# access: # access mode. direct or proxy. Required
# url: # the url of datasource
# list of dashboards providers that load dashboards into Grafana from the local filesystem, can be empty.
# For more information, please refer to https://grafana.com/docs/grafana/latest/administration/provisioning/#dashboards
# providers:
# name: # an unique provider name. Required and should not be 'OceanBase Metrics'
# type: # provider type. Default to 'file'
# options:
# path: # path to dashboard files on disk. Required when using the 'file' type
# # customize your Grafana instance by adding/modifying the custom configuration as follows
# # for more information, please refer to https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#configure-grafana
# # Here, setting parameters is required for format conversion.
# # For example, if the original grafana configuration format is
# #
# # [section1.section2]
# # key1 = value1
# # key2 = value2
# #
# # Then when writing the configuration below, you need to write it as
# #
# # section1:
# # section2:
# # key1: value1
# # key2: value2
# #
# # Here we only list one item, because there are more than 500 items. Please add them according to your own needs.
# customize_config:
# # original grafana configuration format is
# # [server]
# # protocol = http
# server:
# protocol: http
ocp-express:
depends:
- oceanbase-ce
......
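The `customize_config` comments above describe a one-to-one mapping between nested YAML keys and Grafana's INI sections. A small illustrative Python sketch of that flattening (not OBD's actual plugin code) shows the correspondence:

    # Illustrative only: {'server': {'protocol': 'http'}} -> "[server]\nprotocol = http"
    def to_grafana_ini(customize_config):
        sections = {}
        def walk(node, path):
            for key, value in node.items():
                if isinstance(value, dict):
                    walk(value, path + (key,))
                else:
                    sections.setdefault('.'.join(path), []).append((key, value))
        walk(customize_config, ())
        lines = []
        for section, items in sections.items():
            lines.append('[%s]' % section)
            lines.extend('%s = %s' % item for item in items)
        return '\n'.join(lines)

    print(to_grafana_ini({'server': {'protocol': 'http'}}))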
......@@ -28,8 +28,6 @@ oceanbase-ce:
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
skip_proxy_sys_private_check: true
enable_strict_kernel_release: false
# Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy.
# appname: obcluster
# root_password: # root user password
......@@ -109,6 +107,129 @@ obagent:
ip: 172.19.33.4
global:
home_path: /root/obagent
prometheus:
servers:
- 192.168.1.5
depends:
- obagent
global:
# The working directory for prometheus. prometheus is started under this directory. This is a required field.
home_path: /root/prometheus
# address: 0.0.0.0 # The ip address to bind to. Along with port, corresponds to the `web.listen-address` parameter.
# port: 9090 # The http port to use. Along with address, corresponds to the `web.listen-address` parameter.
# enable_lifecycle: true # Enable shutdown and reload via HTTP request. Corresponds to the `web.enable-lifecycle` parameter.
# data_dir: /root/prometheus/data # Base path for metrics storage. Corresponds to the `storage.tsdb.path` parameter.
# basic_auth_users: # Usernames and passwords that have full access to the web server via basic authentication. Corresponds to the `basic_auth_users` parameter.
# <username>: <password> # The format of `basic_auth_users` : the key is the user name and the value is the password.
# web_config: # Content of Prometheus web service config file. The format is consistent with the file. However, `basic_auth_users` cannot be set in it. Please set `basic_auth_users` above if needed. Corresponds to the `web.config.file` parameter.
# tls_server_config:
# # Certificate and key files for server to use to authenticate to client.
# cert_file: <filename>
# key_file: <filename>
# config: # Configuration of the Prometheus service. The format is consistent with the Prometheus config file. Corresponds to the `config.file` parameter.
# rule_files:
# - rules/*rules.yaml
# scrape_configs:
# - job_name: prometheus
# metrics_path: /metrics
# scheme: http
# static_configs:
# - targets:
# - localhost:9090
# - job_name: node
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/node/host
# scheme: http
# file_sd_configs: # Set the targets to be collected by reading local files. The example is to collect targets corresponding to all yaml files in the 'targets' directory under $home_path.
# - files:
# - 'targets/*.yaml'
# - job_name: ob_basic
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/ob/basic
# scheme: http
# file_sd_configs:
# - files:
# - 'targets/*.yaml'
# - job_name: ob_extra
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/ob/extra
# scheme: http
# file_sd_configs:
# - files:
# - 'targets/*.yaml'
# - job_name: agent
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/stat
# scheme: http
# file_sd_configs:
# - files:
# - 'targets/*.yaml'
# additional_parameters: # Additional parameters for Prometheus service, among which `web.listen-address`, `web.enable-lifecycle`, `storage.tsdb.path`, `config.file` and `web.config.file` cannot be set. Please set them in the corresponding configuration above if needed.
# - log.level: debug
grafana:
servers:
- 192.168.1.5
depends:
- prometheus
global:
home_path: /root/grafana
login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
# data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
# logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
# plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
# provisioning_dir: # folder that contains provisioning config files that grafana will apply on startup and while running, can be empty. The default value is $home_path/conf/provisioning.
# temp_data_lifetime: # How long temporary images in data directory should be kept. Supported modifiers h (hours), m (minutes), Use 0 to never clean up temporary files, can be empty. The default value is 24h.
# log_max_days: # Expired days of log file(delete after max days), can be empty. The default value is 7.
# domian: # The public facing domain name used to access grafana from a browser, can be empty. The default value is $server.ip.
# port: # The http port to use, can be empty. The default value is 3000.
# # list of datasources to insert/update depending on what's available in the database, can be empty.
# # For more parameter settings, please refer to https://grafana.com/docs/grafana/latest/administration/provisioning/#datasources
# datasources:
# name: # name of the datasource. Required and should not be 'OB-Prometheus'
# type: # datasource type. Required
# access: # access mode. direct or proxy. Required
# url: # the url of datasource
# list of dashboards providers that load dashboards into Grafana from the local filesystem, can be empty.
# For more information, please refer to https://grafana.com/docs/grafana/latest/administration/provisioning/#dashboards
# providers:
# name: # an unique provider name. Required and should not be 'OceanBase Metrics'
# type: # provider type. Default to 'file'
# options:
# path: # path to dashboard files on disk. Required when using the 'file' type
# # customize your Grafana instance by adding/modifying the custom configuration as follows
# # for more information, please refer to https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#configure-grafana
# # Here, setting parameters is required for format conversion.
# # For example, if the original grafana configuration format is
# #
# # [section1.section2]
# # key1 = value1
# # key2 = value2
# #
# # Then when writing the configuration below, you need to write it as
# #
# # section1:
# # section2:
# # key1: value1
# # key2: value2
# #
# # Here we only list one item, because there are more than 500 items. Please add them according to your own needs.
# customize_config:
# # original grafana configuration format is
# # [server]
# # protocol = http
# server:
# protocol: http
ocp-express:
depends:
- oceanbase-ce
......
......@@ -149,6 +149,129 @@ obagent:
# observer_log_path: /root/observer/log
# Monitor status for OceanBase Database. Active is to enable. Inactive is to disable. The default value is active. When you deploy an cluster automatically, OBD decides whether to enable this parameter based on depends.
# ob_monitor_status: active
prometheus:
servers:
- 192.168.1.5
depends:
- obagent
global:
# The working directory for prometheus. prometheus is started under this directory. This is a required field.
home_path: /root/prometheus
# address: 0.0.0.0 # The ip address to bind to. Along with port, corresponds to the `web.listen-address` parameter.
# port: 9090 # The http port to use. Along with address, corresponds to the `web.listen-address` parameter.
# enable_lifecycle: true # Enable shutdown and reload via HTTP request. Corresponds to the `web.enable-lifecycle` parameter.
# data_dir: /root/prometheus/data # Base path for metrics storage. Corresponds to the `storage.tsdb.path` parameter.
# basic_auth_users: # Usernames and passwords that have full access to the web server via basic authentication. Corresponds to the `basic_auth_users` parameter.
# <username>: <password> # The format of `basic_auth_users` : the key is the user name and the value is the password.
# web_config: # Content of Prometheus web service config file. The format is consistent with the file. However, `basic_auth_users` cannot be set in it. Please set `basic_auth_users` above if needed. Corresponds to the `web.config.file` parameter.
# tls_server_config:
# # Certificate and key files for server to use to authenticate to client.
# cert_file: <filename>
# key_file: <filename>
# config: # Configuration of the Prometheus service. The format is consistent with the Prometheus config file. Corresponds to the `config.file` parameter.
# rule_files:
# - rules/*rules.yaml
# scrape_configs:
# - job_name: prometheus
# metrics_path: /metrics
# scheme: http
# static_configs:
# - targets:
# - localhost:9090
# - job_name: node
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/node/host
# scheme: http
# file_sd_configs: # Set the targets to be collected by reading local files. The example is to collect targets corresponding to all yaml files in the 'targets' directory under $home_path.
# - files:
# - 'targets/*.yaml'
# - job_name: ob_basic
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/ob/basic
# scheme: http
# file_sd_configs:
# - files:
# - 'targets/*.yaml'
# - job_name: ob_extra
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/ob/extra
# scheme: http
# file_sd_configs:
# - files:
# - 'targets/*.yaml'
# - job_name: agent
# basic_auth:
# username: admin
# password: root
# metrics_path: /metrics/stat
# scheme: http
# file_sd_configs:
# - files:
# - 'targets/*.yaml'
# additional_parameters: # Additional parameters for Prometheus service, among which `web.listen-address`, `web.enable-lifecycle`, `storage.tsdb.path`, `config.file` and `web.config.file` cannot be set. Please set them in the corresponding configuration above if needed.
# - log.level: debug
grafana:
servers:
- 192.168.1.5
depends:
- prometheus
global:
home_path: /root/grafana
login_password: oceanbase # Grafana login password. The default value is 'oceanbase'.
# data_dir: # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used).$data_dir can be empty. The default value is $home_path/data.
# logs_dir: # Directory where grafana can store logs, can be empty. The default value is $data_dir/log.
# plugins_dir: # Directory where grafana will automatically scan and look for plugins, can be empty. The default value is $data_dir/plugins.
# provisioning_dir: # folder that contains provisioning config files that grafana will apply on startup and while running, can be empty. The default value is $home_path/conf/provisioning.
# temp_data_lifetime: # How long temporary images in data directory should be kept. Supported modifiers h (hours), m (minutes), Use 0 to never clean up temporary files, can be empty. The default value is 24h.
# log_max_days: # Expired days of log file(delete after max days), can be empty. The default value is 7.
# domian: # The public facing domain name used to access grafana from a browser, can be empty. The default value is $server.ip.
# port: # The http port to use, can be empty. The default value is 3000.
# # list of datasources to insert/update depending on what's available in the database, can be empty.
# # For more parameter settings, please refer to https://grafana.com/docs/grafana/latest/administration/provisioning/#datasources
# datasources:
# name: # name of the datasource. Required and should not be 'OB-Prometheus'
# type: # datasource type. Required
# access: # access mode. direct or proxy. Required
# url: # the url of datasource
# list of dashboards providers that load dashboards into Grafana from the local filesystem, can be empty.
# For more information, please refer to https://grafana.com/docs/grafana/latest/administration/provisioning/#dashboards
# providers:
# name: # an unique provider name. Required and should not be 'OceanBase Metrics'
# type: # provider type. Default to 'file'
# options:
# path: # path to dashboard files on disk. Required when using the 'file' type
# # customize your Grafana instance by adding/modifying the custom configuration as follows
# # for more information, please refer to https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#configure-grafana
# # Here, setting parameters is required for format conversion.
# # For example, if the original grafana configuration format is
# #
# # [section1.section2]
# # key1 = value1
# # key2 = value2
# #
# # Then when writing the configuration below, you need to write it as
# #
# # section1:
# # section2:
# # key1: value1
# # key2: value2
# #
# # Here we only list one item, because there are more than 500 items. Please add them according to your own needs.
# customize_config:
# # original grafana configuration format is
# # [server]
# # protocol = http
# server:
# protocol: http
ocp-express:
depends:
- oceanbase-ce
......
## Only need to configure when remote login is required
# user:
# username: your username
# password: your password if need
# key_file: your ssh-key file path if need
# port: your ssh port, default 22
# timeout: ssh connection timeout (second), default 30
oceanbase-ce:
servers:
- name: server1
# Please don't use hostname, only IP can be supported
ip: 172.19.33.2
- name: server2
ip: 172.19.33.3
- name: server3
ip: 172.19.33.4
global:
# Please set devname as the network adaptor's name whose ip is in the setting of severs.
# if set severs as "127.0.0.1", please set devname as "lo"
# if current ip is 192.168.1.10, and the ip's network adaptor's name is "eth0", please use "eth0"
devname: eth0
cluster_id: 1
# please set memory limit to a suitable value which is matching resource.
memory_limit: 6G # The maximum running memory for an observer
system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
datafile_size: 20G # Size of the data file.
log_disk_size: 15G # The size of disk space used by the clog files.
cpu_count: 16
production_mode: false
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
# Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy.
# appname: obcluster
# root_password: # root user password
# proxyro_password: # proxyro user pasword, consistent with obproxy's observer_sys_password, can be empty
# ocp_meta_db: ocp_express # The database name of ocp express meta
# ocp_meta_username: meta # The username of ocp express meta
# ocp_meta_password: '' # The password of ocp express meta
# ocp_agent_monitor_password: '' # The password for obagent monitor user
ocp_meta_tenant: # The config for ocp express meta tenant
tenant_name: ocp
max_cpu: 1
memory_size: 2G
log_disk_size: 7680M # The recommend value is (4608 + (expect node num + expect tenant num) * 512) M.
server1:
mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started.
rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started.
# The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
home_path: /root/observer
# The directory for data storage. The default value is $home_path/store.
# data_dir: /data
# The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
# redo_dir: /redo
zone: zone1
server2:
mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started.
rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started.
# The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
home_path: /root/observer
# The directory for data storage. The default value is $home_path/store.
# data_dir: /data
# The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
# redo_dir: /redo
zone: zone2
server3:
mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started.
rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started.
# The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
home_path: /root/observer
# The directory for data storage. The default value is $home_path/store.
# data_dir: /data
# The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
# redo_dir: /redo
zone: zone3
obproxy-ce:
# Set dependent components for the component.
# When the associated configurations are not done, OBD will automatically get the these configurations from the dependent components.
depends:
- oceanbase-ce
servers:
- 172.19.33.6
global:
listen_port: 2883 # External port. The default value is 2883.
prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884.
home_path: /root/obproxy
# oceanbase root server list
# format: ip:mysql_port;ip:mysql_port. When a depends exists, OBD gets this value from the oceanbase-ce of the depends.
# rs_list: 192.168.1.2:2881;192.168.1.3:2881;192.168.1.4:2881
enable_cluster_checkout: false
# observer cluster name, consistent with oceanbase-ce's appname. When a depends exists, OBD gets this value from the oceanbase-ce of the depends.
# cluster_name: obcluster
skip_proxy_sys_private_check: true
enable_strict_kernel_release: false
# obproxy_sys_password: # obproxy sys user password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends.
# observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends.
obagent:
depends:
- oceanbase-ce
servers:
- name: server1
# Please don't use hostname, only IP can be supported
ip: 172.19.33.2
- name: server2
ip: 172.19.33.3
- name: server3
ip: 172.19.33.4
global:
home_path: /root/obagent
ocp-express:
depends:
- oceanbase-ce
- obproxy-ce
- obagent
servers:
- 172.19.33.5
global:
# The working directory for prometheus. prometheus is started under this directory. This is a required field.
home_path: /root/ocp-express
# log_dir: /home/oceanbase/ocp-express/log # The log directory of ocp express server. The default value is {home_path}/log.
memory_size: 1G # The memory size of ocp-express server. The recommend value is 512MB * (expect node num + expect tenant num) * 60MB.
# logging_file_total_size_cap: 10G # The total log file size of ocp-express server
# logging_file_max_history: 1 # The maximum of retention days the log archive log files to keep. The default value is unlimited
\ No newline at end of file
## Only need to configure when remote login is required
# user:
# username: your username
# password: your password if need
# key_file: your ssh-key file path if need
# port: your ssh port, default 22
# timeout: ssh connection timeout (second), default 30
oceanbase-ce:
servers:
- name: server1
# Please don't use hostname, only IP can be supported
ip: 172.19.33.2
- name: server2
ip: 172.19.33.3
- name: server3
ip: 172.19.33.4
global:
# Please set devname as the network adaptor's name whose ip is in the setting of severs.
# if set severs as "127.0.0.1", please set devname as "lo"
# if current ip is 192.168.1.10, and the ip's network adaptor's name is "eth0", please use "eth0"
devname: eth0
# if current hardware's memory capacity is smaller than 50G, please use the setting of "mini-single-example.yaml" and do a small adjustment.
memory_limit: 64G # The maximum running memory for an observer
# The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
system_memory: 30G
datafile_size: 192G # Size of the data file.
log_disk_size: 192G # The size of disk space used by the clog files.
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
# Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy.
# appname: obcluster
# root_password: # root user password
# proxyro_password: # proxyro user pasword, consistent with obproxy's observer_sys_password, can be empty
# ocp_meta_db: ocp_express # The database name of ocp express meta
# ocp_meta_username: meta # The username of ocp express meta
# ocp_meta_password: '' # The password of ocp express meta
# ocp_agent_monitor_password: '' # The password for obagent monitor user
ocp_meta_tenant: # The config for ocp express meta tenant
tenant_name: ocp
max_cpu: 1
memory_size: 2G
log_disk_size: 7680M # The recommend value is (4608 + (expect node num + expect tenant num) * 512) M.
# In this example , support multiple ob process in single node, so different process use different ports.
# If deploy ob cluster in multiple nodes, the port and path setting can be same.
server1:
mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started.
rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started.
# The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
home_path: /root/observer
# The directory for data storage. The default value is $home_path/store.
# data_dir: /data
# The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
# redo_dir: /redo
zone: zone1
server2:
mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started.
rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started.
# The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
home_path: /root/observer
# The directory for data storage. The default value is $home_path/store.
# data_dir: /data
# The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
# redo_dir: /redo
zone: zone2
server3:
mysql_port: 2881 # External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started.
rpc_port: 2882 # Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started.
# The working directory for OceanBase Database. OceanBase Database is started under this directory. This is a required field.
home_path: /root/observer
# The directory for data storage. The default value is $home_path/store.
# data_dir: /data
# The directory for clog, ilog, and slog. The default value is the same as the data_dir value.
# redo_dir: /redo
zone: zone3
obproxy-ce:
# Set dependent components for the component.
# When the associated configurations are not done, OBD will automatically get the these configurations from the dependent components.
depends:
- oceanbase-ce
servers:
- 172.19.33.6
global:
listen_port: 2883 # External port. The default value is 2883.
prometheus_listen_port: 2884 # The Prometheus port. The default value is 2884.
home_path: /root/obproxy
# oceanbase root server list
# format: ip:mysql_port;ip:mysql_port. When a depends exists, OBD gets this value from the oceanbase-ce of the depends.
# rs_list: 192.168.1.2:2881;192.168.1.3:2881;192.168.1.4:2881
enable_cluster_checkout: false
# observer cluster name, consistent with oceanbase-ce's appname. When a depends exists, OBD gets this value from the oceanbase-ce of the depends.
# cluster_name: obcluster
skip_proxy_sys_private_check: true
enable_strict_kernel_release: false
# obproxy_sys_password: # obproxy sys user password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends.
# observer_sys_password: # proxyro user pasword, consistent with oceanbase-ce's proxyro_password, can be empty. When a depends exists, OBD gets this value from the oceanbase-ce of the depends.
obagent:
depends:
- oceanbase-ce
servers:
- name: server1
# Please don't use hostname, only IP can be supported
ip: 172.19.33.2
- name: server2
ip: 172.19.33.3
- name: server3
ip: 172.19.33.4
global:
home_path: /root/obagent
ocp-express:
depends:
- oceanbase-ce
- obproxy-ce
- obagent
servers:
- 172.19.33.5
global:
# The working directory for prometheus. prometheus is started under this directory. This is a required field.
home_path: /root/ocp-express
# log_dir: /home/oceanbase/ocp-express/log # The log directory of ocp express server. The default value is {home_path}/log.
memory_size: 1G # The memory size of ocp-express server. The recommend value is 512MB * (expect node num + expect tenant num) * 60MB.
# logging_file_total_size_cap: 10G # The total log file size of ocp-express server
# logging_file_max_history: 1 # The maximum of retention days the log archive log files to keep. The default value is unlimited
\ No newline at end of file
......@@ -24,37 +24,22 @@ oceanbase-ce:
# Please set devname as the network adaptor's name whose ip is in the setting of severs.
# if set severs as "127.0.0.1", please set devname as "lo"
# if current ip is 192.168.1.10, and the ip's network adaptor's name is "eth0", please use "eth0"
# devname: eth0
# External port for OceanBase Database. The default value is 2881. DO NOT change this value after the cluster is started.
# mysql_port: 2881
# Internal port for OceanBase Database. The default value is 2882. DO NOT change this value after the cluster is started.
# rpc_port: 2882
# Defines the zone for an observer. The default value is zone1.
# zone: zone1
# The maximum running memory for an observer. When ignored, autodeploy calculates this value based on the current server available resource.
# memory_limit: 58G
# The percentage of the maximum available memory to the total memory. This value takes effect only when memory_limit is 0. The default value is 80.
# memory_limit_percentage: 80
# The reserved system memory. system_memory is reserved for general tenants. The default value is 30G. Autodeploy calculates this value based on the current server available resource.
# system_memory: 22G
# The size of a data file. When ignored, autodeploy calculates this value based on the current server available resource.
# datafile_size: 200G
# The size of disk space used by the clog files. When ignored, autodeploy calculates this value based on the current server available resource.
# log_disk_size: 66G
# System log level. The default value is WDIAG.
# syslog_level: WDIAG
# Print system logs whose levels are higher than WARNING to a separate log file. The default value is true. The default value for autodeploy mode is false.
# enable_syslog_wf: false
# Enable auto system log recycling or not. The default value is false. The default value for autodeploy mode is on.
# enable_syslog_recycle: true
# The maximum number of reserved log files before enabling auto recycling. When set to 0, no logs are deleted. The default value for autodeploy mode is 4.
# max_syslog_file_count: 4
devname: eth0
cluster_id: 1
# please set memory limit to a suitable value which is matching resource.
memory_limit: 6G # The maximum running memory for an observer
system_memory: 1G # The reserved system memory. system_memory is reserved for general tenants. The default value is 30G.
datafile_size: 20G # Size of the data file.
log_disk_size: 15G # The size of disk space used by the clog files.
cpu_count: 16
production_mode: false
enable_syslog_wf: false # Print system logs whose levels are higher than WARNING to a separate log file. The default value is true.
enable_syslog_recycle: true # Enable auto system log recycling or not. The default value is false.
max_syslog_file_count: 4 # The maximum number of reserved log files before enabling auto recycling. The default value is 0.
# Cluster name for OceanBase Database. The default value is obcluster. When you deploy OceanBase Database and obproxy, this value must be the same as the cluster_name for obproxy.
# appname: obcluster
# Password for root. The default value is empty.
# root_password:
# Password for proxyro. proxyro_password must be the same as observer_sys_password. The default value is empty.
# proxyro_password:
# root_password: # root user password
# proxyro_password: # proxyro user pasword, consistent with obproxy's observer_sys_password, can be empty
server1:
zone: zone1
server2:
......
......@@ -6,6 +6,7 @@
# port: your ssh port, default 22
# timeout: ssh connection timeout (second), default 30
oceanbase-ce:
version: 3.1.4
servers:
- name: server1
# Please don't use hostname, only IP can be supported
......
test:
system_config:
- name: proxy_mem_limited
value: 4G
- name: enable_compression_protocol
value: false
need_restart: true
value_type: BOOL
- name: enable_qos
value: false
\ No newline at end of file
system_config: []
\ No newline at end of file
......@@ -5,6 +5,4 @@ build:
- name: enable_compression_protocol
value: false
need_restart: true
value_type: BOOL
- name: enable_qos
value: false
\ No newline at end of file
value_type: BOOL
\ No newline at end of file
test:
variables:
- name: binlog_row_image
value: MINIMAL
- name: auto_increment_cache_size
value: 10000000
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 3
optimizer: sleep
- name: enable_early_lock_release
value: 'false'
optimizer: tenant
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: _enable_defensive_check
value: false
- name: _rowsets_enabled
value: 'false'
optimizer: tenant
- name: _enable_newsort
value: 'false'
- name: _trace_control_info
value: ''
optimizer: tenant
- name: _lcl_op_interval
value: 0ms
- name: writing_throttling_trigger_percentage
value: 100
optimizer: tenant
- name: default_auto_increment_mode
value: 'NOORDER'
optimizer: tenant
- name: enable_monotonic_weak_read
value: 'false'
optimizer: tenant
- name: _enable_adaptive_compaction
value: 'false'
optimizer: tenant
- name: enable_record_trace_log
value: 'false'
- name: cpu_quota_concurrency
value: 2
optimizer: tenant
- name: _ob_enable_prepared_statement
value: 'true'
- name: _pushdown_storage_level
value: 0
optimizer: tenant
- name: ignore_replay_checksum_error
value: 'true'
- name: weak_read_version_refresh_interval
value: 2s
- name: freeze_trigger_percentage
value: 40
optimizer: tenant
\ No newline at end of file
value: 'false'
\ No newline at end of file
build:
variables:
- name: binlog_row_image
value: MINIMAL
- name: auto_increment_cache_size
value: 10000000
- name: ob_query_timeout
value: 36000000000
- name: ob_trx_timeout
value: 36000000000
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 5
optimizer: sleep
- name: enable_early_lock_release
value: 'false'
optimizer: tenant
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: enable_record_trace_log
value: 'false'
- name: _enable_defensive_check
value: false
- name: _rowsets_enabled
value: false
optimizer: tenant
- name: _enable_newsort
value: false
- name: _trace_control_info
value: ''
optimizer: tenant
- name: _lcl_op_interval
value: 0ms
- name: default_auto_increment_mode
value: 'NOORDER'
optimizer: tenant
- name: enable_monotonic_weak_read
value: 'false'
optimizer: tenant
- name: _enable_adaptive_compaction
value: 'false'
optimizer: tenant
- name: enable_record_trace_log
value: 'false'
- name: cpu_quota_concurrency
value: 2
optimizer: tenant
- name: _ob_enable_prepared_statement
value: 'true'
- name: _pushdown_storage_level
value: 0
- name: _rowsets_enabled
value: false
optimizer: tenant
- name: ignore_replay_checksum_error
value: 'true'
- name: weak_read_version_refresh_interval
value: 2s
- name: freeze_trigger_percentage
value: 40
optimizer: tenant
test:
system_config:
- name: writing_throttling_trigger_percentage
value: 100
- name: _trace_control_info
value: ''
optimizer: tenant
\ No newline at end of file
test:
system_config:
- name: enable_sql_audit
value: false
- name: syslog_level
value: PERF
- name: enable_perf_event
value: false
- name: enable_record_trace_log
value: 'false'
variables:
- name: ob_sql_work_area_percentage
value: 80
- name: ob_query_timeout
value: 36000000000
- name: ob_trx_timeout
value: 36000000000
- name: max_allowed_packet
value: 67108864
- name: secure_file_priv
value: ''
- name: parallel_servers_target
value: int(max_cpu * server_num * 8)
expression: true
exec_sql:
- name: clean_cache
optimizer: clean_cache
\ No newline at end of file
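In the test config above, the `expression: true` flag marks a value that is computed from the tenant's resources rather than taken literally. Assuming the mechanism simply evaluates the string with the relevant figures in scope (an assumption about the test plugin, not a confirmed detail), the computation would look like:

    # Assumed evaluation of an `expression: true` value; names and mechanism are illustrative.
    max_cpu, server_num = 16, 3
    expr = 'int(max_cpu * server_num * 8)'
    parallel_servers_target = eval(expr, {}, {'max_cpu': max_cpu, 'server_num': server_num})
    print(parallel_servers_target)   # 384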
test:
variables:
- name: binlog_row_image
value: MINIMAL
- name: auto_increment_cache_size
value: 10000000
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 3
optimizer: sleep
- name: enable_early_lock_release
value: 'false'
optimizer: tenant
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: _enable_defensive_check
value: false
- name: _rowsets_enabled
value: 'false'
optimizer: tenant
- name: _enable_newsort
value: 'false'
- name: _trace_control_info
value: ''
optimizer: tenant
- name: _lcl_op_interval
value: 0ms
- name: writing_throttling_trigger_percentage
value: 100
optimizer: tenant
- name: default_auto_increment_mode
value: 'NOORDER'
optimizer: tenant
- name: enable_monotonic_weak_read
value: 'false'
optimizer: tenant
- name: _enable_adaptive_compaction
value: 'false'
optimizer: tenant
- name: enable_record_trace_log
value: 'false'
- name: cpu_quota_concurrency
value: 2
optimizer: tenant
- name: _ob_enable_prepared_statement
value: 'true'
- name: _pushdown_storage_level
value: 0
optimizer: tenant
- name: ignore_replay_checksum_error
value: 'true'
- name: weak_read_version_refresh_interval
value: 2s
- name: freeze_trigger_percentage
value: 40
optimizer: tenant
\ No newline at end of file
value: 'false'
\ No newline at end of file
build:
variables:
- name: binlog_row_image
value: MINIMAL
- name: auto_increment_cache_size
value: 10000000
- name: ob_query_timeout
value: 36000000000
- name: ob_trx_timeout
value: 36000000000
system_config:
- name: enable_sql_audit
value: 'false'
- name: sleep
value: 5
optimizer: sleep
- name: enable_early_lock_release
value: 'false'
optimizer: tenant
- name: syslog_level
value: 'ERROR'
- name: enable_perf_event
value: false
value_type: BOOL
- name: enable_record_trace_log
value: 'false'
- name: _enable_defensive_check
value: false
- name: _rowsets_enabled
value: false
optimizer: tenant
- name: _enable_newsort
value: false
- name: _trace_control_info
value: ''
optimizer: tenant
- name: _lcl_op_interval
value: 0ms
- name: default_auto_increment_mode
value: 'NOORDER'
optimizer: tenant
- name: enable_monotonic_weak_read
value: 'false'
optimizer: tenant
- name: _enable_adaptive_compaction
value: 'false'
optimizer: tenant
- name: enable_record_trace_log
value: 'false'
- name: cpu_quota_concurrency
value: 2
optimizer: tenant
- name: _ob_enable_prepared_statement
value: 'true'
- name: _pushdown_storage_level
value: 0
- name: _rowsets_enabled
value: false
optimizer: tenant
- name: ignore_replay_checksum_error
value: 'true'
- name: weak_read_version_refresh_interval
value: 2s
- name: freeze_trigger_percentage
value: 40
optimizer: tenant
test:
system_config:
- name: writing_throttling_trigger_percentage
value: 100
- name: _trace_control_info
value: ''
optimizer: tenant
\ No newline at end of file
......@@ -28,7 +28,6 @@ import resource
import hashlib
from tool import NetUtil
from ssh import LocalClient
from const import VERSION, REVISION
......@@ -46,6 +45,7 @@ shell_command_map = {
"os_name": 'cat /etc/os-release | grep "^ID=" | cut -f2 -d=',
"os_release": 'cat /etc/os-release | grep "^VERSION_ID=" | cut -f2 -d='
}
current_client = None
def shell_command(func):
......@@ -53,8 +53,9 @@ def shell_command(func):
name = func.__name__
command = shell_command_map.get(name)
assert command, f"{name} is not in shell_command.yaml"
assert current_client, "current_client is None"
res = LocalClient.execute_command(command)
res = current_client.execute_command(command)
kwargs["bash_result"] = res.stdout.strip() if res.code == 0 else None
return func(*args, **kwargs)
......@@ -78,9 +79,9 @@ class BaseInfo:
class HostInfo:
@staticmethod
def host_ip_hash():
def host_ip_hash(ip=None):
sha1 = hashlib.sha1()
sha1.update(NetUtil.get_host_ip().encode())
sha1.update(ip.encode() if ip else NetUtil.get_host_ip().encode())
return sha1.hexdigest()
@staticmethod
......@@ -148,7 +149,7 @@ class DiskInfo:
def get_disks_info():
data = []
sha1 = hashlib.sha1()
for _ in LocalClient.execute_command("df -h | awk '{if(NR>1)print}'").stdout.strip().split('\n'):
for _ in current_client.execute_command("df -h | awk '{if(NR>1)print}'").stdout.strip().split('\n'):
_disk_info = {}
_ = [i for i in _.split(' ') if i != '']
_disk_info['deviceName'] = _[0]
......@@ -181,9 +182,6 @@ class MachineInfo:
class ObdInfo:
@staticmethod
def obd_type():
return sys.argv[0]
@staticmethod
def obd_version(*args, **kwargs):
......@@ -194,15 +192,45 @@ class ObdInfo:
return REVISION
def telemetry_machine_data():
def init_telemetry_data(opt_data):
data = telemetry_base_data()
for component, _ in json.loads(opt_data).items():
for plugin_name, _ in _.items():
plugin_data = {}
plugin_data['component'] = component
plugin_data['name'] = plugin_name
plugin_data['runTime'] = _['time']
plugin_data['runResult'] = _['result']
data['plugins'].append(plugin_data)
return data
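# telemetry_base_data assembles the common report envelope (reporter, report time, event id, OBD version/revision) shared by every telemetry post.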
def telemetry_base_data():
data = {}
data['reporter'] = BaseInfo.reporter()
data['reportTime'] = BaseInfo.report_time()
data['eventId'] = BaseInfo.event_id()
data['telemetryVersion'] = 1
data['obdVersion'] = ObdInfo.obd_version()
data['obdRevision'] = ObdInfo.obd_revision()
data['hosts'] = []
data['instances'] = []
data['plugins'] = []
return data
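# telemetry_machine_data collects host hardware facts once per unique host; a host whose ip hash is already present in data['hosts'] is skipped.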
def telemetry_machine_data(data):
ip_hash = HostInfo.host_ip_hash(current_client.config.host)
for host in data['hosts']:
if host['basic']['hostHash'] == ip_hash:
return data
_hosts = dict(basic={}, cpu={}, memory={}, disks=[], os={}, ulimit={})
_hosts['basic']['hostHash'] = HostInfo.host_ip_hash()
_hosts['basic']['hostHash'] = ip_hash
_hosts['basic']['hostType'] = HostInfo.host_type()
_hosts['cpu']['physicalCores'] = CpuInfo.cpu_physical_cores()
......@@ -223,47 +251,41 @@ def telemetry_machine_data():
_hosts['ulimit'] = MachineInfo.get_nofile()
data['hosts'].append(_hosts)
data['instances'] = []
obd_info = {}
obd_info['type'] = ObdInfo.obd_type()
obd_info['version'] = ObdInfo.obd_version()
obd_info['revision'] = ObdInfo.obd_revision()
data['instances'].append(obd_info)
return data
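# telemetry_info_collect aggregates plugin run results, host facts for every server, and per-instance configuration (memory/cpu/disk sizes for OceanBase) into one payload shared through plugin_context variables.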
def telemetry_info_collect(plugin_context, *args, **kwargs):
def telemetry_info_collect(plugin_context, telemetry_post_data={}, *args, **kwargs):
global current_client
repositories = plugin_context.repositories
repository = kwargs.get('target_repository')
options = plugin_context.options
stdio = plugin_context.stdio
clients = plugin_context.clients
cluster_config = plugin_context.cluster_config
post_data = telemetry_machine_data()
if not telemetry_post_data:
options = plugin_context.options
telemetry_post_data = init_telemetry_data(getattr(options, 'data', {}))
for server in cluster_config.servers:
current_client = clients[server]
telemetry_post_data = telemetry_machine_data(telemetry_post_data)
for repository in repositories:
data = {}
data['type'] = repository.name
data['version'] = repository.version
data['revision'] = repository.hash
post_data['instances'].append(data)
if repository.name != cluster_config.name:
continue
is_ob = cluster_config.name in ['oceanbase', 'oceanbase-ce']
for component, _ in json.loads(getattr(options, 'data', {})).items():
for plugin_name, _ in _.items():
for server in cluster_config.servers:
data = {}
data['type'] = 'plugins'
data['component'] = component
data['name'] = plugin_name
data['runTime'] = _['time']
data['runResult'] = _['result']
post_data['instances'].append(data)
if repository.name in ['oceanbase', 'oceanbase-ce']:
_ = cluster_config.get_global_conf()
data = {}
data['type'] = 'config'
data['name'] = repository.name
data['memoryLimit'] = _.get('memory_limit', '0') if _ else '0'
data['cpuCount'] = _.get('cpu_count', '0') if _ else '0'
data['syslogLevel'] = _.get('syslog_level', 'INFO') if _ else 'INFO'
post_data['instances'].append(data)
return plugin_context.return_true(post_data=json.dumps(post_data, indent=4))
\ No newline at end of file
data['type'] = repository.name
data['version'] = repository.version
data['revision'] = repository.release
config = cluster_config.get_server_conf(server)
data['hostHash'] = HostInfo.host_ip_hash(server.ip)
if is_ob:
data['memoryLimit'] = config.get('memory_limit', '0')
data['dataFileSize'] = config.get('datafile_size', '0')
data['logDiskSize'] = config.get('log_disk_size', '0')
data['cpuCount'] = config.get('cpu_count', '0')
telemetry_post_data['instances'].append(data)
plugin_context.set_variable('telemetry_post_data', telemetry_post_data)
return plugin_context.return_true(telemetry_post_data=telemetry_post_data)
\ No newline at end of file
......@@ -20,20 +20,25 @@
from __future__ import absolute_import, division, print_function
import json
import requests
from const import TELEMETRY_URL
from tool import timeout
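# telemetry_post serializes the collected payload and POSTs it to TELEMETRY_URL inside a 30s timeout; failures are logged but never interrupt the caller.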
def telemetry_post(plugin_context, *args, **kwargs):
def telemetry_post(plugin_context, telemetry_post_data={}, *args, **kwargs):
stdio = plugin_context.stdio
data = kwargs.get('data', {})
if data:
if telemetry_post_data:
data = json.dumps(telemetry_post_data, indent=4)
stdio.verbose('post data: %s' % data)
with timeout(30):
requests.post(url=TELEMETRY_URL, data=data)
try:
with timeout(30):
requests.post(url=TELEMETRY_URL, data=json.dumps({'content': data}), headers={'sig': 'dbe97393a695335d67de91dd4049ba', 'Content-Type': 'application/json'})
return plugin_context.return_true()
except:
stdio.exception('post data failed')
return plugin_context.return_false()
else:
stdio.verbose('nothing to post')
return plugin_context.return_false()
\ No newline at end of file
......@@ -25,6 +25,8 @@ import time
import hashlib
from copy import deepcopy
import re
from _errno import EC_CONFLICT_PORT
stdio = None
......@@ -250,7 +252,7 @@ def start(plugin_context, need_bootstrap=False, *args, **kwargs):
stdio.verbose('%s program health check' % server)
remote_pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip()
if remote_pid:
for pid in remote_pid.split('\n'):
for pid in re.findall(r'\d+', remote_pid):
confirm = confirm_port(client, pid, int(server_config["listen_port"]))
if confirm:
proxyd_Pid_path = os.path.join(server_config["home_path"], 'run/obproxyd-%s-%d.pid' % (server.ip, server_config["listen_port"]))
......
......@@ -24,6 +24,7 @@ import datetime
from tool import TimeUtils
from subprocess import call, Popen, PIPE
import _errno as err
import os
def gather_all(plugin_context, *args, **kwargs):
......@@ -87,7 +88,7 @@ def gather_all(plugin_context, *args, **kwargs):
since_option = get_option('since')
grep_option = get_option('grep')
encrypt_option = get_option('encrypt')
store_dir_option = get_option('store_dir')
store_dir_option = os.path.abspath(get_option('store_dir'))
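# store_dir is resolved to an absolute path here (and in the other gather plugins below), presumably so obdiag always writes results to a stable location regardless of the later working directory.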
ob_install_dir_option = global_conf.get('home_path')
obdiag_install_dir = get_option('obdiag_dir')
clog_dir = ob_install_dir_option + "/store"
......
......@@ -78,7 +78,7 @@ def gather_clog(plugin_context, *args, **kwargs):
to_option = get_option('to')
since_option = get_option('since')
encrypt_option = get_option('encrypt')
store_dir_option = get_option('store_dir')
store_dir_option = os.path.abspath(get_option('store_dir'))
ob_install_dir_option = global_conf.get('home_path')
data_dir = ob_install_dir_option + "/store"
obdiag_install_dir = get_option('obdiag_dir')
......
......@@ -21,6 +21,7 @@
from __future__ import absolute_import, division, print_function
from ssh import LocalClient
import datetime
import os
from tool import TimeUtils
from subprocess import call, Popen, PIPE
import _errno as err
......@@ -81,10 +82,9 @@ def gather_log(plugin_context, *args, **kwargs):
since_option = get_option('since')
grep_option = get_option('grep')
encrypt_option = get_option('encrypt')
store_dir_option = get_option('store_dir')
store_dir_option = os.path.abspath(get_option('store_dir'))
ob_install_dir_option = global_conf.get('home_path')
obdiag_install_dir = get_option('obdiag_dir')
try:
if (not from_option) and (not to_option) and since_option:
now_time = datetime.datetime.now()
......
......@@ -21,6 +21,7 @@
from __future__ import absolute_import, division, print_function
from ssh import LocalClient
import datetime
import os
from tool import TimeUtils
from subprocess import call, Popen, PIPE
import _errno as err
......@@ -81,7 +82,7 @@ def gather_obproxy_log(plugin_context, *args, **kwargs):
since_option = get_option('since')
grep_option = get_option('grep')
encrypt_option = get_option('encrypt')
store_dir_option = get_option('store_dir')
store_dir_option = os.path.abspath(get_option('store_dir'))
obproxy_install_dir_option=global_conf.get('home_path')
obdiag_install_dir = get_option('obdiag_dir')
......
......@@ -22,6 +22,7 @@ from __future__ import absolute_import, division, print_function
from ssh import LocalClient
from subprocess import call, Popen, PIPE
import _errno as err
import os
def gather_perf(plugin_context, *args, **kwargs):
......@@ -70,7 +71,7 @@ def gather_perf(plugin_context, *args, **kwargs):
global_conf = cluster_config.get_global_conf()
ob_install_dir_option=global_conf.get('home_path')
scope_option = get_option('scope')
store_dir_option = get_option('store_dir')
store_dir_option = os.path.abspath(get_option('store_dir'))
obdiag_install_dir = get_option('obdiag_dir')
ret = local_execute_command('%s --help' % obdiag_bin)
......
......@@ -22,6 +22,7 @@ from __future__ import absolute_import, division, print_function
from ssh import LocalClient
from subprocess import call, Popen, PIPE
import _errno as err
import os
def gather_plan_monitor(plugin_context, *args, **kwargs):
......@@ -64,7 +65,7 @@ def gather_plan_monitor(plugin_context, *args, **kwargs):
options = plugin_context.options
obdiag_bin = "obdiag"
stdio = plugin_context.stdio
store_dir_option = get_option('store_dir')
store_dir_option = os.path.abspath(get_option('store_dir'))
obdiag_install_dir = get_option('obdiag_dir')
trace_id = get_option('trace_id')
......
......@@ -24,6 +24,7 @@ import datetime
from tool import TimeUtils
from subprocess import call, Popen, PIPE
import _errno as err
import os
def gather_slog(plugin_context, *args, **kwargs):
......@@ -78,7 +79,7 @@ def gather_slog(plugin_context, *args, **kwargs):
to_option = get_option('to')
since_option = get_option('since')
encrypt_option = get_option('encrypt')
store_dir_option = get_option('store_dir')
store_dir_option = os.path.abspath(get_option('store_dir'))
ob_install_dir_option=global_conf.get('home_path')
data_dir = ob_install_dir_option + "/store"
obdiag_install_dir = get_option('obdiag_dir')
......
......@@ -22,6 +22,7 @@ from __future__ import absolute_import, division, print_function
from ssh import LocalClient
from subprocess import call, Popen, PIPE
import _errno as err
import os
def gather_stack(plugin_context, *args, **kwargs):
......@@ -67,7 +68,7 @@ def gather_stack(plugin_context, *args, **kwargs):
cluster_config = plugin_context.cluster_config
stdio = plugin_context.stdio
global_conf = cluster_config.get_global_conf()
store_dir_option = get_option('store_dir')
store_dir_option = os.path.abspath(get_option('store_dir'))
obdiag_install_dir = get_option('obdiag_dir')
ob_install_dir_option=global_conf.get('home_path')
......
......@@ -22,6 +22,7 @@ from __future__ import absolute_import, division, print_function
from ssh import LocalClient
from subprocess import call, Popen, PIPE
import _errno as err
import os
def gather_sysstat(plugin_context, *args, **kwargs):
......@@ -63,7 +64,7 @@ def gather_sysstat(plugin_context, *args, **kwargs):
options = plugin_context.options
obdiag_bin = "obdiag"
stdio = plugin_context.stdio
store_dir_option = get_option('store_dir')
store_dir_option = os.path.abspath(get_option('store_dir'))
obdiag_install_dir = get_option('obdiag_dir')
ret = local_execute_command('%s --help' % obdiag_bin)
......
......@@ -28,7 +28,7 @@ from ssh import LocalClient
from _rpm import Version
import _errno as err
from tool import YamlLoader, FileUtil
from tool import DirectoryUtil
def pre_check(plugin_context, gather_type=None, obdiag_path='', obdiag_new_version='1.0', utils_work_dir_check=False, version_check=False, *args, **kwargs):
def utils_work_dir_checker(util_name):
......@@ -70,6 +70,13 @@ def pre_check(plugin_context, gather_type=None, obdiag_path='', obdiag_new_versi
check_status = {'version_checker_status': False, 'obdiag_version': major_version, 'obdiag_found': True}
return check_status
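# store_dir_checker_and_handler pre-creates the requested store_dir (if any); failing to create it fails the obdiag pre-check.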
def store_dir_checker_and_handler():
store_dir_option = getattr(plugin_context.options, 'store_dir', None)
if (store_dir_option is not None) and (not DirectoryUtil.mkdir(store_dir_option, stdio=stdio)):
return False
else:
return True
stdio = plugin_context.stdio
utils_work_dir_check_status = True
version_check_status = True
......@@ -86,8 +93,8 @@ def pre_check(plugin_context, gather_type=None, obdiag_path='', obdiag_new_versi
version_check_status = res['version_checker_status']
obdiag_version = res['obdiag_version']
obdiag_found = res['obdiag_found']
status = utils_work_dir_check_status and version_check_status
store_dir_checker_status = store_dir_checker_and_handler()
status = utils_work_dir_check_status and version_check_status and store_dir_checker_status
if status:
return plugin_context.return_true(version_status = version_check_status, utils_status = utils_work_dir_check_status, obdiag_version = obdiag_version, obdiag_found = obdiag_found, skip = skip)
else:
......
......@@ -53,7 +53,8 @@ def display(plugin_context, cursor, *args, **kwargs):
}
return plugin_context.return_true(info=info_dict)
except Exception as e:
if e.args[0] != 1146:
code = e.args[0]
if code != 1146 and code != 4012:
raise e
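# Errors other than 1146 (no such table) and 4012 (presumably an OceanBase query timeout) are re-raised above; the tolerated ones just wait and retry.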
time.sleep(3)
except:
......
......@@ -142,6 +142,22 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
check_fail(item, error, suggests)
stdio.error(error)
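# system_memory_check: raise a critical error when system_memory exceeds the effective memory_limit, and an alert when it exceeds 70% of it (factor 0.7).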
def system_memory_check():
server_memory_config = server_memory_stat['servers']
for server in server_memory_config:
if server_memory_config[server]['system_memory']:
memory_limit = server_memory_config[server]['num']
if not memory_limit:
memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total']
factor = 0.7
suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {})
if memory_limit < server_memory_config[server]['system_memory']:
critical('mem', err.EC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server), [suggest])
elif memory_limit * factor < server_memory_config[server]['system_memory']:
alert('mem', err.WC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server, factor=factor), [suggest])
global stdio, success
success = True
check_status = {}
......@@ -290,7 +306,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
memory_limit = 0
percentage = 0
if 'memory_limit' in server_config:
if server_config.get('memory_limit'):
memory_limit = parse_size(server_config['memory_limit'])
memory['num'] += memory_limit
elif 'memory_limit_percentage' in server_config:
......@@ -429,23 +445,11 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
break
error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(total_use)), [suggest])
elif total_use > server_memory_stats['free']:
system_memory_check()
for server in ip_servers:
alert('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(total_use)), [err.SUG_OBSERVER_REDUCE_MEM.format()])
else:
server_memory_config = server_memory_stat['servers']
for server in server_memory_config:
if server_memory_config[server]['system_memory']:
memory_limit = server_memory_config[server]['num']
if not memory_limit:
memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total']
factor = 0.7
suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {})
if memory_limit < server_memory_config[server]['system_memory']:
critical('mem', err.EC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server), [suggest])
elif memory_limit * factor < server_memory_config[server]['system_memory']:
alert('mem', err.WC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server, factor=factor), [suggest])
system_memory_check()
# disk
all_path = set(list(servers_disk[ip].keys()) + list(servers_clog_mount[ip].keys()))
......
......@@ -107,7 +107,7 @@ class Exector(object):
path = os.path.join(script_dir, name)
self.stdio.verbose('exec %s %s' % (repository, name))
if os.path.exists(path):
cmd = self.cmd % path
cmd = self.cmd.replace('%s', path, 1)
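# replace('%s', path, 1) instead of the '%' operator, presumably so a literal '%' elsewhere in the command (e.g. a password) cannot break string formatting.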
self.stdio.start_loading('Exec %s %s' % (repository, name))
if LocalClient.execute_command(cmd, stdio=self.stdio):
self.stdio.stop_loading('succeed')
......
......@@ -94,7 +94,7 @@ class ObVersionGraph(object):
def get_node(self, repository):
version = '%s-%s' % (repository.version , repository.release)
if version in self.allNodes:
return self.allNodes
return self.allNodes[version]
find = None
for k in self.allNodes:
......@@ -104,7 +104,7 @@ class ObVersionGraph(object):
find = node
return find
def findShortestUpgradePath(self, current_repository, dest_repository):
def findShortestUpgradePath(self, current_repository, dest_repository, stdio):
start_node = self.get_node(current_repository)
queue = [start_node]
visited = set([start_node])
......@@ -156,17 +156,24 @@ class ObVersionGraph(object):
i += 1
if len(res) == 1:
res.insert(0, start_node)
if res and res[-1].deprecated:
raise Exception('upgrade destination version {}{} is deprecated, upgrade is not supported.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else ''))
return format_route(res)
def format_route(routes):
return [{
route_res = []
for node in routes:
require_from_binary = getattr(node, 'require_from_binary', False)
if node.when_come_from:
require_from_binary = require_from_binary and routes[0].version in node.when_come_from
route_res.append({
'version': node.version,
'release': None if node.release == VersionNode.RELEASE_NULL else node.release,
'direct_upgrade': getattr(node, 'direct_upgrade', False),
'require_from_binary': getattr(node, 'require_from_binary', False)
} for node in routes]
'require_from_binary': require_from_binary
})
return route_res
def upgrade_route(plugin_context, current_repository, dest_repository, *args, **kwargs):
......@@ -193,7 +200,7 @@ def upgrade_route(plugin_context, current_repository, dest_repository, *args, **
with FileUtil.open(upgrade_dep_path, encoding='utf-8') as f:
data = yaml.load(f)
graph = ObVersionGraph(data)
route = graph.findShortestUpgradePath(current_repository, dest_repository)
route = graph.findShortestUpgradePath(current_repository, dest_repository, plugin_context.stdio)
if not route:
raise Exception('No upgrade route available')
plugin_context.return_true(route=route)
......
......@@ -170,6 +170,21 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
check_fail(item, error, suggests)
stdio.error(error)
def system_memory_check():
server_memory_config = server_memory_stat['servers']
for server in server_memory_config:
if server_memory_config[server]['system_memory']:
memory_limit = server_memory_config[server]['num']
if not memory_limit:
server_memory_config[server]['num'] = memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total']
factor = 0.75
suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {})
if memory_limit < server_memory_config[server]['system_memory']:
critical('mem', err.EC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server), [suggest])
elif memory_limit * factor < server_memory_config[server]['system_memory']:
alert('mem', err.WC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server, factor=factor), [suggest])
global stdio, success
success = True
check_status = {}
......@@ -328,7 +343,7 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
memory_limit = 0
percentage = 0
if 'memory_limit' in server_config:
if server_config.get('memory_limit'):
memory_limit = parse_size(server_config['memory_limit'])
if server_config.get('production_mode') and memory_limit < PRO_MEMORY_MIN:
error('mem', err.EC_OBSERVER_PRODUCTION_MODE_LIMIT.format(server=server, key='memory_limit', limit=format_size(PRO_MEMORY_MIN)), [err.SUB_SET_NO_PRODUCTION_MODE.format()])
......@@ -475,23 +490,11 @@ def start_check(plugin_context, init_check_status=False, strict_check=False, wor
break
error('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY_CACHED.format(ip=ip, free=format_size(server_memory_stats['free']), cached=format_size(server_memory_stats['buffers'] + server_memory_stats['cached']), need=format_size(total_use)), [suggest])
elif total_use > server_memory_stats['free']:
system_memory_check()
for server in ip_servers:
alert('mem', err.EC_OBSERVER_NOT_ENOUGH_MEMORY.format(ip=ip, free=format_size(server_memory_stats['free']), need=format_size(total_use)), [err.SUG_OBSERVER_REDUCE_MEM.format()])
else:
server_memory_config = server_memory_stat['servers']
for server in server_memory_config:
if server_memory_config[server]['system_memory']:
memory_limit = server_memory_config[server]['num']
if not memory_limit:
server_memory_config[server]['num'] = memory_limit = server_memory_config[server]['percentage'] * server_memory_stats['total']
factor = 0.75
suggest = err.SUG_OBSERVER_SYS_MEM_TOO_LARGE.format(factor=factor)
suggest.auto_fix = 'system_memory' not in global_generate_config and 'system_memory' not in generate_configs.get(server, {})
if memory_limit < server_memory_config[server]['system_memory']:
critical('mem', err.EC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server), [suggest])
elif memory_limit * factor < server_memory_config[server]['system_memory']:
alert('mem', err.WC_OBSERVER_SYS_MEM_TOO_LARGE.format(server=server, factor=factor), [suggest])
system_memory_check()
# disk
all_path = set(list(servers_disk[ip].keys()) + list(servers_clog_mount[ip].keys()))
......
......@@ -108,7 +108,7 @@ class Exector(object):
path = os.path.join(script_dir, name)
self.stdio.verbose('exec %s %s' % (repository, name))
if os.path.exists(path):
cmd = self.cmd % path
cmd = self.cmd.replace('%s', path, 1)
self.stdio.start_loading('Exec %s %s' % (repository, name))
if LocalClient.execute_command(cmd, stdio=self.stdio):
self.stdio.stop_loading('succeed')
......
# coding: utf-8
# OceanBase Deploy.
# Copyright (C) 2021 OceanBase
#
# This file is part of OceanBase Deploy.
#
# OceanBase Deploy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OceanBase Deploy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OceanBase Deploy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import os
from _rpm import Version, Release, PackageInfo
from tool import YamlLoader, FileUtil
class VersionNode(PackageInfo):
RELEASE_NULL = Release('0')
def __init__(self, version, deprecated = False, require_from_binary = False):
md5 = version
version = version.split('_')
release = version[1] if len(version) > 1 else self.RELEASE_NULL
version = version[0]
super(VersionNode, self).__init__('', version, release, '', md5)
self.next = []
self.can_be_upgraded_to = []
self.direct_come_from = []
self.deprecated = deprecated
self.require_from_binary = require_from_binary
self.when_come_from = []
self.when_upgraded_to = []
self.direct_upgrade = False
self.precursor = None
def set_require_from_binary(self, require_from_binary):
if isinstance(require_from_binary, dict):
self.require_from_binary = require_from_binary.get('value')
self.when_come_from = require_from_binary.get('when_come_from')
self.when_upgraded_to = require_from_binary.get('when_upgraded_to')
if None != self.when_come_from and None != self.when_upgraded_to:
raise Exception("when_come_from and when_upgraded_to can not appear at the same time")
else:
self.require_from_binary = require_from_binary
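# ObVersionGraph builds a directed graph of upgradable versions from oceanbase_upgrade_dep.yml and searches it for the shortest upgrade path between two repositories.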
class ObVersionGraph(object):
def __init__(self, data):
self.allNodes = {}
self._build(data)
def _build(self, data):
for info in data:
version = info.get('version')
if version in self.allNodes:
raise Exception("the version node '%s' was already exists, please check 'oceanbase_upgrade_dep.yml' to make sure there are no duplicate versions!" % version)
node = VersionNode(version, info.get('deprecated', False))
node.can_be_upgraded_to += info.get('can_be_upgraded_to', [])
node.set_require_from_binary(info.get('require_from_binary', False))
self.allNodes[version] = node
for k in self.allNodes:
v = self.allNodes[k]
self.buildNeighbors(v, v.can_be_upgraded_to, False)
self.buildNeighbors(v, v.can_be_upgraded_to, True)
def buildNeighbors(self, current, neighborVersions, direct):
for k in neighborVersions:
node = self.allNodes.get(k)
if node is None:
node = VersionNode(k)
if direct:
node.direct_come_from.append(node)
if node.release == VersionNode.RELEASE_NULL:
current.next.append(node)
else:
current.next.insert(0, node)
def get_node(self, repository):
version = '%s-%s' % (repository.version , repository.release)
if version in self.allNodes:
return self.allNodes[version]
find = None
for k in self.allNodes:
node = self.allNodes[k]
if node.version == repository.version:
if node > find:
find = node
return find
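# Breadth-first search from the current version; precursor pointers are used to rebuild the path once the destination (or the closest release-less match) is reached.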
def findShortestUpgradePath(self, current_repository, dest_repository, stdio):
start_node = self.get_node(current_repository)
queue = [start_node]
visited = set([start_node])
finalNode = None
for k in self.allNodes:
self.allNodes[k].precursor = None
while queue:
node = queue.pop(0)
if node.version == dest_repository.version:
if node.release == dest_repository.release:
finalNode = node
break
if node.release == VersionNode.RELEASE_NULL:
flag = False
for v in node.next:
if v not in visited and v.version == dest_repository.version:
flag = True
v.precursor = node
queue.append(v)
visited.add(v)
if flag is False:
finalNode = node
else:
for v in node.next:
if v not in visited:
v.precursor = node
queue.append(v)
visited.add(v)
if finalNode is not None:
break
p = finalNode
pre = None
res = []
while p:
res.insert(0, p)
pre = p.precursor
while pre and pre.precursor and p.version == pre.version:
pre = pre.precursor
p = pre
n, i = len(res), 1
while i < n:
node = res[i]
pre = res[i - 1]
if pre in node.direct_come_from:
node.direct_upgrade = True
i += 1
if len(res) == 1:
res.insert(0, start_node)
if len(res) > 0 and res[-1].deprecated:
raise Exception('upgrade destination version {}{} is deprecated, upgrade is not supported.'.format(res[-1].version, '-{}'.format(res[-1].release) if res[-1].release else ''))
return format_route(res)
def format_route(routes):
route_res = []
for i, node in enumerate(routes):
require_from_binary = getattr(node, 'require_from_binary', False)
if getattr(node, 'when_come_from', False):
require_from_binary = require_from_binary and routes[0].version in node.when_come_from
route_res.append({
'version': node.version,
'release': None if node.release == VersionNode.RELEASE_NULL else node.release,
'direct_upgrade': getattr(node, 'direct_upgrade', False),
'require_from_binary': require_from_binary
})
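# The remainder appears to keep every hop up to the last pre-4.1.0.0 node that requires the binary, then only the 4.1+ hops flagged require_from_binary, plus the start and destination nodes.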
first_result = []
second_result = [route_res[-1]]
for j in range(len(route_res[1:-1]), 0, -1):
if route_res[j].get('version') < '4.1.0.0':
if route_res[j].get('require_from_binary'):
first_result = route_res[1: j + 1]
break
elif route_res[j].get('require_from_binary'):
second_result.insert(0, route_res[j])
first_result.insert(0, route_res[0])
return first_result + second_result
def upgrade_route(plugin_context, current_repository, dest_repository, *args, **kwargs):
stdio = plugin_context.stdio
repository_dir = dest_repository.repository_dir
if dest_repository.version >= Version("4.2"):
stdio.error('upgrade observer to version {} is not supported, please upgrade obd first.'.format(dest_repository.version))
return
if current_repository.version == dest_repository.version:
return plugin_context.return_true(route=format_route([current_repository, dest_repository]))
upgrade_dep_name = 'etc/oceanbase_upgrade_dep.yml'
upgrade_dep_path = os.path.join(repository_dir, upgrade_dep_name)
if not os.path.isfile(upgrade_dep_path):
stdio.error('%s No such file: %s' % (dest_repository, upgrade_dep_name))
return
version_dep = {}
yaml = YamlLoader(stdio)
try:
with FileUtil.open(upgrade_dep_path, encoding='utf-8') as f:
data = yaml.load(f)
graph = ObVersionGraph(data)
route = graph.findShortestUpgradePath(current_repository, dest_repository, plugin_context.stdio)
if not route:
raise Exception('No upgrade route available')
plugin_context.return_true(route=route)
except Exception as e:
stdio.exception('fail to get upgrade graph: %s' % e)
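For reference, a minimal sketch (an assumption, not taken from this repository) of the entry shape the plugin above expects in etc/oceanbase_upgrade_dep.yml — the fields it reads are version (optionally version_release joined by '_'), can_be_upgraded_to, deprecated, and require_from_binary with optional when_come_from/when_upgraded_to:
- version: 4.0.0.0_100000272022110114
  can_be_upgraded_to:
    - 4.1.0.0
  deprecated: false
  require_from_binary:
    value: true
    when_come_from: [4.0.0.0]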
......@@ -110,7 +110,7 @@ class Exector(object):
path = os.path.join(script_dir, name)
self.stdio.verbose('exec %s %s' % (repository, name))
if os.path.exists(path):
cmd = '{} {} {}'.format(self.cmd % path, param, '-t {}'.format(self.script_query_timeout) if self.script_query_timeout else '')
cmd = '{} {} {}'.format(self.cmd.replace('%s', path, 1), param, '-t {}'.format(self.script_query_timeout) if self.script_query_timeout else '')
self.stdio.start_loading('Exec %s %s' % (repository, name))
if LocalClient.execute_command(cmd, stdio=self.stdio):
self.stdio.stop_loading('succeed')
......
......@@ -339,7 +339,7 @@ def start(plugin_context, start_env=None, *args, **kwargs):
sql_port = matched.group(2)[1:]
database = matched.group(3)
connected = False
retries = 10
retries = 300
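# Allow far more connection attempts: the ocp-express meta tenant can take a while to accept connections after the cluster starts.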
while not connected and retries:
retries -= 1
try:
......@@ -418,7 +418,7 @@ def start(plugin_context, start_env=None, *args, **kwargs):
stdio.start_loading("ocp-express program health check")
failed = []
servers = cluster_config.servers
count = 60
count = 200
while servers and count:
count -= 1
tmp_servers = []
......
......@@ -339,7 +339,7 @@ def start(plugin_context, start_env=None, *args, **kwargs):
sql_port = matched.group(2)[1:]
database = matched.group(3)
connected = False
retries = 10
retries = 300
while not connected and retries:
retries -= 1
try:
......@@ -407,7 +407,7 @@ def start(plugin_context, start_env=None, *args, **kwargs):
stdio.start_loading("ocp-express program health check")
failed = []
servers = cluster_config.servers
count = 60
count = 200
while servers and count:
count -= 1
tmp_servers = []
......
......@@ -27,6 +27,7 @@ from copy import deepcopy
import bcrypt
from _errno import EC_CONFLICT_PORT
from tool import YamlLoader, FileUtil
from _rpm import Version
......@@ -263,9 +264,19 @@ def start(plugin_context, *args, **kwargs):
stdio.stop_loading('fail')
return False
cmd_args_map[server] = cmd_items
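# Presumably: if the recorded prometheusd pid is still alive, reuse it when it already holds the configured port, otherwise report a port conflict rather than starting a second instance.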
remote_pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip()
if remote_pid:
if client.execute_command('ls /proc/{}'.format(remote_pid)):
if confirm_port(client, remote_pid, int(server_config["port"]), stdio):
continue
stdio.stop_loading('fail')
stdio.error(EC_CONFLICT_PORT.format(server=server.ip, port=port))
return plugin_context.return_false()
if not prometheusd(home_path, client, server, cmd_items, start_only=True, stdio=stdio) or not client.execute_command('pid=`cat %s` && ls /proc/$pid' % pid_path[server]):
stdio.stop_loading('fail')
return False
stdio.stop_loading('fail')
return False
stdio.stop_loading('succeed')
time.sleep(1)
......@@ -283,7 +294,7 @@ def start(plugin_context, *args, **kwargs):
stdio.verbose('%s program health check' % server)
remote_pid = client.execute_command("cat %s" % pid_path[server]).stdout.strip()
if remote_pid:
for pid in remote_pid.split('\n'):
for pid in re.findall(r'\d+', remote_pid):
confirm = confirm_port(client, pid, int(server_config["port"]), stdio)
if confirm:
prometheusd_pid_path = os.path.join(home_path, 'run/prometheusd.pid')
......
......@@ -116,23 +116,16 @@ def run_test(plugin_context, cursor, odp_cursor=None, *args, **kwargs):
break
time.sleep(5)
# analyze
ret = LocalClient.execute_command("%s \"show parameters where name = 'enable_sql_extension' \G;\"" % exec_sql_cmd, stdio=stdio)
if ret:
output = ret.stdout.strip()
searched = re.search('\s+value:\s+(\S+)\n', output)
if searched:
value = searched.group(1).lower()
if value == 'true':
local_dir, _ = os.path.split(__file__)
analyze_path = os.path.join(local_dir, 'analyze.sql')
with FileUtil.open(analyze_path, stdio=stdio) as f:
content = f.read()
analyze_content = content.format(cpu_total=cpu_total, database=db_name)
ret = LocalClient.execute_command('%s """%s"""' % (exec_sql_cmd, analyze_content), stdio=stdio)
if not ret:
stdio.error('failed to analyze table: {}'.format(ret.stderr))
stdio.stop_loading('fail')
return
local_dir, _ = os.path.split(__file__)
analyze_path = os.path.join(local_dir, 'analyze.sql')
with FileUtil.open(analyze_path, stdio=stdio) as f:
content = f.read()
analyze_content = content.format(cpu_total=cpu_total, database=db_name)
ret = LocalClient.execute_command('%s """%s"""' % (exec_sql_cmd, analyze_content), stdio=stdio)
if not ret:
stdio.error('failed to analyze table: {}'.format(ret.stderr))
stdio.stop_loading('fail')
return
stdio.stop_loading('succeed')
stdio.verbose('Benchmark run')
......
......@@ -192,22 +192,15 @@ def run_test(plugin_context, db, cursor, *args, **kwargs):
if int(ret.get("FROZEN_SCN", 0)) / 1000 == int(ret.get("LAST_SCN", 0)) / 1000:
break
time.sleep(5)
ret = LocalClient.execute_command("%s -e \"show parameters where name = 'enable_sql_extension' \G;\"" % sql_cmd_prefix, stdio=stdio)
if ret:
output = ret.stdout.strip()
searched = re.search('\s+value:\s+(\S+)\n', output)
if searched:
value = searched.group(1).lower()
if value == 'true':
# analyze
local_dir, _ = os.path.split(__file__)
analyze_path = os.path.join(local_dir, 'analyze.sql')
with FileUtil.open(analyze_path, stdio=stdio) as f:
content = f.read()
analyze_content = content.format(cpu_total=cpu_total, database=mysql_db)
ret = LocalClient.execute_command('%s -e """%s"""' % (sql_cmd_prefix, analyze_content), stdio=stdio)
if not ret:
raise Exception(ret.stderr)
# analyze
local_dir, _ = os.path.split(__file__)
analyze_path = os.path.join(local_dir, 'analyze.sql')
with FileUtil.open(analyze_path, stdio=stdio) as f:
content = f.read()
analyze_content = content.format(cpu_total=cpu_total, database=mysql_db)
ret = LocalClient.execute_command('%s -e """%s"""' % (sql_cmd_prefix, analyze_content), stdio=stdio)
if not ret:
raise Exception(ret.stderr)
stdio.stop_loading('succeed')
# Replace the concurrency value
......
......@@ -58,7 +58,7 @@ cd $SRC_DIR/web
yarn
yarn build
cd $SRC_DIR
sed -i "s/<CID>/$CID/" const.py && sed -i "s/<B_BRANCH>/$BRANCH/" const.py && sed -i "s/<B_TIME>/$DATE/" const.py && sed -i "s/<DEBUG>/$OBD_DUBUG/" const.py && sed -i "s/<VERSION>/$VERSION/" const.py
sed -i "s/<CID>/$CID/" const.py && sed -i "s/<B_BRANCH>/$BRANCH/" const.py && sed -i "s/<B_TIME>/$DATE/" const.py && sed -i "s/<DEBUG>/$OBD_DUBUG/" const.py && sed -i "s/<VERSION>/$VERSION/" const.py && sed -i "s/<TELEMETRY_WEBSITE>/$TELEMETRY_WEBSITE/" const.py
cp -f _cmd.py obd.py
sed -i "s|<DOC_LINK>|$OBD_DOC_LINK|" _errno.py
mkdir -p $BUILD_DIR/SOURCES ${RPM_BUILD_ROOT}
......@@ -130,6 +130,12 @@ echo -e 'Installation of obd finished successfully\nPlease source /etc/profile.d
#/sbin/chkconfig obd on
%changelog
* Mon Jun 12 2023 obd 2.1.1
- new features: support the upgrade keywords 'when_come_from' and 'deprecated'
- fix bug: starting a server failed when other servers were down #171
- fix bug: a password containing the '%' sign causes a stack overflow in the upgrade plugin
- fix bug: the system_memory check failed when memory_limit is 0
- fix bug: extend the ocp-express meta ob connect time
* Fri May 12 2023 obd 2.1.0
- new features: support oceanbase-ce V4.0 upgrade
- new features: support ocp-express V1.0.1
......
......@@ -55,7 +55,6 @@ body,
td,
th {
padding: 12px 16px;
// De-emphasize the column header font
color: #5c6b8a;
font-weight: normal;
font-size: 14px;
......@@ -68,13 +67,10 @@ body,
tr {
td {
padding: 12px 16px;
// Remove the table borders
border: none;
// Remove the rounded background corners on hover
border-radius: 0;
}
}
// Zebra-stripe styling
tr:nth-child(2n + 1):not(.ant-table-placeholder) {
td {
background-color: @colorBgContainer;
......@@ -85,7 +81,6 @@ body,
background-color: @colorFillQuaternary;
}
}
// Pseudo-class specificity: hover < nth-child, so the hover styles must be written later
tr:not(.ant-table-placeholder):hover {
td {
background-color: @colorPrimaryBgHover;
......@@ -177,7 +172,6 @@ body,
border-radius: 8px;
}
// Nested table styles
.ant-table-tbody > tr > td > .ant-table-wrapper:only-child .ant-table,
.ant-table.ant-table-middle
.ant-table-tbody
......
/*
* This file is generated by parrot must
* DOCUMENT LIST:
* parrot must: http://gitlab.alibaba-inc.com/parrot/parrot-tool-must
* @ali/global-locale: http://gitlab.alibaba-inc.com/parrot/global-locale
* @ali/global-string-format: http://gitlab.alibaba-inc.com/parrot/global-string-format
*/
import locale from '@ali/global-locale';
......@@ -39,7 +35,6 @@ function change(langTag) {
* },{
* name: 'Alice'
* })
* More syntax: https://formatjs.io/guides/message-syntax/
* @param {string|object} id key or object
* @param {object} variable variable for id
* @return {string} format message
......
......@@ -6,7 +6,7 @@ import { getDestroyTaskInfo } from '@/services/ob-deploy-web/Deployments';
import useRequest from '@/utils/useRequest';
import { checkLowVersion, getErrorInfo } from '@/utils';
import NP from 'number-precision';
import { oceanbaseComponent } from '../constants';
import { oceanbaseComponent, obproxyComponent } from '../constants';
import { getLocale } from 'umi';
import EnStyles from './indexEn.less';
import ZhStyles from './indexZh.less';
......@@ -114,12 +114,27 @@ export default function DeleteDeployModal({
`${components?.oceanbase?.version}-${components?.oceanbase?.release}-${components?.oceanbase?.package_hash}`,
);
let currentObproxyVersionInfo = {};
componentsVersionInfo?.[
obproxyComponent
]?.dataSource?.some((item: API.service_model_components_ComponentInfo) => {
if (item?.version_type === newSelectedVersionInfo?.version_type) {
currentObproxyVersionInfo = item;
return true;
}
return false
});
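// Keep the OBProxy selection aligned with the OceanBase package that shares the same version_type.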
setComponentsVersionInfo({
...componentsVersionInfo,
[oceanbaseComponent]: {
...componentsVersionInfo[oceanbaseComponent],
...newSelectedVersionInfo,
},
[obproxyComponent]: {
...componentsVersionInfo[obproxyComponent],
...currentObproxyVersionInfo
}
});
}
setTimeout(() => {
......@@ -191,13 +206,13 @@ export default function DeleteDeployModal({
>
{status === 'SUCCESSFUL'
? intl.formatMessage({
id: 'OBD.pages.components.DeleteDeployModal.FailedHistoryDeploymentEnvironmentCleared',
defaultMessage: '清理失败历史部署环境成功',
})
id: 'OBD.pages.components.DeleteDeployModal.FailedHistoryDeploymentEnvironmentCleared',
defaultMessage: '清理失败历史部署环境成功',
})
: intl.formatMessage({
id: 'OBD.pages.components.DeleteDeployModal.FailedToCleanUpThe',
defaultMessage: '清理失败历史部署环境失败',
})}
id: 'OBD.pages.components.DeleteDeployModal.FailedToCleanUpThe',
defaultMessage: '清理失败历史部署环境失败',
})}
</div>
<Progress
className={styles.deleteDeployProgress}
......
......@@ -230,22 +230,39 @@ export default function InstallConfig() {
}: API.OBResponseDataListComponent_) => {
if (success) {
const newComponentsVersionInfo = {};
const oceanbaseVersionsData = data?.items?.filter(item => item.name === oceanbaseComponent);
const initOceanbaseVersionInfo = oceanbaseVersionsData[0]?.info[0] || {};
const newSelectedOceanbaseVersionInfo = oceanbaseVersionsData[0]?.info?.filter(
(item) => item.md5 === oceanbase?.package_hash,
)?.[0];
const currentOceanbaseVersionInfo =
newSelectedOceanbaseVersionInfo || initOceanbaseVersionInfo;
data?.items?.forEach((item) => {
if (allComponentsName.includes(item.name)) {
if (item?.info?.length) {
const initVersionInfo = item?.info[0] || {};
if (item.name === oceanbaseComponent) {
const newSelectedVersionInfo = item.info.filter(
(item) => item.md5 === oceanbase?.package_hash,
)?.[0];
const currentSelectedVersionInfo =
newSelectedVersionInfo || initVersionInfo;
setOBVersionValue(
`${currentSelectedVersionInfo?.version}-${currentSelectedVersionInfo?.release}-${currentSelectedVersionInfo?.md5}`,
`${currentOceanbaseVersionInfo?.version}-${currentOceanbaseVersionInfo?.release}-${currentOceanbaseVersionInfo?.md5}`,
);
newComponentsVersionInfo[item.name] = {
...currentSelectedVersionInfo,
...currentOceanbaseVersionInfo,
dataSource: item.info || [],
};
} else if (item.name === obproxyComponent) {
let currentObproxyVersionInfo = {};
item?.info?.some(subItem => {
if (subItem?.version_type === currentOceanbaseVersionInfo?.version_type) {
currentObproxyVersionInfo = subItem;
return true;
}
return false
});
newComponentsVersionInfo[item.name] = {
...currentObproxyVersionInfo,
dataSource: item.info || [],
};
} else {
......@@ -435,12 +452,27 @@ export default function InstallConfig() {
const newSelectedVersionInfo = dataSource.filter(
(item) => item.md5 === md5,
)[0];
let currentObproxyVersionInfo = {};
componentsVersionInfo?.[
obproxyComponent
]?.dataSource?.some((item: API.service_model_components_ComponentInfo) => {
if (item?.version_type === newSelectedVersionInfo?.version_type) {
currentObproxyVersionInfo = item;
return true;
}
return false
});
setComponentsVersionInfo({
...componentsVersionInfo,
[oceanbaseComponent]: {
...componentsVersionInfo[oceanbaseComponent],
...newSelectedVersionInfo,
},
[obproxyComponent]: {
...componentsVersionInfo[obproxyComponent],
...currentObproxyVersionInfo
}
});
setLowVersion(
!!(
......@@ -451,12 +483,10 @@ export default function InstallConfig() {
};
const directTo = (url: string) => {
// Open in a new tab
const blankWindow = window.open('about:blank');
if (blankWindow) {
blankWindow.location.href = url;
} else {
// Fallback: open in the current tab
window.location.href = url;
}
};
......@@ -907,11 +937,10 @@ export default function InstallConfig() {
{componentsGroupInfo.map((info) => (
<ProCard
type="inner"
className={`${styles.componentCard} ${
currentType === 'ob' && info.onlyAll
? styles.disabledCard
: ''
}`}
className={`${styles.componentCard} ${currentType === 'ob' && info.onlyAll
? styles.disabledCard
: ''
}`}
key={info.group}
>
<Table
......