提交 850d43df 编写于 作者: Z zengqiao

add v2.2.0 feature & fix

上级 fc109fd1
---
![kafka-manager-logo](../../assets/images/common/logo_name.png)
**一站式`Apache Kafka`集群指标监控与运维管控平台**
---
# 升级至`2.2.0`版本
`2.2.0`版本在`cluster`表及`logical_cluster`表中各增加了一个字段,因此需要执行下面的sql进行字段的增加。
```sql
# cluster表中增加jmx_properties字段, 这个字段会用于存储jmx相关的认证以及配置信息
ALTER TABLE `cluster` ADD COLUMN `jmx_properties` TEXT NULL COMMENT 'JMX配置' AFTER `security_properties`;
# logical_cluster中增加identification字段, 同时数据和原先name数据相同, 最后增加一个唯一键.
# 此后, name字段还是表示集群名称, identification字段表示的是集群标识, 只能是字母数字及下划线组成,
# 数据上报到监控系统时, 集群这个标识采用的字段就是identification字段, 之前使用的是name字段.
ALTER TABLE `logical_cluster` ADD COLUMN `identification` VARCHAR(192) NOT NULL DEFAULT '' COMMENT '逻辑集群标识' AFTER `name`;
UPDATE `logical_cluster` SET `identification`=`name` WHERE id>=0;
ALTER TABLE `logical_cluster` ADD UNIQUE INDEX `uniq_identification` (`identification`);
```
-- create database
CREATE DATABASE logi_kafka_manager;
USE logi_kafka_manager;
--
-- Table structure for table `account`
--
......@@ -104,7 +109,8 @@ CREATE TABLE `cluster` (
`zookeeper` varchar(512) NOT NULL DEFAULT '' COMMENT 'zk地址',
`bootstrap_servers` varchar(512) NOT NULL DEFAULT '' COMMENT 'server地址',
`kafka_version` varchar(32) NOT NULL DEFAULT '' COMMENT 'kafka版本',
`security_properties` text COMMENT '安全认证参数',
`security_properties` text COMMENT 'Kafka安全认证参数',
`jmx_properties` text COMMENT 'JMX配置',
`status` tinyint(4) NOT NULL DEFAULT '1' COMMENT ' 监控标记, 0表示未监控, 1表示监控中',
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
......@@ -302,20 +308,22 @@ INSERT INTO kafka_user(app_id, password, user_type, operation) VALUES ('dkm_admi
-- Table structure for table `logical_cluster`
--
-- DROP TABLE IF EXISTS `logical_cluster`;
CREATE TABLE `logical_cluster` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群名称',
`mode` int(16) NOT NULL DEFAULT '0' COMMENT '逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群',
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT '所属应用',
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
`region_list` varchar(256) NOT NULL DEFAULT '' COMMENT 'regionid列表',
`description` text COMMENT '备注说明',
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_name` (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='逻辑集群信息表';
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群名称',
`identification` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群标识',
`mode` int(16) NOT NULL DEFAULT '0' COMMENT '逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群',
`app_id` varchar(64) NOT NULL DEFAULT '' COMMENT '所属应用',
`cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id',
`region_list` varchar(256) NOT NULL DEFAULT '' COMMENT 'regionid列表',
`description` text COMMENT '备注说明',
`gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_name` (`name`),
UNIQUE KEY `uniq_identification` (`identification`)
) ENGINE=InnoDB AUTO_INCREMENT=7 DEFAULT CHARSET=utf8 COMMENT='逻辑集群信息表';
--
-- Table structure for table `monitor_rule`
......
......@@ -9,50 +9,56 @@
# 安装手册
## 1、环境依赖
## 环境依赖
如果是以Release包进行安装的,则仅安装`Java`及`MySQL`即可。如果是要先进行源码包进行打包,然后再使用,则需要安装`Maven`及`Node`环境。
- `Maven 3.5+`(后端打包依赖)
- `node v12+`(前端打包依赖)
- `Java 8+`(运行环境需要)
- `MySQL 5.7`(数据存储)
- `Maven 3.5+`(后端打包依赖)
- `Node 10+`(前端打包依赖)
---
## 环境初始化
## 2、获取安装包
执行[create_mysql_table.sql](create_mysql_table.sql)中的SQL命令,从而创建所需的MySQL库及表,默认创建的库名是`kafka_manager`
**1、Release直接下载**
```
# 示例:
mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql
```
这里如果觉得麻烦,然后也不想进行二次开发,则可以直接下载Release包,下载地址:[Github Release包下载地址](https://github.com/didi/Logi-KafkaManager/releases)
如果觉得Github的下载地址太慢了,也可以进入`Logi-KafkaManager`的用户群获取,群地址在README中。
---
## 打包
**2、源代码进行打包**
```bash
下载好代码之后,进入`Logi-KafkaManager`的主目录,执行`sh build.sh`命令即可,执行完成之后会在`output/kafka-manager-xxx`目录下面生成一个jar包。
# 一次性打包
cd ..
mvn install
对于`windows`环境的用户,估计执行不了`sh build.sh`命令,因此可以直接执行`mvn install`,然后在`kafka-manager-web/target`目录下生成一个kafka-manager-web-xxx.jar的包。
获取到jar包之后,我们继续下面的步骤。
---
## 3、MySQL-DB初始化
执行[create_mysql_table.sql](create_mysql_table.sql)中的SQL命令,从而创建所需的MySQL库及表,默认创建的库名是`logi_kafka_manager`
```
# 示例:
mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql
```
---
## 启动
## 4、启动
```
# application.yml 是配置文件
# application.yml 是配置文件,最简单的是仅修改MySQL相关的配置即可启动
cp kafka-manager-web/src/main/resources/application.yml kafka-manager-web/target/
cd kafka-manager-web/target/
nohup java -jar kafka-manager-web-2.1.0-SNAPSHOT.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
nohup java -jar kafka-manager.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
```
## 使用
### 5、使用
本地启动的话,访问`http://localhost:8080`,输入帐号及密码(默认`admin/admin`)进行登录。更多参考:[kafka-manager 用户使用手册](../user_guide/user_guide_cn.md)
......@@ -9,6 +9,8 @@ public class LogicalCluster {
private String logicalClusterName;
private String logicalClusterIdentification;
private Integer mode;
private Integer topicNum;
......@@ -41,6 +43,14 @@ public class LogicalCluster {
this.logicalClusterName = logicalClusterName;
}
public String getLogicalClusterIdentification() {
return logicalClusterIdentification;
}
public void setLogicalClusterIdentification(String logicalClusterIdentification) {
this.logicalClusterIdentification = logicalClusterIdentification;
}
public Integer getMode() {
return mode;
}
......@@ -81,6 +91,14 @@ public class LogicalCluster {
this.bootstrapServers = bootstrapServers;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Long getGmtCreate() {
return gmtCreate;
}
......@@ -97,19 +115,12 @@ public class LogicalCluster {
this.gmtModify = gmtModify;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public String toString() {
return "LogicalCluster{" +
"logicalClusterId=" + logicalClusterId +
", logicalClusterName='" + logicalClusterName + '\'' +
", logicalClusterIdentification='" + logicalClusterIdentification + '\'' +
", mode=" + mode +
", topicNum=" + topicNum +
", clusterVersion='" + clusterVersion + '\'' +
......
......@@ -27,9 +27,12 @@ public class ClusterDTO {
@ApiModelProperty(value="数据中心")
private String idc;
@ApiModelProperty(value="安全配置参数")
@ApiModelProperty(value="Kafka安全配置")
private String securityProperties;
@ApiModelProperty(value="Jmx配置")
private String jmxProperties;
public Long getClusterId() {
return clusterId;
}
......@@ -78,6 +81,14 @@ public class ClusterDTO {
this.securityProperties = securityProperties;
}
public String getJmxProperties() {
return jmxProperties;
}
public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}
@Override
public String toString() {
return "ClusterDTO{" +
......@@ -87,6 +98,7 @@ public class ClusterDTO {
", bootstrapServers='" + bootstrapServers + '\'' +
", idc='" + idc + '\'' +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
'}';
}
......
......@@ -21,6 +21,9 @@ public class LogicalClusterDTO {
@ApiModelProperty(value = "名称")
private String name;
@ApiModelProperty(value = "集群标识, 用于告警的上报")
private String identification;
@ApiModelProperty(value = "集群模式")
private Integer mode;
......@@ -52,6 +55,14 @@ public class LogicalClusterDTO {
this.name = name;
}
public String getIdentification() {
return identification;
}
public void setIdentification(String identification) {
this.identification = identification;
}
public Integer getMode() {
return mode;
}
......@@ -97,6 +108,7 @@ public class LogicalClusterDTO {
return "LogicalClusterDTO{" +
"id=" + id +
", name='" + name + '\'' +
", identification='" + identification + '\'' +
", mode=" + mode +
", clusterId=" + clusterId +
", regionIdList=" + regionIdList +
......@@ -117,6 +129,7 @@ public class LogicalClusterDTO {
}
appId = ValidateUtils.isNull(appId)? "": appId;
description = ValidateUtils.isNull(description)? "": description;
identification = ValidateUtils.isNull(identification)? name: identification;
return true;
}
}
\ No newline at end of file
......@@ -17,6 +17,8 @@ public class ClusterDO implements Comparable<ClusterDO> {
private String securityProperties;
private String jmxProperties;
private Integer status;
private Date gmtCreate;
......@@ -31,30 +33,6 @@ public class ClusterDO implements Comparable<ClusterDO> {
this.id = id;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtModify() {
return gmtModify;
}
public void setGmtModify(Date gmtModify) {
this.gmtModify = gmtModify;
}
public String getClusterName() {
return clusterName;
}
......@@ -87,6 +65,38 @@ public class ClusterDO implements Comparable<ClusterDO> {
this.securityProperties = securityProperties;
}
public String getJmxProperties() {
return jmxProperties;
}
public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}
public Integer getStatus() {
return status;
}
public void setStatus(Integer status) {
this.status = status;
}
public Date getGmtCreate() {
return gmtCreate;
}
public void setGmtCreate(Date gmtCreate) {
this.gmtCreate = gmtCreate;
}
public Date getGmtModify() {
return gmtModify;
}
public void setGmtModify(Date gmtModify) {
this.gmtModify = gmtModify;
}
@Override
public String toString() {
return "ClusterDO{" +
......@@ -95,6 +105,7 @@ public class ClusterDO implements Comparable<ClusterDO> {
", zookeeper='" + zookeeper + '\'' +
", bootstrapServers='" + bootstrapServers + '\'' +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
......
......@@ -11,6 +11,8 @@ public class LogicalClusterDO {
private String name;
private String identification;
private Integer mode;
private String appId;
......@@ -41,6 +43,14 @@ public class LogicalClusterDO {
this.name = name;
}
public String getIdentification() {
return identification;
}
public void setIdentification(String identification) {
this.identification = identification;
}
public Integer getMode() {
return mode;
}
......@@ -102,6 +112,7 @@ public class LogicalClusterDO {
return "LogicalClusterDO{" +
"id=" + id +
", name='" + name + '\'' +
", identification='" + identification + '\'' +
", mode=" + mode +
", appId='" + appId + '\'' +
", clusterId=" + clusterId +
......
......@@ -15,6 +15,9 @@ public class LogicClusterVO {
@ApiModelProperty(value="逻辑集群名称")
private String clusterName;
@ApiModelProperty(value="逻辑标识")
private String clusterIdentification;
@ApiModelProperty(value="逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群")
private Integer mode;
......@@ -24,9 +27,6 @@ public class LogicClusterVO {
@ApiModelProperty(value="集群版本")
private String clusterVersion;
@ApiModelProperty(value="物理集群ID")
private Long physicalClusterId;
@ApiModelProperty(value="集群服务地址")
private String bootstrapServers;
......@@ -55,6 +55,22 @@ public class LogicClusterVO {
this.clusterName = clusterName;
}
public String getClusterIdentification() {
return clusterIdentification;
}
public void setClusterIdentification(String clusterIdentification) {
this.clusterIdentification = clusterIdentification;
}
public Integer getMode() {
return mode;
}
public void setMode(Integer mode) {
this.mode = mode;
}
public Integer getTopicNum() {
return topicNum;
}
......@@ -71,14 +87,6 @@ public class LogicClusterVO {
this.clusterVersion = clusterVersion;
}
public Long getPhysicalClusterId() {
return physicalClusterId;
}
public void setPhysicalClusterId(Long physicalClusterId) {
this.physicalClusterId = physicalClusterId;
}
public String getBootstrapServers() {
return bootstrapServers;
}
......@@ -87,6 +95,14 @@ public class LogicClusterVO {
this.bootstrapServers = bootstrapServers;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Long getGmtCreate() {
return gmtCreate;
}
......@@ -103,32 +119,15 @@ public class LogicClusterVO {
this.gmtModify = gmtModify;
}
public Integer getMode() {
return mode;
}
public void setMode(Integer mode) {
this.mode = mode;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
@Override
public String toString() {
return "LogicClusterVO{" +
"clusterId=" + clusterId +
", clusterName='" + clusterName + '\'' +
", clusterIdentification='" + clusterIdentification + '\'' +
", mode=" + mode +
", topicNum=" + topicNum +
", clusterVersion='" + clusterVersion + '\'' +
", physicalClusterId=" + physicalClusterId +
", bootstrapServers='" + bootstrapServers + '\'' +
", description='" + description + '\'' +
", gmtCreate=" + gmtCreate +
......
......@@ -32,9 +32,12 @@ public class ClusterBaseVO {
@ApiModelProperty(value="集群类型")
private Integer mode;
@ApiModelProperty(value="安全配置参数")
@ApiModelProperty(value="Kafka安全配置")
private String securityProperties;
@ApiModelProperty(value="Jmx配置")
private String jmxProperties;
@ApiModelProperty(value="1:监控中, 0:暂停监控")
private Integer status;
......@@ -108,6 +111,14 @@ public class ClusterBaseVO {
this.securityProperties = securityProperties;
}
public String getJmxProperties() {
return jmxProperties;
}
public void setJmxProperties(String jmxProperties) {
this.jmxProperties = jmxProperties;
}
public Integer getStatus() {
return status;
}
......@@ -141,8 +152,9 @@ public class ClusterBaseVO {
", bootstrapServers='" + bootstrapServers + '\'' +
", kafkaVersion='" + kafkaVersion + '\'' +
", idc='" + idc + '\'' +
", mode='" + mode + '\'' +
", mode=" + mode +
", securityProperties='" + securityProperties + '\'' +
", jmxProperties='" + jmxProperties + '\'' +
", status=" + status +
", gmtCreate=" + gmtCreate +
", gmtModify=" + gmtModify +
......
......@@ -18,6 +18,9 @@ public class LogicalClusterVO {
@ApiModelProperty(value = "逻辑集群名称")
private String logicalClusterName;
@ApiModelProperty(value = "逻辑集群标识")
private String logicalClusterIdentification;
@ApiModelProperty(value = "物理集群ID")
private Long physicalClusterId;
......@@ -55,6 +58,14 @@ public class LogicalClusterVO {
this.logicalClusterName = logicalClusterName;
}
public String getLogicalClusterIdentification() {
return logicalClusterIdentification;
}
public void setLogicalClusterIdentification(String logicalClusterIdentification) {
this.logicalClusterIdentification = logicalClusterIdentification;
}
public Long getPhysicalClusterId() {
return physicalClusterId;
}
......@@ -116,6 +127,7 @@ public class LogicalClusterVO {
return "LogicalClusterVO{" +
"logicalClusterId=" + logicalClusterId +
", logicalClusterName='" + logicalClusterName + '\'' +
", logicalClusterIdentification='" + logicalClusterIdentification + '\'' +
", physicalClusterId=" + physicalClusterId +
", regionIdList=" + regionIdList +
", mode=" + mode +
......
......@@ -53,6 +53,13 @@ public class JsonUtils {
return JSON.toJSONString(obj);
}
/**
 * Deserialize a JSON string into an instance of the given class.
 *
 * @param src   JSON text; may be null or blank
 * @param clazz target type to deserialize into
 * @return the parsed object, or null when {@code src} is null/blank
 */
public static <T> T stringToObj(String src, Class<T> clazz) {
    if (ValidateUtils.isBlank(src)) {
        // Nothing to parse — treat blank input as "no data" instead of failing.
        return null;
    }
    T parsed = JSON.parseObject(src, clazz);
    return parsed;
}
public static List<TopicConnectionDO> parseTopicConnections(Long clusterId, JSONObject jsonObject, long postTime) {
List<TopicConnectionDO> connectionDOList = new ArrayList<>();
for (String clientType: jsonObject.keySet()) {
......
package com.xiaojukeji.kafka.manager.common.utils.jmx;
public class JmxConfig {
/**
* 单台最大连接数
*/
private Integer maxConn;
/**
* 用户名
*/
private String username;
/**
* 密码
*/
private String password;
/**
* 开启SSL
*/
private Boolean openSSL;
public Integer getMaxConn() {
return maxConn;
}
public void setMaxConn(Integer maxConn) {
this.maxConn = maxConn;
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public Boolean isOpenSSL() {
return openSSL;
}
public void setOpenSSL(Boolean openSSL) {
this.openSSL = openSSL;
}
@Override
public String toString() {
return "JmxConfig{" +
"maxConn=" + maxConn +
", username='" + username + '\'' +
", password='" + password + '\'' +
", openSSL=" + openSSL +
'}';
}
}
package com.xiaojukeji.kafka.manager.common.utils.jmx;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -7,8 +8,14 @@ import javax.management.*;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import javax.management.remote.rmi.RMIConnectorServer;
import javax.naming.Context;
import javax.rmi.ssl.SslRMIClientSocketFactory;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
......@@ -28,13 +35,19 @@ public class JmxConnectorWrap {
private AtomicInteger atomicInteger;
public JmxConnectorWrap(String host, int port, int maxConn) {
private JmxConfig jmxConfig;
public JmxConnectorWrap(String host, int port, JmxConfig jmxConfig) {
this.host = host;
this.port = port;
if (maxConn <= 0) {
maxConn = 1;
this.jmxConfig = jmxConfig;
if (ValidateUtils.isNull(this.jmxConfig)) {
this.jmxConfig = new JmxConfig();
}
if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getMaxConn())) {
this.jmxConfig.setMaxConn(1);
}
this.atomicInteger = new AtomicInteger(maxConn);
this.atomicInteger = new AtomicInteger(this.jmxConfig.getMaxConn());
}
public boolean checkJmxConnectionAndInitIfNeed() {
......@@ -64,8 +77,18 @@ public class JmxConnectorWrap {
}
String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port);
try {
JMXServiceURL url = new JMXServiceURL(jmxUrl);
jmxConnector = JMXConnectorFactory.connect(url, null);
Map<String, Object> environment = new HashMap<String, Object>();
if (!ValidateUtils.isBlank(this.jmxConfig.getUsername()) && !ValidateUtils.isBlank(this.jmxConfig.getPassword())) {
environment.put(javax.management.remote.JMXConnector.CREDENTIALS, Arrays.asList(this.jmxConfig.getUsername(), this.jmxConfig.getPassword()));
}
if (jmxConfig.isOpenSSL() != null && this.jmxConfig.isOpenSSL()) {
environment.put(Context.SECURITY_PROTOCOL, "ssl");
SslRMIClientSocketFactory clientSocketFactory = new SslRMIClientSocketFactory();
environment.put(RMIConnectorServer.RMI_CLIENT_SOCKET_FACTORY_ATTRIBUTE, clientSocketFactory);
environment.put("com.sun.jndi.rmi.factory.socket", clientSocketFactory);
}
jmxConnector = JMXConnectorFactory.connect(new JMXServiceURL(jmxUrl), environment);
LOGGER.info("JMX connect success, host:{} port:{}.", host, port);
return true;
} catch (MalformedURLException e) {
......
......@@ -69,6 +69,19 @@ public class LogicalClusterMetadataManager {
return LOGICAL_CLUSTER_ID_BROKER_ID_MAP.getOrDefault(logicClusterId, new HashSet<>());
}
/**
 * Resolve the logical-cluster id that the given topic belongs to.
 *
 * @param physicalClusterId id of the physical cluster hosting the topic
 * @param topicName         topic to look up
 * @return the logical-cluster id, or null when the physical cluster or topic is unknown
 */
public Long getTopicLogicalClusterId(Long physicalClusterId, String topicName) {
    if (!LOADED.get()) {
        // Lazily populate the metadata caches on first access.
        flush();
    }
    Map<String, Long> topicIdMap = TOPIC_LOGICAL_MAP.get(physicalClusterId);
    return ValidateUtils.isNull(topicIdMap) ? null : topicIdMap.get(topicName);
}
public LogicalClusterDO getTopicLogicalCluster(Long physicalClusterId, String topicName) {
if (!LOADED.get()) {
flush();
......
......@@ -4,9 +4,11 @@ import com.xiaojukeji.kafka.manager.common.bizenum.KafkaBrokerRoleEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.constant.KafkaConstant;
import com.xiaojukeji.kafka.manager.common.entity.KafkaVersion;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConfig;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.ControllerData;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
......@@ -118,8 +120,15 @@ public class PhysicalClusterMetadataManager {
return;
}
JmxConfig jmxConfig = null;
try {
jmxConfig = JsonUtils.stringToObj(clusterDO.getJmxProperties(), JmxConfig.class);
} catch (Exception e) {
LOGGER.error("class=PhysicalClusterMetadataManager||method=addNew||clusterDO={}||msg=parse jmx properties failed", JsonUtils.toJSONString(clusterDO));
}
//增加Broker监控
BrokerStateListener brokerListener = new BrokerStateListener(clusterDO.getId(), zkConfig, configUtils.getJmxMaxConn());
BrokerStateListener brokerListener = new BrokerStateListener(clusterDO.getId(), zkConfig, jmxConfig);
brokerListener.init();
zkConfig.watchChildren(ZkPathUtil.BROKER_IDS_ROOT, brokerListener);
......@@ -280,7 +289,7 @@ public class PhysicalClusterMetadataManager {
//---------------------------Broker元信息相关--------------
public static void putBrokerMetadata(Long clusterId, Integer brokerId, BrokerMetadata brokerMetadata, Integer jmxMaxConn) {
public static void putBrokerMetadata(Long clusterId, Integer brokerId, BrokerMetadata brokerMetadata, JmxConfig jmxConfig) {
Map<Integer, BrokerMetadata> metadataMap = BROKER_METADATA_MAP.get(clusterId);
if (metadataMap == null) {
return;
......@@ -288,7 +297,7 @@ public class PhysicalClusterMetadataManager {
metadataMap.put(brokerId, brokerMetadata);
Map<Integer, JmxConnectorWrap> jmxMap = JMX_CONNECTOR_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>());
jmxMap.put(brokerId, new JmxConnectorWrap(brokerMetadata.getHost(), brokerMetadata.getJmxPort(), jmxMaxConn));
jmxMap.put(brokerId, new JmxConnectorWrap(brokerMetadata.getHost(), brokerMetadata.getJmxPort(), jmxConfig));
JMX_CONNECTOR_MAP.put(clusterId, jmxMap);
Map<Integer, KafkaVersion> versionMap = KAFKA_VERSION_MAP.getOrDefault(clusterId, new ConcurrentHashMap<>());
......
......@@ -203,6 +203,7 @@ public class ClusterServiceImpl implements ClusterService {
zk.close();
}
} catch (Throwable t) {
return false;
}
}
return true;
......
......@@ -113,6 +113,7 @@ public class LogicalClusterServiceImpl implements LogicalClusterService {
LogicalCluster logicalCluster = new LogicalCluster();
logicalCluster.setLogicalClusterId(logicalClusterDO.getId());
logicalCluster.setLogicalClusterName(logicalClusterDO.getName());
logicalCluster.setLogicalClusterIdentification(logicalClusterDO.getIdentification());
logicalCluster.setClusterVersion(
physicalClusterMetadataManager.getKafkaVersion(
logicalClusterDO.getClusterId(),
......
......@@ -13,9 +13,6 @@ public class ConfigUtils {
@Value(value = "${custom.idc}")
private String idc;
@Value("${custom.jmx.max-conn}")
private Integer jmxMaxConn;
@Value(value = "${spring.profiles.active}")
private String kafkaManagerEnv;
......@@ -30,14 +27,6 @@ public class ConfigUtils {
this.idc = idc;
}
public Integer getJmxMaxConn() {
return jmxMaxConn;
}
public void setJmxMaxConn(Integer jmxMaxConn) {
this.jmxMaxConn = jmxMaxConn;
}
public String getKafkaManagerEnv() {
return kafkaManagerEnv;
}
......
package com.xiaojukeji.kafka.manager.service.zookeeper;
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConfig;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.common.zookeeper.StateChangeListener;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
......@@ -22,12 +23,12 @@ public class BrokerStateListener implements StateChangeListener {
private ZkConfigImpl zkConfig;
private Integer jmxMaxConn;
private JmxConfig jmxConfig;
public BrokerStateListener(Long clusterId, ZkConfigImpl zkConfig, Integer jmxMaxConn) {
public BrokerStateListener(Long clusterId, ZkConfigImpl zkConfig, JmxConfig jmxConfig) {
this.clusterId = clusterId;
this.zkConfig = zkConfig;
this.jmxMaxConn = jmxMaxConn;
this.jmxConfig = jmxConfig;
}
@Override
......@@ -84,7 +85,7 @@ public class BrokerStateListener implements StateChangeListener {
}
brokerMetadata.setClusterId(clusterId);
brokerMetadata.setBrokerId(brokerId);
PhysicalClusterMetadataManager.putBrokerMetadata(clusterId, brokerId, brokerMetadata, jmxMaxConn);
PhysicalClusterMetadataManager.putBrokerMetadata(clusterId, brokerId, brokerMetadata, jmxConfig);
} catch (Exception e) {
LOGGER.error("add broker failed, clusterId:{} brokerMetadata:{}.", clusterId, brokerMetadata, e);
}
......
......@@ -12,6 +12,7 @@
<result column="zookeeper" property="zookeeper" />
<result column="bootstrap_servers" property="bootstrapServers" />
<result column="security_properties" property="securityProperties" />
<result column="jmx_properties" property="jmxProperties" />
</resultMap>
<insert id="insert"
......@@ -19,9 +20,9 @@
useGeneratedKeys="true"
keyProperty="id">
INSERT INTO cluster (
cluster_name, zookeeper, bootstrap_servers, security_properties
cluster_name, zookeeper, bootstrap_servers, security_properties, jmx_properties
) VALUES (
#{clusterName}, #{zookeeper}, #{bootstrapServers}, #{securityProperties}
#{clusterName}, #{zookeeper}, #{bootstrapServers}, #{securityProperties}, #{jmxProperties}
)
</insert>
......@@ -30,6 +31,7 @@
cluster_name=#{clusterName},
bootstrap_servers=#{bootstrapServers},
security_properties=#{securityProperties},
jmx_properties=#{jmxProperties},
status=#{status}
WHERE id = #{id}
</update>
......
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="LogicalClusterDao">
<resultMap id="LogicalClusterMap" type="com.xiaojukeji.kafka.manager.common.entity.pojo.LogicalClusterDO">
<id column="id" property="id" />
<result column="gmt_create" property="gmtCreate" />
<result column="gmt_modify" property="gmtModify" />
<resultMap id="LogicalClusterMap" type="com.xiaojukeji.kafka.manager.common.entity.pojo.LogicalClusterDO">
<id column="id" property="id" />
<result column="gmt_create" property="gmtCreate" />
<result column="gmt_modify" property="gmtModify" />
<result column="name" property="name" />
<result column="app_id" property="appId" />
<result column="cluster_id" property="clusterId" />
<result column="region_list" property="regionList" />
<result column="mode" property="mode" />
<result column="description" property="description" />
<result column="name" property="name" />
<result column="identification" property="identification" />
<result column="app_id" property="appId" />
<result column="cluster_id" property="clusterId" />
<result column="region_list" property="regionList" />
<result column="mode" property="mode" />
<result column="description" property="description" />
</resultMap>
<insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.pojo.LogicalClusterDO">
INSERT INTO logical_cluster
(name, app_id, cluster_id, region_list, mode, description)
(name, identification, app_id, cluster_id, region_list, mode, description)
VALUES
(#{name}, #{appId}, #{clusterId}, #{regionList}, #{mode}, #{description})
(#{name}, #{identification}, #{appId}, #{clusterId}, #{regionList}, #{mode}, #{description})
</insert>
<delete id="deleteById" parameterType="java.lang.Long">
......@@ -27,7 +28,8 @@
<update id="updateById" parameterType="com.xiaojukeji.kafka.manager.common.entity.pojo.LogicalClusterDO">
UPDATE logical_cluster SET
<!-- name=#{name}, 不允许修改 name, 会影响到上报的数据 -->
name=#{name},
<!-- identification=#{identification}, 不允许修改 identification, 会影响到上报的数据 -->
cluster_id=#{clusterId},
region_list=#{regionList},
description=#{description},
......
......@@ -4,6 +4,7 @@ import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.monitor.common.entry.*;
import com.xiaojukeji.kafka.manager.monitor.component.n9e.entry.*;
import com.xiaojukeji.kafka.manager.monitor.component.n9e.entry.bizenum.CategoryEnum;
import java.util.*;
......@@ -44,7 +45,7 @@ public class N9eConverter {
if (!ValidateUtils.isNull(strategy.getId())) {
n9eStrategy.setId(strategy.getId().intValue());
}
n9eStrategy.setCategory(1);
n9eStrategy.setCategory(CategoryEnum.DEVICE_INDEPENDENT.getCode());
n9eStrategy.setName(strategy.getName());
n9eStrategy.setNid(monitorN9eNid);
n9eStrategy.setExcl_nid(new ArrayList<>());
......@@ -77,7 +78,13 @@ public class N9eConverter {
n9eStrategy.setRecovery_notify(0);
StrategyAction strategyAction = strategy.getStrategyActionList().get(0);
n9eStrategy.setConverge(ListUtils.string2IntList(strategyAction.getConverge()));
// 单位转换, 夜莺的单位是秒, KM前端的单位是分钟
List<Integer> convergeList = ListUtils.string2IntList(strategyAction.getConverge());
if (!ValidateUtils.isEmptyList(convergeList)) {
convergeList.set(0, convergeList.get(0) * 60);
}
n9eStrategy.setConverge(convergeList);
List<Integer> notifyGroups = new ArrayList<>();
for (String name: ListUtils.string2StrList(strategyAction.getNotifyGroup())) {
......@@ -167,7 +174,13 @@ public class N9eConverter {
}
strategyAction.setNotifyGroup(ListUtils.strList2String(notifyGroups));
strategyAction.setConverge(ListUtils.intList2String(n9eStrategy.getConverge()));
// 单位转换, 夜莺的单位是秒, KM前端的单位是分钟
List<Integer> convergeList = n9eStrategy.getConverge();
if (!ValidateUtils.isEmptyList(convergeList)) {
convergeList.set(0, convergeList.get(0) / 60);
}
strategyAction.setConverge(ListUtils.intList2String(convergeList));
strategyAction.setCallback(n9eStrategy.getCallback());
strategy.setStrategyActionList(Arrays.asList(strategyAction));
......
package com.xiaojukeji.kafka.manager.monitor.component.n9e.entry.bizenum;
public enum CategoryEnum {
DEVICE_RELATED(1, "设备相关"),
DEVICE_INDEPENDENT(2, "设备无关"),
;
private int code;
private String msg;
CategoryEnum(int code, String msg) {
this.code = code;
this.msg = msg;
}
public int getCode() {
return code;
}
public String getMsg() {
return msg;
}
}
......@@ -73,7 +73,7 @@ public class SinkCommunityTopicMetrics2Monitor extends AbstractScheduledTask<Clu
continue;
}
metricSinkPoints.addAll(recordTopics(now, logicalClusterDO.getName(), metrics));
metricSinkPoints.addAll(recordTopics(now, logicalClusterDO.getIdentification(), metrics));
if (metricSinkPoints.size() > MonitorSinkConstant.MONITOR_SYSTEM_SINK_THRESHOLD) {
abstractMonitor.sinkMetrics(metricSinkPoints);
metricSinkPoints.clear();
......
......@@ -64,7 +64,7 @@ public class SinkConsumerMetrics2Monitor implements ApplicationListener<Consumer
continue;
}
metricSinkPoints.addAll(recordConsumer(elem.getTimestampUnitMs() / 1000, logicalClusterDO.getName(), elem));
metricSinkPoints.addAll(recordConsumer(elem.getTimestampUnitMs() / 1000, logicalClusterDO.getIdentification(), elem));
if (metricSinkPoints.size() > MonitorSinkConstant.MONITOR_SYSTEM_SINK_THRESHOLD) {
abstractMonitor.sinkMetrics(metricSinkPoints);
metricSinkPoints.clear();
......
......@@ -57,7 +57,7 @@ public class SinkTopicThrottledMetrics2Monitor implements ApplicationListener<To
continue;
}
MetricSinkPoint point = recordTopicThrottled(startTime, logicalClusterDO.getName(), elem);
MetricSinkPoint point = recordTopicThrottled(startTime, logicalClusterDO.getIdentification(), elem);
if (ValidateUtils.isNull(point)) {
continue;
}
......
......@@ -40,8 +40,7 @@ public class NormalAccountController {
public Result<List<AccountSummaryVO>> searchOnJobStaffByKeyWord(@RequestParam("keyWord") String keyWord) {
List<EnterpriseStaff> staffList = accountService.searchAccountByPrefix(keyWord);
if (ValidateUtils.isEmptyList(staffList)) {
LOGGER.info("class=NormalAccountController||method=searchOnJobStaffByKeyWord||keyWord={}||msg=staffList is empty!"
,keyWord);
LOGGER.info("class=NormalAccountController||method=searchOnJobStaffByKeyWord||keyWord={}||msg=staffList is empty!", keyWord);
return new Result<>();
}
List<AccountSummaryVO> voList = new ArrayList<>();
......
......@@ -69,7 +69,8 @@ public class NormalTopicController {
}
return new Result<>(TopicModelConverter.convert2TopicBasicVO(
topicService.getTopicBasicDTO(physicalClusterId, topicName),
clusterService.getById(physicalClusterId)
clusterService.getById(physicalClusterId),
logicalClusterMetadataManager.getTopicLogicalClusterId(physicalClusterId, topicName)
));
}
......
......@@ -166,7 +166,7 @@ public class OpUtilsController {
if (!ResultStatus.SUCCESS.equals(rs)) {
return Result.buildFrom(rs);
}
topicManagerService.modifyTopic(dto.getClusterId(), dto.getTopicName(), dto.getDescription(), operator);
topicManagerService.modifyTopicByOp(dto.getClusterId(), dto.getTopicName(), dto.getAppId(), dto.getDescription(), operator);
return new Result();
}
......
......@@ -55,6 +55,7 @@ public class ClusterModelConverter {
CopyUtils.copyProperties(vo, logicalCluster);
vo.setClusterId(logicalCluster.getLogicalClusterId());
vo.setClusterName(logicalCluster.getLogicalClusterName());
vo.setClusterIdentification(logicalCluster.getLogicalClusterIdentification());
return vo;
}
......@@ -78,9 +79,8 @@ public class ClusterModelConverter {
ClusterDO clusterDO = new ClusterDO();
CopyUtils.copyProperties(clusterDO, reqObj);
clusterDO.setId(reqObj.getClusterId());
clusterDO.setSecurityProperties(
ValidateUtils.isNull(clusterDO.getSecurityProperties())? "": clusterDO.getSecurityProperties()
);
clusterDO.setSecurityProperties(ValidateUtils.isNull(reqObj.getSecurityProperties())? "": reqObj.getSecurityProperties());
clusterDO.setJmxProperties(ValidateUtils.isNull(reqObj.getJmxProperties())? "": reqObj.getJmxProperties());
return clusterDO;
}
......
......@@ -21,6 +21,7 @@ public class LogicalClusterModelConverter {
LogicalClusterVO vo = new LogicalClusterVO();
vo.setLogicalClusterId(logicalClusterDO.getId());
vo.setLogicalClusterName(logicalClusterDO.getName());
vo.setLogicalClusterIdentification(logicalClusterDO.getIdentification());
vo.setPhysicalClusterId(logicalClusterDO.getClusterId());
vo.setMode(logicalClusterDO.getMode());
vo.setRegionIdList(ListUtils.string2LongList(logicalClusterDO.getRegionList()));
......@@ -45,6 +46,7 @@ public class LogicalClusterModelConverter {
public static LogicalClusterDO convert2LogicalClusterDO(LogicalClusterDTO dto) {
LogicalClusterDO logicalClusterDO = new LogicalClusterDO();
logicalClusterDO.setName(dto.getName());
logicalClusterDO.setIdentification(dto.getIdentification());
logicalClusterDO.setClusterId(dto.getClusterId());
logicalClusterDO.setRegionList(ListUtils.longList2String(dto.getRegionIdList()));
logicalClusterDO.setMode(dto.getMode());
......
......@@ -22,9 +22,9 @@ import java.util.List;
* @date 2017/6/1.
*/
public class TopicModelConverter {
public static TopicBasicVO convert2TopicBasicVO(TopicBasicDTO dto, ClusterDO clusterDO) {
public static TopicBasicVO convert2TopicBasicVO(TopicBasicDTO dto, ClusterDO clusterDO, Long logicalClusterId) {
TopicBasicVO vo = new TopicBasicVO();
vo.setClusterId(dto.getClusterId());
vo.setClusterId(logicalClusterId);
vo.setAppId(dto.getAppId());
vo.setAppName(dto.getAppName());
vo.setPartitionNum(dto.getPartitionNum());
......
......@@ -11,7 +11,7 @@ spring:
name: kafkamanager
datasource:
kafka-manager:
jdbc-url: jdbc:mysql://127.0.0.1:3306/kafka_manager?characterEncoding=UTF-8&serverTimezone=GMT%2B8
jdbc-url: jdbc:mysql://127.0.0.1:3306/logi_kafka_manager?characterEncoding=UTF-8&serverTimezone=GMT%2B8
username: admin
password: admin
driver-class-name: com.mysql.jdbc.Driver
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册