提交 d5680ffd 编写于 作者: Z zengqiao

增加Topic同步任务&Bug修复

上级 3c091a88
......@@ -54,6 +54,11 @@ custom:
topic-throttled-metrics: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭
save-days: 7 #指标在DB中保持的天数,-1表示永久保存,7表示保存近7天的数据
# 任务相关的开关
task:
op:
sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
account: # ldap相关的配置, 社区版本暂时支持不够完善,可以先忽略,欢迎贡献代码对这块做优化
ldap:
......@@ -71,7 +76,7 @@ kcm: # 集群升级部署相关的功能,需要配合夜莺及S3进行使用
monitor: # 监控告警相关的功能,需要配合夜莺进行使用
enabled: false # 默认关闭,true就是开启
n9e:
nid: 2
nid: 2
user-token: 1234567890
mon:
# 夜莺 mon监控服务 地址
......
......@@ -6,8 +6,6 @@ package com.xiaojukeji.kafka.manager.common.bizenum;
*/
public enum IDCEnum {
CN("cn", "国内"),
US("us", "美东"),
RU("ru", "俄罗斯"),
;
private String idc;
......
......@@ -21,6 +21,8 @@ public enum ModuleEnum {
PARTITION(5, "分区"),
GATEWAY_CONFIG(6, "Gateway配置"),
UNKNOWN(-1, "未知")
;
ModuleEnum(int code, String message) {
......
......@@ -10,6 +10,7 @@ public enum RebalanceDimensionEnum {
REGION(1, "Region维度"),
BROKER(2, "Broker维度"),
TOPIC(3, "Topic维度"),
PARTITION(4, "Partition维度"),
;
private Integer code;
......
......@@ -45,4 +45,13 @@ public enum GatewayConfigKeyEnum {
", configName='" + configName + '\'' +
'}';
}
public static GatewayConfigKeyEnum getByConfigType(String configType) {
for (GatewayConfigKeyEnum configKeyEnum: GatewayConfigKeyEnum.values()) {
if (configKeyEnum.getConfigType().equals(configType)) {
return configKeyEnum;
}
}
return null;
}
}
\ No newline at end of file
......@@ -7,6 +7,8 @@ package com.xiaojukeji.kafka.manager.common.constant;
public class KafkaConstant {
public static final String COORDINATOR_TOPIC_NAME = "__consumer_offsets";
public static final String TRANSACTION_TOPIC_NAME = "__transaction_state";
public static final String BROKER_HOST_NAME_SUFFIX = ".diditaxi.com";
public static final String CLIENT_VERSION_CODE_UNKNOWN = "-1";
......
......@@ -12,11 +12,6 @@ public class TopicCreationConstant {
*/
public static final String LOG_X_CREATE_TOPIC_CONFIG_KEY_NAME = "LOG_X_CREATE_TOPIC_CONFIG";
/**
* 治理平台创建Topic配置KEY
*/
public static final String CHORUS_CREATE_TOPIC_CONFIG_KEY_NAME = "CHORUS_CREATE_TOPIC_CONFIG";
/**
* 内部创建Topic配置KEY
*/
......@@ -30,6 +25,8 @@ public class TopicCreationConstant {
public static final String TOPIC_RETENTION_TIME_KEY_NAME = "retention.ms";
public static final Long DEFAULT_QUOTA = 3 * 1024 * 1024L;
public static Properties createNewProperties(Long retentionTime) {
Properties properties = new Properties();
properties.put(TOPIC_RETENTION_TIME_KEY_NAME, String.valueOf(retentionTime));
......
......@@ -3,7 +3,6 @@ package com.xiaojukeji.kafka.manager.common.entity;
import kafka.admin.AdminClient;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* @author zengqiao
......@@ -16,17 +15,12 @@ public class ConsumerMetadata {
private Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap = new HashMap<>();
private Map<String, List<String>> consumerGroupAppMap = new ConcurrentHashMap<>();
public ConsumerMetadata(Set<String> consumerGroupSet,
Map<String, Set<String>> topicNameConsumerGroupMap,
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap,
Map<String, List<String>> consumerGroupAppMap) {
Map<String, AdminClient.ConsumerGroupSummary> consumerGroupSummaryMap) {
this.consumerGroupSet = consumerGroupSet;
this.topicNameConsumerGroupMap = topicNameConsumerGroupMap;
this.consumerGroupSummaryMap = consumerGroupSummaryMap;
this.consumerGroupAppMap = consumerGroupAppMap;
}
public Set<String> getConsumerGroupSet() {
......@@ -40,8 +34,4 @@ public class ConsumerMetadata {
public Map<String, AdminClient.ConsumerGroupSummary> getConsumerGroupSummaryMap() {
return consumerGroupSummaryMap;
}
public Map<String, List<String>> getConsumerGroupAppMap() {
return consumerGroupAppMap;
}
}
\ No newline at end of file
package com.xiaojukeji.kafka.manager.common.entity;
import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import java.io.Serializable;
......@@ -118,4 +119,9 @@ public class Result<T> implements Serializable {
result.setData(data);
return result;
}
public boolean failed() {
return !Constant.SUCCESS.equals(code);
}
}
package com.xiaojukeji.kafka.manager.common.entity.ao.cluster;
/**
 * Data holder describing a broker that is a preferred candidate for
 * controller election. Plain mutable bean: no validation, all fields nullable.
 */
public class ControllerPreferredCandidate {
    /** broker id within the cluster */
    private Integer brokerId;

    /** broker host name */
    private String host;

    /** broker start time (epoch millis — assumed from naming, TODO confirm with caller) */
    private Long startTime;

    /** broker status code (semantics defined by the caller; VO counterpart documents 0:online, -1:offline) */
    private Integer status;

    public Integer getBrokerId() {
        return brokerId;
    }

    public void setBrokerId(Integer brokerId) {
        this.brokerId = brokerId;
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public Long getStartTime() {
        return startTime;
    }

    public void setStartTime(Long startTime) {
        this.startTime = startTime;
    }

    public Integer getStatus() {
        return status;
    }

    public void setStatus(Integer status) {
        this.status = status;
    }

    @Override
    public String toString() {
        // Fix: previously printed "ControllerPreferredBroker", which does not match the class name.
        return "ControllerPreferredCandidate{" +
                "brokerId=" + brokerId +
                ", host='" + host + '\'' +
                ", startTime=" + startTime +
                ", status=" + status +
                '}';
    }
}
......@@ -2,30 +2,18 @@ package com.xiaojukeji.kafka.manager.common.entity.ao.consumer;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import java.util.List;
import java.util.Objects;
/**
* 消费组信息
* @author zengqiao
* @date 19/4/18
*/
public class ConsumerGroupDTO {
public class ConsumerGroup {
private Long clusterId;
private String consumerGroup;
private List<String> appIdList;
private OffsetLocationEnum offsetStoreLocation;
public ConsumerGroupDTO(Long clusterId,
String consumerGroup,
List<String> appIdList,
OffsetLocationEnum offsetStoreLocation) {
public ConsumerGroup(Long clusterId, String consumerGroup, OffsetLocationEnum offsetStoreLocation) {
this.clusterId = clusterId;
this.consumerGroup = consumerGroup;
this.appIdList = appIdList;
this.offsetStoreLocation = offsetStoreLocation;
}
......@@ -45,14 +33,6 @@ public class ConsumerGroupDTO {
this.consumerGroup = consumerGroup;
}
public List<String> getAppIdList() {
return appIdList;
}
public void setAppIdList(List<String> appIdList) {
this.appIdList = appIdList;
}
public OffsetLocationEnum getOffsetStoreLocation() {
return offsetStoreLocation;
}
......@@ -63,10 +43,9 @@ public class ConsumerGroupDTO {
@Override
public String toString() {
return "ConsumerGroupDTO{" +
return "ConsumerGroup{" +
"clusterId=" + clusterId +
", consumerGroup='" + consumerGroup + '\'' +
", appIdList=" + appIdList +
", offsetStoreLocation=" + offsetStoreLocation +
'}';
}
......@@ -79,7 +58,7 @@ public class ConsumerGroupDTO {
if (o == null || getClass() != o.getClass()) {
return false;
}
ConsumerGroupDTO that = (ConsumerGroupDTO) o;
ConsumerGroup that = (ConsumerGroup) o;
return clusterId.equals(that.clusterId)
&& consumerGroup.equals(that.consumerGroup)
&& offsetStoreLocation == that.offsetStoreLocation;
......
package com.xiaojukeji.kafka.manager.common.entity.ao.consumer;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import java.util.List;
/**
 * Summary information of a consumer group in a cluster.
 * Plain mutable bean: no validation, all fields nullable.
 */
public class ConsumerGroupSummary {
    /** physical cluster id */
    private Long clusterId;

    /** consumer group name */
    private String consumerGroup;

    /** where the group's offsets are stored */
    private OffsetLocationEnum offsetStoreLocation;

    /** app ids associated with this consumer group */
    private List<String> appIdList;

    /** consumer group state string */
    private String state;

    public Long getClusterId() {
        return clusterId;
    }

    public String getConsumerGroup() {
        return consumerGroup;
    }

    public OffsetLocationEnum getOffsetStoreLocation() {
        return offsetStoreLocation;
    }

    public List<String> getAppIdList() {
        return appIdList;
    }

    public String getState() {
        return state;
    }

    public void setClusterId(Long clusterId) {
        this.clusterId = clusterId;
    }

    public void setConsumerGroup(String consumerGroup) {
        this.consumerGroup = consumerGroup;
    }

    public void setOffsetStoreLocation(OffsetLocationEnum offsetStoreLocation) {
        this.offsetStoreLocation = offsetStoreLocation;
    }

    public void setAppIdList(List<String> appIdList) {
        this.appIdList = appIdList;
    }

    public void setState(String state) {
        this.state = state;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ConsumerGroupSummary{");
        sb.append("clusterId=").append(clusterId)
          .append(", consumerGroup='").append(consumerGroup).append('\'')
          .append(", offsetStoreLocation=").append(offsetStoreLocation)
          .append(", appIdList=").append(appIdList)
          .append(", state='").append(state).append('\'')
          .append('}');
        return sb.toString();
    }
}
......@@ -25,7 +25,10 @@ public class RebalanceDTO {
@ApiModelProperty(value = "TopicName")
private String topicName;
@ApiModelProperty(value = "维度[0: Cluster维度, 1: Region维度, 2:Broker维度, 3:Topic维度]")
@ApiModelProperty(value = "分区ID")
private Integer partitionId;
@ApiModelProperty(value = "维度[0: Cluster维度, 1: Region维度, 2:Broker维度, 3:Topic维度, 4:Partition纬度]")
private Integer dimension;
public Long getClusterId() {
......@@ -60,6 +63,14 @@ public class RebalanceDTO {
this.topicName = topicName;
}
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Integer getDimension() {
return dimension;
}
......@@ -68,22 +79,12 @@ public class RebalanceDTO {
this.dimension = dimension;
}
@Override
public String toString() {
return "RebalanceDTO{" +
"clusterId=" + clusterId +
", regionId=" + regionId +
", brokerId=" + brokerId +
", topicName='" + topicName + '\'' +
", dimension=" + dimension +
'}';
}
public boolean paramLegal() {
if (ValidateUtils.isNull(clusterId)
|| RebalanceDimensionEnum.REGION.getCode().equals(dimension) && ValidateUtils.isNull(regionId)
|| RebalanceDimensionEnum.BROKER.getCode().equals(dimension) && ValidateUtils.isNull(brokerId)
|| RebalanceDimensionEnum.TOPIC.getCode().equals(dimension) && ValidateUtils.isNull(topicName) ) {
|| (RebalanceDimensionEnum.REGION.getCode().equals(dimension) && ValidateUtils.isNull(regionId))
|| (RebalanceDimensionEnum.BROKER.getCode().equals(dimension) && ValidateUtils.isNull(brokerId))
|| (RebalanceDimensionEnum.TOPIC.getCode().equals(dimension) && ValidateUtils.isNull(topicName))
|| (RebalanceDimensionEnum.PARTITION.getCode().equals(dimension) && (ValidateUtils.isNull(topicName) || ValidateUtils.isNull(partitionId))) ) {
return false;
}
return true;
......
package com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.List;
/**
* @author zengqiao
* @date 21/01/14
*/
@ApiModel(value = "Topic消费组概要信息")
public class ConsumerGroupSummaryVO {
    @ApiModelProperty(value = "消费组名称")
    private String consumerGroup;

    @ApiModelProperty(value = "使用的AppID")
    private String appIds;

    @ApiModelProperty(value = "offset存储位置")
    private String location;

    @ApiModelProperty(value = "消费组状态")
    private String state;

    public String getConsumerGroup() {
        return consumerGroup;
    }

    public String getAppIds() {
        return appIds;
    }

    public String getLocation() {
        return location;
    }

    public String getState() {
        return state;
    }

    public void setConsumerGroup(String consumerGroup) {
        this.consumerGroup = consumerGroup;
    }

    public void setAppIds(String appIds) {
        this.appIds = appIds;
    }

    public void setLocation(String location) {
        this.location = location;
    }

    public void setState(String state) {
        this.state = state;
    }

    @Override
    public String toString() {
        // NOTE(review): appIds is intentionally left unquoted to keep the
        // original string format byte-for-byte — confirm before normalizing.
        StringBuilder sb = new StringBuilder("ConsumerGroupSummaryVO{");
        sb.append("consumerGroup='").append(consumerGroup).append('\'')
          .append(", appIds=").append(appIds)
          .append(", location='").append(location).append('\'')
          .append(", state='").append(state).append('\'')
          .append('}');
        return sb.toString();
    }
}
package com.xiaojukeji.kafka.manager.common.entity.vo.rd;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.util.Date;
/**
* @author zengqiao
* @date 20/3/19
*/
@ApiModel(value = "GatewayConfigVO", description = "Gateway配置信息")
public class GatewayConfigVO {
    @ApiModelProperty(value="ID")
    private Long id;

    @ApiModelProperty(value="配置类型")
    private String type;

    @ApiModelProperty(value="配置名称")
    private String name;

    @ApiModelProperty(value="配置值")
    private String value;

    @ApiModelProperty(value="版本")
    private Long version;

    @ApiModelProperty(value="创建时间")
    private Date createTime;

    @ApiModelProperty(value="修改时间")
    private Date modifyTime;

    public Long getId() {
        return id;
    }

    public String getType() {
        return type;
    }

    public String getName() {
        return name;
    }

    public String getValue() {
        return value;
    }

    public Long getVersion() {
        return version;
    }

    public Date getCreateTime() {
        return createTime;
    }

    public Date getModifyTime() {
        return modifyTime;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public void setType(String type) {
        this.type = type;
    }

    public void setName(String name) {
        this.name = name;
    }

    public void setValue(String value) {
        this.value = value;
    }

    public void setVersion(Long version) {
        this.version = version;
    }

    public void setCreateTime(Date createTime) {
        this.createTime = createTime;
    }

    public void setModifyTime(Date modifyTime) {
        this.modifyTime = modifyTime;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("GatewayConfigVO{");
        sb.append("id=").append(id)
          .append(", type='").append(type).append('\'')
          .append(", name='").append(name).append('\'')
          .append(", value='").append(value).append('\'')
          .append(", version=").append(version)
          .append(", createTime=").append(createTime)
          .append(", modifyTime=").append(modifyTime)
          .append('}');
        return sb.toString();
    }
}
package com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
@ApiModel(description = "Broker基本信息")
public class ControllerPreferredCandidateVO {
    @ApiModelProperty(value = "brokerId")
    private Integer brokerId;

    @ApiModelProperty(value = "主机名")
    private String host;

    @ApiModelProperty(value = "启动时间")
    private Long startTime;

    @ApiModelProperty(value = "broker状态[0:在线, -1:不在线]")
    private Integer status;

    public Integer getBrokerId() {
        return brokerId;
    }

    public void setBrokerId(Integer brokerId) {
        this.brokerId = brokerId;
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public Long getStartTime() {
        return startTime;
    }

    public void setStartTime(Long startTime) {
        this.startTime = startTime;
    }

    public Integer getStatus() {
        return status;
    }

    public void setStatus(Integer status) {
        this.status = status;
    }

    @Override
    public String toString() {
        // Fix: previously printed "ControllerPreferredBrokerVO", which does not match the class name.
        return "ControllerPreferredCandidateVO{" +
                "brokerId=" + brokerId +
                ", host='" + host + '\'' +
                ", startTime=" + startTime +
                ", status=" + status +
                '}';
    }
}
......@@ -9,6 +9,7 @@ import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.TopicConnectionDO
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
/**
......@@ -52,7 +53,7 @@ public class JsonUtils {
return JSON.toJSONString(obj);
}
public static List<TopicConnectionDO> parseTopicConnections(Long clusterId, JSONObject jsonObject) {
public static List<TopicConnectionDO> parseTopicConnections(Long clusterId, JSONObject jsonObject, long postTime) {
List<TopicConnectionDO> connectionDOList = new ArrayList<>();
for (String clientType: jsonObject.keySet()) {
JSONObject topicObject = jsonObject.getJSONObject(clientType);
......@@ -73,6 +74,7 @@ public class JsonUtils {
connectionDO.setClusterId(clusterId);
connectionDO.setTopicName(topicName);
connectionDO.setType(clientType);
connectionDO.setCreateTime(new Date(postTime));
connectionDOList.add(connectionDO);
}
}
......
package com.xiaojukeji.kafka.manager.common.utils;
import com.xiaojukeji.kafka.manager.common.bizenum.IDCEnum;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import org.apache.commons.lang.StringUtils;
import java.util.List;
......@@ -83,23 +81,4 @@ public class ValidateUtils {
public static boolean isNullOrLessThanZero(Double value) {
return value == null || value < 0;
}
public static boolean topicNameLegal(String idc, String topicName) {
if (ValidateUtils.isNull(idc) || ValidateUtils.isNull(topicName)) {
return false;
}
// 校验Topic的长度
if (topicName.length() >= TopicCreationConstant.TOPIC_NAME_MAX_LENGTH) {
return false;
}
// 校验前缀
if (IDCEnum.CN.getIdc().equals(idc) ||
(IDCEnum.US.getIdc().equals(idc) && topicName.startsWith(TopicCreationConstant.TOPIC_NAME_PREFIX_US)) ||
(IDCEnum.RU.getIdc().equals(idc) && topicName.startsWith(TopicCreationConstant.TOPIC_NAME_PREFIX_RU))) {
return true;
}
return false;
}
}
\ No newline at end of file
......@@ -18,6 +18,8 @@ public class ZkPathUtil {
public static final String CONSUMER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "consumers";
public static final String REASSIGN_PARTITIONS_ROOT_NODE = "/admin/reassign_partitions";
/**
* config
*/
......@@ -27,11 +29,11 @@ public class ZkPathUtil {
public static final String CONFIG_CLIENTS_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "clients";
public static final String CONFIG_ENTITY_CHANGES_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "changes/config_change_";
public static final String CONFIG_ENTITY_CHANGES_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "changes/config_change_";
public static final String REASSIGN_PARTITIONS_ROOT_NODE = "/admin/reassign_partitions";
private static final String D_METRICS_CONFIG_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "KafkaExMetrics";
private static final String D_METRICS_CONFIG_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "KafkaExMetrics";
public static final String D_CONTROLLER_CANDIDATES = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "extension/candidates";
public static String getBrokerIdNodePath(Integer brokerId) {
return BROKER_IDS_ROOT + ZOOKEEPER_SEPARATOR + String.valueOf(brokerId);
......
......@@ -92,20 +92,4 @@ public class ConsumerMetadataCache {
}
return consumerMetadata.getTopicNameConsumerGroupMap().getOrDefault(topicName, new HashSet<>());
}
public static Map<String, List<String>> getConsumerGroupAppIdListInZk(Long clusterId) {
ConsumerMetadata consumerMetadata = CG_METADATA_IN_ZK_MAP.get(clusterId);
if(consumerMetadata == null){
return new HashMap<>(0);
}
return consumerMetadata.getConsumerGroupAppMap();
}
public static Map<String, List<String>> getConsumerGroupAppIdListInBK(Long clusterId) {
ConsumerMetadata consumerMetadata = CG_METADATA_IN_BK_MAP.get(clusterId);
if(consumerMetadata == null){
return new HashMap<>(0);
}
return consumerMetadata.getConsumerGroupAppMap();
}
}
package com.xiaojukeji.kafka.manager.service.cache;
import com.google.common.collect.Sets;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.LogicalClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.RegionDO;
......@@ -15,6 +16,7 @@ import org.springframework.stereotype.Service;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
/**
* 逻辑集群元信息
......@@ -144,9 +146,16 @@ public class LogicalClusterMetadataManager {
@Scheduled(cron="0/30 * * * * ?")
public void flush() {
List<LogicalClusterDO> logicalClusterDOList = logicalClusterService.listAll();
if (ValidateUtils.isEmptyList(logicalClusterDOList)) {
return;
if (ValidateUtils.isNull(logicalClusterDOList)) {
logicalClusterDOList = Collections.EMPTY_LIST;
}
Set<Long> inDbLogicalClusterIds = logicalClusterDOList.stream()
.map(LogicalClusterDO::getId)
.collect(Collectors.toSet());
// inCache 和 inDb 取差集,差集结果为已删除的、新增的.
Sets.SetView<Long> diffLogicalClusterIds = Sets.difference(LOGICAL_CLUSTER_MAP.keySet(), inDbLogicalClusterIds);
diffLogicalClusterIds.forEach(logicalClusterId -> delLogicalClusterInCache(logicalClusterId));
Map<Long, RegionDO> regionMap = new HashMap<>();
List<RegionDO> regionDOList = regionService.listAll();
......@@ -197,4 +206,11 @@ public class LogicalClusterMetadataManager {
}
TOPIC_LOGICAL_MAP.put(logicalClusterDO.getClusterId(), subMap);
}
private void delLogicalClusterInCache(Long logicalClusterId) {
LOGICAL_CLUSTER_ID_TOPIC_NAME_MAP.remove(logicalClusterId);
LOGICAL_CLUSTER_ID_BROKER_ID_MAP.remove(logicalClusterId);
LOGICAL_CLUSTER_MAP.remove(logicalClusterId);
TOPIC_LOGICAL_MAP.remove(logicalClusterId);
}
}
\ No newline at end of file
......@@ -13,6 +13,8 @@ import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConnectorWrap;
import com.xiaojukeji.kafka.manager.dao.TopicDao;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import com.xiaojukeji.kafka.manager.service.service.JmxService;
import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import com.xiaojukeji.kafka.manager.service.zookeeper.*;
......@@ -48,6 +50,12 @@ public class PhysicalClusterMetadataManager {
@Autowired
private ConfigUtils configUtils;
@Autowired
private TopicDao topicDao;
@Autowired
private AuthorityDao authorityDao;
private final static Map<Long, ClusterDO> CLUSTER_MAP = new ConcurrentHashMap<>();
private final static Map<Long, ControllerData> CONTROLLER_DATA_MAP = new ConcurrentHashMap<>();
......@@ -116,7 +124,7 @@ public class PhysicalClusterMetadataManager {
zkConfig.watchChildren(ZkPathUtil.BROKER_IDS_ROOT, brokerListener);
//增加Topic监控
TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig);
TopicStateListener topicListener = new TopicStateListener(clusterDO.getId(), zkConfig, topicDao, authorityDao);
topicListener.init();
zkConfig.watchChildren(ZkPathUtil.BROKER_TOPICS_ROOT, topicListener);
......
......@@ -9,6 +9,19 @@ import java.util.List;
import java.util.Properties;
public interface AdminService {
/**
* 创建Topic
* @param clusterDO 集群DO
* @param topicDO TopicDO
* @param partitionNum 分区数
* @param replicaNum 副本数
* @param regionId RegionID
* @param brokerIdList BrokerId
* @param properties Topic属性
* @param applicant 申请人
* @param operator 操作人
* @return 操作状态
*/
ResultStatus createTopic(ClusterDO clusterDO,
TopicDO topicDO,
Integer partitionNum,
......@@ -19,19 +32,86 @@ public interface AdminService {
String applicant,
String operator);
/**
* 删除Topic
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param operator 操作人
* @return 操作状态
*/
ResultStatus deleteTopic(ClusterDO clusterDO,
String topicName,
String operator);
/**
* 优先副本选举状态
* @param clusterDO 集群DO
* @return 任务状态
*/
TaskStatusEnum preferredReplicaElectionStatus(ClusterDO clusterDO);
/**
* 集群纬度优先副本选举
* @param clusterDO 集群DO
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, String operator);
/**
* Broker纬度优先副本选举
* @param clusterDO 集群DO
* @param brokerId BrokerID
* @param operator 操作人
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, Integer brokerId, String operator);
/**
* Topic纬度优先副本选举
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param operator 操作人
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, String operator);
/**
* 分区纬度优先副本选举
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param partitionId 分区ID
* @param operator 操作人
* @return 任务状态
*/
ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, Integer partitionId, String operator);
/**
* Topic扩分区
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param partitionNum 新增? 分区数
* @param regionId RegionID
* @param brokerIdList 集群ID
* @param operator 操作人
* @return 任务状态
*/
ResultStatus expandPartitions(ClusterDO clusterDO, String topicName, Integer partitionNum, Long regionId, List<Integer> brokerIdList, String operator);
/**
* 获取Topic配置
* @param clusterDO 集群DO
* @param topicName Topic名称
* @return 任务状态
*/
Properties getTopicConfig(ClusterDO clusterDO, String topicName);
/**
* 修改Topic配置
* @param clusterDO 集群DO
* @param topicName Topic名称
* @param properties 新的属性
* @param operator 操作人
* @return 任务状态
*/
ResultStatus modifyTopicConfig(ClusterDO clusterDO, String topicName, Properties properties, String operator);
}
package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.ClusterDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.ControllerPreferredCandidate;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster.ClusterNameDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO;
......@@ -43,5 +45,10 @@ public interface ClusterService {
ResultStatus deleteById(Long clusterId);
ClusterDO selectSuitableCluster(Long clusterId, String dataCenter);
/**
* 获取优先被选举为controller的broker
* @param clusterId 集群ID
* @return void
*/
Result<List<ControllerPreferredCandidate>> getControllerPreferredCandidates(Long clusterId);
}
......@@ -2,14 +2,14 @@ package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumeDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupSummary;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* consumer相关的服务接口
......@@ -20,33 +20,36 @@ public interface ConsumerService {
/**
* 获取消费组列表
*/
List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId);
List<ConsumerGroup> getConsumerGroupList(Long clusterId);
/**
* 查询消费Topic的消费组
*/
List<ConsumerGroupDTO> getConsumerGroupList(Long clusterId, String topicName);
List<ConsumerGroup> getConsumerGroupList(Long clusterId, String topicName);
/**
* 获取消费Topic的消费组概要信息
*/
List<ConsumerGroupSummary> getConsumerGroupSummaries(Long clusterId, String topicName);
/**
* 查询消费详情
*/
List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO, String topicName, ConsumerGroupDTO consumerGroupDTO);
List<ConsumeDetailDTO> getConsumeDetail(ClusterDO clusterDO, String topicName, ConsumerGroup consumerGroup);
/**
* 获取消费组消费的Topic列表
*/
List<String> getConsumerGroupConsumedTopicList(Long clusterId, String consumerGroup, String location);
Map<Integer, Long> getConsumerOffset(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumerGroupDTO);
Map<Integer, Long> getConsumerOffset(ClusterDO clusterDO, String topicName, ConsumerGroup consumerGroup);
/**
* 重置offset
*/
List<Result> resetConsumerOffset(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumerGroupDTO,
ConsumerGroup consumerGroup,
List<PartitionOffsetDTO> partitionOffsetDTOList);
Map<Long, Integer> getConsumerGroupNumMap(List<ClusterDO> clusterDOList);
......
......@@ -66,6 +66,19 @@ public interface TopicManagerService {
*/
ResultStatus modifyTopic(Long clusterId, String topicName, String description, String operator);
/**
* 修改Topic
* @param clusterId 集群ID
* @param topicName Topic名称
* @param appId 所属应用
* @param description 备注
* @param operator 操作人
* @author zengqiao
* @date 20/5/12
* @return ResultStatus
*/
ResultStatus modifyTopicByOp(Long clusterId, String topicName, String appId, String description, String operator);
/**
* 通过Topic名称删除Topic
* @param clusterId 集群id
......
......@@ -3,11 +3,27 @@ package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.didi.TopicJmxSwitch;
import java.util.List;
/**
* ZK相关的接口
* @author tukun
* @date 2015/11/11.
*/
public interface ZookeeperService {
/**
* 开启JMX
* @param clusterId 集群ID
* @param topicName Topic名称
* @param jmxSwitch JMX开关
* @return 操作结果
*/
Result openTopicJmx(Long clusterId, String topicName, TopicJmxSwitch jmxSwitch);
/**
* 获取优先被选举为controller的broker
* @param clusterId 集群ID
* @return 操作结果
*/
Result<List<Integer>> getControllerPreferredCandidates(Long clusterId);
}
......@@ -60,4 +60,6 @@ public interface AuthorityService {
int addAuthorityAndQuota(AuthorityDO authorityDO, TopicQuota quota);
Map<String, Map<Long, Map<String, AuthorityDO>>> getAllAuthority();
int deleteAuthorityByTopic(Long clusterId, String topicName);
}
package com.xiaojukeji.kafka.manager.service.service.gateway;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.gateway.*;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO;
import java.util.List;
public interface GatewayConfigService {
    /**
     * Get the cluster bootstrap-server config used by service discovery.
     * @param requestVersion version held by the caller; used to decide whether newer config exists
     * @return the bootstrap-server config (content when the stored version is newer than
     *         requestVersion is implementation-defined — see the implementing class)
     */
    KafkaBootstrapServerConfig getKafkaBootstrapServersConfig(Long requestVersion);

    /**
     * Get the request-queue config used by service discovery.
     * @param requestVersion version held by the caller
     * @return the request-queue config
     */
    RequestQueueConfig getRequestQueueConfig(Long requestVersion);

    /**
     * Get the per-app request-rate config used by service discovery.
     * @param requestVersion version held by the caller
     * @return the app rate config
     */
    AppRateConfig getAppRateConfig(Long requestVersion);

    /**
     * Get the per-IP request-rate config used by service discovery.
     * @param requestVersion version held by the caller
     * @return the IP rate config
     */
    IpRateConfig getIpRateConfig(Long requestVersion);

    /**
     * Get the specific (per-IP or per-app dimension) rate-limit config used by service discovery.
     * @param requestVersion version held by the caller
     * @return the specific rate config
     */
    SpRateConfig getSpRateConfig(Long requestVersion);

    /**
     * Look up a single gateway config entry.
     * @param configType config type
     * @param configName config name
     * @return the matching config entry (behavior when absent is implementation-defined)
     */
    GatewayConfigDO getByTypeAndName(String configType, String configName);

    /**
     * List all gateway config entries.
     * @return all config entries
     */
    List<GatewayConfigDO> list();

    /**
     * Create a new gateway config entry.
     * @param gatewayConfigDO config to insert
     * @return operation result
     */
    Result insert(GatewayConfigDO gatewayConfigDO);

    /**
     * Delete a gateway config entry by id.
     * @param id config id
     * @return operation result
     */
    Result deleteById(Long id);

    /**
     * Update an existing gateway config entry by its id.
     * @param gatewayConfigDO config carrying the id and new values
     * @return operation result
     */
    Result updateById(GatewayConfigDO gatewayConfigDO);

    /**
     * Look up a gateway config entry by id.
     * @param id config id
     * @return the matching config entry (behavior when absent is implementation-defined)
     */
    GatewayConfigDO getById(Long id);
}
......@@ -196,8 +196,7 @@ public class AppServiceImpl implements AppService {
}
@Override
public List<AppTopicDTO> getAppTopicDTOList(String appId,
Boolean mine) {
public List<AppTopicDTO> getAppTopicDTOList(String appId, Boolean mine) {
// 查询AppID
AppDO appDO = appDao.getByAppId(appId);
if (ValidateUtils.isNull(appDO)) {
......@@ -223,13 +222,17 @@ public class AppServiceImpl implements AppService {
TopicDO topicDO = topicMap
.getOrDefault(authorityDO.getClusterId(), new HashMap<>())
.get(authorityDO.getTopicName());
if (ValidateUtils.isNull(topicDO)) {
continue;
}
if (Boolean.TRUE.equals(mine)
&& (ValidateUtils.isNull(topicDO) || !topicDO.getAppId().equals(appId))) {
&& !topicDO.getAppId().equals(appId)) {
continue;
}
if (Boolean.FALSE.equals(mine)
&& !ValidateUtils.isNull(topicDO)
&& topicDO.getAppId().equals(appId)) {
continue;
}
......
......@@ -192,4 +192,10 @@ public class AuthorityServiceImpl implements AuthorityService {
public Map<String, Map<Long, Map<String, AuthorityDO>>> getAllAuthority() {
return authorityDao.getAllAuthority();
}
@Override
public int deleteAuthorityByTopic(Long clusterId, String topicName) {
return authorityDao.deleteAuthorityByTopic(clusterId, topicName);
}
}
\ No newline at end of file
......@@ -2,6 +2,8 @@ package com.xiaojukeji.kafka.manager.service.service.gateway.impl;
import com.alibaba.fastjson.JSON;
import com.xiaojukeji.kafka.manager.common.bizenum.gateway.GatewayConfigKeyEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.gateway.*;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
......@@ -13,6 +15,7 @@ import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
......@@ -21,7 +24,7 @@ import java.util.Map;
* @author zengqiao
* @date 20/7/28
*/
@Service("gatewayConfigService")
@Service
public class GatewayConfigServiceImpl implements GatewayConfigService {
private final Logger LOGGER = LoggerFactory.getLogger(GatewayConfigServiceImpl.class);
......@@ -52,7 +55,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
? new KafkaBootstrapServerConfig(maxVersion, clusterIdBootstrapServersMap)
: new KafkaBootstrapServerConfig(requestVersion, new HashMap<>(0));
} catch (Exception e) {
LOGGER.error("get kafka bootstrap servers config failed, data:{}.", JSON.toJSONString(doList), e);
LOGGER.error("class=GatewayConfigServiceImpl||method=getKafkaBootstrapServersConfig||data={}||errMsg={}||msg=get kafka bootstrap servers config failed",
JSON.toJSONString(doList), e.getMessage());
}
return null;
}
......@@ -71,7 +75,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
return new RequestQueueConfig(configDO.getVersion(), Long.valueOf(configDO.getValue()));
} catch (Exception e) {
LOGGER.error("get request queue config failed, data:{}.", JSON.toJSONString(configDO), e);
LOGGER.error("class=GatewayConfigServiceImpl||method=getRequestQueueConfig||data={}||errMsg={}||msg=get request queue config failed",
JSON.toJSONString(configDO), e.getMessage());
}
return null;
}
......@@ -90,7 +95,8 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
return new AppRateConfig(configDO.getVersion(), Long.valueOf(configDO.getValue()));
} catch (Exception e) {
LOGGER.error("get app rate config failed, data:{}.", JSON.toJSONString(configDO), e);
LOGGER.error("class=GatewayConfigServiceImpl||method=getAppRateConfig||data={}||errMsg={}||msg=get app rate config failed",
JSON.toJSONString(configDO), e.getMessage());
}
return null;
}
......@@ -153,4 +159,94 @@ public class GatewayConfigServiceImpl implements GatewayConfigService {
}
return null;
}
@Override
public List<GatewayConfigDO> list() {
try {
return gatewayConfigDao.list();
} catch (Exception e) {
LOGGER.debug("class=GatewayConfigServiceImpl||method=list||errMsg={}||msg=list failed", e.getMessage());
}
return new ArrayList<>();
}
@Override
public Result insert(GatewayConfigDO gatewayConfigDO) {
try {
GatewayConfigKeyEnum configKeyEnum = GatewayConfigKeyEnum.getByConfigType(gatewayConfigDO.getType());
if (ValidateUtils.isNull(configKeyEnum)
&& ValidateUtils.isBlank(gatewayConfigDO.getName())
&& ValidateUtils.isBlank(gatewayConfigDO.getValue())) {
// 参数错误
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
// 获取当前同类配置, 插入之后需要增大这个version
List<GatewayConfigDO> gatewayConfigDOList = gatewayConfigDao.getByConfigType(gatewayConfigDO.getType());
Long version = 1L;
for (GatewayConfigDO elem: gatewayConfigDOList) {
if (elem.getVersion() > version) {
version = elem.getVersion() + 1L;
}
}
gatewayConfigDO.setVersion(version);
if (gatewayConfigDao.insert(gatewayConfigDO) > 0) {
return Result.buildSuc();
}
return Result.buildFrom(ResultStatus.MYSQL_ERROR);
} catch (Exception e) {
LOGGER.debug("class=GatewayConfigServiceImpl||method=insert||data={}||errMsg={}||msg=insert failed", gatewayConfigDO, e.getMessage());
}
return Result.buildFrom(ResultStatus.MYSQL_ERROR);
}
@Override
public Result deleteById(Long id) {
try {
if (gatewayConfigDao.deleteById(id) > 0) {
return Result.buildSuc();
}
return Result.buildFrom(ResultStatus.RESOURCE_NOT_EXIST);
} catch (Exception e) {
LOGGER.debug("class=GatewayConfigServiceImpl||method=deleteById||id={}||errMsg={}||msg=delete failed", id, e.getMessage());
}
return Result.buildFrom(ResultStatus.MYSQL_ERROR);
}
@Override
public Result updateById(GatewayConfigDO newGatewayConfigDO) {
try {
GatewayConfigDO oldGatewayConfigDO = this.getById(newGatewayConfigDO.getId());
if (ValidateUtils.isNull(oldGatewayConfigDO)) {
return Result.buildFrom(ResultStatus.RESOURCE_NOT_EXIST);
}
if (!oldGatewayConfigDO.getName().equals(newGatewayConfigDO.getName())
|| !oldGatewayConfigDO.getType().equals(newGatewayConfigDO.getType())
|| ValidateUtils.isBlank(newGatewayConfigDO.getValue())) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
newGatewayConfigDO.setVersion(oldGatewayConfigDO.getVersion() + 1);
if (gatewayConfigDao.updateById(oldGatewayConfigDO) > 0) {
return Result.buildSuc();
}
return Result.buildFrom(ResultStatus.MYSQL_ERROR);
} catch (Exception e) {
LOGGER.debug("class=GatewayConfigServiceImpl||method=updateById||data={}||errMsg={}||msg=update failed", newGatewayConfigDO, e.getMessage());
}
return Result.buildFrom(ResultStatus.MYSQL_ERROR);
}
@Override
public GatewayConfigDO getById(Long id) {
if (ValidateUtils.isNull(id)) {
return null;
}
try {
return gatewayConfigDao.getById(id);
} catch (Exception e) {
LOGGER.debug("class=GatewayConfigServiceImpl||method=getById||id={}||errMsg={}||msg=get failed", id, e.getMessage());
}
return null;
}
}
\ No newline at end of file
......@@ -13,6 +13,7 @@ import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicDO;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.*;
import com.xiaojukeji.kafka.manager.service.service.gateway.AuthorityService;
......@@ -139,6 +140,9 @@ public class AdminServiceImpl implements AdminService {
// 3. 数据库中删除topic
topicManagerService.deleteByTopicName(clusterDO.getId(), topicName);
// 4. 数据库中删除authority
authorityService.deleteAuthorityByTopic(clusterDO.getId(), topicName);
return rs;
}
......@@ -191,15 +195,55 @@ public class AdminServiceImpl implements AdminService {
@Override
public ResultStatus preferredReplicaElection(ClusterDO clusterDO, Integer brokerId, String operator) {
BrokerMetadata brokerMetadata = PhysicalClusterMetadataManager.getBrokerMetadata(clusterDO.getId(), brokerId);
if (null == brokerMetadata) {
if (ValidateUtils.isNull(brokerMetadata)) {
return ResultStatus.PARAM_ILLEGAL;
}
Map<String, List<Integer>> partitionMap = topicService.getTopicPartitionIdMap(clusterDO.getId(), brokerId);
if (ValidateUtils.isEmptyMap(partitionMap)) {
return ResultStatus.SUCCESS;
}
return preferredReplicaElection(clusterDO, partitionMap, operator);
}
@Override
public ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, String operator) {
TopicMetadata topicMetadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterDO.getId(), topicName);
if (ValidateUtils.isNull(topicMetadata)) {
return ResultStatus.TOPIC_NOT_EXIST;
}
Map<String, List<Integer>> partitionMap = new HashMap<>();
partitionMap.put(topicName, new ArrayList<>(topicMetadata.getPartitionMap().getPartitions().keySet()));
return preferredReplicaElection(clusterDO, partitionMap, operator);
}
@Override
public ResultStatus preferredReplicaElection(ClusterDO clusterDO, String topicName, Integer partitionId, String operator) {
TopicMetadata topicMetadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterDO.getId(), topicName);
if (ValidateUtils.isNull(topicMetadata)) {
return ResultStatus.TOPIC_NOT_EXIST;
}
if (!topicMetadata.getPartitionMap().getPartitions().containsKey(partitionId)) {
return ResultStatus.PARTITION_NOT_EXIST;
}
Map<String, List<Integer>> partitionMap = new HashMap<>();
partitionMap.put(topicName, Arrays.asList(partitionId));
return preferredReplicaElection(clusterDO, partitionMap, operator);
}
private ResultStatus preferredReplicaElection(ClusterDO clusterDO, Map<String, List<Integer>> partitionMap, String operator) {
if (ValidateUtils.isEmptyMap(partitionMap)) {
return ResultStatus.SUCCESS;
}
ZkUtils zkUtils = null;
try {
Map<String, List<Integer>> partitionMap = topicService.getTopicPartitionIdMap(clusterDO.getId(), brokerId);
if (partitionMap == null || partitionMap.isEmpty()) {
return ResultStatus.SUCCESS;
}
String preferredReplicaElectString = convert2preferredReplicaElectString(partitionMap);
zkUtils = ZkUtils.apply(clusterDO.getZookeeper(),
......
package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.DBStatusEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.ClusterDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.ControllerPreferredCandidate;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster.ClusterNameDTO;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.*;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata;
import com.xiaojukeji.kafka.manager.dao.ClusterDao;
import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
......@@ -14,6 +19,7 @@ import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.service.service.ConsumerService;
import com.xiaojukeji.kafka.manager.service.service.RegionService;
import com.xiaojukeji.kafka.manager.service.service.ZookeeperService;
import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
......@@ -57,6 +63,9 @@ public class ClusterServiceImpl implements ClusterService {
@Autowired
private ConfigUtils configUtils;
@Autowired
private ZookeeperService zookeeperService;
@Override
public ResultStatus addNew(ClusterDO clusterDO, String operator) {
if (ValidateUtils.isNull(clusterDO) || ValidateUtils.isNull(operator)) {
......@@ -262,21 +271,6 @@ public class ClusterServiceImpl implements ClusterService {
return ResultStatus.SUCCESS;
}
    @Override
    public ClusterDO selectSuitableCluster(Long clusterId, String dataCenter) {
        // An explicit, valid cluster id takes precedence over data-center selection.
        if (!ValidateUtils.isNullOrLessThanZero(clusterId)) {
            return getById(clusterId);
        }
        if (ValidateUtils.isBlank(dataCenter)) {
            return null;
        }
        List<ClusterDO> clusterDOList = this.listAll();
        if (ValidateUtils.isEmptyList(clusterDOList)) {
            return null;
        }
        // NOTE(review): dataCenter is only used as a non-blank precondition;
        // the first cluster is returned regardless of which data center it
        // belongs to — confirm this is the intended fallback.
        return clusterDOList.get(0);
    }
private ClusterDetailDTO getClusterDetailDTO(ClusterDO clusterDO, Boolean needDetail) {
if (ValidateUtils.isNull(clusterDO)) {
return null;
......@@ -300,4 +294,31 @@ public class ClusterServiceImpl implements ClusterService {
dto.setControllerId(PhysicalClusterMetadataManager.getControllerId(clusterDO.getId()));
return dto;
}
@Override
public Result<List<ControllerPreferredCandidate>> getControllerPreferredCandidates(Long clusterId) {
Result<List<Integer>> candidateResult = zookeeperService.getControllerPreferredCandidates(clusterId);
if (candidateResult.failed()) {
return new Result<>(candidateResult.getCode(), candidateResult.getMessage());
}
if (ValidateUtils.isEmptyList(candidateResult.getData())) {
return Result.buildSuc(new ArrayList<>());
}
List<ControllerPreferredCandidate> controllerPreferredCandidateList = new ArrayList<>();
for (Integer brokerId: candidateResult.getData()) {
ControllerPreferredCandidate controllerPreferredCandidate = new ControllerPreferredCandidate();
controllerPreferredCandidate.setBrokerId(brokerId);
BrokerMetadata brokerMetadata = PhysicalClusterMetadataManager.getBrokerMetadata(clusterId, brokerId);
if (ValidateUtils.isNull(brokerMetadata)) {
controllerPreferredCandidate.setStatus(DBStatusEnum.DEAD.getStatus());
} else {
controllerPreferredCandidate.setHost(brokerMetadata.getHost());
controllerPreferredCandidate.setStartTime(brokerMetadata.getTimestamp());
controllerPreferredCandidate.setStatus(DBStatusEnum.ALIVE.getStatus());
}
controllerPreferredCandidateList.add(controllerPreferredCandidate);
}
return Result.buildSuc(controllerPreferredCandidateList);
}
}
......@@ -3,6 +3,7 @@ package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.TopicAuthorityEnum;
import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.RdTopicBasic;
......@@ -14,6 +15,7 @@ import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AppDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AuthorityDO;
import com.xiaojukeji.kafka.manager.common.utils.DateUtils;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.NumberUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
......@@ -33,6 +35,7 @@ import com.xiaojukeji.kafka.manager.service.utils.KafkaZookeeperUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
......@@ -345,6 +348,47 @@ public class TopicManagerServiceImpl implements TopicManagerService {
return ResultStatus.MYSQL_ERROR;
}
@Override
public ResultStatus modifyTopicByOp(Long clusterId, String topicName, String appId, String description, String operator) {
try {
if (!PhysicalClusterMetadataManager.isTopicExist(clusterId, topicName)) {
return ResultStatus.TOPIC_NOT_EXIST;
}
AppDO appDO = appService.getByAppId(appId);
if (ValidateUtils.isNull(appDO)) {
return ResultStatus.APP_NOT_EXIST;
}
TopicDO topicDO = topicDao.getByTopicName(clusterId, topicName);
if (ValidateUtils.isNull(topicDO)) {
// 不存在, 则需要插入
topicDO = new TopicDO();
topicDO.setAppId(appId);
topicDO.setClusterId(clusterId);
topicDO.setTopicName(topicName);
topicDO.setPeakBytesIn(TopicCreationConstant.DEFAULT_QUOTA);
topicDO.setDescription(description);
this.addTopic(topicDO);
} else {
// 存在, 则直接更新
topicDO.setAppId(appId);
topicDO.setDescription(description);
topicDao.updateByName(topicDO);
}
AuthorityDO authorityDO = new AuthorityDO();
authorityDO.setAppId(appId);
authorityDO.setClusterId(clusterId);
authorityDO.setTopicName(topicName);
authorityDO.setAccess(TopicAuthorityEnum.READ_WRITE.getCode());
authorityService.addAuthority(authorityDO);
} catch (Exception e) {
LOGGER.error("modify topic failed, clusterId:{} topicName:{} description:{} operator:{} ",
clusterId, topicName, description, operator, e);
}
return ResultStatus.MYSQL_ERROR;
}
@Override
public int deleteByTopicName(Long clusterId, String topicName) {
try {
......@@ -359,6 +403,9 @@ public class TopicManagerServiceImpl implements TopicManagerService {
public int addTopic(TopicDO topicDO) {
try {
return topicDao.insert(topicDO);
} catch (DuplicateKeyException duplicateKeyException) {
// 主建重复了, 非重要问题
LOGGER.debug("class=TopicManagerServiceImpl||method=addTopic||data={}||msg=exist duplicate topic", JsonUtils.toJSONString(topicDO));
} catch (Exception e) {
LOGGER.error("insert topic failed, TopicDO:{}", topicDO.toString(), e);
}
......
......@@ -29,6 +29,7 @@ import com.xiaojukeji.kafka.manager.service.cache.LogicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.*;
import com.xiaojukeji.kafka.manager.service.service.gateway.AppService;
import com.xiaojukeji.kafka.manager.service.strategy.AbstractHealthScoreStrategy;
import com.xiaojukeji.kafka.manager.service.utils.KafkaZookeeperUtils;
import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
......@@ -83,6 +84,9 @@ public class TopicServiceImpl implements TopicService {
@Autowired
private RegionService regionService;
@Autowired
private AbstractHealthScoreStrategy healthScoreStrategy;
@Override
public List<TopicMetricsDO> getTopicMetricsFromDB(Long clusterId, String topicName, Date startTime, Date endTime) {
try {
......@@ -235,7 +239,7 @@ public class TopicServiceImpl implements TopicService {
basicDTO.setRegionNameList(regionDOList.stream().map(RegionDO::getName).collect(Collectors.toList()));
basicDTO.setTopicCodeC(jmxService.getTopicCodeCValue(clusterId, topicName));
basicDTO.setScore(100);
basicDTO.setScore(healthScoreStrategy.calTopicHealthScore(clusterId, topicName));
return basicDTO;
}
......
......@@ -2,8 +2,10 @@ package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkPathUtil;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.didi.TopicJmxSwitch;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
......@@ -13,6 +15,9 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.List;
/**
* @author zengqiao
* @date 20/8/27
......@@ -40,4 +45,29 @@ public class ZookeeperServiceImpl implements ZookeeperService {
}
return new Result();
}
@Override
public Result<List<Integer>> getControllerPreferredCandidates(Long clusterId) {
if (ValidateUtils.isNull(clusterId)) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
ZkConfigImpl zkConfig = PhysicalClusterMetadataManager.getZKConfig(clusterId);
if (ValidateUtils.isNull(zkConfig)) {
return Result.buildFrom(ResultStatus.CONNECT_ZOOKEEPER_FAILED);
}
try {
if (!zkConfig.checkPathExists(ZkPathUtil.D_CONTROLLER_CANDIDATES)) {
return Result.buildSuc(new ArrayList<>());
}
List<String> brokerIdList = zkConfig.getChildren(ZkPathUtil.D_CONTROLLER_CANDIDATES);
if (ValidateUtils.isEmptyList(brokerIdList)) {
return Result.buildSuc(new ArrayList<>());
}
return Result.buildSuc(ListUtils.string2IntList(ListUtils.strList2String(brokerIdList)));
} catch (Exception e) {
LOGGER.error("class=ZookeeperServiceImpl||method=getControllerPreferredCandidates||clusterId={}||errMsg={}", clusterId, e.getMessage());
}
return Result.buildFrom(ResultStatus.READ_ZOOKEEPER_FAILED);
}
}
\ No newline at end of file
......@@ -72,8 +72,8 @@ public class DidiHealthScoreStrategy extends AbstractHealthScoreStrategy {
// 数据获取失败
return Constant.INVALID_CODE;
}
if (((Double) failedFetchRequestsPerSecOneMinuteRate) > 0
|| ((Double) failedProduceRequestsPerSecOneMinuteRate) > 0) {
if (((Double) failedFetchRequestsPerSecOneMinuteRate) > 0.01
|| ((Double) failedProduceRequestsPerSecOneMinuteRate) > 0.01) {
return HEALTH_SCORE_VERY_BAD;
}
......
......@@ -5,6 +5,8 @@ import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata
import com.xiaojukeji.kafka.manager.common.zookeeper.StateChangeListener;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkConfigImpl;
import com.xiaojukeji.kafka.manager.common.zookeeper.ZkPathUtil;
import com.xiaojukeji.kafka.manager.dao.TopicDao;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.cache.ThreadPool;
import org.apache.zookeeper.data.Stat;
......@@ -28,11 +30,22 @@ public class TopicStateListener implements StateChangeListener {
private ZkConfigImpl zkConfig;
private TopicDao topicDao;
private AuthorityDao authorityDao;
    /**
     * Listener without DB access.
     * NOTE(review): this constructor leaves topicDao/authorityDao null, and
     * processTopicDelete dereferences both — confirm this constructor is not
     * used on a path where topics can be deleted, or add null guards there.
     */
    public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig) {
        this.clusterId = clusterId;
        this.zkConfig = zkConfig;
    }
    /**
     * Listener that also evicts deleted topics from the DAO-level caches.
     *
     * @param clusterId    physical cluster id this listener watches
     * @param zkConfig     ZK client for the cluster
     * @param topicDao     used to evict deleted topics from the topic cache
     * @param authorityDao used to evict deleted topics from the authority cache
     */
    public TopicStateListener(Long clusterId, ZkConfigImpl zkConfig, TopicDao topicDao, AuthorityDao authorityDao) {
        this.clusterId = clusterId;
        this.zkConfig = zkConfig;
        this.topicDao = topicDao;
        this.authorityDao = authorityDao;
    }
@Override
public void init() {
try {
......@@ -79,6 +92,8 @@ public class TopicStateListener implements StateChangeListener {
private void processTopicDelete(String topicName) {
LOGGER.warn("delete topic, clusterId:{} topicName:{}.", clusterId, topicName);
PhysicalClusterMetadataManager.removeTopicMetadata(clusterId, topicName);
topicDao.removeTopicInCache(clusterId, topicName);
authorityDao.removeAuthorityInCache(clusterId, topicName);
}
private void processTopicAdded(String topicName) {
......
......@@ -22,4 +22,6 @@ public interface TopicDao {
List<TopicDO> listAll();
TopicDO getTopic(Long clusterId, String topicName, String appId);
TopicDO removeTopicInCache(Long clusterId, String topicName);
}
\ No newline at end of file
......@@ -37,4 +37,8 @@ public interface AuthorityDao {
List<AuthorityDO> listAll();
Map<String, Map<Long, Map<String, AuthorityDO>>> getAllAuthority();
void removeAuthorityInCache(Long clusterId, String topicName);
int deleteAuthorityByTopic(Long clusterId, String topicName);
}
package com.xiaojukeji.kafka.manager.dao.gateway;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO;
import java.util.List;
......@@ -12,4 +13,14 @@ public interface GatewayConfigDao {
List<GatewayConfigDO> getByConfigType(String configType);
GatewayConfigDO getByConfigTypeAndName(String configType, String configName);
List<GatewayConfigDO> list();
int insert(GatewayConfigDO gatewayConfigDO);
int deleteById(Long id);
int updateById(GatewayConfigDO gatewayConfigDO);
GatewayConfigDO getById(Long id);
}
\ No newline at end of file
package com.xiaojukeji.kafka.manager.dao.gateway.impl;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AuthorityDO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.gateway.AuthorityDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
......@@ -86,6 +87,32 @@ public class AuthorityDaoImpl implements AuthorityDao {
return AUTHORITY_MAP;
}
@Override
public void removeAuthorityInCache(Long clusterId, String topicName) {
AUTHORITY_MAP.forEach((appId, map) -> {
map.forEach((id, subMap) -> {
if (id.equals(clusterId)) {
subMap.remove(topicName);
if (subMap.isEmpty()) {
map.remove(id);
}
}
});
if (map.isEmpty()) {
AUTHORITY_MAP.remove(appId);
}
});
}
@Override
public int deleteAuthorityByTopic(Long clusterId, String topicName) {
Map<String, Object> params = new HashMap<>(2);
params.put("clusterId", clusterId);
params.put("topicName", topicName);
return sqlSession.delete("AuthorityDao.deleteByTopic", params);
}
private void updateAuthorityCache() {
Long timestamp = System.currentTimeMillis();
......
......@@ -35,4 +35,29 @@ public class GatewayConfigDaoImpl implements GatewayConfigDao {
params.put("configName", configName);
return sqlSession.selectOne("GatewayConfigDao.getByConfigTypeAndName", params);
}
@Override
public List<GatewayConfigDO> list() {
return sqlSession.selectList("GatewayConfigDao.list");
}
@Override
public int insert(GatewayConfigDO gatewayConfigDO) {
return sqlSession.insert("GatewayConfigDao.insert", gatewayConfigDO);
}
@Override
public int deleteById(Long id) {
return sqlSession.delete("GatewayConfigDao.deleteById", id);
}
@Override
public int updateById(GatewayConfigDO gatewayConfigDO) {
return sqlSession.update("GatewayConfigDao.updateById", gatewayConfigDO);
}
@Override
public GatewayConfigDO getById(Long id) {
return sqlSession.selectOne("GatewayConfigDao.getById", id);
}
}
\ No newline at end of file
......@@ -89,6 +89,11 @@ public class TopicDaoImpl implements TopicDao {
return sqlSession.selectOne("TopicDao.getTopic", params);
}
@Override
public TopicDO removeTopicInCache(Long clusterId, String topicName) {
return TOPIC_MAP.getOrDefault(clusterId, new HashMap<>(0)).remove(topicName);
}
private void updateTopicCache() {
Long timestamp = System.currentTimeMillis();
......
......@@ -45,4 +45,9 @@
<select id="listAfterTime" parameterType="java.util.Date" resultMap="AuthorityMap">
SELECT * FROM authority WHERE modify_time >= #{afterTime}
</select>
    <!-- Hard-delete every authority row of the given cluster + topic
         (used when a topic is removed from the cluster). -->
    <delete id="deleteByTopic" parameterType="java.util.Map">
        DELETE FROM authority WHERE cluster_id = #{clusterId} AND topic_name = #{topicName}
    </delete>
</mapper>
\ No newline at end of file
......@@ -19,4 +19,38 @@
<select id="getByConfigTypeAndName" parameterType="java.util.Map" resultMap="GatewayConfigMap">
SELECT * FROM gateway_config WHERE `type`=#{configType} AND `name`=#{configName}
</select>
    <!-- All config rows, every type. -->
    <select id="list" resultMap="GatewayConfigMap">
        SELECT * FROM gateway_config
    </select>

    <!-- Insert one config row; `version` is pre-computed by the service layer. -->
    <insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO">
        <![CDATA[
        INSERT INTO gateway_config
        (`type`, name, value, version)
        VALUES
        (#{type}, #{name}, #{value}, #{version})
        ]]>
    </insert>

    <!-- Delete one config row by primary key. -->
    <delete id="deleteById" parameterType="java.lang.Long">
        <![CDATA[
        DELETE FROM gateway_config WHERE id=#{id}
        ]]>
    </delete>

    <!-- Full-row update by primary key; the caller supplies the bumped version. -->
    <update id="updateById" parameterType="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO">
        <![CDATA[
        UPDATE gateway_config SET
        `type`=#{type},
        `name`=#{name},
        `value`=#{value},
        `version`=#{version}
        WHERE id=#{id}
        ]]>
    </update>

    <!-- Fetch one config row by primary key. -->
    <select id="getById" parameterType="java.lang.Long" resultMap="GatewayConfigMap">
        SELECT * FROM gateway_config WHERE id=#{id}
    </select>
</mapper>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- Mapper for the legacy kafka_acl table (deprecated gateway ACL storage). -->
<mapper namespace="DeprecatedKafkaAclDao">
    <resultMap id="DeprecatedKafkaAclMap" type="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.DeprecatedKafkaAclDO">
        <id column="id" jdbcType="BIGINT" property="id" />
        <result column="user_name" jdbcType="VARCHAR" property="userName" />
        <result column="cluster_id" jdbcType="BIGINT" property="clusterId" />
        <result column="topic_name" jdbcType="VARCHAR" property="topicName" />
        <result column="access" jdbcType="INTEGER" property="access" />
        <result column="operation" jdbcType="INTEGER" property="operation" />
        <result column="gm_create" jdbcType="TIMESTAMP" property="gmCreate" />
        <result column="gm_modify" jdbcType="TIMESTAMP" property="gmModify" />
    </resultMap>

    <!-- Insert one ACL row; the generated id is written back into the DO. -->
    <insert id="insert"
            parameterType="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.DeprecatedKafkaAclDO"
            useGeneratedKeys="true"
            keyProperty="id">
        INSERT INTO kafka_acl
        (cluster_id, topic_name, user_name, access, operation, gm_create, gm_modify)
        VALUES
        (#{clusterId}, #{topicName}, #{userName}, #{access}, #{operation}, #{gmCreate}, #{gmModify})
    </insert>

    <!-- All ACL rows, oldest first. -->
    <select id="listAll" resultMap="DeprecatedKafkaAclMap">
        SELECT * FROM kafka_acl ORDER BY gm_create ASC
    </select>
</mapper>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
        "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- Mapper for the legacy kafka_user table (deprecated gateway user storage). -->
<mapper namespace="DeprecatedKafkaUserDao">
    <resultMap id="DeprecatedKafkaUserDOMap" type="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.DeprecatedKafkaUserDO">
        <id property="id" column="id"/>
        <result property="name" column="name"/>
        <result property="password" column="password"/>
        <result property="userType" column="user_type"/>
        <result property="operation" column="operation"/>
        <result property="gmtCreate" column="gm_create"/>
        <result property="gmtModify" column="gm_modify"/>
    </resultMap>

    <!-- Insert one user row; the generated id is written back into the DO. -->
    <insert id="insert"
            parameterType="com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.DeprecatedKafkaUserDO"
            useGeneratedKeys="true"
            keyProperty="id">
        INSERT INTO kafka_user
        (`name`, password, user_type, operation, gm_create, gm_modify)
        VALUES
        (#{name}, #{password}, #{userType}, #{operation}, #{gmtCreate}, #{gmtModify})
    </insert>

    <!-- All user rows. -->
    <select id="listAll" resultMap="DeprecatedKafkaUserDOMap">
        SELECT * FROM kafka_user
    </select>
</mapper>
......@@ -7,30 +7,80 @@ import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
import com.xiaojukeji.kafka.manager.common.entity.pojo.AccountDO;
import java.util.List;
import java.util.Map;
/**
* @author huangyiminghappy@163.com
* @date 2019-04-26
*/
public interface AccountService {
    /**
     * Create a new account.
     * @param accountDO account info
     * @return operation status
     */
    ResultStatus createAccount(AccountDO accountDO);

    /**
     * Query account info by user name.
     * @param username user name
     * @return matching account
     */
    AccountDO getAccountDO(String username);

    /**
     * Delete an account by user name.
     * @param username user name
     * @return operation status
     */
    ResultStatus deleteByName(String username);

    /**
     * Update an existing account.
     * @param accountDO account info
     * @return operation status
     */
    ResultStatus updateAccount(AccountDO accountDO);

    /**
     * List all accounts.
     * @return all accounts
     */
    List<AccountDO> list();

    /**
     * Search enterprise staff by user-name prefix.
     * @param prefix user-name prefix
     * @return matching staff entries
     */
    List<EnterpriseStaff> searchAccountByPrefix(String prefix);

    /**
     * Get the user's role from the cache.
     * @param username user name
     * @return the user's role
     */
    AccountRoleEnum getAccountRoleFromCache(String username);

    /**
     * Get the user's account info from the cache.
     * @param userName user name
     * @return account info
     */
    Account getAccountFromCache(String userName);

    /**
     * Whether the user is an approver of admin orders.
     * @param username user name
     * @return true if the user handles admin orders
     */
    boolean isAdminOrderHandler(String username);

    /**
     * Whether the user holds the OP (ops) or RD (developer) role.
     * @param username user name
     * @return true if the user is OP or RD
     */
    boolean isOpOrRd(String username);

    /**
     * All admin-order approvers from the cache.
     * @return approver accounts
     */
    List<Account> getAdminOrderHandlerFromCache();
}
......@@ -226,6 +226,18 @@ public class AccountServiceImpl implements AccountService {
return false;
}
@Override
public boolean isOpOrRd(String username) {
if (ValidateUtils.isNull(ACCOUNT_ROLE_CACHE)) {
flush();
}
AccountRoleEnum accountRoleEnum = ACCOUNT_ROLE_CACHE.getOrDefault(username, AccountRoleEnum.NORMAL);
if (accountRoleEnum.equals(AccountRoleEnum.OP) || accountRoleEnum.equals(AccountRoleEnum.RD)) {
return true;
}
return false;
}
@Override
public List<Account> getAdminOrderHandlerFromCache() {
if (ValidateUtils.isEmptyList(ADMIN_ORDER_HANDLER_CACHE)) {
......
......@@ -25,6 +25,10 @@ public enum OrderTypeEnum {
APPLY_EXPAND_CLUSTER (05, "集群扩容", "modifyClusterOrder"),
APPLY_REDUCE_CLUSTER (15, "集群缩容", "modifyClusterOrder"),
ADD_GATEWAY_CONFIG (06, "增加GateWay配置", "addGatewayConfigOrder"),
DELETE_GATEWAY_CONFIG (16, "删除GateWay配置", "deleteGatewayConfigOrder"),
MODIFY_GATEWAY_CONFIG (26, "修改GateWay配置", "modifyGatewayConfigOrder"),
;
private Integer code;
......
package com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModelProperty;
/**
* 增加gateway配置
* @author zengqiao
* @date 2021/01/12
*/
public class OrderExtensionAddGatewayConfigDTO {
    @ApiModelProperty(value = "类型")
    private String type;

    @ApiModelProperty(value = "名称")
    private String name;

    @ApiModelProperty(value = "值")
    private String value;

    public String getType() { return type; }

    public void setType(String type) { this.type = type; }

    public String getName() { return name; }

    public void setName(String name) { this.name = name; }

    public String getValue() { return value; }

    public void setValue(String value) { this.value = value; }

    @Override
    public String toString() {
        return "OrderExtensionAddGatewayConfigDTO{" +
                "type='" + type + '\'' +
                ", name='" + name + '\'' +
                ", value='" + value + '\'' +
                '}';
    }

    /** The order is processable only when type, name and value are all non-blank. */
    public boolean legal() {
        return !ValidateUtils.isBlank(type)
                && !ValidateUtils.isBlank(name)
                && !ValidateUtils.isBlank(value);
    }
}
package com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModelProperty;
/**
* 删除gateway配置
* @author zengqiao
* @date 2021/01/12
*/
public class OrderExtensionDeleteGatewayConfigDTO {
    @ApiModelProperty(value = "配置ID")
    private Long id;

    public Long getId() { return id; }

    public void setId(Long id) { this.id = id; }

    @Override
    public String toString() {
        return "OrderExtensionDeleteGatewayConfigDTO{" +
                "id=" + id +
                '}';
    }

    /** The order is processable only when the target config id is present. */
    public boolean legal() {
        return !ValidateUtils.isNull(id);
    }
}
package com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import io.swagger.annotations.ApiModelProperty;
/**
 * Order extension payload for a "modify gateway config" apply order.
 *
 * @author zengqiao
 * @date 2021/01/12
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class OrderExtensionModifyGatewayConfigDTO {
    /** ID of the gateway config row being modified. */
    @ApiModelProperty(value = "配置ID")
    private Long id;

    /** Config type (must match a known gateway config type). */
    @ApiModelProperty(value = "类型")
    private String type;

    /** Config name. */
    @ApiModelProperty(value = "名称")
    private String name;

    /** New config value. */
    @ApiModelProperty(value = "值")
    private String value;

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String value) {
        this.value = value;
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("OrderExtensionModifyGatewayConfigDTO{");
        sb.append("id=").append(id);
        sb.append(", type='").append(type).append('\'');
        sb.append(", name='").append(name).append('\'');
        sb.append(", value='").append(value).append('\'');
        sb.append('}');
        return sb.toString();
    }

    /**
     * @return true when the ID is present and type/name/value are all non-blank.
     */
    public boolean legal() {
        return !ValidateUtils.isNull(id)
                && !ValidateUtils.isBlank(name)
                && !ValidateUtils.isBlank(type)
                && !ValidateUtils.isBlank(value);
    }
}
package com.xiaojukeji.kafka.manager.bpm.common.entry.detail;
/**
 * Order-detail view of a single gateway config entry
 * (id/type/name/value plus the optimistic-lock version).
 */
public class OrderDetailGatewayConfigData extends AbstractOrderDetailData {
    private Long id;

    private String type;

    private String name;

    private String value;

    /** DB row version — presumably used for optimistic locking; TODO confirm against GatewayConfigDO. */
    private Long version;

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String value) {
        this.value = value;
    }

    public Long getVersion() {
        return version;
    }

    public void setVersion(Long version) {
        this.version = version;
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("OrderDetailGatewayConfigData{");
        sb.append("id=").append(id);
        sb.append(", type='").append(type).append('\'');
        sb.append(", name='").append(name).append('\'');
        sb.append(", value='").append(value).append('\'');
        sb.append(", version=").append(version);
        sb.append('}');
        return sb.toString();
    }
}
package com.xiaojukeji.kafka.manager.bpm.common.entry.detail;
/**
 * Order-detail view of a gateway config modification:
 * holds the config as it currently is (old) and as requested (new).
 *
 * @author zengqiao
 * @date 2021/01/13
 */
public class OrderDetailGatewayConfigModifyData extends AbstractOrderDetailData {
    /** Gateway config currently stored in the DB. */
    private OrderDetailGatewayConfigData oldGatewayConfig;

    /** Gateway config as requested by the order. */
    private OrderDetailGatewayConfigData newGatewayConfig;

    public OrderDetailGatewayConfigData getOldGatewayConfig() {
        return oldGatewayConfig;
    }

    public void setOldGatewayConfig(OrderDetailGatewayConfigData oldGatewayConfig) {
        this.oldGatewayConfig = oldGatewayConfig;
    }

    public OrderDetailGatewayConfigData getNewGatewayConfig() {
        return newGatewayConfig;
    }

    public void setNewGatewayConfig(OrderDetailGatewayConfigData newGatewayConfig) {
        this.newGatewayConfig = newGatewayConfig;
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("OrderDetailGatewayConfigModifyData{");
        sb.append("oldGatewayConfig=").append(oldGatewayConfig);
        sb.append(", newGatewayConfig=").append(newGatewayConfig);
        sb.append('}');
        return sb.toString();
    }
}
package com.xiaojukeji.kafka.manager.bpm.order;
import com.xiaojukeji.kafka.manager.account.AccountService;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
import org.springframework.beans.factory.annotation.Autowired;
import java.util.List;
/**
 * Base class for gateway-config orders: handling and approving these orders
 * is restricted to admin order handlers.
 */
public abstract class AbstractGatewayConfigOrder extends AbstractOrder {
    @Autowired
    private AccountService accountService;

    @Override
    public ResultStatus checkAuthority(OrderDO orderDO, String username) {
        // Only admin order handlers may operate on gateway config orders.
        return accountService.isAdminOrderHandler(username)
                ? ResultStatus.SUCCESS
                : ResultStatus.USER_WITHOUT_AUTHORITY;
    }

    @Override
    public List<Account> getApproverList(String extensions) {
        // Approver list is always the cached set of admin order handlers,
        // regardless of the order's extension fields.
        return accountService.getAdminOrderHandlerFromCache();
    }
}
package com.xiaojukeji.kafka.manager.bpm.order.impl.gateway;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.AbstractOrderDetailData;
import com.xiaojukeji.kafka.manager.bpm.common.handle.OrderHandleBaseDTO;
import com.xiaojukeji.kafka.manager.bpm.order.AbstractGatewayConfigOrder;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
import org.springframework.stereotype.Component;
/**
 * Order handler for adding a gateway config entry.
 *
 * NOTE(review): all three hooks are currently no-op stubs — the extension
 * fields are not validated, no detail data is rendered, and handling the
 * order performs no DB change. Presumably the actual insert happens
 * elsewhere or is still unimplemented — TODO confirm.
 *
 * @author zengqiao
 * @date 2021/01/12
 */
@Component("addGatewayConfigOrder")
public class AddGatewayConfigOrder extends AbstractGatewayConfigOrder {
    // Stub: accepts any extension payload and generates no title.
    @Override
    public Result<String> checkExtensionFieldsAndGenerateTitle(String extensions) {
        return Result.buildSuc();
    }

    // Stub: no detail view is produced for this order type yet.
    @Override
    public AbstractOrderDetailData getOrderExtensionDetailData(String extensions) {
        return null;
    }

    // Stub: reports success without applying any change.
    @Override
    public ResultStatus handleOrderDetail(OrderDO orderDO, OrderHandleBaseDTO baseDTO, String userName) {
        return ResultStatus.SUCCESS;
    }
}
package com.xiaojukeji.kafka.manager.bpm.order.impl.gateway;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.AbstractOrderDetailData;
import com.xiaojukeji.kafka.manager.bpm.common.handle.OrderHandleBaseDTO;
import com.xiaojukeji.kafka.manager.bpm.order.AbstractGatewayConfigOrder;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
import org.springframework.stereotype.Component;
/**
 * Order handler for deleting a gateway config entry.
 *
 * NOTE(review): all three hooks are currently no-op stubs — the extension
 * fields are not validated, no detail data is rendered, and handling the
 * order performs no DB change. Presumably the actual delete happens
 * elsewhere or is still unimplemented — TODO confirm.
 *
 * @author zengqiao
 * @date 2021/01/12
 */
@Component("deleteGatewayConfigOrder")
public class DeleteGatewayConfigOrder extends AbstractGatewayConfigOrder {
    // Stub: accepts any extension payload and generates no title.
    @Override
    public Result<String> checkExtensionFieldsAndGenerateTitle(String extensions) {
        return Result.buildSuc();
    }

    // Stub: no detail view is produced for this order type yet.
    @Override
    public AbstractOrderDetailData getOrderExtensionDetailData(String extensions) {
        return null;
    }

    // Stub: reports success without applying any change.
    @Override
    public ResultStatus handleOrderDetail(OrderDO orderDO, OrderHandleBaseDTO baseDTO, String userName) {
        return ResultStatus.SUCCESS;
    }
}
package com.xiaojukeji.kafka.manager.bpm.order.impl.gateway;
import com.alibaba.fastjson.JSONObject;
import com.xiaojukeji.kafka.manager.bpm.common.OrderTypeEnum;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionModifyGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.AbstractOrderDetailData;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.OrderDetailGatewayConfigData;
import com.xiaojukeji.kafka.manager.bpm.common.entry.detail.OrderDetailGatewayConfigModifyData;
import com.xiaojukeji.kafka.manager.bpm.common.handle.OrderHandleBaseDTO;
import com.xiaojukeji.kafka.manager.bpm.order.AbstractGatewayConfigOrder;
import com.xiaojukeji.kafka.manager.common.bizenum.gateway.GatewayConfigKeyEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.service.service.OperateRecordService;
import com.xiaojukeji.kafka.manager.service.service.gateway.GatewayConfigService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
/**
 * Order handler for modifying an existing gateway config entry.
 *
 * @author zengqiao
 * @date 2021/01/12
 */
@Component("modifyGatewayConfigOrder")
public class ModifyGatewayConfigOrder extends AbstractGatewayConfigOrder {
    private static final Logger LOGGER = LoggerFactory.getLogger(ModifyGatewayConfigOrder.class);

    @Autowired
    private GatewayConfigService gatewayConfigService;

    @Autowired
    private OperateRecordService operateRecordService;

    /**
     * Validate the order's extension JSON and produce the order title.
     *
     * Checks that: the JSON parses, all required fields are present,
     * the referenced config row exists, and the config type is known.
     *
     * @param extensions raw JSON extension payload of the order
     * @return the order title on success, PARAM_ILLEGAL otherwise
     */
    @Override
    public Result<String> checkExtensionFieldsAndGenerateTitle(String extensions) {
        OrderExtensionModifyGatewayConfigDTO orderExtensionDTO = null;
        try {
            orderExtensionDTO = JSONObject.parseObject(extensions, OrderExtensionModifyGatewayConfigDTO.class);
        } catch (Exception e) {
            LOGGER.error("class=ModifyGatewayConfigOrder||method=checkExtensionFieldsAndGenerateTitle||params={}||errMsg={}", extensions, e.getMessage());
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        if (!orderExtensionDTO.legal()) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }

        GatewayConfigDO gatewayConfigDO = gatewayConfigService.getById(orderExtensionDTO.getId());
        if (ValidateUtils.isNull(gatewayConfigDO)) {
            // The referenced gateway config does not exist.
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }

        GatewayConfigKeyEnum configKeyEnum = GatewayConfigKeyEnum.getByConfigType(orderExtensionDTO.getType());
        if (ValidateUtils.isNull(configKeyEnum)) {
            // Unknown gateway config type.
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        return new Result<>(OrderTypeEnum.MODIFY_GATEWAY_CONFIG.getMessage());
    }

    /**
     * Build the order-detail view: the requested (new) config alongside the
     * current (old) config loaded from the DB.
     *
     * @param extensions raw JSON extension payload of the order
     * @return detail data, or null when the payload cannot be parsed
     */
    @Override
    public AbstractOrderDetailData getOrderExtensionDetailData(String extensions) {
        OrderExtensionModifyGatewayConfigDTO orderExtensionDTO = null;
        try {
            orderExtensionDTO = JSONObject.parseObject(extensions, OrderExtensionModifyGatewayConfigDTO.class);
        } catch (Exception e) {
            LOGGER.error("class=ModifyGatewayConfigOrder||method=getOrderExtensionDetailData||params={}||errMsg={}", extensions, e.getMessage());
            return null;
        }

        OrderDetailGatewayConfigModifyData orderDetailDTO = new OrderDetailGatewayConfigModifyData();

        // New config: what the order asks to change the entry to.
        OrderDetailGatewayConfigData newGatewayConfig = new OrderDetailGatewayConfigData();
        newGatewayConfig.setId(orderExtensionDTO.getId());
        newGatewayConfig.setType(orderExtensionDTO.getType());
        newGatewayConfig.setName(orderExtensionDTO.getName());
        newGatewayConfig.setValue(orderExtensionDTO.getValue());
        orderDetailDTO.setNewGatewayConfig(newGatewayConfig);

        GatewayConfigDO gatewayConfigDO = gatewayConfigService.getById(orderExtensionDTO.getId());
        if (ValidateUtils.isNull(gatewayConfigDO)) {
            // Old config no longer exists; return the new config only.
            return orderDetailDTO;
        }

        // Old config: current DB state.
        // BUGFIX: these values were previously set on newGatewayConfig,
        // leaving oldGatewayConfig empty and clobbering the new config.
        OrderDetailGatewayConfigData oldGatewayConfig = new OrderDetailGatewayConfigData();
        oldGatewayConfig.setId(gatewayConfigDO.getId());
        oldGatewayConfig.setType(gatewayConfigDO.getType());
        oldGatewayConfig.setName(gatewayConfigDO.getName());
        oldGatewayConfig.setValue(gatewayConfigDO.getValue());
        oldGatewayConfig.setVersion(gatewayConfigDO.getVersion());
        orderDetailDTO.setOldGatewayConfig(oldGatewayConfig);
        return orderDetailDTO;
    }

    /**
     * Handle (approve) the order.
     *
     * NOTE(review): this currently reports success without writing the
     * modification to the DB (operateRecordService is injected but unused) —
     * presumably the update happens elsewhere; TODO confirm.
     */
    @Override
    public ResultStatus handleOrderDetail(OrderDO orderDO, OrderHandleBaseDTO baseDTO, String username) {
        return ResultStatus.SUCCESS;
    }
}
......@@ -10,6 +10,7 @@ import com.xiaojukeji.kafka.manager.notify.common.OrderNotifyTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.ApplicationListener;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
/**
......@@ -24,6 +25,7 @@ public class OrderApplyNotifyService implements ApplicationListener<OrderApplyEv
@Value("${notify.order.detail-url}")
private String orderDetailUrl;
@Async
@Override
public void onApplicationEvent(OrderApplyEvent orderApplyEvent) {
OrderDO orderDO = orderApplyEvent.getOrderDO();
......
......@@ -7,6 +7,7 @@ import com.xiaojukeji.kafka.manager.notify.notifyer.AbstractNotifyService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.ApplicationListener;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
/**
......@@ -21,6 +22,7 @@ public class OrderPassedNotifyService implements ApplicationListener<OrderPassed
@Value("${notify.order.detail-url}")
private String orderDetailUrl;
@Async
@Override
public void onApplicationEvent(OrderPassedEvent orderPassEvent) {
OrderDO orderDO = orderPassEvent.getOrderDO();
......
......@@ -7,6 +7,7 @@ import com.xiaojukeji.kafka.manager.notify.notifyer.AbstractNotifyService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.ApplicationListener;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
/**
......@@ -21,6 +22,7 @@ public class OrderRefusedNotifyService implements ApplicationListener<OrderRefus
@Value("${notify.order.detail-url}")
private String orderDetailUrl;
@Async
@Override
public void onApplicationEvent(OrderRefusedEvent orderRefuseEvent) {
OrderDO orderDO = orderRefuseEvent.getOrderDO();
......
......@@ -4,7 +4,7 @@ import com.xiaojukeji.kafka.manager.common.bizenum.*;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
......@@ -144,16 +144,11 @@ public class ThirdPartServiceImpl implements ThirdPartService {
if (ResultStatus.SUCCESS.getCode() != result.getCode()) {
return null;
}
ConsumerGroupDTO consumerGroupDTO = new ConsumerGroupDTO(
clusterDO.getId(),
dto.getConsumerGroup(),
new ArrayList<>(),
OffsetLocationEnum.getOffsetStoreLocation(dto.getLocation())
);
ConsumerGroup consumerGroup = new ConsumerGroup(clusterDO.getId(), dto.getConsumerGroup(), OffsetLocationEnum.getOffsetStoreLocation(dto.getLocation()));
return consumerService.resetConsumerOffset(
clusterDO,
dto.getTopicName(),
consumerGroupDTO,
consumerGroup,
offsetDTOList
);
}
......
package com.xiaojukeji.kafka.manager.task.config;
/**
 * Per-cluster configuration for the "sync topics to DB" scheduled task.
 */
public class SyncTopic2DBConfig {
    /** App ID assigned as owner to every synced topic. */
    private String defaultAppId;

    /** ID of the cluster whose topics should be synced. */
    private Long clusterId;

    /** Whether to also create authority records; defaults to false. */
    private boolean addAuthority;

    public String getDefaultAppId() {
        return defaultAppId;
    }

    public void setDefaultAppId(String defaultAppId) {
        this.defaultAppId = defaultAppId;
    }

    public Long getClusterId() {
        return clusterId;
    }

    public void setClusterId(Long clusterId) {
        this.clusterId = clusterId;
    }

    public boolean isAddAuthority() {
        return addAuthority;
    }

    public void setAddAuthority(boolean addAuthority) {
        this.addAuthority = addAuthority;
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("SyncTopic2DBConfig{");
        sb.append("defaultAppId='").append(defaultAppId).append('\'');
        sb.append(", clusterId=").append(clusterId);
        sb.append(", addAuthority=").append(addAuthority);
        sb.append('}');
        return sb.toString();
    }
}
......@@ -2,7 +2,7 @@ package com.xiaojukeji.kafka.manager.task.dispatch.metrics.collect;
import com.xiaojukeji.kafka.manager.common.bizenum.OffsetPosEnum;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.metrics.ConsumerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.events.ConsumerMetricsCollectedEvent;
......@@ -105,7 +105,7 @@ public class CollectAndPublishCGData extends AbstractScheduledTask<ClusterDO> {
private List<ConsumerMetrics> getTopicConsumerMetrics(ClusterDO clusterDO,
String topicName,
long startTimeUnitMs) {
List<ConsumerGroupDTO> consumerGroupDTOList = consumerService.getConsumerGroupList(clusterDO.getId(), topicName);
List<ConsumerGroup> consumerGroupDTOList = consumerService.getConsumerGroupList(clusterDO.getId(), topicName);
if (ValidateUtils.isEmptyList(consumerGroupDTOList)) {
// 重试
consumerGroupDTOList = consumerService.getConsumerGroupList(clusterDO.getId(), topicName);
......@@ -131,7 +131,7 @@ public class CollectAndPublishCGData extends AbstractScheduledTask<ClusterDO> {
partitionOffsetMap.put(entry.getKey().partition(), entry.getValue());
}
for (ConsumerGroupDTO consumerGroupDTO: consumerGroupDTOList) {
for (ConsumerGroup consumerGroupDTO: consumerGroupDTOList) {
try {
ConsumerMetrics consumerMetrics =
getTopicConsumerMetrics(clusterDO, topicName, consumerGroupDTO, partitionOffsetMap, startTimeUnitMs);
......@@ -150,20 +150,20 @@ public class CollectAndPublishCGData extends AbstractScheduledTask<ClusterDO> {
private ConsumerMetrics getTopicConsumerMetrics(ClusterDO clusterDO,
String topicName,
ConsumerGroupDTO consumerGroupDTO,
ConsumerGroup consumerGroup,
Map<Integer, Long> partitionOffsetMap,
long startTimeUnitMs) {
Map<Integer, Long> consumerOffsetMap =
consumerService.getConsumerOffset(clusterDO, topicName, consumerGroupDTO);
consumerService.getConsumerOffset(clusterDO, topicName, consumerGroup);
if (ValidateUtils.isEmptyMap(consumerOffsetMap)) {
return null;
}
ConsumerMetrics metrics = new ConsumerMetrics();
metrics.setClusterId(clusterDO.getId());
metrics.setTopicName(topicName);
metrics.setConsumerGroup(consumerGroupDTO.getConsumerGroup());
metrics.setLocation(consumerGroupDTO.getOffsetStoreLocation().location);
metrics.setConsumerGroup(consumerGroup.getConsumerGroup());
metrics.setLocation(consumerGroup.getOffsetStoreLocation().location);
metrics.setPartitionOffsetMap(partitionOffsetMap);
metrics.setConsumeOffsetMap(consumerOffsetMap);
metrics.setTimestampUnitMs(startTimeUnitMs);
......
......@@ -15,6 +15,7 @@ import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.*;
import com.xiaojukeji.kafka.manager.service.cache.LogicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.*;
import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask;
import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
......@@ -109,6 +110,11 @@ public class AutoHandleTopicOrder extends AbstractScheduledTask<EmptyEntry> {
return false;
}
if (PhysicalClusterMetadataManager.isTopicExist(physicalClusterId, dto.getTopicName())) {
rejectForRepeatedTopicName(orderDO);
return false;
}
if (ValidateUtils.isNull(dto.isPhysicalClusterId()) || !dto.isPhysicalClusterId()) {
return handleApplyTopicOrderByLogicalClusterId(clusterDO, orderDO, dto, createConfig);
}
......@@ -117,6 +123,13 @@ public class AutoHandleTopicOrder extends AbstractScheduledTask<EmptyEntry> {
return handleApplyTopicOrderByPhysicalClusterId(clusterDO, orderDO, dto, createConfig);
}
private void rejectForRepeatedTopicName(OrderDO orderDO) {
orderDO.setApplicant(Constant.AUTO_HANDLE_USER_NAME);
orderDO.setStatus(OrderStatusEnum.REFUSED.getCode());
orderDO.setOpinion("驳回:该 Topic 已被别人申请并生效");
orderService.updateOrderById(orderDO);
}
/**
* 逻辑集群申请单
*/
......
package com.xiaojukeji.kafka.manager.task.dispatch.op;
import com.xiaojukeji.kafka.manager.common.bizenum.TopicAuthorityEnum;
import com.xiaojukeji.kafka.manager.common.constant.KafkaConstant;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.TopicDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AppDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AuthorityDO;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.service.service.ConfigService;
import com.xiaojukeji.kafka.manager.service.service.TopicManagerService;
import com.xiaojukeji.kafka.manager.service.service.gateway.AppService;
import com.xiaojukeji.kafka.manager.service.service.gateway.AuthorityService;
import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask;
import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
import com.xiaojukeji.kafka.manager.task.component.EmptyEntry;
import com.xiaojukeji.kafka.manager.task.config.SyncTopic2DBConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Scheduled task that periodically persists topics which exist in the cluster
 * metadata but not yet in the DB. Only the ownership relation is recorded;
 * authority records are added only when configured.
 *
 * @author zengqiao
 * @date 19/12/29
 */
@Component
@CustomScheduled(name = "syncTopic2DB", cron = "0 0/2 * * * ?", threadNum = 1)
@ConditionalOnProperty(prefix = "task.op", name = "sync-topic-enabled", havingValue = "true", matchIfMissing = false)
public class SyncTopic2DB extends AbstractScheduledTask<EmptyEntry> {
    private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);

    /** Config-table key holding the list of SyncTopic2DBConfig entries. */
    private static final String SYNC_TOPIC_2_DB_CONFIG_KEY = "SYNC_TOPIC_2_DB_CONFIG_KEY";

    /** Topics created within this window are skipped (see syncTopic2DB). */
    private static final long NEW_TOPIC_IGNORE_TIME_MS = 10 * 60 * 1000L;

    @Autowired
    private AppService appService;

    @Autowired
    private ConfigService configService;

    @Autowired
    private ClusterService clusterService;

    @Autowired
    private AuthorityService authorityService;

    @Autowired
    private TopicManagerService topicManagerService;

    @Override
    public List<EmptyEntry> listAllTasks() {
        // Single pseudo-task; the ID is only used to identify this run.
        EmptyEntry emptyEntry = new EmptyEntry();
        emptyEntry.setId(System.currentTimeMillis() / 1000);
        return Arrays.asList(emptyEntry);
    }

    @Override
    public void processTask(EmptyEntry emptyEntry) {
        Map<Long, SyncTopic2DBConfig> clusterIdConfigMap = getConfig();
        if (ValidateUtils.isEmptyMap(clusterIdConfigMap)) {
            LOGGER.warn("class=SyncTopic2DB||method=processTask||msg=without config or config illegal");
            return;
        }

        LOGGER.info("class=SyncTopic2DB||method=processTask||data={}||msg=start sync", JsonUtils.toJSONString(clusterIdConfigMap));

        List<ClusterDO> clusterDOList = clusterService.list();
        if (ValidateUtils.isEmptyList(clusterDOList)) {
            return;
        }

        // Sync each cluster independently so one failure doesn't abort the rest.
        for (ClusterDO clusterDO : clusterDOList) {
            if (!clusterIdConfigMap.containsKey(clusterDO.getId())) {
                continue;
            }

            try {
                syncTopic2DB(clusterDO.getId(), clusterIdConfigMap.get(clusterDO.getId()));
            } catch (Exception e) {
                LOGGER.error("class=SyncTopic2DB||method=processTask||clusterId={}||errMsg={}||msg=sync failed", clusterDO.getId(), e.getMessage());
            }
        }
    }

    /**
     * Persist every topic of the cluster that is not yet in the DB,
     * skipping Kafka internal topics and recently created ones.
     */
    private void syncTopic2DB(Long clusterId, SyncTopic2DBConfig syncTopic2DBConfig) {
        List<TopicDO> doList = topicManagerService.getByClusterId(clusterId);
        if (ValidateUtils.isNull(doList)) {
            doList = new ArrayList<>();
        }

        // Topic names already persisted in the DB.
        Set<String> existedTopicNameSet = doList.stream().map(TopicDO::getTopicName).collect(Collectors.toSet());

        for (String topicName : PhysicalClusterMetadataManager.getTopicNameList(clusterId)) {
            if (existedTopicNameSet.contains(topicName)
                    || KafkaConstant.COORDINATOR_TOPIC_NAME.equals(topicName)
                    || KafkaConstant.TRANSACTION_TOPIC_NAME.equals(topicName)) {
                // Already stored, or a Kafka internal topic.
                continue;
            }

            TopicMetadata topicMetadata = PhysicalClusterMetadataManager.getTopicMetadata(clusterId, topicName);
            if (ValidateUtils.isNull(topicMetadata)) {
                continue;
            }

            // Skip topics created within the last 10 minutes so that topics just
            // created via the KM platform (which already have an owner) are not
            // wrongly claimed by the default app.
            if (System.currentTimeMillis() - topicMetadata.getCreateTime() < NEW_TOPIC_IGNORE_TIME_MS) {
                continue;
            }

            TopicDO topicDO = new TopicDO();
            topicDO.setAppId(syncTopic2DBConfig.getDefaultAppId());
            topicDO.setClusterId(clusterId);
            topicDO.setTopicName(topicName);
            topicDO.setDescription("定期同步至DB中的无主Topic");
            topicDO.setPeakBytesIn(TopicCreationConstant.DEFAULT_QUOTA);
            topicManagerService.addTopic(topicDO);

            if (!syncTopic2DBConfig.isAddAuthority()) {
                // No authority info requested for this cluster.
                // BUGFIX: this was `return`, which aborted the loop after the
                // first synced topic and left the remaining topics un-synced.
                continue;
            }

            // TODO addTopic + addAuthority are not transactional; a failure in
            // between leaves inconsistent data and still needs fixing.
            AuthorityDO authorityDO = new AuthorityDO();
            authorityDO.setAppId(syncTopic2DBConfig.getDefaultAppId());
            authorityDO.setClusterId(clusterId);
            authorityDO.setTopicName(topicName);
            authorityDO.setAccess(TopicAuthorityEnum.READ_WRITE.getCode());
            authorityService.addAuthority(authorityDO);
        }
    }

    /**
     * Load and validate the task config, keyed by cluster ID. Entries with a
     * missing/invalid cluster ID, blank app ID, or unknown app are dropped.
     */
    private Map<Long, SyncTopic2DBConfig> getConfig() {
        List<SyncTopic2DBConfig> configList = configService.getArrayByKey(SYNC_TOPIC_2_DB_CONFIG_KEY, SyncTopic2DBConfig.class);
        if (ValidateUtils.isEmptyList(configList)) {
            return Collections.emptyMap();
        }

        Map<Long, SyncTopic2DBConfig> clusterIdConfigMap = new HashMap<>();
        for (SyncTopic2DBConfig syncTopic2DBConfig : configList) {
            if (ValidateUtils.isNullOrLessThanZero(syncTopic2DBConfig.getClusterId())
                    || ValidateUtils.isBlank(syncTopic2DBConfig.getDefaultAppId())) {
                continue;
            }

            AppDO appDO = appService.getByAppId(syncTopic2DBConfig.getDefaultAppId());
            if (ValidateUtils.isNull(appDO)) {
                // Default app must exist, otherwise synced topics would be orphaned.
                continue;
            }

            clusterIdConfigMap.put(syncTopic2DBConfig.getClusterId(), syncTopic2DBConfig);
        }
        return clusterIdConfigMap;
    }
}
......@@ -50,8 +50,7 @@ public class FlushBKConsumerGroupMetadata {
private void flush(Long clusterId) {
// 获取消费组列表
Set<String> consumerGroupSet = new HashSet<>();
Map<String, List<String>> consumerGroupAppIdMap = new HashMap<>();
collectAndSaveConsumerGroup(clusterId, consumerGroupSet, consumerGroupAppIdMap);
collectAndSaveConsumerGroup(clusterId, consumerGroupSet);
// 获取消费组summary信息
Map<String, Set<String>> topicNameConsumerGroupMap = new HashMap<>();
......@@ -67,15 +66,12 @@ public class FlushBKConsumerGroupMetadata {
new ConsumerMetadata(
consumerGroupSet,
topicNameConsumerGroupMap,
consumerGroupSummary,
consumerGroupAppIdMap
consumerGroupSummary
)
);
}
private void collectAndSaveConsumerGroup(Long clusterId,
Set<String> consumerGroupSet,
Map<String, List<String>> consumerGroupAppIdMap) {
private void collectAndSaveConsumerGroup(Long clusterId, Set<String> consumerGroupSet) {
try {
AdminClient adminClient = KafkaClientPool.getAdminClient(clusterId);
......@@ -83,20 +79,14 @@ public class FlushBKConsumerGroupMetadata {
for (scala.collection.immutable.List<kafka.coordinator.GroupOverview> brokerGroup : JavaConversions.asJavaMap(brokerGroupMap).values()) {
List<kafka.coordinator.GroupOverview> lists = JavaConversions.asJavaList(brokerGroup);
for (kafka.coordinator.GroupOverview groupOverview : lists) {
String consumerGroup = groupOverview.groupId();
List<String> appIdList = new ArrayList<>();
if (consumerGroup != null && consumerGroup.contains("#")) {
String[] splitArray = consumerGroup.split("#");
consumerGroup = splitArray[splitArray.length - 1];
appIdList = Arrays.asList(splitArray).subList(0, splitArray.length - 1);
}
consumerGroupAppIdMap.put(consumerGroup, appIdList);
consumerGroupSet.add(consumerGroup);
}
}
return ;
} catch (Exception e) {
LOGGER.error("collect consumerGroup failed, clusterId:{}.", clusterId, e);
}
......
......@@ -55,7 +55,7 @@ public class FlushZKConsumerGroupMetadata {
collectTopicAndConsumerGroupMap(clusterId, new ArrayList<>(consumerGroupSet));
ConsumerMetadataCache.putConsumerMetadataInZK(
clusterId,
new ConsumerMetadata(consumerGroupSet, topicNameConsumerGroupMap, new HashMap<>(0), new HashMap<>(0))
new ConsumerMetadata(consumerGroupSet, topicNameConsumerGroupMap, new HashMap<>(0))
);
}
......
<assembly
xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
<id>bin</id>
<formats>
<format>dir</format>
<format>tar.gz</format>
</formats>
<fileSets>
<fileSet>
<includes>
<include>bin/*</include>
</includes>
<fileMode>0755</fileMode>
</fileSet>
<fileSet>
<directory>../docs/install_guide</directory>
<outputDirectory>install</outputDirectory>
<includes>
<include>*</include>
</includes>
</fileSet>
<fileSet>
<directory>src/main/resources/</directory>
<outputDirectory>conf</outputDirectory>
<includes>
<include>application.yml</include>
<include>logback-spring.xml</include>
</includes>
</fileSet>
<fileSet>
<directory>${project.build.directory}</directory>
<outputDirectory>libs</outputDirectory>
<includes>
<include>*.jar</include>
</includes>
</fileSet>
</fileSets>
</assembly>
\ No newline at end of file
......@@ -122,26 +122,6 @@
</execution>
</executions>
</plugin>
<!--<plugin>-->
<!--<groupId>org.apache.maven.plugins</groupId>-->
<!--<artifactId>maven-assembly-plugin</artifactId>-->
<!--<executions>-->
<!--<execution>-->
<!--<id>make-assembly</id>-->
<!--<phase>package</phase>-->
<!--<goals>-->
<!--<goal>single</goal>-->
<!--</goals>-->
<!--<configuration>-->
<!--<finalName>kafka-manager-${project.version}</finalName>-->
<!--<descriptors>-->
<!--<descriptor>assembly.xml</descriptor>-->
<!--</descriptors>-->
<!--<tarLongFileMode>posix</tarLongFileMode>-->
<!--</configuration>-->
<!--</execution>-->
<!--</executions>-->
<!--</plugin>-->
</plugins>
</build>
</project>
......@@ -47,7 +47,7 @@ public class GatewayHeartbeatController {
List<TopicConnectionDO> doList = null;
try {
doList = JsonUtils.parseTopicConnections(clusterId, jsonObject);
doList = JsonUtils.parseTopicConnections(clusterId, jsonObject, System.currentTimeMillis());
} catch (Exception e) {
LOGGER.error("class=GatewayHeartbeatController||method=receiveTopicConnections||clusterId={}||brokerId={}||msg=parse data failed||exception={}", clusterId, brokerId, e.getMessage());
return Result.buildFailure("fail");
......
......@@ -76,7 +76,7 @@ public class NormalAppController {
@RequestMapping(value = "apps/{appId}/basic-info", method = RequestMethod.GET)
@ResponseBody
public Result<AppVO> getAppBasicInfo(@PathVariable String appId) {
if (accountService.isAdminOrderHandler(SpringTool.getUserName())) {
if (accountService.isOpOrRd(SpringTool.getUserName())) {
return new Result<>(AppConverter.convert2AppVO(appService.getByAppId(appId)));
}
......@@ -101,7 +101,7 @@ public class NormalAppController {
@RequestMapping(value = "apps/{appId}/topics", method = RequestMethod.GET)
@ResponseBody
public Result<List<AppTopicVO>> getAppTopics(@PathVariable String appId,
@RequestParam(value = "mine") Boolean mine) {
@RequestParam(value = "mine", required = false) Boolean mine) {
List<AppTopicDTO> dtoList = appService.getAppTopicDTOList(appId, mine);
List<AppTopicVO> voList = new ArrayList<>();
......
......@@ -6,10 +6,10 @@ import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumeDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.TopicOffsetResetDTO;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer.ConsumerGroupDetailVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer.ConsumerGroupVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer.ConsumerGroupSummaryVO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.service.cache.LogicalClusterMetadataManager;
......@@ -55,7 +55,7 @@ public class NormalConsumerController {
@ApiOperation(value = "查询消费Topic的消费组", notes = "")
@RequestMapping(value = "{clusterId}/consumers/{topicName}/consumer-groups", method = RequestMethod.GET)
@ResponseBody
public Result<List<ConsumerGroupVO>> getConsumeGroups(
public Result<List<ConsumerGroupSummaryVO>> getConsumeGroups(
@PathVariable Long clusterId,
@PathVariable String topicName,
@RequestParam(value = "isPhysicalClusterId", required = false) Boolean isPhysicalClusterId) {
......@@ -63,9 +63,9 @@ public class NormalConsumerController {
if (ValidateUtils.isNull(physicalClusterId)) {
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
return new Result<>(ConsumerModelConverter.convert2ConsumerGroupVOList(
consumerService.getConsumerGroupList(physicalClusterId, topicName))
);
return new Result<>(ConsumerModelConverter.convert2ConsumerGroupSummaryVOList(
consumerService.getConsumerGroupSummaries(physicalClusterId, topicName)
));
}
@ApiOperation(value = "查询消费组的消费详情", notes = "")
......@@ -95,15 +95,10 @@ public class NormalConsumerController {
return Result.buildFrom(ResultStatus.CG_LOCATION_ILLEGAL);
}
ConsumerGroupDTO consumeGroupDTO = new ConsumerGroupDTO(
clusterDO.getId(),
consumerGroup,
new ArrayList<>(),
offsetStoreLocation
);
ConsumerGroup consumeGroup = new ConsumerGroup(clusterDO.getId(), consumerGroup, offsetStoreLocation);
try {
List<ConsumeDetailDTO> consumeDetailDTOList =
consumerService.getConsumeDetail(clusterDO, topicName, consumeGroupDTO);
consumerService.getConsumeDetail(clusterDO, topicName, consumeGroup);
return new Result<>(
ConsumerModelConverter.convert2ConsumerGroupDetailVO(
topicName,
......@@ -113,7 +108,7 @@ public class NormalConsumerController {
)
);
} catch (Exception e) {
LOGGER.error("get consume detail failed, consumerGroup:{}.", consumeGroupDTO, e);
LOGGER.error("get consume detail failed, consumerGroup:{}.", consumeGroup, e);
}
return Result.buildFrom(ResultStatus.OPERATION_FAILED);
......@@ -139,16 +134,11 @@ public class NormalConsumerController {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
ConsumerGroupDTO consumerGroupDTO = new ConsumerGroupDTO(
physicalClusterId,
dto.getConsumerGroup(),
new ArrayList<>(),
OffsetLocationEnum.getOffsetStoreLocation(dto.getLocation())
);
ConsumerGroup consumerGroup = new ConsumerGroup(physicalClusterId, dto.getConsumerGroup(), OffsetLocationEnum.getOffsetStoreLocation(dto.getLocation()));
List<Result> resultList = consumerService.resetConsumerOffset(
clusterDO,
dto.getTopicName(),
consumerGroupDTO,
consumerGroup,
offsetDTOList
);
for (Result result: resultList) {
......
package com.xiaojukeji.kafka.manager.web.api.versionone.op;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionAddGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionDeleteGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionModifyGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.service.service.gateway.GatewayConfigService;
import com.xiaojukeji.kafka.manager.web.converters.GatewayModelConverter;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
@Api(tags = "OP-Gateway配置相关接口(REST)")
@RestController
public class OpGatewayConfigController {
    @Autowired
    private GatewayConfigService gatewayConfigService;

    /**
     * Create a new gateway config entry. Rejects null or illegal payloads.
     */
    @ApiOperation(value = "创建Gateway配置", notes = "")
    @RequestMapping(value = "gateway-configs", method = RequestMethod.POST)
    @ResponseBody
    public Result createGatewayConfig(@RequestBody OrderExtensionAddGatewayConfigDTO dto) {
        if (ValidateUtils.isNull(dto)) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        if (!dto.legal()) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        return gatewayConfigService.insert(GatewayModelConverter.convert2GatewayConfigDO(dto));
    }

    /**
     * Update an existing gateway config entry by its id.
     */
    @ApiOperation(value = "修改Gateway配置", notes = "")
    @RequestMapping(value = "gateway-configs", method = RequestMethod.PUT)
    @ResponseBody
    public Result modifyGatewayConfig(@RequestBody OrderExtensionModifyGatewayConfigDTO dto) {
        if (ValidateUtils.isNull(dto)) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        if (!dto.legal()) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        return gatewayConfigService.updateById(GatewayModelConverter.convert2GatewayConfigDO(dto));
    }

    /**
     * Delete a gateway config entry by the id carried in the payload.
     */
    @ApiOperation(value = "删除Gateway配置", notes = "")
    @RequestMapping(value = "gateway-configs", method = RequestMethod.DELETE)
    @ResponseBody
    public Result deleteGatewayConfig(@RequestBody OrderExtensionDeleteGatewayConfigDTO dto) {
        if (ValidateUtils.isNull(dto)) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        if (!dto.legal()) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        return gatewayConfigService.deleteById(dto.getId());
    }
}
......@@ -194,15 +194,22 @@ public class OpUtilsController {
if (ValidateUtils.isNull(clusterDO)) {
return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
}
String operator = SpringTool.getUserName();
ResultStatus rs = null;
if (RebalanceDimensionEnum.CLUSTER.getCode().equals(reqObj.getDimension())) {
rs = adminService.preferredReplicaElection(clusterDO, operator);
// 按照Cluster纬度均衡
rs = adminService.preferredReplicaElection(clusterDO, SpringTool.getUserName());
} else if (RebalanceDimensionEnum.BROKER.getCode().equals(reqObj.getDimension())) {
rs = adminService.preferredReplicaElection(clusterDO, reqObj.getBrokerId(), operator);
// 按照Broker纬度均衡
rs = adminService.preferredReplicaElection(clusterDO, reqObj.getBrokerId(), SpringTool.getUserName());
} else if (RebalanceDimensionEnum.TOPIC.getCode().equals(reqObj.getDimension())) {
// 按照Topic纬度均衡
rs = adminService.preferredReplicaElection(clusterDO, reqObj.getTopicName(), SpringTool.getUserName());
} else if (RebalanceDimensionEnum.PARTITION.getCode().equals(reqObj.getDimension())) {
// 按照Partition纬度均衡
rs = adminService.preferredReplicaElection(clusterDO, reqObj.getTopicName(), reqObj.getPartitionId(), SpringTool.getUserName());
} else {
// TODO: 19/7/8 Topic维度 & Region维度 优先副本选举
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
return Result.buildFrom(rs);
}
......
package com.xiaojukeji.kafka.manager.web.api.versionone.rd;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.PeakFlowStatusEnum;
import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.ControllerPreferredCandidate;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster.TopicMetadataVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster.ControllerPreferredCandidateVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster.RdClusterMetricsVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster.ClusterBrokerStatusVO;
import com.xiaojukeji.kafka.manager.common.entity.ao.BrokerOverviewDTO;
......@@ -26,7 +27,6 @@ import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
......@@ -168,4 +168,15 @@ public class RdClusterController {
public Result<List<TopicMetadataVO>> getTopicMetadatas(@PathVariable("clusterId") Long clusterId) {
    // Delegate straight to the converter, which reads metadata for the cluster.
    List<TopicMetadataVO> voList = ClusterModelConverter.convert2TopicMetadataVOList(clusterId);
    return new Result<>(voList);
}
@ApiOperation(value = "Controller优先候选的Broker", notes = "滴滴内部引擎特性")
@RequestMapping(value = "clusters/{clusterId}/controller-preferred-candidates", method = RequestMethod.GET)
@ResponseBody
public Result<List<ControllerPreferredCandidateVO>> getControllerPreferredCandidates(@PathVariable("clusterId") Long clusterId) {
    // Fetch the candidate broker list; on failure propagate the original code/message.
    Result<List<ControllerPreferredCandidate>> candidateResult = clusterService.getControllerPreferredCandidates(clusterId);
    if (candidateResult.failed()) {
        // Use the diamond operator instead of the raw type `new Result(...)`
        // to keep the declared Result<List<ControllerPreferredCandidateVO>> type-safe.
        return new Result<>(candidateResult.getCode(), candidateResult.getMessage());
    }
    return Result.buildSuc(ClusterModelConverter.convert2ControllerPreferredCandidateVOList(candidateResult.getData()));
}
}
\ No newline at end of file
package com.xiaojukeji.kafka.manager.web.api.versionone.rd;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.GatewayConfigVO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.service.service.gateway.GatewayConfigService;
import com.xiaojukeji.kafka.manager.web.converters.GatewayModelConverter;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.List;
@Api(tags = "RD-Gateway配置相关接口(REST)")
@RestController
public class RdGatewayConfigController {
    @Autowired
    private GatewayConfigService gatewayConfigService;

    /**
     * List all gateway configs; returns an empty success result when none exist.
     */
    @ApiOperation(value = "Gateway相关配置信息", notes = "")
    @RequestMapping(value = "gateway-configs", method = RequestMethod.GET)
    @ResponseBody
    public Result<List<GatewayConfigVO>> getGatewayConfigs() {
        List<GatewayConfigDO> configDOList = gatewayConfigService.list();
        if (!ValidateUtils.isEmptyList(configDOList)) {
            return Result.buildSuc(GatewayModelConverter.convert2GatewayConfigVOList(configDOList));
        }
        return Result.buildSuc();
    }
}
......@@ -8,7 +8,7 @@ import com.xiaojukeji.kafka.manager.common.constant.SystemCodeConstant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumeDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.openapi.common.dto.ConsumeHealthDTO;
import com.xiaojukeji.kafka.manager.openapi.common.dto.OffsetResetDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
......@@ -29,7 +29,6 @@ import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
......@@ -152,15 +151,10 @@ public class ThirdPartConsumeController {
return Result.buildFrom(ResultStatus.CG_LOCATION_ILLEGAL);
}
ConsumerGroupDTO consumeGroupDTO = new ConsumerGroupDTO(
clusterDO.getId(),
consumerGroup,
new ArrayList<>(),
offsetStoreLocation
);
ConsumerGroup consumeGroup = new ConsumerGroup(clusterDO.getId(), consumerGroup, offsetStoreLocation);
try {
List<ConsumeDetailDTO> consumeDetailDTOList =
consumerService.getConsumeDetail(clusterDO, topicName, consumeGroupDTO);
consumerService.getConsumeDetail(clusterDO, topicName, consumeGroup);
return new Result<>(
ConsumerModelConverter.convert2ConsumerGroupDetailVO(
topicName,
......@@ -170,7 +164,7 @@ public class ThirdPartConsumeController {
)
);
} catch (Exception e) {
LOGGER.error("get consume detail failed, consumerGroup:{}.", consumeGroupDTO, e);
LOGGER.error("get consume detail failed, consumerGroup:{}.", consumeGroup, e);
}
return Result.buildFrom(ResultStatus.OPERATION_FAILED);
}
......
package com.xiaojukeji.kafka.manager.web.api.versionone.thirdpart;
import com.xiaojukeji.kafka.manager.common.bizenum.RebalanceDimensionEnum;
import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.dto.op.RebalanceDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.service.service.AdminService;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
/**
 * @author zengqiao
 * @date 20/9/23
 */
@Api(tags = "开放接口-OP相关接口(REST)")
@RestController
@RequestMapping(ApiPrefix.API_V1_THIRD_PART_PREFIX)
public class ThirdPartOpController {
    @Autowired
    private AdminService adminService;

    @Autowired
    private ClusterService clusterService;

    /**
     * Trigger a preferred-replica election at cluster / broker / topic / partition
     * granularity, selected by the request's dimension field.
     */
    @ApiOperation(value = "优先副本选举")
    @RequestMapping(value = "op/rebalance", method = RequestMethod.POST)
    @ResponseBody
    public Result preferredReplicaElect(@RequestBody RebalanceDTO reqObj) {
        if (!reqObj.paramLegal()) {
            return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
        }
        ClusterDO clusterDO = clusterService.getById(reqObj.getClusterId());
        if (ValidateUtils.isNull(clusterDO)) {
            return Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST);
        }

        String operator = SpringTool.getUserName();
        if (RebalanceDimensionEnum.CLUSTER.getCode().equals(reqObj.getDimension())) {
            // cluster-wide election
            return Result.buildFrom(adminService.preferredReplicaElection(clusterDO, operator));
        }
        if (RebalanceDimensionEnum.BROKER.getCode().equals(reqObj.getDimension())) {
            // single-broker election
            return Result.buildFrom(adminService.preferredReplicaElection(clusterDO, reqObj.getBrokerId(), operator));
        }
        if (RebalanceDimensionEnum.TOPIC.getCode().equals(reqObj.getDimension())) {
            // single-topic election
            return Result.buildFrom(adminService.preferredReplicaElection(clusterDO, reqObj.getTopicName(), operator));
        }
        if (RebalanceDimensionEnum.PARTITION.getCode().equals(reqObj.getDimension())) {
            // single-partition election
            return Result.buildFrom(adminService.preferredReplicaElection(clusterDO, reqObj.getTopicName(), reqObj.getPartitionId(), operator));
        }
        // unrecognized dimension
        return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
    }
}
......@@ -3,6 +3,7 @@ package com.xiaojukeji.kafka.manager.web.converters;
import com.xiaojukeji.kafka.manager.common.entity.ao.BrokerOverviewDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.ClusterDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.ClusterBrokerStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.ControllerPreferredCandidate;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.LogicalCluster;
import com.xiaojukeji.kafka.manager.common.entity.ao.cluster.LogicalClusterMetrics;
import com.xiaojukeji.kafka.manager.common.entity.dto.rd.ClusterDTO;
......@@ -15,6 +16,7 @@ import com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster.TopicMetadat
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.KafkaControllerVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster.ClusterBrokerStatusVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster.ClusterDetailVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster.ControllerPreferredCandidateVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster.RdClusterMetricsVO;
import com.xiaojukeji.kafka.manager.common.utils.CopyUtils;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
......@@ -249,4 +251,21 @@ public class ClusterModelConverter {
vo.setBrokerReplicaStatusList(clusterBrokerStatus.getBrokerReplicaStatusList());
return vo;
}
public static List<ControllerPreferredCandidateVO> convert2ControllerPreferredCandidateVOList(List<ControllerPreferredCandidate> candidateList) {
    // Empty input yields an empty (mutable) list, matching the other converters here.
    if (ValidateUtils.isEmptyList(candidateList)) {
        return new ArrayList<>();
    }
    List<ControllerPreferredCandidateVO> resultList = new ArrayList<>();
    for (ControllerPreferredCandidate preferredCandidate : candidateList) {
        ControllerPreferredCandidateVO candidateVO = new ControllerPreferredCandidateVO();
        candidateVO.setBrokerId(preferredCandidate.getBrokerId());
        candidateVO.setHost(preferredCandidate.getHost());
        candidateVO.setStatus(preferredCandidate.getStatus());
        candidateVO.setStartTime(preferredCandidate.getStartTime());
        resultList.add(candidateVO);
    }
    return resultList;
}
}
package com.xiaojukeji.kafka.manager.web.converters;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroup;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupSummary;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer.ConsumerGroupDetailVO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumeDetailDTO;
import com.xiaojukeji.kafka.manager.common.entity.ao.consumer.ConsumerGroupDTO;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer.ConsumerGroupSummaryVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer.ConsumerGroupVO;
import com.xiaojukeji.kafka.manager.common.utils.ListUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
......@@ -41,18 +45,34 @@ public class ConsumerModelConverter {
return consumerGroupDetailVOList;
}
public static List<ConsumerGroupVO> convert2ConsumerGroupVOList(List<ConsumerGroupDTO> consumeGroupDTOList) {
if (consumeGroupDTOList == null || consumeGroupDTOList.isEmpty()) {
return new ArrayList<>();
public static List<ConsumerGroupVO> convert2ConsumerGroupVOList(List<ConsumerGroup> consumerGroupList) {
if (ValidateUtils.isEmptyList(consumerGroupList)) {
return Collections.emptyList();
}
List<ConsumerGroupVO> consumerGroupVOList = new ArrayList<>();
for (ConsumerGroupDTO consumeGroupDTO : consumeGroupDTOList) {
for (ConsumerGroup consumerGroup : consumerGroupList) {
ConsumerGroupVO vo = new ConsumerGroupVO();
vo.setConsumerGroup(consumeGroupDTO.getConsumerGroup());
vo.setAppIds(ListUtils.strList2String(consumeGroupDTO.getAppIdList()));
vo.setLocation(consumeGroupDTO.getOffsetStoreLocation().location);
vo.setConsumerGroup(consumerGroup.getConsumerGroup());
vo.setAppIds("");
vo.setLocation(consumerGroup.getOffsetStoreLocation().location);
consumerGroupVOList.add(vo);
}
return consumerGroupVOList;
}
public static List<ConsumerGroupSummaryVO> convert2ConsumerGroupSummaryVOList(List<ConsumerGroupSummary> summaryList) {
    // No summaries -> immutable empty list, same as the VO-list converter above.
    if (ValidateUtils.isEmptyList(summaryList)) {
        return Collections.emptyList();
    }
    List<ConsumerGroupSummaryVO> summaryVOList = new ArrayList<>();
    for (ConsumerGroupSummary groupSummary : summaryList) {
        ConsumerGroupSummaryVO summaryVO = new ConsumerGroupSummaryVO();
        summaryVO.setConsumerGroup(groupSummary.getConsumerGroup());
        summaryVO.setAppIds(ListUtils.strList2String(groupSummary.getAppIdList()));
        summaryVO.setLocation(groupSummary.getOffsetStoreLocation().location);
        summaryVO.setState(groupSummary.getState());
        summaryVOList.add(summaryVO);
    }
    return summaryVOList;
}
}
\ No newline at end of file
package com.xiaojukeji.kafka.manager.web.converters;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionAddGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.bpm.common.entry.apply.gateway.OrderExtensionModifyGatewayConfigDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.GatewayConfigDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.KafkaAclDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.KafkaUserDO;
import com.xiaojukeji.kafka.manager.common.entity.vo.gateway.KafkaAclVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.gateway.KafkaUserVO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.GatewayConfigVO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import java.util.ArrayList;
......@@ -49,4 +53,41 @@ public class GatewayModelConverter {
}
return voList;
}
public static List<GatewayConfigVO> convert2GatewayConfigVOList(List<GatewayConfigDO> doList) {
    // Null input -> empty mutable list; an empty non-null list just falls through
    // the loop and produces the same result.
    if (ValidateUtils.isNull(doList)) {
        return new ArrayList<>();
    }
    List<GatewayConfigVO> configVOList = new ArrayList<>();
    for (GatewayConfigDO gatewayConfigDO : doList) {
        GatewayConfigVO configVO = new GatewayConfigVO();
        configVO.setId(gatewayConfigDO.getId());
        configVO.setType(gatewayConfigDO.getType());
        configVO.setName(gatewayConfigDO.getName());
        configVO.setValue(gatewayConfigDO.getValue());
        configVO.setVersion(gatewayConfigDO.getVersion());
        configVO.setCreateTime(gatewayConfigDO.getCreateTime());
        configVO.setModifyTime(gatewayConfigDO.getModifyTime());
        configVOList.add(configVO);
    }
    return configVOList;
}
/**
 * Build a GatewayConfigDO from an add-config order extension DTO.
 * BUG FIX: the original read type/name/value from the freshly-created
 * configDO (self-assignment), so the returned DO was always empty;
 * the values must come from the incoming configDTO.
 */
public static GatewayConfigDO convert2GatewayConfigDO(OrderExtensionAddGatewayConfigDTO configDTO) {
    GatewayConfigDO configDO = new GatewayConfigDO();
    configDO.setType(configDTO.getType());
    configDO.setName(configDTO.getName());
    configDO.setValue(configDTO.getValue());
    return configDO;
}
/**
 * Build a GatewayConfigDO from a modify-config order extension DTO.
 * BUG FIX: the original read id/type/name/value from the freshly-created
 * configDO (self-assignment), so the update payload was always empty;
 * the values must come from the incoming configDTO.
 */
public static GatewayConfigDO convert2GatewayConfigDO(OrderExtensionModifyGatewayConfigDTO configDTO) {
    GatewayConfigDO configDO = new GatewayConfigDO();
    configDO.setId(configDTO.getId());
    configDO.setType(configDTO.getType());
    configDO.setName(configDTO.getName());
    configDO.setValue(configDTO.getValue());
    return configDO;
}
}
\ No newline at end of file
......@@ -37,6 +37,7 @@ public class TopicModelConverter {
vo.setTopicCodeC(dto.getTopicCodeC());
vo.setDescription(dto.getDescription());
vo.setBootstrapServers("");
vo.setRegionNameList(dto.getRegionNameList());
if (!ValidateUtils.isNull(clusterDO)) {
vo.setBootstrapServers(clusterDO.getBootstrapServers());
}
......
......@@ -42,6 +42,11 @@ custom:
topic-throttled-metrics: false
save-days: 7
# 任务相关的开关
task:
op:
sync-topic-enabled: false # 未落盘的Topic定期同步到DB中
account:
ldap:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册