Commit 2790099e authored by Z zengqiao

Tidy up Task-module tasks: reorganize the BrokerMetrics task

Parent f6ba8bc9
......@@ -27,7 +27,6 @@ spring:
main:
allow-bean-definition-overriding: true
servlet:
multipart:
max-file-size: 100MB
......@@ -37,28 +36,32 @@ logging:
config: classpath:logback-spring.xml
custom:
idc: cn # Data center of the deployment; ignore this setting, it will be removed later
jmx:
max-conn: 10 # As of version 2.3, this setting no longer takes effect here
idc: cn
store-metrics-task:
community:
broker-metrics-enabled: true # Switch for collecting community broker metrics; when disabled, the metrics are neither collected nor written to the DB
topic-metrics-enabled: true # Switch for collecting community topic metrics; when disabled, the metrics are neither collected nor written to the DB
didi:
app-topic-metrics-enabled: false # Metric instrumented by DiDi; absent from community Apache Kafka, so disabled by default
topic-request-time-metrics-enabled: false # Metric instrumented by DiDi; absent from community Apache Kafka, so disabled by default
topic-throttled-metrics-enabled: false # Metric instrumented by DiDi; absent from community Apache Kafka, so disabled by default
topic-metrics-enabled: true
didi: # Metrics specific to DiDi Kafka
app-topic-metrics-enabled: false
topic-request-time-metrics-enabled: false
topic-throttled-metrics-enabled: false
# Task-related switches
# Task-related configuration
task:
op:
sync-topic-enabled: false # Periodically sync Topics that are not yet persisted into the DB
order-auto-exec: # Switches for the automatic order-approval thread
topic-enabled: false # Automatic approval of Topic orders; false: disabled, true: enabled
app-enabled: false # Automatic approval of App orders; false: disabled, true: enabled
sync-topic-enabled: false # Periodically sync Topics that are not yet persisted into the DB
order-auto-exec: # Switches for the automatic order-approval thread
topic-enabled: false # Automatic approval of Topic orders; false: disabled, true: enabled
app-enabled: false # Automatic approval of App orders; false: disabled, true: enabled
metrics:
delete-metrics:
delete-limit-size: 1000
collect: # Metrics collection
broker-metrics-enabled: true # Collect broker metrics
sink: # Metrics sinking
cluster-metrics: # Sink cluster metrics
sink-db-enabled: true # Sink to the DB
broker-metrics: # Sink broker metrics
sink-db-enabled: true # Sink to the DB
delete: # Metrics deletion
delete-limit-size: 1000 # Batch size per delete
cluster-metrics-save-days: 14 # Days to keep cluster metrics
broker-metrics-save-days: 14 # Days to keep broker metrics
topic-metrics-save-days: 7 # Days to keep topic metrics
......@@ -66,7 +69,21 @@ task:
topic-throttled-metrics-save-days: 7 # Days to keep topic throttling metrics
app-topic-metrics-save-days: 7 # Days to keep App+Topic metrics
# LDAP-related configuration
thread-pool:
collect-metrics:
thread-num: 256 # Size of the metrics-collection thread pool
queue-size: 5000 # Queue size of the metrics-collection thread pool
api-call:
thread-num: 16 # Size of the API-service thread pool
queue-size: 5000 # Queue size of the API-service thread pool
client-pool:
kafka-consumer:
min-idle-client-num: 24 # Minimum number of idle clients
max-idle-client-num: 24 # Maximum number of idle clients
max-total-client-num: 24 # Maximum number of clients
borrow-timeout-unit-ms: 3000 # Borrow timeout, in milliseconds
account:
ldap:
enabled: false
......@@ -81,7 +98,6 @@ account:
auth-user-registration: true
auth-user-registration-role: normal
# Cluster upgrade and deployment features; requires Nightingale (n9e) and S3
kcm:
enabled: false
s3:
......@@ -96,14 +112,6 @@ kcm:
account: root
script-file: kcm_script.sh
# Monitoring and alerting features; requires Nightingale (n9e)
# enabled: whether monitoring and alerting is enabled; true: on, false: off
# n9e.nid: Nightingale node ID
# n9e.user-token: user token, found in the personal settings of Nightingale
# n9e.mon.base-url: monitoring address
# n9e.sink.base-url: data reporting address
# n9e.rdb.base-url: user resource center address
monitor:
enabled: false
n9e:
......@@ -116,25 +124,9 @@ monitor:
rdb:
base-url: http://127.0.0.1:8000 # In Nightingale v4, the default port was unified to 8000
notify: # Notification feature
kafka: # By default, notifications are sent to a designated Kafka Topic
cluster-id: 95 # Cluster ID of the Topic
topic-name: didi-kafka-notify # Topic name
order: # Address of the deployed KM
notify:
kafka:
cluster-id: 95
topic-name: didi-kafka-notify
order:
detail-url: http://127.0.0.1
thread-pool:
collect-metrics:
thread-num: 256 # Size of the metrics-collection thread pool
queue-size: 5000 # Queue size of the metrics-collection thread pool
api-call:
thread-num: 16 # Size of the API-service thread pool
queue-size: 5000 # Queue size of the API-service thread pool
client-pool:
kafka-consumer:
min-idle-client-num: 24 # Minimum number of idle clients
max-idle-client-num: 24 # Maximum number of idle clients
max-total-client-num: 24 # Maximum number of clients
borrow-timeout-unit-ms: 3000 # Borrow timeout, in milliseconds
\ No newline at end of file
package com.xiaojukeji.kafka.manager.common.events.metrics;
import org.springframework.context.ApplicationEvent;
/**
* @author zengqiao
* @date 22/01/17
*/
public class BaseMetricsCollectedEvent extends ApplicationEvent {
/**
* Physical cluster ID
*/
protected final Long physicalClusterId;
/**
* Collection time, set according to business needs; it can be either the task start time or the task end time
*/
protected final Long collectTime;
public BaseMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime) {
super(source);
this.physicalClusterId = physicalClusterId;
this.collectTime = collectTime;
}
public Long getPhysicalClusterId() {
return physicalClusterId;
}
public Long getCollectTime() {
return collectTime;
}
}
package com.xiaojukeji.kafka.manager.common.events.metrics;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import java.util.List;
/**
* @author zengqiao
* @date 20/8/31
*/
public class BatchBrokerMetricsCollectedEvent extends BaseMetricsCollectedEvent {
private final List<BrokerMetrics> metricsList;
public BatchBrokerMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime, List<BrokerMetrics> metricsList) {
super(source, physicalClusterId, collectTime);
this.metricsList = metricsList;
}
public List<BrokerMetrics> getMetricsList() {
return metricsList;
}
}
\ No newline at end of file
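The two event classes above are the seam introduced by this commit: the scheduled collection task publishes a BatchBrokerMetricsCollectedEvent, and independent Spring listeners consume it. A minimal illustrative listener is sketched below (the class name is hypothetical; the real DB sinks appear further down in this diff):

package com.xiaojukeji.kafka.manager.task.listener;

import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;

/**
 * Illustrative listener, not part of this commit: any Spring bean implementing
 * ApplicationListener<BatchBrokerMetricsCollectedEvent> receives every batch
 * published by the collection task.
 */
@Component
public class ExampleBrokerMetricsListener implements ApplicationListener<BatchBrokerMetricsCollectedEvent> {
    @Override
    public void onApplicationEvent(BatchBrokerMetricsCollectedEvent event) {
        // Sink event.getMetricsList() wherever needed: DB, Kafka topic, monitoring system, etc.
    }
}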
package com.xiaojukeji.kafka.manager.task.component;
import com.google.common.collect.Lists;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.utils.factory.DefaultThreadFactory;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.NetUtils;
......@@ -29,7 +28,7 @@ import java.util.concurrent.*;
* @date 20/8/10
*/
public abstract class AbstractScheduledTask<E extends Comparable> implements SchedulingConfigurer {
private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractScheduledTask.class);
@Autowired
private HeartbeatDao heartbeatDao;
......
package com.xiaojukeji.kafka.manager.task.component;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
......@@ -9,11 +8,11 @@ import org.slf4j.LoggerFactory;
* @date 20/8/10
*/
public class BaseBizTask<E extends Comparable> implements Runnable {
private final static Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractScheduledTask.class);
private E task;
private final E task;
private AbstractScheduledTask scheduledTask;
private final AbstractScheduledTask scheduledTask;
public BaseBizTask(E task, AbstractScheduledTask scheduledTask) {
this.task = task;
......@@ -30,6 +29,7 @@ public class BaseBizTask<E extends Comparable> implements Runnable {
} catch (Throwable t) {
LOGGER.error("scheduled task scheduleName:{} execute failed, task:{}", scheduledTask.getScheduledName(), task, t);
}
LOGGER.info("scheduled task scheduleName:{} finished, cost-time:{}ms.", scheduledTask.getScheduledName(), System.currentTimeMillis() - startTime);
}
}
\ No newline at end of file
package com.xiaojukeji.kafka.manager.task.dispatch.metrics.store;
package com.xiaojukeji.kafka.manager.task.dispatch.metrics.collect;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BaseMetrics;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.metrics.ClusterMetrics;
import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao;
import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
import com.xiaojukeji.kafka.manager.common.entity.pojo.BrokerMetricsDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.service.service.JmxService;
import com.xiaojukeji.kafka.manager.service.strategy.AbstractHealthScoreStrategy;
import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
import com.xiaojukeji.kafka.manager.task.component.AbstractScheduledTask;
import com.xiaojukeji.kafka.manager.task.component.CustomScheduled;
import org.slf4j.Logger;
......@@ -28,17 +20,16 @@ import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* Store broker metrics in DB: broker traffic, cluster traffic
* Collect broker metrics
* @author zengqiao
* @date 20/5/7
*/
@CustomScheduled(name = "storeBrokerMetrics", cron = "21 0/1 * * * ?", threadNum = 2)
@ConditionalOnProperty(prefix = "custom.store-metrics-task.community", name = "broker-metrics-enabled", havingValue = "true", matchIfMissing = true)
public class StoreBrokerMetrics extends AbstractScheduledTask<ClusterDO> {
private static final Logger LOGGER = LoggerFactory.getLogger(LogConstant.SCHEDULED_TASK_LOGGER);
@CustomScheduled(name = "collectAndPublishBrokerMetrics", cron = "21 0/1 * * * ?", threadNum = 2)
@ConditionalOnProperty(prefix = "task.metrics.collect", name = "broker-metrics-enabled", havingValue = "true", matchIfMissing = true)
public class CollectAndPublishBrokerMetrics extends AbstractScheduledTask<ClusterDO> {
private static final Logger LOGGER = LoggerFactory.getLogger(CollectAndPublishBrokerMetrics.class);
@Autowired
private JmxService jmxService;
......@@ -46,12 +37,6 @@ public class StoreBrokerMetrics extends AbstractScheduledTask<ClusterDO> {
@Autowired
private ClusterService clusterService;
@Autowired
private BrokerMetricsDao brokerMetricsDao;
@Autowired
private ClusterMetricsDao clusterMetricsDao;
@Autowired
private AbstractHealthScoreStrategy healthScoreStrategy;
......@@ -63,33 +48,22 @@ public class StoreBrokerMetrics extends AbstractScheduledTask<ClusterDO> {
@Override
public void processTask(ClusterDO clusterDO) {
long startTime = System.currentTimeMillis();
List<ClusterMetrics> clusterMetricsList = new ArrayList<>();
try {
List<BrokerMetrics> brokerMetricsList = getAndBatchAddMetrics(startTime, clusterDO.getId());
clusterMetricsList.add(supplyAndConvert2ClusterMetrics(
SpringTool.publish(new BatchBrokerMetricsCollectedEvent(
this,
clusterDO.getId(),
MetricsConvertUtils.merge2BaseMetricsByAdd(brokerMetricsList))
startTime,
this.getBrokerMetrics(clusterDO.getId()))
);
} catch (Exception t) {
LOGGER.error("collect failed, clusterId:{}.", clusterDO.getId(), t);
}
long endTime = System.currentTimeMillis();
LOGGER.info("collect finish, clusterId:{} costTime:{}", clusterDO.getId(), endTime - startTime);
List<ClusterMetricsDO> doList = MetricsConvertUtils.convertAndUpdateCreateTime2ClusterMetricsDOList(
startTime,
clusterMetricsList
);
if (ValidateUtils.isEmptyList(doList)) {
return;
} catch (Exception e) {
LOGGER.error("collect broker-metrics failed, physicalClusterId:{}.", clusterDO.getId(), e);
}
clusterMetricsDao.batchAdd(doList);
LOGGER.info("collect broker-metrics finished, physicalClusterId:{} costTime:{}", clusterDO.getId(), System.currentTimeMillis() - startTime);
}
private List<BrokerMetrics> getAndBatchAddMetrics(Long startTime, Long clusterId) {
private List<BrokerMetrics> getBrokerMetrics(Long clusterId) {
List<BrokerMetrics> metricsList = new ArrayList<>();
for (Integer brokerId: PhysicalClusterMetadataManager.getBrokerIdList(clusterId)) {
BrokerMetrics metrics = jmxService.getBrokerMetrics(
......@@ -97,50 +71,23 @@ public class StoreBrokerMetrics extends AbstractScheduledTask<ClusterDO> {
brokerId,
KafkaMetricsCollections.BROKER_TO_DB_METRICS
);
if (ValidateUtils.isNull(metrics)) {
continue;
}
metrics.getMetricsMap().put(
JmxConstant.HEALTH_SCORE,
healthScoreStrategy.calBrokerHealthScore(clusterId, brokerId, metrics)
);
metricsList.add(metrics);
}
if (ValidateUtils.isEmptyList(metricsList)) {
return new ArrayList<>();
}
List<BrokerMetricsDO> doList =
MetricsConvertUtils.convertAndUpdateCreateTime2BrokerMetricsDOList(startTime, metricsList);
int i = 0;
do {
List<BrokerMetricsDO> subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size()));
if (ValidateUtils.isEmptyList(subDOList)) {
break;
}
brokerMetricsDao.batchAdd(subDOList);
i += Constant.BATCH_INSERT_SIZE;
} while (i < doList.size());
return metricsList;
}
private ClusterMetrics supplyAndConvert2ClusterMetrics(Long clusterId, BaseMetrics baseMetrics) {
ClusterMetrics metrics = new ClusterMetrics(clusterId);
Map<String, Object> metricsMap = metrics.getMetricsMap();
metricsMap.putAll(baseMetrics.getMetricsMap());
metricsMap.put(JmxConstant.TOPIC_NUM, PhysicalClusterMetadataManager.getTopicNameList(clusterId).size());
metricsMap.put(JmxConstant.BROKER_NUM, PhysicalClusterMetadataManager.getBrokerIdList(clusterId).size());
Integer partitionNum = 0;
for (String topicName : PhysicalClusterMetadataManager.getTopicNameList(clusterId)) {
TopicMetadata topicMetaData = PhysicalClusterMetadataManager.getTopicMetadata(clusterId, topicName);
if (ValidateUtils.isNull(topicMetaData)) {
continue;
}
partitionNum += topicMetaData.getPartitionNum();
}
metricsMap.put(JmxConstant.PARTITION_NUM, partitionNum);
return metrics;
}
}
\ No newline at end of file
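In processTask above, SpringTool.publish hands the collected batch to Spring's event machinery; its implementation is not part of this diff. A minimal sketch follows, assuming the helper simply holds the ApplicationContext and forwards events to it (the real SpringTool in this project may differ):

package com.xiaojukeji.kafka.manager.common.utils;

import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationEvent;
import org.springframework.stereotype.Component;

/**
 * Hypothetical publish helper: captures the ApplicationContext once and
 * forwards events to it, which notifies all matching listeners.
 */
@Component
public class SpringToolSketch implements ApplicationContextAware {
    private static ApplicationContext context;

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        context = applicationContext;
    }

    public static void publish(ApplicationEvent event) {
        context.publishEvent(event);
    }
}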
......@@ -42,25 +42,25 @@ public class DeleteMetrics extends AbstractScheduledTask<EmptyEntry> {
@Autowired
private TopicThrottledMetricsDao topicThrottledMetricsDao;
@Value(value = "${task.metrics.delete-metrics.delete-limit-size:1000}")
@Value(value = "${task.metrics.delete.delete-limit-size:1000}")
private Integer deleteLimitSize;
@Value(value = "${task.metrics.delete-metrics.cluster-metrics-save-days:14}")
@Value(value = "${task.metrics.delete.cluster-metrics-save-days:14}")
private Integer clusterMetricsSaveDays;
@Value(value = "${task.metrics.delete-metrics.broker-metrics-save-days:14}")
@Value(value = "${task.metrics.delete.broker-metrics-save-days:14}")
private Integer brokerMetricsSaveDays;
@Value(value = "${task.metrics.delete-metrics.topic-metrics-save-days:7}")
@Value(value = "${task.metrics.delete.topic-metrics-save-days:7}")
private Integer topicMetricsSaveDays;
@Value(value = "${task.metrics.delete-metrics.topic-request-time-metrics-save-days:7}")
@Value(value = "${task.metrics.delete.topic-request-time-metrics-save-days:7}")
private Integer topicRequestTimeMetricsSaveDays;
@Value(value = "${task.metrics.delete-metrics.topic-throttled-metrics-save-days:7}")
@Value(value = "${task.metrics.delete.topic-throttled-metrics-save-days:7}")
private Integer topicThrottledMetricsSaveDays;
@Value(value = "${task.metrics.delete-metrics.app-topic-metrics-save-days:7}")
@Value(value = "${task.metrics.delete.app-topic-metrics-save-days:7}")
private Integer appTopicMetricsSaveDays;
@Override
......
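Note that the renamed keys (task.metrics.delete-metrics.* to task.metrics.delete.*) each keep an inline default after the colon in their @Value expression, so a deployment that still carries the old keys will start up and simply fall back to those defaults rather than fail property resolution.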
package com.xiaojukeji.kafka.manager.task.listener.sink.db;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.pojo.BrokerMetricsDO;
import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao;
import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;
import java.util.List;
/**
* @author zengqiao
* @date 22/01/17
*/
@Component
@ConditionalOnProperty(prefix = "task.metrics.sink.broker-metrics", name = "sink-db-enabled", havingValue = "true", matchIfMissing = true)
public class SinkBrokerMetrics2DB implements ApplicationListener<BatchBrokerMetricsCollectedEvent> {
private static final Logger logger = LoggerFactory.getLogger(SinkBrokerMetrics2DB.class);
@Autowired
private BrokerMetricsDao metricsDao;
@Override
public void onApplicationEvent(BatchBrokerMetricsCollectedEvent event) {
logger.debug("sink broker-metrics to db start, event:{}.", event);
List<BrokerMetrics> metricsList = event.getMetricsList();
if (ValidateUtils.isEmptyList(metricsList)) {
logger.warn("sink broker-metrics to db finished, without need sink, event:{}.", event);
return;
}
List<BrokerMetricsDO> doList = MetricsConvertUtils.convertAndUpdateCreateTime2BrokerMetricsDOList(event.getCollectTime(), metricsList);
int i = 0;
while (i < doList.size()) {
List<BrokerMetricsDO> subDOList = doList.subList(i, Math.min(i + Constant.BATCH_INSERT_SIZE, doList.size()));
if (ValidateUtils.isEmptyList(subDOList)) {
break;
}
metricsDao.batchAdd(subDOList);
i += Constant.BATCH_INSERT_SIZE;
}
logger.debug("sink broker-metrics to db finished, event:{}.", event);
}
}
\ No newline at end of file
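The index loop in onApplicationEvent inserts the converted DOs in chunks of Constant.BATCH_INSERT_SIZE so that no single oversized batch INSERT is issued. An equivalent, arguably clearer formulation using Guava (already used elsewhere in this module) is sketched below; it assumes the same metricsDao field and constant and needs an import of com.google.common.collect.Lists:

// Illustrative alternative to the manual index loop: chunk the list with Guava.
for (List<BrokerMetricsDO> subDOList : Lists.partition(doList, Constant.BATCH_INSERT_SIZE)) {
    metricsDao.batchAdd(subDOList);
}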
package com.xiaojukeji.kafka.manager.task.listener.sink.db;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BaseMetrics;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.common.entity.metrics.ClusterMetrics;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterMetricsDO;
import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant;
import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.TopicMetadata;
import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.utils.MetricsConvertUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.ApplicationListener;
import org.springframework.stereotype.Component;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
/**
* @author zengqiao
* @date 22/01/17
*/
@Component
@ConditionalOnProperty(prefix = "task.metrics.sink.cluster-metrics", name = "sink-db-enabled", havingValue = "true", matchIfMissing = true)
public class SinkClusterMetrics2DB implements ApplicationListener<BatchBrokerMetricsCollectedEvent> {
private static final Logger logger = LoggerFactory.getLogger(SinkClusterMetrics2DB.class);
@Autowired
private ClusterMetricsDao clusterMetricsDao;
@Override
public void onApplicationEvent(BatchBrokerMetricsCollectedEvent event) {
logger.debug("sink cluster-metrics to db start, event:{}.", event);
List<BrokerMetrics> metricsList = event.getMetricsList();
if (ValidateUtils.isEmptyList(metricsList)) {
logger.warn("sink cluster-metrics to db finished, without need sink, event:{}.", event);
return;
}
List<ClusterMetricsDO> doList = MetricsConvertUtils.convertAndUpdateCreateTime2ClusterMetricsDOList(
event.getCollectTime(),
// Merge broker-metrics into cluster-metrics
Arrays.asList(supplyAndConvert2ClusterMetrics(event.getPhysicalClusterId(), MetricsConvertUtils.merge2BaseMetricsByAdd(event.getMetricsList())))
);
if (ValidateUtils.isEmptyList(doList)) {
logger.warn("sink cluster-metrics to db finished, without need sink, event:{}.", event);
return;
}
clusterMetricsDao.batchAdd(doList);
logger.debug("sink cluster-metrics to db finished, event:{}.", event);
}
private ClusterMetrics supplyAndConvert2ClusterMetrics(Long clusterId, BaseMetrics baseMetrics) {
ClusterMetrics metrics = new ClusterMetrics(clusterId);
Map<String, Object> metricsMap = metrics.getMetricsMap();
metricsMap.putAll(baseMetrics.getMetricsMap());
metricsMap.put(JmxConstant.TOPIC_NUM, PhysicalClusterMetadataManager.getTopicNameList(clusterId).size());
metricsMap.put(JmxConstant.BROKER_NUM, PhysicalClusterMetadataManager.getBrokerIdList(clusterId).size());
Integer partitionNum = 0;
for (String topicName : PhysicalClusterMetadataManager.getTopicNameList(clusterId)) {
TopicMetadata topicMetaData = PhysicalClusterMetadataManager.getTopicMetadata(clusterId, topicName);
if (ValidateUtils.isNull(topicMetaData)) {
continue;
}
partitionNum += topicMetaData.getPartitionNum();
}
metricsMap.put(JmxConstant.PARTITION_NUM, partitionNum);
return metrics;
}
}
\ No newline at end of file
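Both DB sinks above run, under Spring's default event multicasting, synchronously on the thread that publishes the event, so the two writes still happen inside the scheduled task's time budget, only behind a cleaner seam. If that ever became a bottleneck, annotation-driven listeners could be made asynchronous; a hypothetical sketch (not part of this commit, and it requires @EnableAsync on a configuration class):

package com.xiaojukeji.kafka.manager.task.listener.sink.db;

import com.xiaojukeji.kafka.manager.common.events.metrics.BatchBrokerMetricsCollectedEvent;
import org.springframework.context.event.EventListener;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;

/**
 * Hypothetical asynchronous listener: @Async moves handling off the
 * publishing (collection) thread onto a task-executor thread.
 */
@Component
public class AsyncBrokerMetricsSinkSketch {

    @Async
    @EventListener
    public void handle(BatchBrokerMetricsCollectedEvent event) {
        // Handle the batch without blocking the metrics-collection task.
    }
}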
package com.xiaojukeji.kafka.manager.task.listener;
package com.xiaojukeji.kafka.manager.task.listener.sink.db;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
......
package com.xiaojukeji.kafka.manager.task.listener;
package com.xiaojukeji.kafka.manager.task.listener.sink.db;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
......
package com.xiaojukeji.kafka.manager.task.listener;
package com.xiaojukeji.kafka.manager.task.listener.sink.kafka;
import com.xiaojukeji.kafka.manager.common.constant.ConfigConstant;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
......
package com.xiaojukeji.kafka.manager.task.listener;
package com.xiaojukeji.kafka.manager.task.listener.sink.kafka;
import com.xiaojukeji.kafka.manager.common.constant.ConfigConstant;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
......
package com.xiaojukeji.kafka.manager.task.listener;
package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
import com.xiaojukeji.kafka.manager.monitor.common.entry.bizenum.MonitorMetricNameEnum;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
......
package com.xiaojukeji.kafka.manager.task.listener;
package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
import com.xiaojukeji.kafka.manager.monitor.common.entry.bizenum.MonitorMetricNameEnum;
import com.xiaojukeji.kafka.manager.common.constant.LogConstant;
......
package com.xiaojukeji.kafka.manager.task.listener;
package com.xiaojukeji.kafka.manager.task.listener.sink.monitor;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
import com.xiaojukeji.kafka.manager.monitor.common.MonitorSinkConstant;
......
......@@ -33,7 +33,6 @@ custom:
idc: cn
store-metrics-task:
community:
broker-metrics-enabled: true
topic-metrics-enabled: true
didi: # Metrics specific to DiDi Kafka
app-topic-metrics-enabled: false
......@@ -43,13 +42,20 @@ custom:
# Task-related configuration
task:
op:
sync-topic-enabled: false # Periodically sync Topics that are not yet persisted into the DB
order-auto-exec: # Switches for the automatic order-approval thread
topic-enabled: false # Automatic approval of Topic orders; false: disabled, true: enabled
app-enabled: false # Automatic approval of App orders; false: disabled, true: enabled
sync-topic-enabled: false # Periodically sync Topics that are not yet persisted into the DB
order-auto-exec: # Switches for the automatic order-approval thread
topic-enabled: false # Automatic approval of Topic orders; false: disabled, true: enabled
app-enabled: false # Automatic approval of App orders; false: disabled, true: enabled
metrics:
delete-metrics:
delete-limit-size: 1000
collect: # Metrics collection
broker-metrics-enabled: true # Collect broker metrics
sink: # Metrics sinking
cluster-metrics: # Sink cluster metrics
sink-db-enabled: true # Sink to the DB
broker-metrics: # Sink broker metrics
sink-db-enabled: true # Sink to the DB
delete: # Metrics deletion
delete-limit-size: 1000 # Batch size per delete
cluster-metrics-save-days: 14 # Days to keep cluster metrics
broker-metrics-save-days: 14 # Days to keep broker metrics
topic-metrics-save-days: 7 # Days to keep topic metrics
......@@ -57,6 +63,21 @@ task:
topic-throttled-metrics-save-days: 7 # Days to keep topic throttling metrics
app-topic-metrics-save-days: 7 # Days to keep App+Topic metrics
thread-pool:
collect-metrics:
thread-num: 256 # Size of the metrics-collection thread pool
queue-size: 5000 # Queue size of the metrics-collection thread pool
api-call:
thread-num: 16 # Size of the API-service thread pool
queue-size: 5000 # Queue size of the API-service thread pool
client-pool:
kafka-consumer:
min-idle-client-num: 24 # Minimum number of idle clients
max-idle-client-num: 24 # Maximum number of idle clients
max-total-client-num: 24 # Maximum number of clients
borrow-timeout-unit-ms: 3000 # Borrow timeout, in milliseconds
account:
ldap:
enabled: false
......@@ -103,18 +124,3 @@ notify:
topic-name: didi-kafka-notify
order:
detail-url: http://127.0.0.1
thread-pool:
collect-metrics:
thread-num: 256 # 收集指标线程池大小
queue-size: 5000 # 收集指标线程池的queue大小
api-call:
thread-num: 16 # api服务线程池大小
queue-size: 5000 # api服务线程池的queue大小
client-pool:
kafka-consumer:
min-idle-client-num: 24 # 最小空闲客户端数
max-idle-client-num: 24 # 最大空闲客户端数
max-total-client-num: 24 # 最大客户端数
borrow-timeout-unit-ms: 3000 # 租借超时时间,单位毫秒