Commit 1668d8a3 authored by pengys5

1. Add comments to the collector config

2. Index creator supports options: overwrite, ignore, off
Parent b95b568a
......@@ -5,6 +5,10 @@ package com.a.eye.skywalking.collector.worker.config;
*/
public class EsConfig {
public static final String Create_Overwrite = "overwrite";
public static final String Create_Ignore = "ignore";
public static final String Create_Off = "off";
public static class Es {
public static class Cluster {
public static String name = "";
......@@ -16,12 +20,14 @@ public class EsConfig {
}
public static class Index {
public static String create = "";
public static class Shards {
public static String number;
public static String number = "";
}
public static class Replicas{
public static String number;
public static class Replicas {
public static String number = "";
}
}
}
......
......@@ -25,6 +25,9 @@ public class EsConfigProvider implements ConfigProvider {
EsConfig.Es.Cluster.Transport.sniffer = System.getProperty("es.cluster.transport.sniffer");
}
if (!StringUtil.isEmpty(System.getProperty("es.index.create"))) {
EsConfig.Es.Index.create = System.getProperty("es.index.create");
}
if (!StringUtil.isEmpty(System.getProperty("es.index.shards.number"))) {
EsConfig.Es.Index.Shards.number = System.getProperty("es.index.shards.number");
}
......
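For context, a minimal sketch (not part of this commit) of how the new "es.index.create" setting reaches the config: EsConfigProvider copies the JVM system property into EsConfig.Es.Index.create, so setting the property before the providers run, or passing -Des.index.create=overwrite on the collector JVM command line, selects the index-creation mode. The class name below is illustrative.

public class OverrideIndexCreateModeSketch {
    public static void main(String[] args) {
        // Equivalent to starting the collector JVM with -Des.index.create=overwrite;
        // EsConfigProvider then copies this value into EsConfig.Es.Index.create.
        System.setProperty("es.index.create", "overwrite");
    }
}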
......@@ -5,6 +5,7 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.common.settings.Settings;
......@@ -86,5 +87,11 @@ public abstract class AbstractIndex {
return false;
}
final boolean isExists() {
IndicesAdminClient client = EsClient.INSTANCE.getClient().admin().indices();
IndicesExistsResponse response = client.prepareExists(index()).get();
return response.isExists();
}
public abstract String index();
}
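For reference, the exists check added above boils down to the elasticsearch indices-admin call chain prepareExists(...).get().isExists(). A small self-contained sketch of the same check, with the Client passed in as a parameter rather than taken from EsClient (the class and method names below are illustrative, not part of this commit):

import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.IndicesAdminClient;

public class IndexExistsSketch {
    // Same call chain as AbstractIndex.isExists(), parameterized by client and index name.
    static boolean indexExists(Client client, String indexName) {
        IndicesAdminClient indices = client.admin().indices();
        IndicesExistsResponse response = indices.prepareExists(indexName).get();
        return response.isExists();
    }
}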
package com.a.eye.skywalking.collector.worker.storage;
import com.a.eye.skywalking.collector.worker.config.EsConfig;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
......@@ -16,10 +17,19 @@ public enum IndexCreator {
private Logger logger = LogManager.getFormatterLogger(IndexCreator.class);
public void create() {
Set<AbstractIndex> indexSet = loadIndex();
for (AbstractIndex index : indexSet) {
index.deleteIndex();
index.createIndex();
if (!EsConfig.Create_Off.equals(EsConfig.Es.Index.create)) {
Set<AbstractIndex> indexSet = loadIndex();
for (AbstractIndex index : indexSet) {
boolean isExists = index.isExists();
if (isExists) {
if (EsConfig.Create_Overwrite.equals(EsConfig.Es.Index.create)) {
index.deleteIndex();
index.createIndex();
}
} else {
index.createIndex();
}
}
}
}
......
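The new create() body implements a small decision table: off never touches the indexes, overwrite deletes an existing index and recreates it, and ignore creates an index only when it is missing. A standalone sketch of that table (the class, method, and returned labels are illustrative, not part of this commit):

public class IndexCreateDecisionSketch {
    // Mirrors the branching in IndexCreator.create(); the returned strings are
    // just labels for the action taken, not real API calls.
    static String decide(String createMode, boolean indexExists) {
        if ("off".equals(createMode)) {
            return "skip";                                  // index creation disabled entirely
        }
        if (indexExists) {
            // Only "overwrite" replaces an existing index; "ignore" leaves it alone.
            return "overwrite".equals(createMode) ? "delete-and-create" : "skip";
        }
        return "create";                                    // a missing index is always created
    }

    public static void main(String[] args) {
        System.out.println(decide("overwrite", true));      // delete-and-create
        System.out.println(decide("ignore", true));         // skip
        System.out.println(decide("ignore", false));        // create
        System.out.println(decide("off", true));            // skip
    }
}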
# The address the remote servers should connect to; it can be either a hostname or an IP address.
# Suggestion: set the real ip address.
cluster.current.hostname=127.0.0.1
cluster.current.port=1000
# The roles of this member. List of strings, e.g. roles = A, B
# In the future, the roles will be part of the membership information and can be used by
# routers or other services to distribute work to certain member types,
# e.g. front-end and back-end nodes.
# In this version, all members have the same roles, and each of them listens to the others' status;
# if a member becomes unreachable because of network trouble, a member JVM crash, or any other reason,
# the routers will stop sending messages to that unreachable member.
cluster.current.roles=WorkersListener
# Initial contact points of the cluster, e.g. seed_nodes = 127.0.0.1:1000, 127.0.0.1:1001.
# The nodes to join automatically at startup.
# When translated into the akka configuration, it will be changed to something
# like: ["akka.tcp://system@127.0.0.1:1000", "akka.tcp://system@127.0.0.1:1001"].
# This is an akka configuration; see: http://doc.akka.io/docs/akka/2.4/general/configuration.html
cluster.seed_nodes=127.0.0.1:1000,127.0.0.1:1001
# The elasticsearch cluster name; see cluster.name in elasticsearch's config/elasticsearch.yml
es.cluster.name=CollectorDBCluster
es.cluster.nodes=127.0.0.1:9300
es.cluster.transport.sniffer=true
# The elasticsearch nodes of the cluster, comma separated, e.g. nodes=ip:port, ip:port
es.cluster.nodes=127.0.0.1:9300
# Automatically create the elasticsearch index
# Options: overwrite, ignore, off
# overwrite: delete the existing index, then create it again
# ignore: only create the index when it does not exist yet
# off: never create or delete the index automatically
es.index.create=ignore
es.index.shards.number=2
es.index.replicas.number=0
# You can configure a host either as a host name or IP address to identify a specific network
# interface on which to listen.
# Used by the web UI to fetch view data and by the agent to post trace segments.
http.hostname=127.0.0.1
# The TCP/IP port on which the connector listens for connections.
http.port=7001
# The contextPath is a URL prefix that identifies which context an HTTP request is destined for.
http.contextPath=/
# The analysis worker's max cache size. When the worker's cached data reaches this size,
# the worker will send all cached data to the next worker and clear the cache.
cache.analysis.size=1024
# The persistence worker's max cache size; behaves the same way as "cache.analysis.size".
cache.persistence.size=1024
WorkerNum.Node.NodeCompAgg.Value=10
......