Unverified commit 0d471456, authored by 静夜思朝颜, committed by GitHub

Merge branch 'master' into master

@@ -39,17 +39,19 @@ jobs:
./mvnw --batch-mode -Dcheckstyle.skip -Drat.skip -T2 -Dmaven.compile.fork -Dmaven.compiler.maxmem=3072 -DskipTests clean install
./mvnw --batch-mode -f test/e2e/pom.xml -pl e2e-base clean install
- name: Single Node Tests(JDK8)
run: export E2E_VERSION=jdk8-1.3 && bash -x test/e2e/run.sh e2e-single-service
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-single-service
- name: Single Node Tests(InfluxDB/JDK8)
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-influxdb
- name: Single Node Tests(MySQL/JDK8)
run: export E2E_VERSION=jdk8-1.3 && bash -x test/e2e/run.sh e2e-mysql
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-mysql
- name: Single Node Tests(JDK9)
run: export E2E_VERSION=jdk9-1.3 && bash -x test/e2e/run.sh e2e-single-service
run: export E2E_VERSION=jdk9-1.5 && bash -x test/e2e/run.sh e2e-single-service
- name: Single Node Tests(JDK11)
run: export E2E_VERSION=jdk11-1.3 && bash -x test/e2e/run.sh e2e-single-service
run: export E2E_VERSION=jdk11-1.5 && bash -x test/e2e/run.sh e2e-single-service
- name: Single Node Tests(JDK12)
run: export E2E_VERSION=jdk12-1.3 && bash -x test/e2e/run.sh e2e-single-service
run: export E2E_VERSION=jdk12-1.5 && bash -x test/e2e/run.sh e2e-single-service
- name: Agent Reboot Tests(JDK8)
run: export E2E_VERSION=jdk8-1.3 && bash -x test/e2e/run.sh e2e-agent-reboot
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-agent-reboot
Cluster:
runs-on: ubuntu-latest
@@ -71,15 +73,19 @@ jobs:
./mvnw --batch-mode -Dcheckstyle.skip -Drat.skip -T2 -Dmaven.compile.fork -Dmaven.compiler.maxmem=3072 -DskipTests clean install
./mvnw --batch-mode -f test/e2e/pom.xml -pl e2e-base clean install
- name: Cluster Tests (ES6/ZK/JDK8)
run: export E2E_VERSION=jdk8-1.3 && bash -x test/e2e/run.sh e2e-cluster/e2e-cluster-test-runner
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-cluster/e2e-cluster-test-runner --storage=elasticsearch
- name: Cluster Tests (InfluxDB/ZK/JDK8)
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-cluster/e2e-cluster-test-runner --storage=influxdb
- name: Cluster With Gateway Tests (ES6/ZK/JDK8)
run: export E2E_VERSION=jdk8-1.3 && bash -x test/e2e/run.sh e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-cluster-with-gateway/e2e-cluster-with-gateway-test-runner
- name: Cluster Tests (ES7/ZK/JDK8)
run: export E2E_VERSION=jdk8-1.3 DIST_PACKAGE=apache-skywalking-apm-bin-es7.tar.gz ES_VERSION=7.4.2 && bash -x test/e2e/run.sh e2e-cluster/e2e-cluster-test-runner
run: export E2E_VERSION=jdk8-1.5 DIST_PACKAGE=apache-skywalking-apm-bin-es7.tar.gz ES_VERSION=7.4.2 && bash -x test/e2e/run.sh e2e-cluster/e2e-cluster-test-runner --storage=elasticsearch
- name: TTL ES Tests(JDK8)
run: export E2E_VERSION=jdk8-1.3 && bash -x test/e2e/run.sh e2e-ttl/e2e-ttl-es
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-ttl/e2e-ttl-es
- name: TTL ES7 Tests(JDK8)
run: export E2E_VERSION=jdk8-1.3 DIST_PACKAGE=apache-skywalking-apm-bin-es7.tar.gz ES_VERSION=7.4.2 && bash -x test/e2e/run.sh e2e-ttl/e2e-ttl-es
run: export E2E_VERSION=jdk8-1.5 DIST_PACKAGE=apache-skywalking-apm-bin-es7.tar.gz ES_VERSION=7.4.2 && bash -x test/e2e/run.sh e2e-ttl/e2e-ttl-es
- name: TTL InfluxDB Tests(JDK8)
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-ttl/e2e-ttl-influxdb
Compatibilities:
runs-on: ubuntu-latest
@@ -95,7 +101,7 @@ jobs:
./mvnw --batch-mode -Dcheckstyle.skip -Drat.skip -T2 -Dmaven.compile.fork -Dmaven.compiler.maxmem=3072 -DskipTests clean install
./mvnw --batch-mode -f test/e2e/pom.xml -pl e2e-base clean install
- name: 6.x Agents & 7.x Backend
run: export E2E_VERSION=jdk8-1.3 && bash -x test/e2e/run.sh e2e-6.x-agent-7.x-oap-compatibility
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-6.x-agent-7.x-oap-compatibility
Profile:
runs-on: ubuntu-latest
@@ -111,10 +117,12 @@ jobs:
./mvnw --batch-mode -Dcheckstyle.skip -Drat.skip -T2 -Dmaven.compile.fork -Dmaven.compiler.maxmem=3072 -DskipTests clean install
./mvnw --batch-mode -f test/e2e/pom.xml -pl e2e-base clean install
- name: Profile Tests H2(JDK8)
run: export E2E_VERSION=jdk8-1.3 && bash -x test/e2e/run.sh e2e-profile/e2e-profile-test-runner --storage=h2
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-profile/e2e-profile-test-runner --storage=h2
- name: Profile Tests MySQL(JDK8)
run: export E2E_VERSION=jdk8-1.3 && bash -x test/e2e/run.sh e2e-profile/e2e-profile-test-runner --storage=mysql
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-profile/e2e-profile-test-runner --storage=mysql
- name: Profile Tests ES6(JDK8)
run: export E2E_VERSION=jdk8-1.3 && bash -x test/e2e/run.sh e2e-profile/e2e-profile-test-runner --storage=elasticsearch
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-profile/e2e-profile-test-runner --storage=elasticsearch
- name: Profile Tests InfluxDB(JDK8)
run: export E2E_VERSION=jdk8-1.5 && bash -x test/e2e/run.sh e2e-profile/e2e-profile-test-runner --storage=influxdb
- name: Profile Tests ES7(JDK8)
run: export E2E_VERSION=jdk8-1.3 DIST_PACKAGE=apache-skywalking-apm-bin-es7.tar.gz ES_VERSION=7.4.2 && bash -x test/e2e/run.sh e2e-profile/e2e-profile-test-runner --storage=elasticsearch
run: export E2E_VERSION=jdk8-1.5 DIST_PACKAGE=apache-skywalking-apm-bin-es7.tar.gz ES_VERSION=7.4.2 && bash -x test/e2e/run.sh e2e-profile/e2e-profile-test-runner --storage=elasticsearch
@@ -139,6 +139,31 @@ storage:
# dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
# dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
# metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
# influx:
# # Metadata storage provider configuration
#    metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are two options for the Metabase provider: H2 or MySQL.
# h2Props:
# dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
# dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
# dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
# dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
# mysqlProps:
# jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
# dataSource.user: ${SW_STORAGE_METABASE_USER:root}
# dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
# dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
# dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
# dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
# dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
# metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
# # InfluxDB configuration
# url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
# user: ${SW_STORAGE_INFLUXDB_USER:root}
# password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
# database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
# actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
# duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
#    fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of task logs to fetch in a request
receiver-sharing-server:
default:
receiver-register:
......
@@ -323,7 +323,10 @@ The text of each license is the standard Apache 2.0 license.
etcd4j 2.17.0: https://github.com/jurmous/etcd4j Apache 2.0
javaassist 3.25.0-GA: https://github.com/jboss-javassist/javassist Apache 2.0
jackson-module-afterburner 2.9.5: https://github.com/FasterXML/jackson-modules-base, Apache 2.0
perfmark-api 0.19.0: https://github.com/perfmark/perfmark
perfmark-api 0.19.0: https://github.com/perfmark/perfmark, Apache 2.0
moshi 1.5.0: https://github.com/square/moshi, Apache 2.0
logging-interceptor 3.13.1: https://github.com/square/okhttp/tree/master/okhttp-logging-interceptor, Apache 2.0
msgpack-core 0.8.16: https://github.com/msgpack/msgpack-java, Apache 2.0
========================================================================
MIT licenses
@@ -341,6 +344,7 @@ The text of each license is also included at licenses/LICENSE-[project].txt.
bcprov-jdk15on 1.55: http://www.bouncycastle.org/licence.html , MIT
minimal-json 0.9.5: https://github.com/ralfstx/minimal-json, MIT
checker-qual 2.8.1: https://github.com/typetools/checker-framework, MIT
influxdb-java 2.15: https://github.com/influxdata/influxdb-java, MIT
========================================================================
MIT licenses
......
@@ -905,4 +905,13 @@ be obtained at:
* HOMEPAGE:
* https://github.com/catapult-project/catapult
------
\ No newline at end of file
------
===========================================================================
MessagePack Notice
===========================================================================
This product includes the software developed by third-party:
* Google Guava https://code.google.com/p/guava-libraries/ (APL2)
* sbt-extras: https://github.com/paulp/sbt-extras (BSD) (LICENSE.sbt-extras.txt)
\ No newline at end of file
The MIT License (MIT)
Copyright (c) 2014-2017 Stefan Majer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
\ No newline at end of file
@@ -149,6 +149,37 @@ storage:
EOT
}
generateStorageInfluxDB() {
cat <<EOT >> ${var_application_file}
storage:
influx:
# Metadata storage provider configuration
metabaseType: \${SW_STORAGE_METABASE_TYPE:H2} # There are two options for the Metabase provider: H2 or MySQL.
h2Props:
dataSourceClassName: \${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
dataSource.url: \${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
dataSource.user: \${SW_STORAGE_METABASE_USER:sa}
dataSource.password: \${SW_STORAGE_METABASE_PASSWORD:}
mysqlProps:
jdbcUrl: \${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
dataSource.user: \${SW_STORAGE_METABASE_USER:root}
dataSource.password: \${SW_STORAGE_METABASE_PASSWORD:root@1234}
dataSource.cachePrepStmts: \${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
dataSource.prepStmtCacheSize: \${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
dataSource.prepStmtCacheSqlLimit: \${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
dataSource.useServerPrepStmts: \${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
metadataQueryMaxSize: \${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
# InfluxDB configuration
url: \${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
user: \${SW_STORAGE_INFLUXDB_USER:root}
password: \${SW_STORAGE_INFLUXDB_PASSWORD:}
database: \${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
actions: \${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
duration: \${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
fetchTaskLogMaxSize: \${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of task logs to fetch in a request
EOT
}
generateConfigurationNone() {
cat <<EOT >> ${var_application_file}
configuration:
......
@@ -150,6 +150,37 @@ storage:
EOT
}
generateStorageInfluxDB() {
cat <<EOT >> ${var_application_file}
storage:
influx:
# Metadata storage provider configuration
metabaseType: \${SW_STORAGE_METABASE_TYPE:H2} # There are two options for the Metabase provider: H2 or MySQL.
h2Props:
dataSourceClassName: \${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
dataSource.url: \${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
dataSource.user: \${SW_STORAGE_METABASE_USER:sa}
dataSource.password: \${SW_STORAGE_METABASE_PASSWORD:}
mysqlProps:
jdbcUrl: \${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
dataSource.user: \${SW_STORAGE_METABASE_USER:root}
dataSource.password: \${SW_STORAGE_METABASE_PASSWORD:root@1234}
dataSource.cachePrepStmts: \${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
dataSource.prepStmtCacheSize: \${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
dataSource.prepStmtCacheSqlLimit: \${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
dataSource.useServerPrepStmts: \${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
metadataQueryMaxSize: \${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
# InfluxDB configuration
url: \${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
user: \${SW_STORAGE_INFLUXDB_USER:root}
password: \${SW_STORAGE_INFLUXDB_PASSWORD:}
database: \${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
actions: \${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
duration: \${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
fetchTaskLogMaxSize: \${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of task logs to fetch in a request
EOT
}
generateConfigurationNone() {
cat <<EOT >> ${var_application_file}
configuration:
......
@@ -7,6 +7,7 @@ Native supported storage
- ElasticSearch 6, 7
- MySQL
- TiDB
- InfluxDB
Redistribution versions with supported storage:
- ElasticSearch 5
@@ -249,6 +250,39 @@ storage:
All connection-related settings, including the URL, username, and password, are in `application.yml`.
These settings can follow the *MySQL* configuration above.
## InfluxDB
InfluxDB has been supported as a storage option since SkyWalking 7.0. It depends on the `H2/MySQL` storage plugin to store `metadata` such as `Inventory` and `ProfileTask`, so when we set `InfluxDB` as the storage provider, we need to configure the properties of both InfluxDB and the Metabase.
```yaml
storage:
influx:
# Metadata storage provider configuration
metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are two options for the Metabase provider: H2 or MySQL.
h2Props:
dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
mysqlProps:
jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
dataSource.user: ${SW_STORAGE_METABASE_USER:root}
dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
# InfluxDB configuration
url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
user: ${SW_STORAGE_INFLUXDB_USER:root}
password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of task logs to fetch in a request
```
All connection-related settings, including the URL, username, and password, are in `application.yml`. The metadata storage provider settings can follow the **H2/MySQL** configuration above.
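As a quick way to verify these settings, the following is a minimal sketch (not part of SkyWalking; the class and measurement names are invented for illustration) that talks to InfluxDB with the same `influxdb-java` client the storage plugin uses. The URL, credentials, and database name mirror the defaults above.

```java
import java.util.concurrent.TimeUnit;

import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.Point;
import org.influxdb.dto.Query;

public class InfluxConnectivityCheck {
    public static void main(String[] args) {
        // Connection defaults taken from the configuration above.
        InfluxDB influx = InfluxDBFactory.connect("http://localhost:8086", "root", "");
        influx.query(new Query("CREATE DATABASE skywalking")); // idempotent in InfluxDB 1.x
        influx.setDatabase("skywalking");

        // Write one sample point, then read it back.
        influx.write(Point.measurement("connectivity_check")
                          .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
                          .addField("ok", 1L)
                          .build());
        System.out.println(influx.query(new Query("SELECT * FROM connectivity_check", "skywalking")));
        influx.close();
    }
}
```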
## ElasticSearch 5
ElasticSearch 5 is incompatible with the ElasticSearch 6 Java client jar, so it cannot be included in the native distribution.
[OpenSkyWalking/SkyWalking-With-Es5x-Storage](https://github.com/OpenSkywalking/SkyWalking-With-Es5x-Storage) repo includes the distribution version.
......
@@ -130,6 +130,11 @@
<artifactId>storage-jdbc-hikaricp-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.skywalking</groupId>
<artifactId>storage-influxdb-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<!-- storage module -->
<!-- queryBuild module -->
......
@@ -139,6 +139,31 @@ storage:
# dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
# dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
# metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
# influx:
# # Metadata storage provider configuration
#    metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # There are two options for the Metabase provider: H2 or MySQL.
# h2Props:
# dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
# dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
# dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
# dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
# mysqlProps:
# jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
# dataSource.user: ${SW_STORAGE_METABASE_USER:root}
# dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
# dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
# dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
# dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
# dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
# metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
# # InfluxDB configuration
# url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
# user: ${SW_STORAGE_INFLUXDB_USER:root}
# password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
# database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
# actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the number of actions to collect
# duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the time to wait at most (milliseconds)
#    fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of task logs to fetch in a request
receiver-sharing-server:
default:
authentication: ${SW_AUTHENTICATION:""}
......
@@ -32,10 +32,119 @@ public class TimeBucket {
return getTimeBucket(time, Downsampling.Second);
}
/**
* Convert the timestamp into a time bucket in Minute Unit.
*
* @param time Timestamp
* @return time in minute format.
*/
public static long getMinuteTimeBucket(long time) {
return getTimeBucket(time, Downsampling.Minute);
}
/**
* Convert TimeBucket to Timestamp in millisecond.
*
* @param timeBucket long
* @return timestamp in millisecond unit
*/
public static long getTimestamp(long timeBucket) {
if (isSecondBucket(timeBucket)) {
return getTimestamp(timeBucket, Downsampling.Second);
} else if (isMinuteBucket(timeBucket)) {
return getTimestamp(timeBucket, Downsampling.Minute);
} else if (isHourBucket(timeBucket)) {
return getTimestamp(timeBucket, Downsampling.Hour);
} else if (isDayBucket(timeBucket)) {
return getTimestamp(timeBucket, Downsampling.Day);
} else if (isMonthBucket(timeBucket)) {
return getTimestamp(timeBucket, Downsampling.Month);
} else {
throw new UnexpectedException("Unknown downsampling value.");
}
}
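    // Round-trip example (assuming the default timezone is UTC): the minute bucket
    // 202001012030L ("yyyyMMddHHmm") converts to the epoch millisecond 1577910600000L via
    // getTimestamp(202001012030L), and getTimeBucket(1577910600000L, Downsampling.Minute)
    // yields 202001012030L again.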
/**
* The format of timeBucket in second Unit is "yyyyMMddHHmmss", which means the TimeBucket must be between
* 10000000000000 and 99999999999999.
*/
public static boolean isSecondBucket(long timeBucket) {
return timeBucket < 99999999999999L && timeBucket > 10000000000000L;
}
/**
* The format of timeBucket in minute Unit is "yyyyMMddHHmm", which means the TimeBucket must be between
* 100000000000 and 999999999999.
*/
public static boolean isMinuteBucket(long timeBucket) {
return timeBucket < 999999999999L && timeBucket > 100000000000L;
}
/**
* The format of timeBucket in hour Unit is "yyyyMMddHH", which means the TimeBucket must be between 1000000000 and
* 9999999999.
*/
public static boolean isHourBucket(long timeBucket) {
return timeBucket < 9999999999L && timeBucket > 1000000000L;
}
/**
* The format of timeBucket in day Unit is "yyyyMMdd", which means the TimeBucket must be between 10000000 and
* 99999999.
*/
public static boolean isDayBucket(long timeBucket) {
return timeBucket < 99999999L && timeBucket > 10000000L;
}
/**
* The format of timeBucket in month Unit is "yyyyMM", which means the TimeBucket must be between 100000 and
* 999999.
*/
public static boolean isMonthBucket(long timeBucket) {
return timeBucket < 999999L && timeBucket > 100000L;
}
/**
* Convert TimeBucket to Timestamp in millisecond.
*
* @param timeBucket long
* @param downsampling Downsampling
* @return timestamp in millisecond unit
*/
public static long getTimestamp(long timeBucket, Downsampling downsampling) {
Calendar calendar = Calendar.getInstance();
calendar.setTimeInMillis(0);
switch (downsampling) {
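            // Intentional fall-through: each finer-grained case sets its calendar field and strips it
            // off the bucket, then continues on to the coarser units.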
case Second:
calendar.set(Calendar.SECOND, (int) (timeBucket % 100));
timeBucket /= 100;
case Minute:
calendar.set(Calendar.MINUTE, (int) (timeBucket % 100));
timeBucket /= 100;
case Hour:
calendar.set(Calendar.HOUR_OF_DAY, (int) (timeBucket % 100));
timeBucket /= 100;
case Day:
calendar.set(Calendar.DAY_OF_MONTH, (int) (timeBucket % 100));
timeBucket /= 100;
case Month:
calendar.set(Calendar.MONTH, (int) (timeBucket % 100) - 1);
calendar.set(Calendar.YEAR, (int) (timeBucket / 100));
break;
default:
throw new UnexpectedException("Unknown downsampling value.");
}
return calendar.getTimeInMillis();
}
/**
* Convert the timestamp into a time bucket in the given Downsampling unit.
*
* @param time Timestamp
* @param downsampling Downsampling
* @return time in downsampling format.
*/
public static long getTimeBucket(long time, Downsampling downsampling) {
Calendar calendar = Calendar.getInstance();
calendar.setTimeInMillis(time);
......
@@ -34,6 +34,7 @@
<module>storage-elasticsearch7-plugin</module>
<module>storage-zipkin-plugin</module>
<module>storage-jaeger-plugin</module>
<module>storage-influxdb-plugin</module>
</modules>
</project>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
~
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>server-storage-plugin</artifactId>
<groupId>org.apache.skywalking</groupId>
<version>7.0.0-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>storage-influxdb-plugin</artifactId>
<packaging>jar</packaging>
<properties>
<influxdb-java.version>2.15</influxdb-java.version>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.skywalking</groupId>
<artifactId>storage-jdbc-hikaricp-plugin</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.influxdb</groupId>
<artifactId>influxdb-java</artifactId>
<version>${influxdb-java.version}</version>
</dependency>
</dependencies>
</project>
\ No newline at end of file
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import lombok.extern.slf4j.Slf4j;
import okhttp3.OkHttpClient;
import org.apache.skywalking.oap.server.core.analysis.Downsampling;
import org.apache.skywalking.oap.server.core.analysis.TimeBucket;
import org.apache.skywalking.oap.server.library.client.Client;
import org.apache.skywalking.oap.server.library.util.CollectionUtils;
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point;
import org.influxdb.dto.Query;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.time.TimeInterval;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.ti;
/**
* InfluxDB connection maintainer, providing the basic data write/query APIs.
*/
@Slf4j
public class InfluxClient implements Client {
private InfluxStorageConfig config;
private InfluxDB influx;
/**
* A constant, the name of the time field in the time-series database.
*/
public static final String TIME = "time";
/**
* A constant, the name of the time_bucket tag.
*/
public static final String TAG_TIME_BUCKET = "_time_bucket";
private final String database;
public InfluxClient(InfluxStorageConfig config) {
this.config = config;
this.database = config.getDatabase();
}
public final String getDatabase() {
return database;
}
@Override
public void connect() {
influx = InfluxDBFactory.connect(config.getUrl(), config.getUser(), config.getPassword(),
new OkHttpClient.Builder().readTimeout(3, TimeUnit.MINUTES)
.writeTimeout(3, TimeUnit.MINUTES),
InfluxDB.ResponseFormat.MSGPACK
);
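        // CREATE DATABASE is idempotent in InfluxDB 1.x, so this is safe when the database already exists.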
influx.query(new Query("CREATE DATABASE " + database));
influx.enableBatch(config.getActions(), config.getDuration(), TimeUnit.MILLISECONDS);
influx.setDatabase(database);
}
/**
* Get the InfluxDB connection.
*
* @return InfluxDB's connection
*/
private InfluxDB getInflux() {
return influx;
}
/**
* Execute a query against InfluxDB and return a set of {@link QueryResult.Result}s. InfluxDB supports
* combining multiple statements into one query, so multiple results may be returned.
*
* @throws IOException if there is an error on the InfluxDB server or communication error.
*/
public List<QueryResult.Result> query(Query query) throws IOException {
if (log.isDebugEnabled()) {
log.debug("SQL Statement: {}", query.getCommand());
}
try {
QueryResult result = getInflux().query(query);
if (result.hasError()) {
throw new IOException(result.getError());
}
return result.getResults();
} catch (Exception e) {
throw new IOException(e.getMessage() + System.lineSeparator() + "SQL Statement: " + query.getCommand(), e);
}
}
/**
* Execute a query against InfluxDB with a single statement.
*
* @throws IOException if there is an error on the InfluxDB server or communication error
*/
public List<QueryResult.Series> queryForSeries(Query query) throws IOException {
List<QueryResult.Result> results = query(query);
if (CollectionUtils.isEmpty(results)) {
return null;
}
return results.get(0).getSeries();
}
/**
* Execute a query against InfluxDB with a single statement, returning only the first {@link QueryResult.Series}.
*
* @throws IOException if there is an error on the InfluxDB server or communication error
*/
public QueryResult.Series queryForSingleSeries(Query query) throws IOException {
List<QueryResult.Series> series = queryForSeries(query);
if (CollectionUtils.isEmpty(series)) {
return null;
}
return series.get(0);
}
/**
* Data management: drop a time-series by the specified measurement and time bucket. If no exception is
* thrown, the execution succeeded. Note that DROP SERIES does not support dropping series by time range.
*
* @throws IOException if there is an error on the InfluxDB server or communication error
*/
public void dropSeries(String measurement, long timeBucket) throws IOException {
Query query = new Query("DROP SERIES FROM " + measurement + " WHERE time_bucket='" + timeBucket + "'");
QueryResult result = getInflux().query(query);
if (result.hasError()) {
throw new IOException("Statement: " + query.getCommand() + ", ErrorMsg: " + result.getError());
}
}
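    /**
     * Delete all points of the given measurement whose timestamp is earlier than the given epoch millisecond.
     *
     * @throws IOException if there is an error on the InfluxDB server or communication error
     */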
public void deleteByQuery(String measurement, long timestamp) throws IOException {
this.query(new Query("delete from " + measurement + " where time < " + timestamp + "ms"));
}
/**
* Write a {@link Point} into InfluxDB. Note that the {@link Point} is written into the InfluxDB client's
* buffer and waits there until the buffer is flushed.
*/
public void write(Point point) {
getInflux().write(point);
}
/**
* A batch write operation. The {@link Point}s are flushed directly.
*/
public void write(BatchPoints points) {
getInflux().write(points);
}
@Override
public void shutdown() throws IOException {
influx.close();
}
/**
* Convert to InfluxDB {@link TimeInterval}.
*/
public static TimeInterval timeInterval(long timeBucket, Downsampling downsampling) {
return ti(TimeBucket.getTimestamp(timeBucket, downsampling), "ms");
}
/**
* Convert to InfluxDB {@link TimeInterval}.
*/
public static TimeInterval timeInterval(long timeBucket) {
return ti(TimeBucket.getTimestamp(timeBucket), "ms");
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb;
public interface InfluxModelConstants {
/**
* Override the column name because 'duration' is a reserved keyword in InfluxQL.
*/
String DURATION = "dur";
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb;
import java.util.Properties;
import lombok.Getter;
import lombok.Setter;
import org.apache.skywalking.oap.server.library.module.ModuleConfig;
@Setter
@Getter
public class InfluxStorageConfig extends ModuleConfig {
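    // Bound from the `storage.influx` section of application.yml: the Metabase (H2/MySQL)
    // metadata settings first, then the InfluxDB connection settings.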
private String metabaseType;
private Properties h2Props;
private Properties mysqlProps;
private int metadataQueryMaxSize = 5000;
private String url;
private String user;
private String password;
private String database;
private int actions;
private int duration;
private int fetchTaskLogMaxSize = 5000;
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb;
import java.util.Properties;
import lombok.extern.slf4j.Slf4j;
import org.apache.skywalking.oap.server.core.CoreModule;
import org.apache.skywalking.oap.server.core.storage.IBatchDAO;
import org.apache.skywalking.oap.server.core.storage.IHistoryDeleteDAO;
import org.apache.skywalking.oap.server.core.storage.IRegisterLockDAO;
import org.apache.skywalking.oap.server.core.storage.StorageDAO;
import org.apache.skywalking.oap.server.core.storage.StorageException;
import org.apache.skywalking.oap.server.core.storage.StorageModule;
import org.apache.skywalking.oap.server.core.storage.cache.IEndpointInventoryCacheDAO;
import org.apache.skywalking.oap.server.core.storage.cache.INetworkAddressInventoryCacheDAO;
import org.apache.skywalking.oap.server.core.storage.cache.IServiceInstanceInventoryCacheDAO;
import org.apache.skywalking.oap.server.core.storage.cache.IServiceInventoryCacheDAO;
import org.apache.skywalking.oap.server.core.storage.model.ModelInstaller;
import org.apache.skywalking.oap.server.core.storage.profile.IProfileTaskLogQueryDAO;
import org.apache.skywalking.oap.server.core.storage.profile.IProfileTaskQueryDAO;
import org.apache.skywalking.oap.server.core.storage.profile.IProfileThreadSnapshotQueryDAO;
import org.apache.skywalking.oap.server.core.storage.query.IAggregationQueryDAO;
import org.apache.skywalking.oap.server.core.storage.query.IAlarmQueryDAO;
import org.apache.skywalking.oap.server.core.storage.query.ILogQueryDAO;
import org.apache.skywalking.oap.server.core.storage.query.IMetadataQueryDAO;
import org.apache.skywalking.oap.server.core.storage.query.IMetricsQueryDAO;
import org.apache.skywalking.oap.server.core.storage.query.ITopNRecordsQueryDAO;
import org.apache.skywalking.oap.server.core.storage.query.ITopologyQueryDAO;
import org.apache.skywalking.oap.server.core.storage.query.ITraceQueryDAO;
import org.apache.skywalking.oap.server.core.storage.ttl.GeneralStorageTTL;
import org.apache.skywalking.oap.server.library.client.jdbc.hikaricp.JDBCHikariCPClient;
import org.apache.skywalking.oap.server.library.module.ModuleConfig;
import org.apache.skywalking.oap.server.library.module.ModuleDefine;
import org.apache.skywalking.oap.server.library.module.ModuleProvider;
import org.apache.skywalking.oap.server.library.module.ModuleStartException;
import org.apache.skywalking.oap.server.library.module.ServiceNotProvidedException;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.base.BatchDAO;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.base.HistoryDeleteDAO;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.base.InfluxStorageDAO;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.installer.H2Installer;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.installer.MySQLInstaller;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.query.AggregationQuery;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.query.AlarmQuery;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.query.LogQuery;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.query.MetricsQuery;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.query.ProfileTaskLogQuery;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.query.ProfileTaskQuery;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.query.ProfileThreadSnapshotQuery;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.query.TopNRecordsQuery;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.query.TopologyQuery;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.query.TraceQuery;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.h2.dao.H2EndpointInventoryCacheDAO;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.h2.dao.H2MetadataQueryDAO;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.h2.dao.H2NetworkAddressInventoryCacheDAO;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.h2.dao.H2RegisterLockDAO;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.h2.dao.H2RegisterLockInstaller;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.h2.dao.H2ServiceInstanceInventoryCacheDAO;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.h2.dao.H2ServiceInventoryCacheDAO;
@Slf4j
public class InfluxStorageProvider extends ModuleProvider {
private InfluxStorageConfig config;
private JDBCHikariCPClient client;
private InfluxClient influxClient;
private H2RegisterLockDAO lockDAO;
public InfluxStorageProvider() {
config = new InfluxStorageConfig();
}
@Override
public String name() {
return "influx";
}
@Override
public Class<? extends ModuleDefine> module() {
return StorageModule.class;
}
@Override
public ModuleConfig createConfigBeanIfAbsent() {
return config;
}
@Override
public void prepare() throws ServiceNotProvidedException {
Properties settings;
if ("mysql".equalsIgnoreCase(config.getMetabaseType())) {
settings = config.getMysqlProps();
} else {
settings = config.getH2Props();
}
client = new JDBCHikariCPClient(settings);
influxClient = new InfluxClient(config);
this.registerServiceImplementation(IBatchDAO.class, new BatchDAO(influxClient));
this.registerServiceImplementation(StorageDAO.class, new InfluxStorageDAO(client, influxClient));
this.lockDAO = new H2RegisterLockDAO(client);
        this.registerServiceImplementation(IRegisterLockDAO.class, lockDAO);
this.registerServiceImplementation(IServiceInventoryCacheDAO.class, new H2ServiceInventoryCacheDAO(client));
this.registerServiceImplementation(
IServiceInstanceInventoryCacheDAO.class, new H2ServiceInstanceInventoryCacheDAO(client));
this.registerServiceImplementation(IEndpointInventoryCacheDAO.class, new H2EndpointInventoryCacheDAO(client));
this.registerServiceImplementation(
INetworkAddressInventoryCacheDAO.class, new H2NetworkAddressInventoryCacheDAO(client));
this.registerServiceImplementation(
IMetadataQueryDAO.class, new H2MetadataQueryDAO(client, config.getMetadataQueryMaxSize()));
this.registerServiceImplementation(ITopologyQueryDAO.class, new TopologyQuery(influxClient));
this.registerServiceImplementation(IMetricsQueryDAO.class, new MetricsQuery(influxClient));
this.registerServiceImplementation(ITraceQueryDAO.class, new TraceQuery(influxClient));
this.registerServiceImplementation(IAggregationQueryDAO.class, new AggregationQuery(influxClient));
this.registerServiceImplementation(IAlarmQueryDAO.class, new AlarmQuery(influxClient));
this.registerServiceImplementation(ITopNRecordsQueryDAO.class, new TopNRecordsQuery(influxClient));
this.registerServiceImplementation(ILogQueryDAO.class, new LogQuery(influxClient));
this.registerServiceImplementation(IProfileTaskQueryDAO.class, new ProfileTaskQuery(influxClient));
this.registerServiceImplementation(
IProfileThreadSnapshotQueryDAO.class, new ProfileThreadSnapshotQuery(influxClient));
this.registerServiceImplementation(
IProfileTaskLogQueryDAO.class, new ProfileTaskLogQuery(influxClient, config.getFetchTaskLogMaxSize()));
this.registerServiceImplementation(
IHistoryDeleteDAO.class, new HistoryDeleteDAO(getManager(), influxClient, new GeneralStorageTTL()));
}
@Override
public void start() throws ServiceNotProvidedException, ModuleStartException {
try {
client.connect();
influxClient.connect();
ModelInstaller installer;
if (config.getMetabaseType().equalsIgnoreCase("h2")) {
installer = new H2Installer(getManager());
} else {
installer = new MySQLInstaller(getManager());
}
installer.install(client);
new H2RegisterLockInstaller().install(client, lockDAO);
} catch (StorageException e) {
throw new ModuleStartException(e.getMessage(), e);
}
}
@Override
public void notifyAfterCompleted() throws ServiceNotProvidedException, ModuleStartException {
}
@Override
public String[] requiredModules() {
return new String[] {CoreModule.NAME};
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.base;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.skywalking.oap.server.core.storage.IBatchDAO;
import org.apache.skywalking.oap.server.library.client.request.InsertRequest;
import org.apache.skywalking.oap.server.library.client.request.PrepareRequest;
import org.apache.skywalking.oap.server.library.util.CollectionUtils;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.influxdb.dto.BatchPoints;
@Slf4j
public class BatchDAO implements IBatchDAO {
private final InfluxClient client;
public BatchDAO(InfluxClient client) {
this.client = client;
}
@Override
public void asynchronous(InsertRequest insertRequest) {
client.write(((InfluxInsertRequest) insertRequest).getPoint());
}
@Override
public void synchronous(List<PrepareRequest> prepareRequests) {
if (CollectionUtils.isEmpty(prepareRequests)) {
return;
}
if (log.isDebugEnabled()) {
log.debug("batch sql statements execute, data size: {}", prepareRequests.size());
}
final BatchPoints.Builder builder = BatchPoints.builder();
prepareRequests.forEach(e -> {
builder.point(((InfluxInsertRequest) e).getPoint());
});
client.write(builder.build());
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.base;
import java.io.IOException;
import lombok.extern.slf4j.Slf4j;
import org.apache.skywalking.oap.server.core.CoreModule;
import org.apache.skywalking.oap.server.core.analysis.TimeBucket;
import org.apache.skywalking.oap.server.core.config.ConfigService;
import org.apache.skywalking.oap.server.core.storage.IHistoryDeleteDAO;
import org.apache.skywalking.oap.server.core.storage.model.Model;
import org.apache.skywalking.oap.server.core.storage.ttl.StorageTTL;
import org.apache.skywalking.oap.server.core.storage.ttl.TTLCalculator;
import org.apache.skywalking.oap.server.library.module.ModuleDefineHolder;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.joda.time.DateTime;
@Slf4j
public class HistoryDeleteDAO implements IHistoryDeleteDAO {
private final ModuleDefineHolder moduleDefineHolder;
private final InfluxClient client;
private final StorageTTL storageTTL;
public HistoryDeleteDAO(ModuleDefineHolder moduleDefineHolder, InfluxClient client, StorageTTL storageTTL) {
this.moduleDefineHolder = moduleDefineHolder;
this.storageTTL = storageTTL;
this.client = client;
}
@Override
public void deleteHistory(Model model, String timeBucketColumnName) throws IOException {
if (log.isDebugEnabled()) {
log.debug("TTL execution log, model: {}", model.getName());
}
try {
ConfigService configService = moduleDefineHolder.find(CoreModule.NAME)
.provider()
.getService(ConfigService.class);
TTLCalculator ttlCalculator;
if (model.isRecord()) {
ttlCalculator = storageTTL.recordCalculator();
} else {
ttlCalculator = storageTTL.metricsCalculator(model.getDownsampling());
}
client.deleteByQuery(
model.getName(),
TimeBucket.getTimestamp(ttlCalculator.timeBefore(DateTime.now(), configService.getDataTTLConfig()) + 1)
);
} catch (Exception e) {
log.error("TTL execution log, model: {}, errMsg: {}", model.getName(), e.getMessage());
}
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.base;
import com.google.common.collect.Maps;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.skywalking.oap.server.core.analysis.metrics.Metrics;
import org.apache.skywalking.oap.server.core.storage.StorageBuilder;
import org.apache.skywalking.oap.server.core.storage.StorageData;
import org.apache.skywalking.oap.server.core.storage.model.Model;
import org.apache.skywalking.oap.server.core.storage.model.ModelColumn;
import org.apache.skywalking.oap.server.core.storage.type.StorageDataType;
import org.apache.skywalking.oap.server.library.client.request.InsertRequest;
import org.apache.skywalking.oap.server.library.client.request.UpdateRequest;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.influxdb.dto.Point;
/**
* InfluxDB Point wrapper.
*/
public class InfluxInsertRequest implements InsertRequest, UpdateRequest {
public static final String ID = "id";
private Point.Builder builder;
private Map<String, Object> fields = Maps.newHashMap();
public InfluxInsertRequest(Model model, StorageData storageData, StorageBuilder storageBuilder) {
Map<String, Object> objectMap = storageBuilder.data2Map(storageData);
for (ModelColumn column : model.getColumns()) {
Object value = objectMap.get(column.getColumnName().getName());
if (value instanceof StorageDataType) {
fields.put(
column.getColumnName().getStorageName(),
((StorageDataType) value).toStorageData()
);
} else {
fields.put(column.getColumnName().getStorageName(), value);
}
}
builder = Point.measurement(model.getName())
.addField(ID, storageData.id())
.fields(fields)
.tag(InfluxClient.TAG_TIME_BUCKET, String.valueOf(fields.get(Metrics.TIME_BUCKET)));
}
public InfluxInsertRequest time(long time, TimeUnit unit) {
builder.time(time, unit);
return this;
}
public InfluxInsertRequest addFieldAsTag(String fieldName, String tagName) {
if (fields.containsKey(fieldName)) {
builder.tag(tagName, String.valueOf(fields.get(fieldName)));
}
return this;
}
public Point getPoint() {
return builder.build();
}
}
\ No newline at end of file
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.base;
import org.apache.skywalking.oap.server.core.analysis.config.NoneStream;
import org.apache.skywalking.oap.server.core.analysis.metrics.Metrics;
import org.apache.skywalking.oap.server.core.analysis.record.Record;
import org.apache.skywalking.oap.server.core.register.RegisterSource;
import org.apache.skywalking.oap.server.core.storage.IMetricsDAO;
import org.apache.skywalking.oap.server.core.storage.INoneStreamDAO;
import org.apache.skywalking.oap.server.core.storage.IRecordDAO;
import org.apache.skywalking.oap.server.core.storage.IRegisterDAO;
import org.apache.skywalking.oap.server.core.storage.StorageBuilder;
import org.apache.skywalking.oap.server.core.storage.StorageDAO;
import org.apache.skywalking.oap.server.library.client.jdbc.hikaricp.JDBCHikariCPClient;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.h2.dao.H2RegisterDAO;
public class InfluxStorageDAO implements StorageDAO {
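    // Metrics, records and none-stream data are written to InfluxDB; register/inventory data
    // stays in the JDBC (H2/MySQL) metadata store.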
private final InfluxClient influxClient;
private final JDBCHikariCPClient client;
public InfluxStorageDAO(JDBCHikariCPClient client, InfluxClient influxdbClient) {
this.client = client;
this.influxClient = influxdbClient;
}
@Override
public IMetricsDAO newMetricsDao(StorageBuilder<Metrics> storageBuilder) {
return new MetricsDAO(influxClient, storageBuilder);
}
@Override
public IRegisterDAO newRegisterDao(StorageBuilder<RegisterSource> storageBuilder) {
return new H2RegisterDAO(client, storageBuilder);
}
@Override
public IRecordDAO newRecordDao(StorageBuilder<Record> storageBuilder) {
return new RecordDAO(influxClient, storageBuilder);
}
@Override
public INoneStreamDAO newNoneStreamDao(StorageBuilder<NoneStream> storageBuilder) {
return new NoneStreamDAO(influxClient, storageBuilder);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.base;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.skywalking.oap.server.core.analysis.TimeBucket;
import org.apache.skywalking.oap.server.core.analysis.metrics.Metrics;
import org.apache.skywalking.oap.server.core.storage.IMetricsDAO;
import org.apache.skywalking.oap.server.core.storage.StorageBuilder;
import org.apache.skywalking.oap.server.core.storage.model.Model;
import org.apache.skywalking.oap.server.core.storage.model.ModelColumn;
import org.apache.skywalking.oap.server.core.storage.type.StorageDataType;
import org.apache.skywalking.oap.server.library.client.request.InsertRequest;
import org.apache.skywalking.oap.server.library.client.request.UpdateRequest;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.SelectQueryImpl;
import org.influxdb.querybuilder.WhereQueryImpl;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.contains;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.select;
public class MetricsDAO implements IMetricsDAO {
public static final String TAG_ENTITY_ID = "_entity_id";
private final StorageBuilder<Metrics> storageBuilder;
private final InfluxClient client;
public MetricsDAO(InfluxClient client, StorageBuilder<Metrics> storageBuilder) {
this.client = client;
this.storageBuilder = storageBuilder;
}
@Override
public List<Metrics> multiGet(Model model, List<String> ids) throws IOException {
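        // Select every field (the "*::field" selector excludes tags) of the rows whose "id" matches any of the given ids.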
WhereQueryImpl<SelectQueryImpl> query = select()
.regex("*::field")
.from(client.getDatabase(), model.getName())
.where(contains("id", Joiner.on("|").join(ids)));
QueryResult.Series series = client.queryForSingleSeries(query);
if (series == null) {
return Collections.emptyList();
}
final List<Metrics> metrics = Lists.newArrayList();
List<String> columns = series.getColumns();
Map<String, String> storageAndColumnNames = Maps.newHashMap();
for (ModelColumn column : model.getColumns()) {
storageAndColumnNames.put(column.getColumnName().getStorageName(), column.getColumnName().getName());
}
series.getValues().forEach(values -> {
Map<String, Object> data = Maps.newHashMap();
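            // Start at index 1: column 0 is InfluxDB's built-in "time" column, which maps to no model column.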
for (int i = 1; i < columns.size(); i++) {
Object value = values.get(i);
if (value instanceof StorageDataType) {
value = ((StorageDataType) value).toStorageData();
}
data.put(storageAndColumnNames.get(columns.get(i)), value);
}
metrics.add(storageBuilder.map2Data(data));
});
return metrics;
}
@Override
public InsertRequest prepareBatchInsert(Model model, Metrics metrics) throws IOException {
final long timestamp = TimeBucket.getTimestamp(metrics.getTimeBucket(), model.getDownsampling());
return new InfluxInsertRequest(model, metrics, storageBuilder)
.time(timestamp, TimeUnit.MILLISECONDS)
.addFieldAsTag(Metrics.ENTITY_ID, TAG_ENTITY_ID);
}
@Override
public UpdateRequest prepareBatchUpdate(Model model, Metrics metrics) throws IOException {
return (UpdateRequest) this.prepareBatchInsert(model, metrics);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.base;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.skywalking.apm.commons.datacarrier.common.AtomicRangeInteger;
import org.apache.skywalking.oap.server.core.analysis.TimeBucket;
import org.apache.skywalking.oap.server.core.analysis.config.NoneStream;
import org.apache.skywalking.oap.server.core.profile.ProfileTaskRecord;
import org.apache.skywalking.oap.server.core.storage.INoneStreamDAO;
import org.apache.skywalking.oap.server.core.storage.StorageBuilder;
import org.apache.skywalking.oap.server.core.storage.model.Model;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.influxdb.dto.Point;
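/**
 * None-stream DAO for the InfluxDB storage. InfluxDB treats points that share the same
 * measurement, tag set, and timestamp as one point, so the write timestamp is padded:
 * the millisecond timestamp is multiplied by PADDING_SIZE and an atomically rotating
 * suffix is added, then written in nanoseconds, keeping records within the same
 * millisecond distinct.
 */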
public class NoneStreamDAO implements INoneStreamDAO {
public static final String TAG_SERVICE_ID = "_service_id";
private static final int PADDING_SIZE = 1_000_000;
private static final AtomicRangeInteger SUFFIX = new AtomicRangeInteger(0, PADDING_SIZE);
private InfluxClient client;
private StorageBuilder<NoneStream> storageBuilder;
public NoneStreamDAO(InfluxClient client, StorageBuilder<NoneStream> storageBuilder) {
this.client = client;
this.storageBuilder = storageBuilder;
}
@Override
public void insert(final Model model, final NoneStream noneStream) throws IOException {
final long timestamp = TimeBucket.getTimestamp(
noneStream.getTimeBucket(), model.getDownsampling()) * PADDING_SIZE + SUFFIX.getAndIncrement();
Point point = new InfluxInsertRequest(model, noneStream, storageBuilder)
.time(timestamp, TimeUnit.NANOSECONDS)
.addFieldAsTag(ProfileTaskRecord.SERVICE_ID, TAG_SERVICE_ID).getPoint();
client.write(point);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.base;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.skywalking.apm.commons.datacarrier.common.AtomicRangeInteger;
import org.apache.skywalking.oap.server.core.analysis.TimeBucket;
import org.apache.skywalking.oap.server.core.analysis.manual.segment.SegmentRecord;
import org.apache.skywalking.oap.server.core.analysis.record.Record;
import org.apache.skywalking.oap.server.core.storage.IRecordDAO;
import org.apache.skywalking.oap.server.core.storage.StorageBuilder;
import org.apache.skywalking.oap.server.core.storage.model.Model;
import org.apache.skywalking.oap.server.library.client.request.InsertRequest;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
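/**
 * Record DAO for the InfluxDB storage. Uses the same nanosecond timestamp padding as
 * {@link NoneStreamDAO} so records falling into the same millisecond do not overwrite
 * each other, and indexes the service id as a tag for fast filtering.
 */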
public class RecordDAO implements IRecordDAO {
public static final String TAG_SERVICE_ID = "_service_id";
private static final int PADDING_SIZE = 1_000_000;
private static final AtomicRangeInteger SUFFIX = new AtomicRangeInteger(0, PADDING_SIZE);
private InfluxClient client;
private StorageBuilder<Record> storageBuilder;
public RecordDAO(InfluxClient client, StorageBuilder<Record> storageBuilder) {
this.client = client;
this.storageBuilder = storageBuilder;
}
@Override
public InsertRequest prepareBatchInsert(Model model, Record record) throws IOException {
final long timestamp = TimeBucket.getTimestamp(
record.getTimeBucket(), model.getDownsampling()) * PADDING_SIZE + SUFFIX.getAndIncrement();
return new InfluxInsertRequest(model, record, storageBuilder)
.time(timestamp, TimeUnit.NANOSECONDS)
.addFieldAsTag(SegmentRecord.SERVICE_ID, TAG_SERVICE_ID);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.installer;
import org.apache.skywalking.oap.server.core.profile.ProfileTaskRecord;
import org.apache.skywalking.oap.server.core.storage.StorageException;
import org.apache.skywalking.oap.server.core.storage.model.Model;
import org.apache.skywalking.oap.server.library.client.Client;
import org.apache.skywalking.oap.server.library.module.ModuleManager;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxModelConstants;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.h2.dao.H2TableInstaller;
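/**
 * Table installer for the H2 metadata database. Only models accepted by
 * {@link MetaTableDefine#contains(Model)} (i.e. non-time-series metadata) get real H2
 * tables; time-series models are reported as already existing, since InfluxDB creates
 * measurements implicitly on first write.
 */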
public class H2Installer extends H2TableInstaller {
public H2Installer(ModuleManager moduleManager) {
super(moduleManager);
overrideColumnName(ProfileTaskRecord.DURATION, InfluxModelConstants.DURATION);
}
@Override
protected boolean isExists(Client client, Model model) throws StorageException {
if (MetaTableDefine.contains(model)) {
return super.isExists(client, model);
}
return true;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.installer;
import org.apache.skywalking.oap.server.core.storage.model.Model;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.TableMetaInfo;
/**
 * Defines which tables live in the metadata database (H2/MySQL) rather than in InfluxDB.
 */
public class MetaTableDefine {
/**
 * In the InfluxDB implementation, only metadata is managed by the traditional H2/MySQL storage;
 * a {@link Model} that is not capable of time series is metadata. Time-series models are
 * registered to {@link TableMetaInfo} as a side effect, so that the InfluxDB DAOs can look up
 * their column definitions later.
 *
 * @param model Model
 * @return true if the {@link Model} is stored in H2/MySQL
 */
public static boolean contains(Model model) {
if (!model.isCapableOfTimeSeries()) {
return true;
}
TableMetaInfo.addModel(model);
return false;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.installer;
import org.apache.skywalking.oap.server.core.profile.ProfileTaskRecord;
import org.apache.skywalking.oap.server.core.storage.StorageException;
import org.apache.skywalking.oap.server.core.storage.model.Model;
import org.apache.skywalking.oap.server.library.client.Client;
import org.apache.skywalking.oap.server.library.module.ModuleManager;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxModelConstants;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.mysql.MySQLTableInstaller;
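/**
 * Table installer for the MySQL metadata database. Mirrors {@link H2Installer}: only
 * metadata models get MySQL tables, while time-series models are left to InfluxDB,
 * which creates measurements implicitly on the first write.
 */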
public class MySQLInstaller extends MySQLTableInstaller {
public MySQLInstaller(ModuleManager moduleManager) {
super(moduleManager);
overrideColumnName(ProfileTaskRecord.DURATION, InfluxModelConstants.DURATION);
}
@Override
protected boolean isExists(Client client, Model model) throws StorageException {
if (MetaTableDefine.contains(model)) {
return super.isExists(client, model);
}
return true;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.query;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.skywalking.oap.server.core.analysis.Downsampling;
import org.apache.skywalking.oap.server.core.query.entity.Order;
import org.apache.skywalking.oap.server.core.query.entity.TopNEntity;
import org.apache.skywalking.oap.server.core.register.EndpointInventory;
import org.apache.skywalking.oap.server.core.register.ServiceInstanceInventory;
import org.apache.skywalking.oap.server.core.storage.model.ModelName;
import org.apache.skywalking.oap.server.core.storage.query.IAggregationQueryDAO;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.base.MetricsDAO;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.SelectQueryImpl;
import org.influxdb.querybuilder.SelectSubQueryImpl;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.eq;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.gte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.lte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.select;
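/**
 * Top-N aggregation queries backed by InfluxDB. Each query computes mean(value) per
 * entity in a subquery grouped by the entity-id tag, applies top()/bottom() in the
 * outer query, and then re-sorts in memory, because InfluxDB orders those results by time.
 */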
@Slf4j
public class AggregationQuery implements IAggregationQueryDAO {
private InfluxClient client;
public AggregationQuery(InfluxClient client) {
this.client = client;
}
@Override
public List<TopNEntity> getServiceTopN(String indName, String valueCName, int topN, Downsampling downsampling,
long startTB, long endTB, Order order) throws IOException {
return getTopNEntity(downsampling, indName, subQuery(indName, valueCName, startTB, endTB), order, topN);
}
@Override
public List<TopNEntity> getAllServiceInstanceTopN(String indName, String valueCName, int topN,
Downsampling downsampling,
long startTB, long endTB, Order order) throws IOException {
return getTopNEntity(downsampling, indName, subQuery(indName, valueCName, startTB, endTB), order, topN);
}
@Override
public List<TopNEntity> getServiceInstanceTopN(int serviceId, String indName, String valueCName, int topN,
Downsampling downsampling,
long startTB, long endTB, Order order) throws IOException {
return getTopNEntity(
downsampling, indName,
subQuery(ServiceInstanceInventory.SERVICE_ID, serviceId, indName, valueCName, startTB, endTB), order, topN
);
}
@Override
public List<TopNEntity> getAllEndpointTopN(String indName, String valueCName, int topN, Downsampling downsampling,
long startTB, long endTB, Order order) throws IOException {
return getTopNEntity(downsampling, indName, subQuery(indName, valueCName, startTB, endTB), order, topN);
}
@Override
public List<TopNEntity> getEndpointTopN(int serviceId, String indName, String valueCName, int topN,
Downsampling downsampling,
long startTB, long endTB, Order order) throws IOException {
return getTopNEntity(
downsampling, indName,
subQuery(EndpointInventory.SERVICE_ID, serviceId, indName, valueCName, startTB, endTB), order, topN
);
}
private List<TopNEntity> getTopNEntity(Downsampling downsampling,
String name,
SelectSubQueryImpl<SelectQueryImpl> subQuery,
Order order,
int topN) throws IOException {
String measurement = ModelName.build(downsampling, name);
// Re-sorting is required here because the top()/bottom() functions return results ordered by `time`.
Comparator<TopNEntity> comparator = DESCENDING;
String functionName = "top";
if (order == Order.ASC) {
functionName = "bottom";
comparator = ASCENDING;
}
SelectQueryImpl query = select().function(functionName, "mean", topN).as("value")
.column(MetricsDAO.TAG_ENTITY_ID)
.from(client.getDatabase(), measurement);
query.setSubQuery(subQuery);
List<QueryResult.Series> series = client.queryForSeries(query);
if (log.isDebugEnabled()) {
log.debug("SQL: {} result set: {}", query.getCommand(), series);
}
if (series == null || series.isEmpty()) {
return Collections.emptyList();
}
List<List<Object>> dataset = series.get(0).getValues();
List<TopNEntity> entities = Lists.newArrayListWithCapacity(dataset.size());
dataset.forEach(values -> {
final TopNEntity entity = new TopNEntity();
entity.setId((String) values.get(2));
entity.setValue(((Double) values.get(1)).longValue());
entities.add(entity);
});
entities.sort(comparator); // re-sort locally; top()/bottom() results are ordered by time.
return entities;
}
private SelectSubQueryImpl<SelectQueryImpl> subQuery(String serviceColumnName, int serviceId, String name,
String columnName,
long startTB, long endTB) {
return select().fromSubQuery(client.getDatabase()).mean(columnName).from(name)
.where()
.and(eq(serviceColumnName, serviceId))
.and(gte(InfluxClient.TIME, InfluxClient.timeInterval(startTB)))
.and(lte(InfluxClient.TIME, InfluxClient.timeInterval(endTB)))
.groupBy(MetricsDAO.TAG_ENTITY_ID);
}
private SelectSubQueryImpl<SelectQueryImpl> subQuery(String name, String columnName, long startTB, long endTB) {
return select().fromSubQuery(client.getDatabase()).mean(columnName).from(name)
.where()
.and(gte(InfluxClient.TIME, InfluxClient.timeInterval(startTB)))
.and(lte(InfluxClient.TIME, InfluxClient.timeInterval(endTB)))
.groupBy(MetricsDAO.TAG_ENTITY_ID);
}
private static final Comparator<TopNEntity> ASCENDING = Comparator.comparingLong(TopNEntity::getValue);
private static final Comparator<TopNEntity> DESCENDING = (a, b) -> Long.compare(b.getValue(), a.getValue());
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.query;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
import lombok.extern.slf4j.Slf4j;
import org.apache.skywalking.oap.server.core.alarm.AlarmRecord;
import org.apache.skywalking.oap.server.core.query.entity.AlarmMessage;
import org.apache.skywalking.oap.server.core.query.entity.Alarms;
import org.apache.skywalking.oap.server.core.query.entity.Scope;
import org.apache.skywalking.oap.server.core.storage.query.IAlarmQueryDAO;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.elasticsearch.common.Strings;
import org.influxdb.dto.Query;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.SelectQueryImpl;
import org.influxdb.querybuilder.WhereQueryImpl;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.eq;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.gte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.lte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.regex;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.select;
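/**
 * Alarm query backed by InfluxDB. Sends a count query and a recall query as one
 * statement: results.get(0) carries the total count, results.get(1) the requested page.
 */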
@Slf4j
public class AlarmQuery implements IAlarmQueryDAO {
private final InfluxClient client;
public AlarmQuery(InfluxClient client) {
this.client = client;
}
@Override
public Alarms getAlarm(Integer scopeId, String keyword, int limit, int from, long startTB,
long endTB) throws IOException {
WhereQueryImpl<SelectQueryImpl> recallQuery = select()
.function("top", AlarmRecord.START_TIME, limit + from).as(AlarmRecord.START_TIME)
.column(AlarmRecord.ID0)
.column(AlarmRecord.ALARM_MESSAGE)
.column(AlarmRecord.SCOPE)
.from(client.getDatabase(), AlarmRecord.INDEX_NAME)
.where();
if (startTB > 0 && endTB > 0) {
recallQuery.and(gte(InfluxClient.TIME, InfluxClient.timeInterval(startTB)))
.and(lte(InfluxClient.TIME, InfluxClient.timeInterval(endTB)));
}
if (!Strings.isNullOrEmpty(keyword)) {
recallQuery.and(regex(AlarmRecord.ALARM_MESSAGE, keyword));
}
if (Objects.nonNull(scopeId)) {
recallQuery.and(eq(AlarmRecord.SCOPE, scopeId));
}
WhereQueryImpl<SelectQueryImpl> countQuery = select().count(AlarmRecord.ID0)
.from(client.getDatabase(), AlarmRecord.INDEX_NAME)
.where();
recallQuery.getClauses().forEach(clause -> {
countQuery.where(clause);
});
Query query = new Query(countQuery.getCommand() + recallQuery.getCommand());
List<QueryResult.Result> results = client.query(query);
if (log.isDebugEnabled()) {
log.debug("SQL: {} result set: {}", query.getCommand(), results);
}
if (results.size() != 2) {
throw new IOException("Expecting to get 2 Results, but it is " + results.size());
}
List<QueryResult.Series> series = results.get(1).getSeries();
if (series == null || series.isEmpty()) {
return new Alarms();
}
List<QueryResult.Series> counter = results.get(0).getSeries();
Alarms alarms = new Alarms();
alarms.setTotal(((Number) counter.get(0).getValues().get(0).get(1)).intValue());
series.get(0).getValues()
.stream()
// re-sort locally; the query result is ordered by time.
.sorted((a, b) -> Long.compare((long) b.get(1), (long) a.get(1)))
.skip(from)
.forEach(values -> {
final int sid = (int) values.get(4);
Scope scope = Scope.Finder.valueOf(sid);
AlarmMessage message = new AlarmMessage();
message.setStartTime((long) values.get(1));
message.setId((String) values.get(2));
message.setMessage((String) values.get(3));
message.setScope(scope);
message.setScopeId(sid);
alarms.getMsgs().add(message);
});
return alarms;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.query;
import com.google.common.collect.Maps;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.skywalking.oap.server.core.Const;
import org.apache.skywalking.oap.server.core.analysis.manual.log.AbstractLogRecord;
import org.apache.skywalking.oap.server.core.query.entity.ContentType;
import org.apache.skywalking.oap.server.core.query.entity.Log;
import org.apache.skywalking.oap.server.core.query.entity.LogState;
import org.apache.skywalking.oap.server.core.query.entity.Logs;
import org.apache.skywalking.oap.server.core.query.entity.Pagination;
import org.apache.skywalking.oap.server.core.storage.query.ILogQueryDAO;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.base.RecordDAO;
import org.elasticsearch.common.Strings;
import org.influxdb.dto.Query;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.SelectQueryImpl;
import org.influxdb.querybuilder.WhereQueryImpl;
import org.influxdb.querybuilder.clauses.ConjunctionClause;
import static org.apache.skywalking.oap.server.core.analysis.manual.log.AbstractLogRecord.CONTENT;
import static org.apache.skywalking.oap.server.core.analysis.manual.log.AbstractLogRecord.CONTENT_TYPE;
import static org.apache.skywalking.oap.server.core.analysis.manual.log.AbstractLogRecord.ENDPOINT_ID;
import static org.apache.skywalking.oap.server.core.analysis.manual.log.AbstractLogRecord.IS_ERROR;
import static org.apache.skywalking.oap.server.core.analysis.manual.log.AbstractLogRecord.SERVICE_ID;
import static org.apache.skywalking.oap.server.core.analysis.manual.log.AbstractLogRecord.SERVICE_INSTANCE_ID;
import static org.apache.skywalking.oap.server.core.analysis.manual.log.AbstractLogRecord.STATUS_CODE;
import static org.apache.skywalking.oap.server.core.analysis.manual.log.AbstractLogRecord.TIMESTAMP;
import static org.apache.skywalking.oap.server.core.analysis.manual.log.AbstractLogRecord.TRACE_ID;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.eq;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.gte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.lte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.select;
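/**
 * Log query backed by InfluxDB. Builds the recall query from the optional filters,
 * reuses its where-clauses for the count query, and issues both in one statement,
 * expecting exactly two results (count first, rows second).
 */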
@Slf4j
public class LogQuery implements ILogQueryDAO {
private final InfluxClient client;
public LogQuery(InfluxClient client) {
this.client = client;
}
@Override
public Logs queryLogs(String metricName, int serviceId, int serviceInstanceId, int endpointId, String traceId,
LogState state, String stateCode, Pagination paging, int from, int limit,
long startTB, long endTB) throws IOException {
WhereQueryImpl<SelectQueryImpl> recallQuery = select().regex("*::field")
.from(client.getDatabase(), metricName)
.where();
if (serviceId != Const.NONE) {
recallQuery.and(eq(RecordDAO.TAG_SERVICE_ID, String.valueOf(serviceId)));
}
if (serviceInstanceId != Const.NONE) {
recallQuery.and(eq(SERVICE_INSTANCE_ID, serviceInstanceId));
}
if (endpointId != Const.NONE) {
recallQuery.and(eq(ENDPOINT_ID, endpointId));
}
if (!Strings.isNullOrEmpty(traceId)) {
recallQuery.and(eq(TRACE_ID, traceId));
}
switch (state) {
case ERROR: {
recallQuery.and(eq(IS_ERROR, true));
break;
}
case SUCCESS: {
recallQuery.and(eq(IS_ERROR, false));
break;
}
}
if (!Strings.isNullOrEmpty(stateCode)) {
recallQuery.and(eq(STATUS_CODE, stateCode));
}
recallQuery.and(gte(AbstractLogRecord.TIME_BUCKET, startTB))
.and(lte(AbstractLogRecord.TIME_BUCKET, endTB));
if (from > Const.NONE) {
recallQuery.limit(limit, from);
} else {
recallQuery.limit(limit);
}
SelectQueryImpl countQuery = select().count(ENDPOINT_ID).from(client.getDatabase(), metricName);
for (ConjunctionClause clause : recallQuery.getClauses()) {
countQuery.where(clause);
}
Query query = new Query(countQuery.getCommand() + recallQuery.getCommand());
List<QueryResult.Result> results = client.query(query);
if (log.isDebugEnabled()) {
log.debug("SQL: {} \nresult set: {}", query.getCommand(), results);
}
if (results.size() != 2) {
throw new IOException("Expecting to get 2 Results, but it is " + results.size());
}
final Logs logs = new Logs();
QueryResult.Result counter = results.get(0);
QueryResult.Result seriesList = results.get(1);
logs.setTotal(((Number) counter.getSeries().get(0).getValues().get(0).get(1)).intValue());
seriesList.getSeries().forEach(series -> {
final List<String> columns = series.getColumns();
series.getValues().forEach(values -> {
Map<String, Object> data = Maps.newHashMap();
Log log = new Log();
for (int i = 0; i < columns.size(); i++) {
data.put(columns.get(i), values.get(i));
}
log.setContent((String) data.get(CONTENT));
log.setContentType(ContentType.instanceOf((int) data.get(CONTENT_TYPE)));
log.setEndpointId((int) data.get(ENDPOINT_ID));
log.setTraceId((String) data.get(TRACE_ID));
log.setTimestamp((String) data.get(TIMESTAMP));
log.setStatusCode((String) data.get(STATUS_CODE));
log.setServiceId((int) data.get(SERVICE_ID));
log.setServiceInstanceId((int) data.get(SERVICE_INSTANCE_ID));
logs.getLogs().add(log);
});
});
return logs;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.query;
import com.google.common.base.Joiner;
import com.google.common.collect.Maps;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.skywalking.oap.server.core.analysis.Downsampling;
import org.apache.skywalking.oap.server.core.analysis.metrics.IntKeyLongValue;
import org.apache.skywalking.oap.server.core.analysis.metrics.IntKeyLongValueHashMap;
import org.apache.skywalking.oap.server.core.analysis.metrics.ThermodynamicMetrics;
import org.apache.skywalking.oap.server.core.query.entity.IntValues;
import org.apache.skywalking.oap.server.core.query.entity.KVInt;
import org.apache.skywalking.oap.server.core.query.entity.Thermodynamic;
import org.apache.skywalking.oap.server.core.query.sql.Function;
import org.apache.skywalking.oap.server.core.query.sql.KeyValues;
import org.apache.skywalking.oap.server.core.query.sql.Where;
import org.apache.skywalking.oap.server.core.storage.model.ModelColumn;
import org.apache.skywalking.oap.server.core.storage.model.ModelName;
import org.apache.skywalking.oap.server.core.storage.query.IMetricsQueryDAO;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.base.MetricsDAO;
import org.apache.skywalking.oap.server.storage.plugin.jdbc.TableMetaInfo;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.SelectQueryImpl;
import org.influxdb.querybuilder.SelectionQueryImpl;
import org.influxdb.querybuilder.WhereQueryImpl;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.contains;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.eq;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.gte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.lte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.select;
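/**
 * Metrics query backed by InfluxDB. String-typed columns need single quoting (single
 * values) or a regex match (value lists) in the where clause; results are re-ordered
 * against the requested ids, with 0 filled in for ids that returned no data.
 */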
@Slf4j
public class MetricsQuery implements IMetricsQueryDAO {
private final InfluxClient client;
public MetricsQuery(InfluxClient client) {
this.client = client;
}
@Override
public IntValues getValues(String indName, Downsampling downsampling, long startTB, long endTB,
Where where, String valueCName, Function function) throws IOException {
String measurement = ModelName.build(downsampling, indName);
SelectionQueryImpl query = select();
switch (function) {
case Avg:
query.mean(valueCName);
break;
default:
query.sum(valueCName);
}
WhereQueryImpl<SelectQueryImpl> queryWhereQuery = query.from(client.getDatabase(), measurement).where();
Map<String, Class<?>> columnTypes = Maps.newHashMap();
for (ModelColumn column : TableMetaInfo.get(measurement).getColumns()) {
columnTypes.put(column.getColumnName().getStorageName(), column.getType());
}
List<String> ids = new ArrayList<>(20);
List<KeyValues> whereKeyValues = where.getKeyValues();
if (!whereKeyValues.isEmpty()) {
StringBuilder clauseBuilder = new StringBuilder();
for (KeyValues kv : whereKeyValues) {
final List<String> values = kv.getValues();
Class<?> type = columnTypes.get(kv.getKey());
if (values.size() == 1) {
String value = kv.getValues().get(0);
if (type == String.class) {
value = "'" + value + "'";
}
clauseBuilder.append(kv.getKey()).append("=").append(value).append(" OR ");
} else {
ids.addAll(values);
if (type == String.class) {
clauseBuilder.append(kv.getKey())
.append(" =~ /")
.append(Joiner.on("|").join(values))
.append("/ OR ");
continue;
}
for (String value : values) {
clauseBuilder.append(kv.getKey()).append(" = '").append(value).append("' OR ");
}
}
}
queryWhereQuery.where(clauseBuilder.substring(0, clauseBuilder.length() - 4));
}
queryWhereQuery
.and(gte(InfluxClient.TIME, InfluxClient.timeInterval(startTB, downsampling)))
.and(lte(InfluxClient.TIME, InfluxClient.timeInterval(endTB, downsampling)))
.groupBy(MetricsDAO.TAG_ENTITY_ID);
IntValues intValues = new IntValues();
List<QueryResult.Series> seriesList = client.queryForSeries(queryWhereQuery);
if (log.isDebugEnabled()) {
log.debug("SQL: {} result set: {}", queryWhereQuery.getCommand(), seriesList);
}
if (!(seriesList == null || seriesList.isEmpty())) {
for (QueryResult.Series series : seriesList) {
KVInt kv = new KVInt();
kv.setId(series.getTags().get(MetricsDAO.TAG_ENTITY_ID));
Number value = (Number) series.getValues().get(0).get(1);
kv.setValue(value.longValue());
intValues.addKVInt(kv);
}
}
return orderWithDefault0(intValues, ids);
}
@Override
public IntValues getLinearIntValues(String indName, Downsampling downsampling, List<String> ids, String valueCName)
throws IOException {
String measurement = ModelName.build(downsampling, indName);
WhereQueryImpl<SelectQueryImpl> query = select()
.column("id")
.column(valueCName)
.from(client.getDatabase(), measurement)
.where();
if (ids != null && !ids.isEmpty()) {
if (ids.size() == 1) {
query.where(eq("id", ids.get(0)));
} else {
query.where(contains("id", Joiner.on("|").join(ids)));
}
}
List<QueryResult.Series> seriesList = client.queryForSeries(query);
if (log.isDebugEnabled()) {
log.debug("SQL: {} result set: {}", query.getCommand(), seriesList);
}
IntValues intValues = new IntValues();
if (!(seriesList == null || seriesList.isEmpty())) {
seriesList.get(0).getValues().forEach(values -> {
KVInt kv = new KVInt();
kv.setValue(((Number) values.get(2)).longValue());
kv.setId((String) values.get(1));
intValues.addKVInt(kv);
});
}
return orderWithDefault0(intValues, ids);
}
/**
 * Ensure the values follow the expected order of ids, using 0 as the default value.
 *
 * @param origin        the values as returned by the query
 * @param expectedOrder the ids in the expected order
 * @return the values re-ordered to match expectedOrder, with 0 filled in for missing ids
 */
private IntValues orderWithDefault0(IntValues origin, List<String> expectedOrder) {
IntValues intValues = new IntValues();
expectedOrder.forEach(id -> {
KVInt e = new KVInt();
e.setId(id);
e.setValue(origin.findValue(id, 0));
intValues.addKVInt(e);
});
return intValues;
}
@Override
public IntValues[] getMultipleLinearIntValues(String indName, Downsampling downsampling, List<String> ids,
List<Integer> linearIndex, String valueCName) throws IOException {
String measurement = ModelName.build(downsampling, indName);
WhereQueryImpl<SelectQueryImpl> query = select()
.column("id")
.column(valueCName)
.from(client.getDatabase(), measurement)
.where();
if (ids != null && !ids.isEmpty()) {
if (ids.size() == 1) {
query.where(eq("id", ids.get(0)));
} else {
query.where(contains("id", Joiner.on("|").join(ids)));
}
}
List<QueryResult.Series> series = client.queryForSeries(query);
if (log.isDebugEnabled()) {
log.debug("SQL: {} result set: {}", query.getCommand(), series);
}
IntValues[] intValues = new IntValues[linearIndex.size()];
for (int i = 0; i < intValues.length; i++) {
intValues[i] = new IntValues();
}
if (series == null || series.isEmpty()) {
return intValues;
}
series.get(0).getValues().forEach(values -> {
IntKeyLongValueHashMap multipleValues = new IntKeyLongValueHashMap(5);
multipleValues.toObject((String) values.get(2));
final String id = (String) values.get(1);
for (int i = 0; i < intValues.length; i++) {
Integer index = linearIndex.get(i);
KVInt kv = new KVInt();
kv.setId(id);
kv.setValue(multipleValues.get(index).getValue());
intValues[i].addKVInt(kv);
}
});
return orderWithDefault0(intValues, ids);
}
/**
 * Ensure each IntValues follows the expected order of ids, using 0 as the default value.
 *
 * @param origin        the value arrays as returned by the query
 * @param expectedOrder the ids in the expected order
 * @return the arrays re-ordered to match expectedOrder, with 0 filled in for missing ids
 */
private IntValues[] orderWithDefault0(IntValues[] origin, List<String> expectedOrder) {
for (int i = 0; i < origin.length; i++) {
origin[i] = orderWithDefault0(origin[i], expectedOrder);
}
return origin;
}
@Override
public Thermodynamic getThermodynamic(String indName, Downsampling downsampling, List<String> ids,
String valueCName)
throws IOException {
String measurement = ModelName.build(downsampling, indName);
WhereQueryImpl<SelectQueryImpl> query = select()
.column(ThermodynamicMetrics.STEP)
.column(ThermodynamicMetrics.NUM_OF_STEPS)
.column(ThermodynamicMetrics.DETAIL_GROUP)
.column("id")
.from(client.getDatabase(), measurement)
.where(contains("id", Joiner.on("|").join(ids)));
Map<String, List<Long>> thermodynamicValueMatrix = new HashMap<>();
QueryResult.Series series = client.queryForSingleSeries(query);
if (log.isDebugEnabled()) {
log.debug("SQL: {} result set: {}", query.getCommand(), series);
}
if (series == null) {
return new Thermodynamic();
}
int numOfSteps = 0, axisYStep = 0;
List<List<Long>> thermodynamicValueCollection = new ArrayList<>();
Thermodynamic thermodynamic = new Thermodynamic();
for (List<Object> values : series.getValues()) {
numOfSteps = (int) values.get(2) + 1;
axisYStep = (int) values.get(1);
IntKeyLongValueHashMap intKeyLongValues = new IntKeyLongValueHashMap(5);
intKeyLongValues.toObject((String) values.get(3));
List<Long> axisYValues = new ArrayList<>(numOfSteps);
for (int i = 0; i < numOfSteps; i++) {
axisYValues.add(0L);
}
for (IntKeyLongValue intKeyLongValue : intKeyLongValues.values()) {
axisYValues.set(intKeyLongValue.getKey(), intKeyLongValue.getValue());
}
thermodynamicValueMatrix.put((String) values.get(4), axisYValues);
}
// fill in default (empty) values for ids that have no data in the time range.
ids.forEach(id -> {
if (thermodynamicValueMatrix.containsKey(id)) {
thermodynamicValueCollection.add(thermodynamicValueMatrix.get(id));
} else {
thermodynamicValueCollection.add(new ArrayList<>());
}
});
thermodynamic.fromMatrixData(thermodynamicValueCollection, numOfSteps);
thermodynamic.setAxisYStep(axisYStep);
return thermodynamic;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.query;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import lombok.extern.slf4j.Slf4j;
import org.apache.skywalking.apm.util.StringUtil;
import org.apache.skywalking.oap.server.core.profile.ProfileTaskLogRecord;
import org.apache.skywalking.oap.server.core.query.entity.ProfileTaskLog;
import org.apache.skywalking.oap.server.core.query.entity.ProfileTaskLogOperationType;
import org.apache.skywalking.oap.server.core.storage.profile.IProfileTaskLogQueryDAO;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.SelectQueryImpl;
import org.influxdb.querybuilder.WhereQueryImpl;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.eq;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.select;
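/**
 * Profile-task log query backed by InfluxDB. Recalls at most fetchTaskLogMaxSize rows
 * via top() on the operation time, then re-sorts in memory because InfluxDB returns
 * top() results ordered by time.
 */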
@Slf4j
public class ProfileTaskLogQuery implements IProfileTaskLogQueryDAO {
private InfluxClient client;
private int fetchTaskLogMaxSize;
public ProfileTaskLogQuery(InfluxClient client, int fetchTaskLogMaxSize) {
this.client = client;
this.fetchTaskLogMaxSize = fetchTaskLogMaxSize;
}
@Override
public List<ProfileTaskLog> getTaskLogList(String taskId) throws IOException {
WhereQueryImpl<SelectQueryImpl> query = select()
.function("top", ProfileTaskLogRecord.OPERATION_TIME, fetchTaskLogMaxSize)
.column("id")
.column(ProfileTaskLogRecord.TASK_ID)
.column(ProfileTaskLogRecord.INSTANCE_ID)
.column(ProfileTaskLogRecord.OPERATION_TIME)
.column(ProfileTaskLogRecord.OPERATION_TYPE)
.from(client.getDatabase(), ProfileTaskLogRecord.INDEX_NAME)
.where();
if (StringUtil.isNotEmpty(taskId)) {
query.and(eq(ProfileTaskLogRecord.TASK_ID, taskId));
}
QueryResult.Series series = client.queryForSingleSeries(query);
if (log.isDebugEnabled()) {
log.debug("SQL: {} result set: {}", query.getCommand(), series);
}
if (series == null) {
return Collections.emptyList();
}
List<String> columns = series.getColumns();
Map<String, Integer> columnsMap = Maps.newHashMap();
for (int i = 0; i < columns.size(); i++) {
columnsMap.put(columns.get(i), i);
}
List<ProfileTaskLog> taskLogs = Lists.newArrayList();
series.getValues().stream()
// re-sort locally; the query result is ordered by time.
.sorted((a, b) -> Long.compare(((Number) b.get(1)).longValue(), ((Number) a.get(1)).longValue()))
.forEach(values -> {
taskLogs.add(ProfileTaskLog.builder()
.id((String) values.get(columnsMap.get("id")))
.taskId((String) values.get(columnsMap.get(ProfileTaskLogRecord.TASK_ID)))
.instanceId(
(int) values.get(columnsMap.get(ProfileTaskLogRecord.INSTANCE_ID)))
.operationTime(
(Long) values.get(columnsMap.get(ProfileTaskLogRecord.OPERATION_TIME)))
.operationType(ProfileTaskLogOperationType.parse(
(int) values.get(columnsMap.get(ProfileTaskLogRecord.OPERATION_TYPE))))
.build());
});
return taskLogs;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.query;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
import org.apache.skywalking.apm.util.StringUtil;
import org.apache.skywalking.oap.server.core.profile.ProfileTaskRecord;
import org.apache.skywalking.oap.server.core.query.entity.ProfileTask;
import org.apache.skywalking.oap.server.core.storage.profile.IProfileTaskQueryDAO;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxModelConstants;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.base.NoneStreamDAO;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.SelectQueryImpl;
import org.influxdb.querybuilder.WhereQueryImpl;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.eq;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.gte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.lte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.select;
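/**
 * Profile-task query backed by InfluxDB. Note that in every result row index 0 is the
 * time column, so the selected fields start at index 1 (see profileTaskBuilder).
 */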
public class ProfileTaskQuery implements IProfileTaskQueryDAO {
private InfluxClient client;
public ProfileTaskQuery(InfluxClient client) {
this.client = client;
}
@Override
public List<ProfileTask> getTaskList(final Integer serviceId,
final String endpointName,
final Long startTimeBucket,
final Long endTimeBucket,
final Integer limit) throws IOException {
WhereQueryImpl<SelectQueryImpl> query =
select("id", ProfileTaskRecord.SERVICE_ID,
ProfileTaskRecord.ENDPOINT_NAME, ProfileTaskRecord.START_TIME,
ProfileTaskRecord.CREATE_TIME,
InfluxModelConstants.DURATION,
ProfileTaskRecord.MIN_DURATION_THRESHOLD,
ProfileTaskRecord.DUMP_PERIOD,
ProfileTaskRecord.MAX_SAMPLING_COUNT
)
.from(client.getDatabase(), ProfileTaskRecord.INDEX_NAME)
.where();
if (Objects.nonNull(serviceId)) {
query.and(eq(NoneStreamDAO.TAG_SERVICE_ID, String.valueOf(serviceId)));
}
if (StringUtil.isNotEmpty(endpointName)) {
query.and(eq(ProfileTaskRecord.ENDPOINT_NAME, endpointName));
}
if (Objects.nonNull(startTimeBucket)) {
query.and(gte(ProfileTaskRecord.TIME_BUCKET, startTimeBucket));
}
if (Objects.nonNull(endTimeBucket)) {
query.and(lte(ProfileTaskRecord.TIME_BUCKET, endTimeBucket));
}
if (Objects.nonNull(limit)) {
query.limit(limit);
}
List<ProfileTask> tasks = Lists.newArrayList();
QueryResult.Series series = client.queryForSingleSeries(query);
if (series != null) {
series.getValues().forEach(values -> {
tasks.add(profileTaskBuilder(values));
});
}
return tasks;
}
@Override
public ProfileTask getById(final String id) throws IOException {
if (StringUtil.isEmpty(id)) {
return null;
}
SelectQueryImpl query = select("id", ProfileTaskRecord.SERVICE_ID,
ProfileTaskRecord.ENDPOINT_NAME, ProfileTaskRecord.START_TIME,
ProfileTaskRecord.CREATE_TIME,
InfluxModelConstants.DURATION,
ProfileTaskRecord.MIN_DURATION_THRESHOLD,
ProfileTaskRecord.DUMP_PERIOD,
ProfileTaskRecord.MAX_SAMPLING_COUNT
)
.from(client.getDatabase(), ProfileTaskRecord.INDEX_NAME)
.where()
.and(eq("id", id))
.limit(1);
QueryResult.Series series = client.queryForSingleSeries(query);
if (Objects.nonNull(series)) {
return profileTaskBuilder(series.getValues().get(0));
}
return null;
}
// values.get(0) is the time column; the selected fields start at index 1.
private static ProfileTask profileTaskBuilder(List<Object> values) {
return ProfileTask.builder()
.id((String) values.get(1))
.serviceId(((Number) values.get(2)).intValue())
.endpointName((String) values.get(3))
.startTime(((Number) values.get(4)).longValue())
.createTime(((Number) values.get(5)).longValue())
.duration((int) values.get(6))
.minDurationThreshold((int) values.get(7))
.dumpPeriod((int) values.get(8))
.maxSamplingCount((int) values.get(9))
.build();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.query;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import org.apache.skywalking.apm.util.StringUtil;
import org.apache.skywalking.oap.server.core.analysis.manual.segment.SegmentRecord;
import org.apache.skywalking.oap.server.core.profile.ProfileThreadSnapshotRecord;
import org.apache.skywalking.oap.server.core.query.entity.BasicTrace;
import org.apache.skywalking.oap.server.core.storage.profile.IProfileThreadSnapshotQueryDAO;
import org.apache.skywalking.oap.server.library.util.BooleanUtils;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.WhereQueryImpl;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.contains;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.eq;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.gte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.lte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.select;
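/**
 * Profile thread-snapshot query backed by InfluxDB. queryProfiledSegments() first
 * collects the segment ids with sequence 0 for the task, then recalls the matching
 * segment records with bottom() on the start time and re-sorts them descending.
 */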
public class ProfileThreadSnapshotQuery implements IProfileThreadSnapshotQueryDAO {
private final InfluxClient client;
public ProfileThreadSnapshotQuery(InfluxClient client) {
this.client = client;
}
@Override
public List<BasicTrace> queryProfiledSegments(String taskId) throws IOException {
WhereQueryImpl query = select(ProfileThreadSnapshotRecord.SEGMENT_ID)
.from(client.getDatabase(), ProfileThreadSnapshotRecord.INDEX_NAME)
.where()
.and(eq(ProfileThreadSnapshotRecord.TASK_ID, taskId))
.and(eq(ProfileThreadSnapshotRecord.SEQUENCE, 0));
final LinkedList<String> segments = new LinkedList<>();
QueryResult.Series series = client.queryForSingleSeries(query);
if (series == null) {
return Collections.emptyList();
}
series.getValues().forEach(values -> {
segments.add((String) values.get(1));
});
if (segments.isEmpty()) {
return Collections.emptyList();
}
query = select()
.function("bottom", SegmentRecord.START_TIME, segments.size())
.column(SegmentRecord.SEGMENT_ID)
.column(SegmentRecord.START_TIME)
.column(SegmentRecord.ENDPOINT_NAME)
.column(SegmentRecord.LATENCY)
.column(SegmentRecord.IS_ERROR)
.column(SegmentRecord.TRACE_ID)
.from(client.getDatabase(), SegmentRecord.INDEX_NAME)
.where()
.and(contains(SegmentRecord.SEGMENT_ID, Joiner.on("|").join(segments)));
ArrayList<BasicTrace> result = Lists.newArrayListWithCapacity(segments.size());
client.queryForSingleSeries(query)
.getValues()
.stream()
.sorted((a, b) -> Long.compare(((Number) b.get(1)).longValue(), ((Number) a.get(1)).longValue()))
.forEach(values -> {
BasicTrace basicTrace = new BasicTrace();
basicTrace.setSegmentId((String) values.get(2));
basicTrace.setStart(String.valueOf(values.get(3)));
basicTrace.getEndpointNames().add((String) values.get(4));
basicTrace.setDuration((int) values.get(5));
basicTrace.setError(BooleanUtils.valueToBoolean((int) values.get(6)));
String traceIds = (String) values.get(7);
basicTrace.getTraceIds().add(traceIds);
result.add(basicTrace);
});
return result;
}
@Override
public int queryMinSequence(String segmentId, long start, long end) throws IOException {
return querySequenceWithAgg("min", segmentId, start, end);
}
@Override
public int queryMaxSequence(String segmentId, long start, long end) throws IOException {
return querySequenceWithAgg("max", segmentId, start, end);
}
@Override
public List<ProfileThreadSnapshotRecord> queryRecords(String segmentId, int minSequence,
int maxSequence) throws IOException {
WhereQueryImpl query = select(
ProfileThreadSnapshotRecord.TASK_ID,
ProfileThreadSnapshotRecord.SEGMENT_ID,
ProfileThreadSnapshotRecord.DUMP_TIME,
ProfileThreadSnapshotRecord.SEQUENCE,
ProfileThreadSnapshotRecord.STACK_BINARY
)
.from(client.getDatabase(), ProfileThreadSnapshotRecord.INDEX_NAME)
.where(eq(ProfileThreadSnapshotRecord.SEGMENT_ID, segmentId))
.and(gte(ProfileThreadSnapshotRecord.SEQUENCE, minSequence))
.and(lte(ProfileThreadSnapshotRecord.SEQUENCE, maxSequence));
ArrayList<ProfileThreadSnapshotRecord> result = new ArrayList<>(maxSequence - minSequence);
client.queryForSingleSeries(query).getValues().forEach(values -> {
ProfileThreadSnapshotRecord record = new ProfileThreadSnapshotRecord();
record.setTaskId((String) values.get(1));
record.setSegmentId((String) values.get(2));
record.setDumpTime(((Number) values.get(3)).longValue());
record.setSequence((int) values.get(4));
String dataBinaryBase64 = String.valueOf(values.get(5));
if (StringUtil.isNotEmpty(dataBinaryBase64)) {
record.setStackBinary(Base64.getDecoder().decode(dataBinaryBase64));
}
result.add(record);
});
return result;
}
private int querySequenceWithAgg(String function, String segmentId, long start, long end) throws IOException {
WhereQueryImpl query = select()
.function(function, ProfileThreadSnapshotRecord.SEQUENCE)
.from(client.getDatabase(), ProfileThreadSnapshotRecord.INDEX_NAME)
.where()
.and(eq(ProfileThreadSnapshotRecord.SEGMENT_ID, segmentId))
.and(gte(ProfileThreadSnapshotRecord.DUMP_TIME, start))
.and(lte(ProfileThreadSnapshotRecord.DUMP_TIME, end));
QueryResult.Series series = client.queryForSingleSeries(query);
if (series == null) {
return -1;
}
return ((Number) series.getValues().get(0).get(1)).intValue();
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.query;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.skywalking.oap.server.core.Const;
import org.apache.skywalking.oap.server.core.analysis.topn.TopN;
import org.apache.skywalking.oap.server.core.query.entity.Order;
import org.apache.skywalking.oap.server.core.query.entity.TopNRecord;
import org.apache.skywalking.oap.server.core.storage.query.ITopNRecordsQueryDAO;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.base.RecordDAO;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.WhereQueryImpl;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.eq;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.gte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.lte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.select;
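/**
 * Top-N (sampled slow) records query backed by InfluxDB. Chooses top()/bottom() by the
 * requested order and re-sorts the rows in memory, since InfluxDB returns those results
 * ordered by time.
 */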
@Slf4j
public class TopNRecordsQuery implements ITopNRecordsQueryDAO {
private final InfluxClient client;
public TopNRecordsQuery(InfluxClient client) {
this.client = client;
}
@Override
public List<TopNRecord> getTopNRecords(long startSecondTB, long endSecondTB, String metricName,
int serviceId, int topN, Order order) throws IOException {
String function = "bottom";
// Re-sorting is required here because the top()/bottom() functions return results ordered by `time`.
Comparator<TopNRecord> comparator = Comparator.comparingLong(TopNRecord::getLatency);
if (order.equals(Order.DES)) {
function = "top";
comparator = (a, b) -> Long.compare(b.getLatency(), a.getLatency());
}
WhereQueryImpl query = select()
.function(function, TopN.LATENCY, topN)
.column(TopN.STATEMENT)
.column(TopN.TRACE_ID)
.from(client.getDatabase(), metricName)
.where()
.and(gte(TopN.TIME_BUCKET, startSecondTB))
.and(lte(TopN.TIME_BUCKET, endSecondTB));
if (serviceId != Const.NONE) {
query.and(eq(RecordDAO.TAG_SERVICE_ID, String.valueOf(serviceId)));
}
QueryResult.Series series = client.queryForSingleSeries(query);
if (log.isDebugEnabled()) {
log.debug("SQL: {} result set: {}", query.getCommand(), series);
}
if (series == null) {
return Collections.emptyList();
}
final List<TopNRecord> records = new ArrayList<>();
series.getValues().forEach(values -> {
TopNRecord record = new TopNRecord();
record.setLatency((long) values.get(1));
record.setTraceId((String) values.get(3));
record.setStatement((String) values.get(2));
records.add(record);
});
records.sort(comparator); // re-sort locally; top()/bottom() results are ordered by time.
return records;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.query;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.skywalking.oap.server.core.analysis.Downsampling;
import org.apache.skywalking.oap.server.core.analysis.manual.RelationDefineUtil;
import org.apache.skywalking.oap.server.core.analysis.manual.endpointrelation.EndpointRelationServerSideMetrics;
import org.apache.skywalking.oap.server.core.analysis.manual.relation.instance.ServiceInstanceRelationClientSideMetrics;
import org.apache.skywalking.oap.server.core.analysis.manual.relation.instance.ServiceInstanceRelationServerSideMetrics;
import org.apache.skywalking.oap.server.core.analysis.manual.relation.service.ServiceRelationClientSideMetrics;
import org.apache.skywalking.oap.server.core.analysis.manual.relation.service.ServiceRelationServerSideMetrics;
import org.apache.skywalking.oap.server.core.analysis.metrics.Metrics;
import org.apache.skywalking.oap.server.core.query.entity.Call;
import org.apache.skywalking.oap.server.core.source.DetectPoint;
import org.apache.skywalking.oap.server.core.storage.model.ModelName;
import org.apache.skywalking.oap.server.core.storage.query.ITopologyQueryDAO;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.WhereNested;
import org.influxdb.querybuilder.WhereQueryImpl;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.eq;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.gte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.lte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.select;
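/**
 * Topology query backed by InfluxDB. Each load*Relations() method builds a where query
 * over the relation measurement for the given time range and detect point, then turns
 * the resulting series into Call.CallDetail entries.
 */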
@Slf4j
public class TopologyQuery implements ITopologyQueryDAO {
private final InfluxClient client;
public TopologyQuery(InfluxClient client) {
this.client = client;
}
@Override
public List<Call.CallDetail> loadSpecifiedServerSideServiceRelations(Downsampling downsampling, long startTB,
long endTB,
List<Integer> serviceIds) throws IOException {
String measurement = ModelName.build(downsampling, ServiceRelationServerSideMetrics.INDEX_NAME);
WhereQueryImpl query = buildServiceCallsQuery(
measurement,
startTB,
endTB,
ServiceRelationServerSideMetrics.SOURCE_SERVICE_ID,
ServiceRelationServerSideMetrics.DEST_SERVICE_ID,
serviceIds
);
return buildCalls(query, DetectPoint.SERVER);
}
@Override
public List<Call.CallDetail> loadSpecifiedClientSideServiceRelations(Downsampling downsampling, long startTB,
long endTB,
List<Integer> serviceIds) throws IOException {
String measurement = ModelName.build(downsampling, ServiceRelationClientSideMetrics.INDEX_NAME);
WhereQueryImpl query = buildServiceCallsQuery(
measurement,
startTB,
endTB,
ServiceRelationClientSideMetrics.SOURCE_SERVICE_ID,
ServiceRelationClientSideMetrics.DEST_SERVICE_ID,
serviceIds
);
return buildCalls(query, DetectPoint.CLIENT);
}
@Override
public List<Call.CallDetail> loadServerSideServiceRelations(Downsampling downsampling, long startTB,
long endTB) throws IOException {
String measurement = ModelName.build(downsampling, ServiceRelationServerSideMetrics.INDEX_NAME);
WhereQueryImpl query = buildServiceCallsQuery(
measurement,
startTB,
endTB,
ServiceRelationServerSideMetrics.SOURCE_SERVICE_ID,
ServiceRelationServerSideMetrics.DEST_SERVICE_ID,
new ArrayList<>(0)
);
return buildCalls(query, DetectPoint.SERVER);
}
@Override
public List<Call.CallDetail> loadClientSideServiceRelations(Downsampling downsampling, long startTB,
long endTB) throws IOException {
String tableName = ModelName.build(downsampling, ServiceRelationClientSideMetrics.INDEX_NAME);
WhereQueryImpl query = buildServiceCallsQuery(
tableName,
startTB,
endTB,
ServiceRelationClientSideMetrics.SOURCE_SERVICE_ID,
ServiceRelationClientSideMetrics.DEST_SERVICE_ID,
new ArrayList<>(0)
);
return buildCalls(query, DetectPoint.CLIENT);
}
@Override
public List<Call.CallDetail> loadServerSideServiceInstanceRelations(int clientServiceId,
int serverServiceId,
Downsampling downsampling,
long startTB,
long endTB) throws IOException {
String measurement = ModelName.build(downsampling, ServiceInstanceRelationServerSideMetrics.INDEX_NAME);
WhereQueryImpl query = buildServiceInstanceCallsQuery(measurement,
startTB,
endTB,
ServiceInstanceRelationServerSideMetrics.SOURCE_SERVICE_ID,
ServiceInstanceRelationServerSideMetrics.DEST_SERVICE_ID,
clientServiceId, serverServiceId
);
return buildCalls(query, DetectPoint.SERVER);
}
@Override
public List<Call.CallDetail> loadClientSideServiceInstanceRelations(int clientServiceId,
int serverServiceId,
Downsampling downsampling,
long startTB,
long endTB) throws IOException {
String measurement = ModelName.build(downsampling, ServiceInstanceRelationClientSideMetrics.INDEX_NAME);
WhereQueryImpl query = buildServiceInstanceCallsQuery(measurement,
startTB,
endTB,
ServiceInstanceRelationClientSideMetrics.SOURCE_SERVICE_ID,
ServiceInstanceRelationClientSideMetrics.DEST_SERVICE_ID,
clientServiceId, serverServiceId
);
return buildCalls(query, DetectPoint.CLIENT);
}
@Override
public List<Call.CallDetail> loadSpecifiedDestOfServerSideEndpointRelations(Downsampling downsampling,
long startTB,
long endTB,
int destEndpointId) throws IOException {
String measurement = ModelName.build(downsampling, EndpointRelationServerSideMetrics.INDEX_NAME);
WhereQueryImpl query = buildServiceCallsQuery(
measurement,
startTB,
endTB,
EndpointRelationServerSideMetrics.SOURCE_ENDPOINT_ID,
EndpointRelationServerSideMetrics.DEST_ENDPOINT_ID,
Collections.emptyList()
);
query.and(eq(EndpointRelationServerSideMetrics.DEST_ENDPOINT_ID, destEndpointId));
WhereQueryImpl query2 = buildServiceCallsQuery(
measurement,
startTB,
endTB,
EndpointRelationServerSideMetrics.SOURCE_ENDPOINT_ID,
EndpointRelationServerSideMetrics.DEST_ENDPOINT_ID,
Collections.emptyList()
);
query2.and(eq(EndpointRelationServerSideMetrics.SOURCE_ENDPOINT_ID, destEndpointId));
List<Call.CallDetail> calls = buildCalls(query, DetectPoint.SERVER);
calls.addAll(buildCalls(query2, DetectPoint.CLIENT));
return calls;
}
private WhereQueryImpl buildServiceCallsQuery(String measurement, long startTB, long endTB, String sourceCName,
String destCName, List<Integer> serviceIds) {
WhereQueryImpl query = select()
.function("distinct", Metrics.ENTITY_ID)
.from(client.getDatabase(), measurement)
.where()
.and(gte(InfluxClient.TIME, InfluxClient.timeInterval(startTB)))
.and(lte(InfluxClient.TIME, InfluxClient.timeInterval(endTB)));
if (!serviceIds.isEmpty()) {
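// Restrict to the given services on either side of the call:
// ... and (source = id1 or dest = id1 or source = id2 or ...)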
WhereNested whereNested = query.andNested();
for (Integer id : serviceIds) {
whereNested.or(eq(sourceCName, id))
.or(eq(destCName, id));
}
whereNested.close();
}
return query;
}
private WhereQueryImpl buildServiceInstanceCallsQuery(String measurement,
long startTB,
long endTB,
String sourceCName,
String destCName,
int sourceServiceId,
int destServiceId) {
WhereQueryImpl query = select()
.function("distinct", Metrics.ENTITY_ID)
.from(client.getDatabase(), measurement)
.where()
.and(gte(InfluxClient.TIME, InfluxClient.timeInterval(startTB)))
.and(lte(InfluxClient.TIME, InfluxClient.timeInterval(endTB)));
// Match the instance relation in either direction:
// ((source = A and dest = B) or (source = B and dest = A))
StringBuilder builder = new StringBuilder("((");
builder.append(sourceCName).append("=").append(sourceServiceId)
.append(" and ")
.append(destCName).append("=").append(destServiceId)
.append(") or (")
.append(sourceCName).append("=").append(destServiceId)
.append(" and ")
.append(destCName).append("=").append(sourceServiceId)
.append("))");
query.where(builder.toString());
return query;
}
private List<Call.CallDetail> buildCalls(WhereQueryImpl query,
DetectPoint detectPoint) throws IOException {
QueryResult.Series series = client.queryForSingleSeries(query);
if (log.isDebugEnabled()) {
log.debug("SQL: {} result set: {}", query.getCommand(), series);
}
if (series == null) {
return Collections.emptyList();
}
List<Call.CallDetail> calls = new ArrayList<>();
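// Each entity_id encodes the relation (source, dest and component id);
// RelationDefineUtil splits it back into its parts.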
series.getValues().forEach(values -> {
Call.CallDetail call = new Call.CallDetail();
String entityId = (String) values.get(1);
RelationDefineUtil.RelationDefine relationDefine = RelationDefineUtil.splitEntityId(entityId);
call.setSource(relationDefine.getSource());
call.setTarget(relationDefine.getDest());
call.setComponentId(relationDefine.getComponentId());
call.setDetectPoint(detectPoint);
call.generateID();
calls.add(call);
});
return calls;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.storage.plugin.influxdb.query;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.Base64;
import java.util.Collections;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.skywalking.oap.server.core.analysis.manual.segment.SegmentRecord;
import org.apache.skywalking.oap.server.core.query.entity.BasicTrace;
import org.apache.skywalking.oap.server.core.query.entity.QueryOrder;
import org.apache.skywalking.oap.server.core.query.entity.Span;
import org.apache.skywalking.oap.server.core.query.entity.TraceBrief;
import org.apache.skywalking.oap.server.core.query.entity.TraceState;
import org.apache.skywalking.oap.server.core.storage.query.ITraceQueryDAO;
import org.apache.skywalking.oap.server.library.util.BooleanUtils;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxClient;
import org.apache.skywalking.oap.server.storage.plugin.influxdb.base.RecordDAO;
import org.elasticsearch.common.Strings;
import org.influxdb.dto.Query;
import org.influxdb.dto.QueryResult;
import org.influxdb.querybuilder.SelectQueryImpl;
import org.influxdb.querybuilder.WhereQueryImpl;
import org.influxdb.querybuilder.clauses.Clause;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.eq;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.gte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.lte;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.regex;
import static org.influxdb.querybuilder.BuiltQuery.QueryBuilder.select;
@Slf4j
public class TraceQuery implements ITraceQueryDAO {
private final InfluxClient client;
public TraceQuery(InfluxClient client) {
this.client = client;
}
@Override
public TraceBrief queryBasicTraces(long startSecondTB,
long endSecondTB,
long minDuration,
long maxDuration,
String endpointName,
int serviceId,
int serviceInstanceId,
int endpointId,
String traceId,
int limit,
int from,
TraceState traceState,
QueryOrder queryOrder)
throws IOException {
String orderBy = SegmentRecord.START_TIME;
if (queryOrder == QueryOrder.BY_DURATION) {
orderBy = SegmentRecord.LATENCY;
}
WhereQueryImpl<SelectQueryImpl> recallQuery = select()
.function("top", orderBy, limit + from)
.column(SegmentRecord.SEGMENT_ID)
.column(SegmentRecord.START_TIME)
.column(SegmentRecord.ENDPOINT_NAME)
.column(SegmentRecord.LATENCY)
.column(SegmentRecord.IS_ERROR)
.column(SegmentRecord.TRACE_ID)
.from(client.getDatabase(), SegmentRecord.INDEX_NAME)
.where();
if (startSecondTB != 0 && endSecondTB != 0) {
recallQuery.and(gte(SegmentRecord.TIME_BUCKET, startSecondTB))
.and(lte(SegmentRecord.TIME_BUCKET, endSecondTB));
}
if (minDuration != 0) {
recallQuery.and(gte(SegmentRecord.LATENCY, minDuration));
}
if (maxDuration != 0) {
recallQuery.and(lte(SegmentRecord.LATENCY, maxDuration));
}
if (!Strings.isNullOrEmpty(endpointName)) {
recallQuery.and(regex(SegmentRecord.ENDPOINT_NAME, "/" + endpointName.replaceAll("/", "\\\\/") + "/"));
}
if (serviceId != 0) {
recallQuery.and(eq(RecordDAO.TAG_SERVICE_ID, String.valueOf(serviceId)));
}
if (serviceInstanceId != 0) {
recallQuery.and(eq(SegmentRecord.SERVICE_INSTANCE_ID, serviceInstanceId));
}
if (endpointId != 0) {
recallQuery.and(eq(SegmentRecord.ENDPOINT_ID, endpointId));
}
if (!Strings.isNullOrEmpty(traceId)) {
recallQuery.and(eq(SegmentRecord.TRACE_ID, traceId));
}
switch (traceState) {
case ERROR:
recallQuery.and(eq(SegmentRecord.IS_ERROR, BooleanUtils.TRUE));
break;
case SUCCESS:
recallQuery.and(eq(SegmentRecord.IS_ERROR, BooleanUtils.FALSE));
break;
}
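// Build a count query that reuses the recall query's WHERE clauses, then send
// both statements in one request; InfluxDB returns one Result per statement
// (hence the size == 2 check below).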
WhereQueryImpl<SelectQueryImpl> countQuery = select()
.count(SegmentRecord.ENDPOINT_ID)
.from(client.getDatabase(), SegmentRecord.INDEX_NAME)
.where();
for (Clause clause : recallQuery.getClauses()) {
countQuery.where(clause);
}
Query query = new Query(countQuery.getCommand() + recallQuery.getCommand());
List<QueryResult.Result> results = client.query(query);
if (log.isDebugEnabled()) {
log.debug("SQL: {} result set: {}", query.getCommand(), results);
}
if (results.size() != 2) {
throw new IOException("Expected to get 2 results, but got " + results.size());
}
List<QueryResult.Series> counter = results.get(0).getSeries();
List<QueryResult.Series> result = results.get(1).getSeries();
if (result == null || result.isEmpty()) {
return new TraceBrief();
}
TraceBrief traceBrief = new TraceBrief();
traceBrief.setTotal(((Number) counter.get(0).getValues().get(0).get(1)).intValue());
result.get(0).getValues().stream().sorted((a, b) -> {
// Re-sort here because top()/bottom() return the results ordered by `time`.
return Long.compare(((Number) b.get(1)).longValue(), ((Number) a.get(1)).longValue());
}).skip(from).forEach(values -> {
BasicTrace basicTrace = new BasicTrace();
basicTrace.setSegmentId((String) values.get(2));
basicTrace.setStart(String.valueOf((long) values.get(3)));
basicTrace.getEndpointNames().add((String) values.get(4));
basicTrace.setDuration((int) values.get(5));
basicTrace.setError(BooleanUtils.valueToBoolean((int) values.get(6)));
basicTrace.getTraceIds().add((String) values.get(7));
traceBrief.getTraces().add(basicTrace);
});
return traceBrief;
}
@Override
public List<SegmentRecord> queryByTraceId(String traceId) throws IOException {
WhereQueryImpl query = select().column(SegmentRecord.SEGMENT_ID)
.column(SegmentRecord.TRACE_ID)
.column(SegmentRecord.SERVICE_ID)
.column(SegmentRecord.ENDPOINT_NAME)
.column(SegmentRecord.START_TIME)
.column(SegmentRecord.END_TIME)
.column(SegmentRecord.LATENCY)
.column(SegmentRecord.IS_ERROR)
.column(SegmentRecord.DATA_BINARY)
.column(SegmentRecord.VERSION)
.from(client.getDatabase(), SegmentRecord.INDEX_NAME)
.where()
.and(eq(SegmentRecord.TRACE_ID, traceId));
List<QueryResult.Series> series = client.queryForSeries(query);
if (log.isDebugEnabled()) {
log.debug("SQL: {} result set: {}", query.getCommand(), series);
}
if (series == null || series.isEmpty()) {
return Collections.emptyList();
}
List<SegmentRecord> segmentRecords = Lists.newArrayList();
series.get(0).getValues().forEach(values -> {
SegmentRecord segmentRecord = new SegmentRecord();
segmentRecord.setSegmentId((String) values.get(1));
segmentRecord.setTraceId((String) values.get(2));
segmentRecord.setServiceId((int) values.get(3));
segmentRecord.setEndpointName((String) values.get(4));
segmentRecord.setStartTime((long) values.get(5));
segmentRecord.setEndTime((long) values.get(6));
segmentRecord.setLatency((int) values.get(7));
segmentRecord.setIsError((int) values.get(8));
segmentRecord.setVersion((int) values.get(10));
String base64 = (String) values.get(9);
if (!Strings.isNullOrEmpty(base64)) {
segmentRecord.setDataBinary(Base64.getDecoder().decode(base64));
}
segmentRecords.add(segmentRecord);
});
return segmentRecords;
}
@Override
public List<Span> doFlexibleTraceQuery(String traceId) throws IOException {
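// Flexible trace query is not supported by the InfluxDB storage implementation yet.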
return Collections.emptyList();
}
}
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
org.apache.skywalking.oap.server.storage.plugin.influxdb.InfluxStorageProvider
\ No newline at end of file
......@@ -15,8 +15,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
apt-get update && apt-get install -y gawk
if test "${MODE}" = "cluster"; then
original_wd=$(pwd)
......
......@@ -21,8 +21,8 @@ BEGIN {
in_cluster_zk_section=0;
in_storage_section=0;
in_storage_es_section=0;
in_storage_h2_section=0;
in_storage_selected=0;
}
{
......@@ -60,23 +60,29 @@ BEGIN {
} else if (in_storage_section == 1) {
# in the storage: section now
# disable h2 module
if (in_storage_es_section == 0) {
if (ENVIRON["ES_VERSION"] ~ /^6.+/) {
in_storage_es_section=$0 ~ /^#?\s+elasticsearch:$/
} else if (ENVIRON["ES_VERSION"] ~ /^7.+/) {
in_storage_es_section=$0 ~ /^#?\s+elasticsearch7:$/
if (in_storage_selected == 0) {
if (ENVIRON["STORAGE"] == "elasticsearch") {
if (ENVIRON["ES_VERSION"] ~ /^6.+/) {
in_storage_selected=$0 ~ /^#?\s+elasticsearch:$/
} else if (ENVIRON["ES_VERSION"] ~ /^7.+/) {
in_storage_selected=$0 ~ /^#?\s+elasticsearch7:$/
}
} else if (ENVIRON["STORAGE"] == "influxdb") {
in_storage_selected=$0 ~ /^#?\s+influx:$/
}
} else {
in_storage_es_section=$0 ~ /^#?\s{4}/
in_storage_selected=$0 ~ /^#?\s{4}/
}
if (in_storage_h2_section == 0) {
in_storage_h2_section=$0 ~ /^#?\s+h2:$/
} else {
in_storage_h2_section=$0 ~ /^#?\s{4}/
}
if (in_storage_es_section == 1) {
# in the storage.elasticsearch section now
# uncomment es config
if (in_storage_selected == 1) {
# enable selected storage
# uncomment es/influx config
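# Example invocation (a sketch; the script and file names here are assumptions):
#   STORAGE=influxdb gawk -f adapt_storage.awk config/application.yml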
gsub("^#", "", $0)
print
} else if (in_storage_h2_section == 1) {
......@@ -92,5 +98,4 @@ BEGIN {
} else {
print
}
}
}
\ No newline at end of file
......@@ -15,7 +15,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
apt-get update && apt-get install -y gawk
if test "${SW_STORAGE_METABASE_TYPE}" = "mysql"; then
MYSQL_URL="https://repo.maven.apache.org/maven2/mysql/mysql-connector-java/8.0.13/mysql-connector-java-8.0.13.jar"
MYSQL_DRIVER="mysql-connector-java-8.0.13.jar"
# Download MySQL connector.
curl -L -o "${SW_HOME}/oap-libs/${MYSQL_DRIVER}" ${MYSQL_URL}
[[ $? -ne 0 ]] && echo "Failed to download ${MYSQL_DRIVER}." && exit 1
fi
if test "${MODE}" = "cluster"; then
original_wd=$(pwd)
......
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
~
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<artifactId>apache-skywalking-e2e</artifactId>
<groupId>org.apache.skywalking</groupId>
<version>1.0.0</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>e2e-influxdb</artifactId>
<properties>
<e2e.container.name.prefix>skywalking-e2e-container-${build.id}-single-node-influxdb</e2e.container.name.prefix>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-jpa</artifactId>
<version>${spring.boot.version}</version>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.apache.skywalking</groupId>
<artifactId>e2e-base</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<version>${spring.boot.version}</version>
<configuration>
<executable>true</executable>
<addResources>true</addResources>
<excludeDevtools>true</excludeDevtools>
</configuration>
<executions>
<execution>
<goals>
<goal>repackage</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>io.fabric8</groupId>
<artifactId>docker-maven-plugin</artifactId>
<configuration>
<containerNamePattern>%a-%t-%i</containerNamePattern>
<images>
<image>
<name>influxdb:${influxdb.version}</name>
<alias>${e2e.container.name.prefix}-influxdb</alias>
<run>
<ports>
<port>influxdb.port:8086</port>
</ports>
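<!-- Wait (up to 50s) until InfluxDB answers GET /ping before dependent containers start. -->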
<wait>
<http>
<url>http://${docker.host.address}:${influxdb.port}/ping</url>
<method>GET</method>
</http>
<time>50000</time>
</wait>
</run>
</image>
<image>
<name>skyapm/e2e-container:${e2e.container.version}</name>
<alias>${e2e.container.name.prefix}</alias>
<run>
<env>
<INSTRUMENTED_SERVICE>${project.build.finalName}.jar</INSTRUMENTED_SERVICE>
<SW_STORAGE_INFLUXDB_URL>http://${e2e.container.name.prefix}-influxdb:8086</SW_STORAGE_INFLUXDB_URL>
</env>
<dependsOn>
<container>${e2e.container.name.prefix}-influxdb</container>
</dependsOn>
<links>
<link>${e2e.container.name.prefix}-influxdb</link>
</links>
<ports>
<port>+webapp.host:webapp.port:8081</port>
<port>+client.host:client.port:9090</port>
</ports>
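<!-- The leading "+" lets docker-maven-plugin pick free host ports and expose them as
Maven properties (webapp.port, client.port), which the failsafe plugin below
passes on to the tests. -->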
<volumes>
<bind>
<volume>${sw.home}:/sw</volume>
<volume>${project.build.directory}:/home</volume>
<volume>${project.basedir}/src/docker/rc.d:/rc.d:ro</volume>
<volume>${project.basedir}/src/docker/application.yml:/application.yml</volume>
</bind>
</volumes>
<wait>
<http>
<url>
http://${docker.host.address}:${client.port}/e2e/health-check
</url>
<method>GET</method>
<status>200</status>
</http>
<time>300000</time>
</wait>
</run>
</image>
</images>
</configuration>
</plugin>
<!-- set the system properties that can be used in test codes -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
<systemPropertyVariables>
<sw.webapp.host>
${webapp.host}
</sw.webapp.host>
<sw.webapp.port>
${webapp.port}
</sw.webapp.port>
<client.host>
${client.host}
</client.host>
<client.port>
${client.port}
</client.port>
</systemPropertyVariables>
</configuration>
<executions>
<execution>
<goals>
<goal>verify</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
\ No newline at end of file
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cluster:
standalone:
# Please make sure your ZooKeeper is 3.5+. It is also compatible with ZooKeeper 3.4.x if you replace the ZooKeeper 3.5+
# library in the oap-libs folder with your ZooKeeper 3.4.x library.
# zookeeper:
# nameSpace: ${SW_NAMESPACE:""}
# hostPort: ${SW_CLUSTER_ZK_HOST_PORT:localhost:2181}
# #Retry Policy
# baseSleepTimeMs: ${SW_CLUSTER_ZK_SLEEP_TIME:1000} # initial amount of time to wait between retries
# maxRetries: ${SW_CLUSTER_ZK_MAX_RETRIES:3} # max number of times to retry
# # Enable ACL
# enableACL: ${SW_ZK_ENABLE_ACL:false} # disable ACL in default
# schema: ${SW_ZK_SCHEMA:digest} # only support digest schema
# expression: ${SW_ZK_EXPRESSION:skywalking:skywalking}
# kubernetes:
# watchTimeoutSeconds: ${SW_CLUSTER_K8S_WATCH_TIMEOUT:60}
# namespace: ${SW_CLUSTER_K8S_NAMESPACE:default}
# labelSelector: ${SW_CLUSTER_K8S_LABEL:app=collector,release=skywalking}
# uidEnvName: ${SW_CLUSTER_K8S_UID:SKYWALKING_COLLECTOR_UID}
# consul:
# serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
# Consul cluster nodes, example: 10.0.0.1:8500,10.0.0.2:8500,10.0.0.3:8500
# hostPort: ${SW_CLUSTER_CONSUL_HOST_PORT:localhost:8500}
# nacos:
# serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
# # Nacos Configuration namespace
# namespace: ${SW_CLUSTER_NACOS_NAMESPACE:"public"}
# hostPort: ${SW_CLUSTER_NACOS_HOST_PORT:localhost:8848}
# etcd:
# serviceName: ${SW_SERVICE_NAME:"SkyWalking_OAP_Cluster"}
# etcd cluster nodes, example: 10.0.0.1:2379,10.0.0.2:2379,10.0.0.3:2379
# hostPort: ${SW_CLUSTER_ETCD_HOST_PORT:localhost:2379}
core:
default:
# Mixed: Receive agent data, Level 1 aggregate, Level 2 aggregate
# Receiver: Receive agent data, Level 1 aggregate
# Aggregator: Level 2 aggregate
role: ${SW_CORE_ROLE:Mixed} # Mixed/Receiver/Aggregator
restHost: ${SW_CORE_REST_HOST:0.0.0.0}
restPort: ${SW_CORE_REST_PORT:12800}
restContextPath: ${SW_CORE_REST_CONTEXT_PATH:/}
gRPCHost: ${SW_CORE_GRPC_HOST:0.0.0.0}
gRPCPort: ${SW_CORE_GRPC_PORT:11800}
downsampling:
- Hour
- Day
- Month
# Set a timeout on metrics data. After the timeout has expired, the metrics data will automatically be deleted.
enableDataKeeperExecutor: ${SW_CORE_ENABLE_DATA_KEEPER_EXECUTOR:true} # If turned off, automatic metrics data deletion is disabled.
dataKeeperExecutePeriod: ${SW_CORE_DATA_KEEPER_EXECUTE_PERIOD:5} # How often the data keeper executor runs, in minutes
recordDataTTL: ${SW_CORE_RECORD_DATA_TTL:90} # Unit is minute
minuteMetricsDataTTL: ${SW_CORE_MINUTE_METRIC_DATA_TTL:90} # Unit is minute
hourMetricsDataTTL: ${SW_CORE_HOUR_METRIC_DATA_TTL:36} # Unit is hour
dayMetricsDataTTL: ${SW_CORE_DAY_METRIC_DATA_TTL:45} # Unit is day
monthMetricsDataTTL: ${SW_CORE_MONTH_METRIC_DATA_TTL:18} # Unit is month
# Cache metric data for 1 minute to reduce database queries, and if the OAP cluster changes within that minute,
# the metrics may not be accurate within that minute.
enableDatabaseSession: ${SW_CORE_ENABLE_DATABASE_SESSION:true}
storage:
# elasticsearch:
# nameSpace: ${SW_NAMESPACE:""}
# clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:localhost:9200}
# protocol: ${SW_STORAGE_ES_HTTP_PROTOCOL:"http"}
# #trustStorePath: ${SW_SW_STORAGE_ES_SSL_JKS_PATH:"../es_keystore.jks"}
# #trustStorePass: ${SW_SW_STORAGE_ES_SSL_JKS_PASS:""}
# user: ${SW_ES_USER:""}
# password: ${SW_ES_PASSWORD:""}
# indexShardsNumber: ${SW_STORAGE_ES_INDEX_SHARDS_NUMBER:2}
# indexReplicasNumber: ${SW_STORAGE_ES_INDEX_REPLICAS_NUMBER:0}
# # Those data TTL settings will override the same settings in core module.
# recordDataTTL: ${SW_STORAGE_ES_RECORD_DATA_TTL:7} # Unit is day
# otherMetricsDataTTL: ${SW_STORAGE_ES_OTHER_METRIC_DATA_TTL:45} # Unit is day
# monthMetricsDataTTL: ${SW_STORAGE_ES_MONTH_METRIC_DATA_TTL:18} # Unit is month
# # Batch process setting, refer to https://www.elastic.co/guide/en/elasticsearch/client/java-api/5.5/java-docs-bulk-processor.html
# bulkActions: ${SW_STORAGE_ES_BULK_ACTIONS:1000} # Execute the bulk every 1000 requests
# flushInterval: ${SW_STORAGE_ES_FLUSH_INTERVAL:10} # flush the bulk every 10 seconds whatever the number of requests
# concurrentRequests: ${SW_STORAGE_ES_CONCURRENT_REQUESTS:2} # the number of concurrent requests
# metadataQueryMaxSize: ${SW_STORAGE_ES_QUERY_MAX_SIZE:5000}
# segmentQueryMaxSize: ${SW_STORAGE_ES_QUERY_SEGMENT_SIZE:200}
# profileTaskQueryMaxSize: ${SW_STORAGE_ES_QUERY_PROFILE_TASK_SIZE:200}
# h2:
# driver: ${SW_STORAGE_H2_DRIVER:org.h2.jdbcx.JdbcDataSource}
# url: ${SW_STORAGE_H2_URL:jdbc:h2:mem:skywalking-oap-db}
# user: ${SW_STORAGE_H2_USER:sa}
# metadataQueryMaxSize: ${SW_STORAGE_H2_QUERY_MAX_SIZE:5000}
# mysql:
# properties:
# jdbcUrl: ${SW_JDBC_URL:"jdbc:mysql://localhost:3306/swtest"}
# dataSource.user: ${SW_DATA_SOURCE_USER:root}
# dataSource.password: ${SW_DATA_SOURCE_PASSWORD:root@1234}
# dataSource.cachePrepStmts: ${SW_DATA_SOURCE_CACHE_PREP_STMTS:true}
# dataSource.prepStmtCacheSize: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_SIZE:250}
# dataSource.prepStmtCacheSqlLimit: ${SW_DATA_SOURCE_PREP_STMT_CACHE_SQL_LIMIT:2048}
# dataSource.useServerPrepStmts: ${SW_DATA_SOURCE_USE_SERVER_PREP_STMTS:true}
# metadataQueryMaxSize: ${SW_STORAGE_MYSQL_QUERY_MAX_SIZE:5000}
influx:
# Metadata storage provider configuration
metabaseType: ${SW_STORAGE_METABASE_TYPE:H2} # Two metadata storage providers are supported: H2 or MySQL.
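# e.g. set SW_STORAGE_METABASE_TYPE=mysql to keep the metadata in MySQL instead of H2.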
h2Props:
dataSourceClassName: ${SW_STORAGE_METABASE_DRIVER:org.h2.jdbcx.JdbcDataSource}
dataSource.url: ${SW_STORAGE_METABASE_URL:jdbc:h2:mem:skywalking-oap-db}
dataSource.user: ${SW_STORAGE_METABASE_USER:sa}
dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:}
mysqlProps:
jdbcUrl: ${SW_STORAGE_METABASE_URL:"jdbc:mysql://localhost:3306/swtest"}
dataSource.user: ${SW_STORAGE_METABASE_USER:root}
dataSource.password: ${SW_STORAGE_METABASE_PASSWORD:root@1234}
dataSource.cachePrepStmts: ${SW_STORAGE_METABASE_CACHE_PREP_STMTS:true}
dataSource.prepStmtCacheSize: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_SIZE:250}
dataSource.prepStmtCacheSqlLimit: ${SW_STORAGE_METABASE_PREP_STMT_CACHE_SQL_LIMIT:2048}
dataSource.useServerPrepStmts: ${SW_STORAGE_METABASE_USE_SERVER_PREP_STMTS:true}
metadataQueryMaxSize: ${SW_STORAGE_METABASE_QUERY_MAX_SIZE:5000}
# InfluxDB configuration
url: ${SW_STORAGE_INFLUXDB_URL:http://localhost:8086}
user: ${SW_STORAGE_INFLUXDB_USER:root}
password: ${SW_STORAGE_INFLUXDB_PASSWORD:}
database: ${SW_STORAGE_INFLUXDB_DATABASE:skywalking}
actions: ${SW_STORAGE_INFLUXDB_ACTIONS:1000} # the max number of data points to collect before a batch write
duration: ${SW_STORAGE_INFLUXDB_DURATION:1000} # the max time to wait before a batch write (milliseconds)
fetchTaskLogMaxSize: ${SW_STORAGE_INFLUXDB_FETCH_TASK_LOG_MAX_SIZE:5000} # the max number of task logs to fetch in one request
receiver-sharing-server:
default:
receiver-register:
default:
receiver-trace:
default:
bufferPath: ${SW_RECEIVER_BUFFER_PATH:../trace-buffer/} # Path to trace buffer files; an absolute path is recommended
bufferOffsetMaxFileSize: ${SW_RECEIVER_BUFFER_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
bufferDataMaxFileSize: ${SW_RECEIVER_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
bufferFileCleanWhenRestart: ${SW_RECEIVER_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
sampleRate: ${SW_TRACE_SAMPLE_RATE:10000} # The sample rate precision is 1/10000; the default 10000 means 100% sampling.
slowDBAccessThreshold: ${SW_SLOW_DB_THRESHOLD:default:200,mongodb:100} # Slow database access thresholds, in milliseconds.
receiver-jvm:
default:
receiver-clr:
default:
#receiver-so11y:
# default:
receiver-profile:
default:
service-mesh:
default:
bufferPath: ${SW_SERVICE_MESH_BUFFER_PATH:../mesh-buffer/} # Path to mesh buffer files; an absolute path is recommended
bufferOffsetMaxFileSize: ${SW_SERVICE_MESH_OFFSET_MAX_FILE_SIZE:100} # Unit is MB
bufferDataMaxFileSize: ${SW_SERVICE_MESH_BUFFER_DATA_MAX_FILE_SIZE:500} # Unit is MB
bufferFileCleanWhenRestart: ${SW_SERVICE_MESH_BUFFER_FILE_CLEAN_WHEN_RESTART:false}
istio-telemetry:
default:
envoy-metric:
default:
# alsHTTPAnalysis: ${SW_ENVOY_METRIC_ALS_HTTP_ANALYSIS:k8s-mesh}
#receiver_zipkin:
# default:
# host: ${SW_RECEIVER_ZIPKIN_HOST:0.0.0.0}
# port: ${SW_RECEIVER_ZIPKIN_PORT:9411}
# contextPath: ${SW_RECEIVER_ZIPKIN_CONTEXT_PATH:/}
#receiver_jaeger:
# default:
# gRPCHost: ${SW_RECEIVER_JAEGER_HOST:0.0.0.0}
# gRPCPort: ${SW_RECEIVER_JAEGER_PORT:14250}
query:
graphql:
path: ${SW_QUERY_GRAPHQL_PATH:/graphql}
alarm:
default:
telemetry:
none:
# prometheus:
# host: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
# port: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
# so11y:
# prometheusExporterEnabled: ${SW_TELEMETRY_SO11Y_PROMETHEUS_ENABLED:true}
# prometheusExporterHost: ${SW_TELEMETRY_PROMETHEUS_HOST:0.0.0.0}
# prometheusExporterPort: ${SW_TELEMETRY_PROMETHEUS_PORT:1234}
configuration:
none:
# apollo:
# apolloMeta: http://106.12.25.204:8080
# apolloCluster: default
# # apolloEnv: # defaults to null
# appId: skywalking
# period: 5
# nacos:
# # Nacos Server Host
# serverAddr: 127.0.0.1
# # Nacos Server Port
# port: 8848
# # Nacos Configuration Group
# group: 'skywalking'
# # Nacos Configuration namespace
# namespace: ''
# # Unit seconds, sync period. Default fetch every 60 seconds.
# period : 5
# # the name of the current cluster; set it if you want the upstream system to know it.
# clusterName: "default"
# zookeeper:
# period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
# nameSpace: /default
# hostPort: localhost:2181
# #Retry Policy
# baseSleepTimeMs: 1000 # initial amount of time to wait between retries
# maxRetries: 3 # max number of times to retry
# etcd:
# period : 60 # Unit seconds, sync period. Default fetch every 60 seconds.
# group : 'skywalking'
# serverAddr: localhost:2379
# clusterName: "default"
# consul:
# # Consul host and ports, separated by comma, e.g. 1.2.3.4:8500,2.3.4.5:8500
# hostAndPorts: ${consul.address}
# # Sync period in seconds. Defaults to 60 seconds.
# period: 1
#exporter:
# grpc:
# targetHost: ${SW_EXPORTER_GRPC_HOST:127.0.0.1}
# targetPort: ${SW_EXPORTER_GRPC_PORT:9870}
#!/usr/bin/env bash
# Licensed to the SkyAPM under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "InfluxDB with H2 database is storage provider..."
# Modify application.yml to set InfluxDB as storage provider.
cat /application.yml > "${SW_HOME}/config/application.yml"
#!/usr/bin/env bash
# Licensed to the SkyAPM under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo 'starting OAP server...' && start_oap 'init'
echo 'starting Web app...' && start_webapp '0.0.0.0' 8081
echo 'starting instrumented services...' && start_instrumented_services
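# Wait for the instrumented service on port 9090: up to 60 attempts, 10 seconds apart.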
check_tcp 127.0.0.1 \
9090 \
60 \
10 \
"waiting for the instrumented service to be ready"
if [[ $? -ne 0 ]]; then
echo "instrumented service 0 failed to start in 30 * 10 seconds: "
cat ${SERVICE_LOG}/*
exit 1
fi
echo "SkyWalking e2e container is ready for tests"
tail -f ${OAP_LOG_DIR}/* \
${WEBAPP_LOG_DIR}/* \
${SERVICE_LOG}/*
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.e2e.sample.client;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
@EnableJpaRepositories
@SpringBootApplication
public class SampleClientApplication {
public static void main(String[] args) {
SpringApplication.run(SampleClientApplication.class, args);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.e2e.sample.client;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
@RestController
@RequestMapping("/e2e")
public class TestController {
private final UserRepo userRepo;
public TestController(final UserRepo userRepo) {
this.userRepo = userRepo;
}
@GetMapping("/health-check")
public String hello() {
return "healthy";
}
@PostMapping("/users")
public User createAuthor(@RequestBody final User user) throws InterruptedException {
Thread.sleep(1000L);
return userRepo.save(user);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.e2e.sample.client;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
@Entity
public class User {
public User() {
}
@Id
@GeneratedValue
private Long id;
@Column
private String name;
public Long getId() {
return id;
}
public void setId(final Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(final String name) {
this.name = name;
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.e2e.sample.client;
import org.springframework.data.jpa.repository.JpaRepository;
public interface UserRepo extends JpaRepository<User, Long> {
}
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
server:
port: 9090
spring:
main:
banner-mode: 'off'
datasource:
url: jdbc:h2:mem:testdb
driver-class-name: org.h2.Driver
data-username: sa
password: sa
platform: org.hibernate.dialect.H2Dialect
jpa:
generate-ddl: true
hibernate:
ddl-auto: create-drop
properties:
hibernate.format_sql: true
show-sql: true
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.e2e;
import org.apache.skywalking.e2e.metrics.AtLeastOneOfMetricsMatcher;
import org.apache.skywalking.e2e.metrics.Metrics;
import org.apache.skywalking.e2e.metrics.MetricsQuery;
import org.apache.skywalking.e2e.metrics.MetricsValueMatcher;
import org.apache.skywalking.e2e.service.Service;
import org.apache.skywalking.e2e.service.ServicesMatcher;
import org.apache.skywalking.e2e.service.ServicesQuery;
import org.apache.skywalking.e2e.service.endpoint.Endpoint;
import org.apache.skywalking.e2e.service.endpoint.EndpointQuery;
import org.apache.skywalking.e2e.service.endpoint.Endpoints;
import org.apache.skywalking.e2e.service.endpoint.EndpointsMatcher;
import org.apache.skywalking.e2e.service.instance.Instance;
import org.apache.skywalking.e2e.service.instance.Instances;
import org.apache.skywalking.e2e.service.instance.InstancesMatcher;
import org.apache.skywalking.e2e.service.instance.InstancesQuery;
import org.apache.skywalking.e2e.topo.*;
import org.apache.skywalking.e2e.trace.Trace;
import org.apache.skywalking.e2e.trace.TracesMatcher;
import org.apache.skywalking.e2e.trace.TracesQuery;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.io.ClassPathResource;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.web.client.RestTemplate;
import org.yaml.snakeyaml.Yaml;
import java.io.InputStream;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.apache.skywalking.e2e.metrics.MetricsMatcher.verifyMetrics;
import static org.apache.skywalking.e2e.metrics.MetricsQuery.*;
import static org.assertj.core.api.Assertions.assertThat;
@RunWith(SpringJUnit4ClassRunner.class)
public class SampleVerificationITCase {
private static final Logger LOGGER = LoggerFactory.getLogger(SampleVerificationITCase.class);
private final RestTemplate restTemplate = new RestTemplate();
private final int retryInterval = 30;
private SimpleQueryClient queryClient;
private String instrumentedServiceUrl;
@Before
public void setUp() {
final String swWebappHost = System.getProperty("sw.webapp.host", "127.0.0.1");
final String swWebappPort = System.getProperty("sw.webapp.port", "32783");
final String instrumentedServiceHost = System.getProperty("client.host", "127.0.0.1");
final String instrumentedServicePort = System.getProperty("client.port", "32782");
queryClient = new SimpleQueryClient(swWebappHost, swWebappPort);
instrumentedServiceUrl = "http://" + instrumentedServiceHost + ":" + instrumentedServicePort;
}
@Test(timeout = 1200000)
@DirtiesContext
public void verify() throws Exception {
final LocalDateTime minutesAgo = LocalDateTime.now(ZoneOffset.UTC);
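// Warm-up: keep sending requests until at least one trace is queryable,
// so that the verifications below have data to assert on.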
while (true) {
try {
final Map<String, String> user = new HashMap<>();
user.put("name", "SkyWalking");
final ResponseEntity<String> responseEntity = restTemplate.postForEntity(
instrumentedServiceUrl + "/e2e/users",
user,
String.class
);
LOGGER.info("responseEntity: {}", responseEntity);
assertThat(responseEntity.getStatusCode()).isEqualTo(HttpStatus.OK);
final List<Trace> traces = queryClient.traces(
new TracesQuery()
.start(minutesAgo)
.end(LocalDateTime.now(ZoneOffset.UTC))
.orderByDuration()
);
if (!traces.isEmpty()) {
break;
}
Thread.sleep(10000L);
} catch (Exception ignored) {
}
}
doRetryableVerification(() -> {
try {
verifyTraces(minutesAgo);
} catch (Exception e) {
LOGGER.warn(e.getMessage(), e);
}
});
doRetryableVerification(() -> {
try {
verifyServices(minutesAgo);
} catch (Exception e) {
LOGGER.warn(e.getMessage(), e);
}
});
doRetryableVerification(() -> {
try {
verifyTopo(minutesAgo);
} catch (Exception e) {
LOGGER.warn(e.getMessage(), e);
}
});
doRetryableVerification(() -> {
try {
verifyServiceInstanceTopo(minutesAgo);
} catch (Exception e) {
LOGGER.warn(e.getMessage(), e);
}
});
}
private void verifyTopo(LocalDateTime minutesAgo) throws Exception {
final LocalDateTime now = LocalDateTime.now(ZoneOffset.UTC);
final TopoData topoData = queryClient.topo(
new TopoQuery()
.stepByMinute()
.start(minutesAgo.minusDays(1))
.end(now)
);
LOGGER.info("topoData: {}", topoData);
InputStream expectedInputStream =
new ClassPathResource("expected-data/org.apache.skywalking.e2e.SampleVerificationITCase.topo.yml").getInputStream();
final TopoMatcher topoMatcher = new Yaml().loadAs(expectedInputStream, TopoMatcher.class);
topoMatcher.verify(topoData);
verifyServiceRelationMetrics(topoData.getCalls(), minutesAgo);
}
private void verifyServiceInstanceTopo(LocalDateTime minutesAgo) throws Exception {
final LocalDateTime now = LocalDateTime.now(ZoneOffset.UTC);
final ServiceInstanceTopoData topoData = queryClient.serviceInstanceTopo(
new ServiceInstanceTopoQuery()
.stepByMinute()
.start(minutesAgo.minusDays(1))
.end(now)
.clientServiceId("1")
.serverServiceId("2")
);
LOGGER.info("instanceTopoData: {}", topoData);
InputStream expectedInputStream =
new ClassPathResource("expected-data/org.apache.skywalking.e2e.SampleVerificationITCase.serviceInstanceTopo.yml").getInputStream();
final ServiceInstanceTopoMatcher topoMatcher = new Yaml().loadAs(expectedInputStream, ServiceInstanceTopoMatcher.class);
topoMatcher.verify(topoData);
verifyServiceInstanceRelationMetrics(topoData.getCalls(), minutesAgo);
}
private void verifyServices(LocalDateTime minutesAgo) throws Exception {
final LocalDateTime now = LocalDateTime.now(ZoneOffset.UTC);
final List<Service> services = queryClient.services(
new ServicesQuery()
.start(minutesAgo)
.end(now)
);
LOGGER.info("services: {}", services);
InputStream expectedInputStream =
new ClassPathResource("expected-data/org.apache.skywalking.e2e.SampleVerificationITCase.services.yml").getInputStream();
final ServicesMatcher servicesMatcher = new Yaml().loadAs(expectedInputStream, ServicesMatcher.class);
servicesMatcher.verify(services);
for (Service service : services) {
LOGGER.info("verifying service instances: {}", service);
verifyServiceMetrics(service);
Instances instances = verifyServiceInstances(minutesAgo, now, service);
verifyInstancesMetrics(instances);
Endpoints endpoints = verifyServiceEndpoints(minutesAgo, now, service);
verifyEndpointsMetrics(endpoints);
}
}
private Instances verifyServiceInstances(LocalDateTime minutesAgo, LocalDateTime now,
Service service) throws Exception {
InputStream expectedInputStream;
Instances instances = queryClient.instances(
new InstancesQuery()
.serviceId(service.getKey())
.start(minutesAgo)
.end(now)
);
LOGGER.info("instances: {}", instances);
expectedInputStream =
new ClassPathResource("expected-data/org.apache.skywalking.e2e.SampleVerificationITCase.instances.yml").getInputStream();
final InstancesMatcher instancesMatcher = new Yaml().loadAs(expectedInputStream, InstancesMatcher.class);
instancesMatcher.verify(instances);
return instances;
}
private Endpoints verifyServiceEndpoints(LocalDateTime minutesAgo, LocalDateTime now,
Service service) throws Exception {
Endpoints instances = queryClient.endpoints(
new EndpointQuery().serviceId(service.getKey())
);
LOGGER.info("instances: {}", instances);
InputStream expectedInputStream =
new ClassPathResource("expected-data/org.apache.skywalking.e2e.SampleVerificationITCase.endpoints.yml").getInputStream();
final EndpointsMatcher endpointsMatcher = new Yaml().loadAs(expectedInputStream, EndpointsMatcher.class);
endpointsMatcher.verify(instances);
return instances;
}
private void verifyInstancesMetrics(Instances instances) throws Exception {
for (Instance instance : instances.getInstances()) {
for (String metricsName : ALL_INSTANCE_METRICS) {
LOGGER.info("verifying service instance response time: {}", instance);
final Metrics instanceMetrics = queryClient.metrics(
new MetricsQuery()
.stepByMinute()
.metricsName(metricsName)
.id(instance.getKey())
);
LOGGER.info("instanceMetrics: {}", instanceMetrics);
AtLeastOneOfMetricsMatcher instanceRespTimeMatcher = new AtLeastOneOfMetricsMatcher();
MetricsValueMatcher greaterThanZero = new MetricsValueMatcher();
greaterThanZero.setValue("gt 0");
instanceRespTimeMatcher.setValue(greaterThanZero);
instanceRespTimeMatcher.verify(instanceMetrics);
LOGGER.info("{}: {}", metricsName, instanceMetrics);
}
}
}
private void verifyEndpointsMetrics(Endpoints endpoints) throws Exception {
for (Endpoint endpoint : endpoints.getEndpoints()) {
if (!endpoint.getLabel().equals("/e2e/users")) {
continue;
}
for (String metricName : ALL_ENDPOINT_METRICS) {
LOGGER.info("verifying endpoint {}, metrics: {}", endpoint, metricName);
final Metrics metrics = queryClient.metrics(
new MetricsQuery()
.stepByMinute()
.metricsName(metricName)
.id(endpoint.getKey())
);
LOGGER.info("metrics: {}", metrics);
AtLeastOneOfMetricsMatcher instanceRespTimeMatcher = new AtLeastOneOfMetricsMatcher();
MetricsValueMatcher greaterThanZero = new MetricsValueMatcher();
greaterThanZero.setValue("gt 0");
instanceRespTimeMatcher.setValue(greaterThanZero);
instanceRespTimeMatcher.verify(metrics);
LOGGER.info("{}: {}", metricName, metrics);
}
}
}
private void verifyServiceMetrics(Service service) throws Exception {
for (String metricName : ALL_SERVICE_METRICS) {
LOGGER.info("verifying service {}, metrics: {}", service, metricName);
final Metrics serviceMetrics = queryClient.metrics(
new MetricsQuery()
.stepByMinute()
.metricsName(metricName)
.id(service.getKey())
);
LOGGER.info("serviceMetrics: {}", serviceMetrics);
AtLeastOneOfMetricsMatcher instanceRespTimeMatcher = new AtLeastOneOfMetricsMatcher();
MetricsValueMatcher greaterThanZero = new MetricsValueMatcher();
greaterThanZero.setValue("gt 0");
instanceRespTimeMatcher.setValue(greaterThanZero);
instanceRespTimeMatcher.verify(serviceMetrics);
LOGGER.info("{}: {}", metricName, serviceMetrics);
}
}
private void verifyTraces(LocalDateTime minutesAgo) throws Exception {
final LocalDateTime now = LocalDateTime.now(ZoneOffset.UTC);
final List<Trace> traces = queryClient.traces(
new TracesQuery()
.start(minutesAgo)
.end(now)
.orderByDuration()
);
LOGGER.info("traces: {}", traces);
InputStream expectedInputStream =
new ClassPathResource("expected-data/org.apache.skywalking.e2e.SampleVerificationITCase.traces.yml").getInputStream();
final TracesMatcher tracesMatcher = new Yaml().loadAs(expectedInputStream, TracesMatcher.class);
tracesMatcher.verifyLoosely(traces);
}
private void verifyServiceInstanceRelationMetrics(List<Call> calls, final LocalDateTime minutesAgo) throws Exception {
verifyRelationMetrics(calls, minutesAgo, ALL_SERVICE_INSTANCE_RELATION_CLIENT_METRICS, ALL_SERVICE_INSTANCE_RELATION_SERVER_METRICS);
}
private void verifyServiceRelationMetrics(List<Call> calls, final LocalDateTime minutesAgo) throws Exception {
verifyRelationMetrics(calls, minutesAgo, ALL_SERVICE_RELATION_CLIENT_METRICS, ALL_SERVICE_RELATION_SERVER_METRICS);
}
private void verifyRelationMetrics(List<Call> calls, final LocalDateTime minutesAgo, String[] relationClientMetrics, String[] relationServerMetrics) throws Exception {
for (Call call : calls) {
for (String detectPoint : call.getDetectPoints()) {
switch (detectPoint) {
case "CLIENT": {
for (String metricName : relationClientMetrics) {
verifyMetrics(queryClient, metricName, call.getId(), minutesAgo);
}
break;
}
case "SERVER": {
for (String metricName : relationServerMetrics) {
verifyMetrics(queryClient, metricName, call.getId(), minutesAgo);
}
break;
}
}
}
}
}
private void doRetryableVerification(Runnable runnable) throws InterruptedException {
while (true) {
try {
runnable.run();
break;
} catch (Throwable ignored) {
Thread.sleep(retryInterval);
}
}
}
}
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1 health-check by docker-maven-plugin
# 1 drop table if exists, because we have `ddl-auto: create-drop`
# 1 drop sequence
# 1 create sequence
# 1 create table statement
endpoints:
- key: not null
label: /e2e/health-check
- key: not null
label: /e2e/users
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1 health-check by docker-maven-plugin
# 1 drop table if exists, because we have `ddl-auto: create-drop`
# 1 drop sequence
# 1 create sequence
# 1 create table statement
instances:
- key: 2
label: not null
attributes:
- name: os_name
value: not null
- name: host_name
value: not null
- name: process_no
value: gt 0
- name: ipv4s
value: not null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1 health-check by docker-maven-plugin
# 1 drop table if exists, because we have `ddl-auto: create-drop`
# 1 drop sequence
# 1 create sequence
# 1 create table statement
nodes:
- id: 1
name: User
type: USER
serviceId: 1
serviceName: User
isReal: false
- id: 2
name: not null
serviceId: 2
serviceName: Your_ApplicationName
type: Tomcat
isReal: true
calls:
- id: 1_2
source: 1
detectPoints:
- SERVER
target: 2
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1 health-check by docker-maven-plugin
# 1 drop table if exists, because we have `ddl-auto: create-drop`
# 1 drop sequence
# 1 create sequence
# 1 create table statement
services:
- key: 2
label: "Your_ApplicationName"
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1 health-check by docker-maven-plugin
# 1 drop table if exists, because we have `ddl-auto: create-drop`
# 1 drop sequence
# 1 create sequence
# 1 create table statement
nodes:
- id: 1
name: User
type: USER
isReal: false
- id: 2
name: Your_ApplicationName
type: Tomcat
isReal: true
- id: 3
name: "localhost:-1"
type: H2
isReal: false
calls:
- id: 2_3
source: 2
detectPoints:
- CLIENT
target: 3
- id: 1_2
source: 1
detectPoints:
- SERVER
target: 2
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1 health-check by docker-maven-plugin
# 1 drop table if exists, because we have `ddl-auto: create-drop`
# 1 drop sequence
# 1 create sequence
# 1 create table statement
traces:
- key: not null
endpointNames:
- /e2e/users
duration: ge 0
start: gt 0
isError: false
traceIds:
- not null
......@@ -36,9 +36,11 @@ BEGIN {
} else if (ENVIRON["ES_VERSION"] ~ /^7.+/) {
in_storage_type_section=$0 ~ /^#?\s+elasticsearch7:$/
} else if (ENVIRON["STORAGE"] ~ /^mysql.*$/) {
in_storage_type_section=$0 ~ /^#?\s+mysql/
in_storage_type_section=$0 ~ /^#?\s+mysql:/
} else if (ENVIRON["STORAGE"] ~ /^h2.*$/) {
in_storage_type_section=$0 ~ /^#?\s+h2:$/
} else if (ENVIRON["STORAGE"] ~ /^influx.*$/) {
in_storage_type_section=$0 ~ /^#?\s+influx:$/
}
} else {
in_storage_type_section=$0 ~ /^#?\s{4}/
......
......@@ -15,8 +15,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
apt-get update && apt-get install -y gawk
original_wd=$(pwd)
# substitute application.yml to be capable of es mode
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.