Commit efdc657c authored by: P Ping Xiao

Merge branch 'develop' into coverity_scan

......@@ -2,7 +2,6 @@
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
[![Docker Pulls](https://img.shields.io/docker/pulls/tdengine/tdengine)](https://hub.docker.com/repository/docker/tdengine/tdengine)
[![tdengine](https://snapcraft.io//tdengine/badge.svg)](https://snapcraft.io/tdengine)
[![TDengine](TDenginelogo.png)](https://www.taosdata.com)
......
......@@ -9,9 +9,7 @@ set -e
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
lib_link_dir="/usr/lib"
#install main path
install_main_dir="/usr/local/taos"
lib64_link_dir="/usr/lib64"
# Color setting
RED='\033[0;31m'
......@@ -25,24 +23,23 @@ if command -v sudo > /dev/null; then
csudo="sudo"
fi
function clean_driver() {
${csudo} rm -f /usr/lib/libtaos.so || :
}
function install_driver() {
echo -e "${GREEN}Start to install TDengine client driver ...${NC}"
#create install main dir and all sub dir
${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/driver
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
echo
echo -e "\033[44;32;1mTDengine client driver is successfully installed!${NC}"
if [[ -d ${lib_link_dir} && ! -e ${lib_link_dir}/libtaos.so ]]; then
echo -e "${GREEN}Start to install TDengine client driver ...${NC}"
${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so || :
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
echo
echo -e "${GREEN}TDengine client driver is successfully installed!${NC}"
else
echo -e "${GREEN}TDengine client driver already exists, Please confirm whether the alert version matches the client driver version!${NC}"
fi
}
install_driver
......@@ -119,7 +119,7 @@ WantedBy=multi-user.target
return nil
}
const version = "TDengine alert v2.0.0.1"
var version = "2.0.0.1s"
func main() {
var (
......@@ -133,7 +133,7 @@ func main() {
flag.Parse()
if showVersion {
fmt.Println(version)
fmt.Println("TDengine alert v" + version)
return
}
......
......@@ -6,9 +6,9 @@ set -e
# set parameters by default value
cpuType=amd64 # [armv6l | arm64 | amd64 | 386]
osType=linux # [linux | darwin | windows]
version=""
declare -A archMap=(["armv6l"]="arm" ["arm64"]="arm64" ["amd64"]="x64" ["386"]="x86")
while getopts "h:c:o:" arg
while getopts "h:c:o:n:" arg
do
case $arg in
c)
......@@ -19,6 +19,10 @@ do
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
;;
n)
#echo "version=$OPTARG"
version=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [armv6l | arm64 | amd64 | 386] -o [linux | darwin | windows]"
exit 0
......@@ -30,18 +34,27 @@ do
esac
done
if [ "$version" == "" ]; then
echo "Please input the correct version!"
exit 1
fi
startdir=$(pwd)
scriptdir=$(dirname $(readlink -f $0))
cd ${scriptdir}/cmd/alert
version=$(grep 'const version =' main.go | awk '{print $NF}')
version=${version%\"}
version=${version:1}
echo "cpuType=${cpuType}"
echo "osType=${osType}"
echo "version=${version}"
GOOS=${osType} GOARCH=${cpuType} go build
GOOS=${osType} GOARCH=${cpuType} go build -ldflags '-X main.version='${version}
mkdir -p TDengine-alert/driver
cp alert alert.cfg install_driver.sh ./TDengine-alert/.
cp ../../../debug/build/lib/libtaos.so.${version} ./TDengine-alert/driver/.
chmod 777 ./TDengine-alert/install_driver.sh
tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz TDengine-alert/
rm -rf ./TDengine-alert
tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz alert alert.cfg install_driver.sh driver/
......@@ -100,7 +100,7 @@ IF (TD_LINUX)
ENDIF ()
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
SET(RELEASE_FLAGS "-O0 -Wno-unused-variable -Wunused-but-set-variable")
IF (${COVER} MATCHES "true")
MESSAGE(STATUS "Test coverage mode, add extra flags")
......
......@@ -13,7 +13,7 @@ ELSEIF (TD_WINDOWS)
IF (NOT TD_GODLL)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
......
......@@ -11,7 +11,7 @@ TDengine能够与开源数据可视化系统[Grafana](https://www.grafana.com/)
### Configure Grafana
TDengine's Grafana plugin is located in the /usr/local/taos/connector/grafana directory of the installation package.
TDengine's Grafana plugin is located in the /usr/local/taos/connector/grafanaplugin directory of the installation package.
Taking CentOS 7.2 as an example, copy the tdengine directory into /var/lib/grafana/plugins and restart Grafana.
......
......@@ -78,6 +78,18 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s);
2018-10-03 14:38:16.000 | 36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
```
Down-sampling also supports a time offset. For example, sum the current collected by all smart meters every second, but with each time window starting at the 500 millisecond mark:
```mysql
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.500 | 11.189999809 |
2018-10-03 14:38:05.500 | 31.900000572 |
2018-10-03 14:38:06.500 | 11.600000000 |
2018-10-03 14:38:15.500 | 12.300000381 |
2018-10-03 14:38:16.500 | 35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
```
In IoT scenarios, the collection times of different data collection points are hard to synchronize, yet many analysis algorithms (such as FFT) require the collected data to be aligned on strictly equal time intervals. In many systems the application has to write its own code to handle this, but TDengine's down-sampling solves it easily. If no data was collected within a time interval, TDengine also provides interpolation.
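As a minimal sketch (not from the original document), assuming the same meters table and current column as the earlier examples and an illustrative time range, an aligned, gap-filled down-sampling query could look like this:
```mysql
-- align data onto 1-second windows and interpolate windows that have no samples
SELECT SUM(current) FROM meters
  WHERE ts >= '2018-10-03 14:38:00.000' AND ts < '2018-10-03 14:38:20.000'
  INTERVAL(1s) FILL(LINEAR);
```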
......
......@@ -148,7 +148,7 @@ INSERT INTO <tb1_name> USING <stb1_name> TAGS (<tag1_value1>, ...) VALUES (<fiel
SELECT function<field_name>,…
FROM <stable_name>
WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
INTERVAL (<time range>)
INTERVAL (<interval> [, offset])
GROUP BY <tag_name>, <tag_name>…
ORDER BY <tag_name> <asc|desc>
SLIMIT <group_limit>
......
......@@ -33,8 +33,7 @@ taos> DESCRIBE meters;
- The built-in function now returns the current time of the server
- When inserting a record, if the timestamp is now, the server's current time is used
- Epoch Time: a timestamp can also be a long integer, the number of milliseconds since 1970-01-01 08:00:00.000
- Timestamps support arithmetic, e.g. now-2h means 2 hours before the query time (the last 2 hours). Time units after the number: a (millisecond), s (second), m (minute), h (hour), d (day), w (week), n (month), y (year). For example, select * from t1 where ts > now-2w and ts <= now-1w queries the full week that ended one week ago
- TDengine does not yet support splitting time windows by calendar year or month. Time-window units in the WHERE condition are converted as follows: interval(1y) is equivalent to interval(365d), interval(1n) to interval(30d), and interval(1w) to interval(7d)
- Timestamps support arithmetic, e.g. now-2h means 2 hours before the query time (the last 2 hours). Time units after the number can be a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, select * from t1 where ts > now-2w and ts <= now-1w queries the full week that ended one week ago. When specifying the time window (interval) of a down-sampling operation, the units n (calendar month) and y (calendar year) can also be used.
TDengine's default timestamp precision is millisecond, but microsecond precision can be enabled by modifying the configuration parameter enableMicrosecond.
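A brief, hedged sketch of these time expressions, reusing the t1 and meters tables from the surrounding examples (the lookback ranges are illustrative assumptions):
```mysql
-- the full week that ended one week ago, using relative time arithmetic
SELECT * FROM t1 WHERE ts > now-2w AND ts <= now-1w;
-- down-sample by calendar month; n (and y) are valid only as INTERVAL units
SELECT AVG(current) FROM meters WHERE ts > now-24w INTERVAL(1n);
```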
......@@ -299,7 +298,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
SELECT select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
[INTERVAL [interval_offset,] interval_val]
[INTERVAL (interval_val [, interval_offset])]
[FILL fill_val]
[SLIDING fill_val]
[GROUP BY col_list]
......@@ -972,17 +971,17 @@ TDengine支持按时间段进行聚合,可以将表中数据按照时间段进
```mysql
SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval)
INTERVAL (interval [, offset])
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
SELECT function_list FROM stb_name
[WHERE where_condition]
INTERVAL (interval)
INTERVAL (interval [, offset])
[FILL ({ VALUE | PREV | NULL | LINEAR})]
[GROUP BY tags]
```
- The length of the aggregation window is specified by the INTERVAL keyword; the minimum interval is 10 milliseconds (10a). In an aggregation query, the aggregation and selection functions that can be used together are limited to single-output functions: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions with multi-row output (e.g. top, bottom, diff, and arithmetic expressions) cannot be used.
- The length of the aggregation window is specified by the INTERVAL keyword; the minimum interval is 10 milliseconds (10a), and an offset is supported (the offset must be smaller than the interval). In an aggregation query, the aggregation and selection functions that can be used together are limited to single-output functions: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions with multi-row output (e.g. top, bottom, diff, and arithmetic expressions) cannot be used. (See the sketch after this list.)
- The WHERE clause can specify the start and end time of the query and other filter conditions
- The FILL clause specifies how to fill a time window that has no data. The fill modes are:
  1. No filling: NONE (the default fill mode).
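A hedged sketch combining these rules, with an offset smaller than the interval and a fill mode (the table, columns, and time range are illustrative assumptions):
```mysql
-- 10-minute windows shifted forward by 1 minute; empty windows repeat the previous value
SELECT COUNT(*), FIRST(current) FROM meters
  WHERE ts >= '2018-10-03 14:00:00' AND ts < '2018-10-03 20:00:00'
  INTERVAL(10m, 1m) FILL(PREV);
```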
......@@ -1020,5 +1019,5 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER
- The maximum table name length is 193; the maximum length of each row is 16k characters
- The maximum column name length is 65; at most 1024 columns and at least 2 columns are allowed, and the first column must be the timestamp
- At most 128 tags are allowed (0 tags is also allowed); the total tag length must not exceed 16k characters
- The maximum SQL statement length is 65480 characters, but it can be changed via the system configuration parameter maxSQLLength, up to 8M
- The maximum SQL statement length is 65480 characters, but it can be changed via the system configuration parameter maxSQLLength, up to 1M
- The number of databases, super tables, and tables is not limited by the system, only by system resources
# TDengine 2.0 error codes and their decimal equivalents
| Code | bit | error code | Error Description | Decimal Error Code |
| Status Code | Mode | Error Code (hex) | Error Description | Error Code (decimal) |
|-----------------------| :---: | :---------: | :------------------------ | ---------------- |
|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
|TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646|
......@@ -87,7 +87,7 @@
|TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814|
|TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800|
|TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799|
|TSDB_CODE_MND_INVALID_USER_FORMAT| |0 |0x0352 |"Invalid user format" |-2147482798|
|TSDB_CODE_MND_INVALID_USER_FORMAT| 0 |0x0352 |"Invalid user format" |-2147482798|
|TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797|
|TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796|
|TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795|
......@@ -107,7 +107,7 @@
|TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752|
|TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751|
|TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750|
|TSDB_CODE_MND_INVALID_DB| |0 | 0x0383 | "Invalid database name"| -2147482749|
|TSDB_CODE_MND_INVALID_DB| 0 | 0x0383 | "Invalid database name"| -2147482749|
|TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748|
|TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747|
|TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746|
......
......@@ -67,7 +67,7 @@ TDengine 分布式架构的逻辑结构图如下:
<center> 图 1 TDengine架构示意图 </center>
一个完整的 TDengine 系统是运行在一到多个物理节点上的,逻辑上,它包含数据节点(dnode)、TDengine客户端(taosc)以及应用(app)。系统中存在一到多个数据节点,这些数据节点组成一个集群(cluster)。应用通过taosc的API与TDengine集群进行互动。下面对每个逻辑单元进行简要介绍。
**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。
**物理节点(pnode):** pnode是一独立运行、拥有自己的计算、存储和网络能力的计算机,可以是安装有OS的物理机、虚拟机或容器。物理节点由其配置的 FQDN(Fully Qualified Domain Name)来标识。TDengine完全依赖FQDN来进行网络通讯,如果不了解FQDN,请看博文《[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)》。
**数据节点(dnode):** dnode 是 TDengine 服务器侧执行代码 taosd 在物理节点上的一个运行实例,一个工作的系统必须有至少一个数据节点。dnode包含零到多个逻辑的虚拟节点(VNODE),零或者至多一个逻辑的管理节点(mnode)。dnode在系统中的唯一标识由实例的End Point (EP )决定。EP是dnode所在物理节点的FQDN (Fully Qualified Domain Name)和系统所配置的网络端口号(Port)的组合。通过配置不同的端口,一个物理节点(一台物理机、虚拟机或容器)可以运行多个实例,或有多个数据节点。
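As a hedged illustration of the End Point concept (a sketch, not part of the original text; the host name is an assumption), a data node is added to a cluster by its FQDN and port:
```mysql
-- register a dnode by its End Point, i.e. FQDN:port
CREATE DNODE "h2.taosdata.com:6030";
```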
......
......@@ -69,17 +69,13 @@ typedef struct SJoinSupporter {
SSubqueryState* pState;
SSqlObj* pObj; // parent SqlObj
int32_t subqueryIndex; // index of sub query
char intervalTimeUnit;
char slidingTimeUnit;
int64_t intervalTime; // interval time
int64_t slidingTime; // sliding time
SInterval interval;
SLimitVal limit; // limit info
uint64_t uid; // query meter uid
uint64_t uid; // query table uid
SArray* colList; // previous query information, no need to use this attribute, and the corresponding attribution
SArray* exprList;
SFieldInfo fieldsInfo;
STagCond tagCond;
SSqlGroupbyExpr groupbyExpr;
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
......
......@@ -29,6 +29,7 @@ extern "C" {
#include "tglobal.h"
#include "tsqlfunction.h"
#include "tutil.h"
#include "tcache.h"
#include "qExecutor.h"
#include "qSqlparser.h"
......@@ -225,12 +226,8 @@ typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it separately.
uint32_t type; // query/insert type
// TODO refactor
char intervalTimeUnit;
char slidingTimeUnit;
STimeWindow window; // query time window
int64_t intervalTime; // aggregation time window range
int64_t slidingTime; // sliding window in mseconds
int64_t intervalOffset;// start offset of each time window
SInterval interval;
int32_t tz; // query client timezone
SSqlGroupbyExpr groupbyExpr; // group by tags info
......@@ -333,6 +330,7 @@ typedef struct STscObj {
struct SSqlStream *streamList;
void* pDnodeConn;
pthread_mutex_t mutex;
T_REF_DECLARE()
} STscObj;
typedef struct SSqlObj {
......@@ -359,6 +357,8 @@ typedef struct SSqlObj {
uint16_t numOfSubs;
struct SSqlObj **pSubs;
struct SSqlObj * prev, *next;
struct SSqlObj **self;
} SSqlObj;
typedef struct SSqlStream {
......@@ -366,8 +366,6 @@ typedef struct SSqlStream {
uint32_t streamId;
char listed;
bool isProject;
char intervalTimeUnit;
char slidingTimeUnit;
int16_t precision;
int64_t num; // number of computing count
......@@ -381,8 +379,7 @@ typedef struct SSqlStream {
int64_t ctime; // stream created time
int64_t stime; // stream next executed time
int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed
int64_t intervalTime;
int64_t slidingTime;
SInterval interval;
void * pTimer;
void (*fp)();
......@@ -413,7 +410,6 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscDestroyResPointerInfo(SSqlRes *pRes);
void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
......@@ -425,17 +421,19 @@ void tscFreeSqlResult(SSqlObj *pSql);
/**
* only free part of resources allocated during query.
* TODO remove it later
* Note: this function is multi-thread safe.
* @param pObj
*/
void tscPartiallyFreeSqlObj(SSqlObj *pObj);
void tscPartiallyFreeSqlObj(SSqlObj *pSql);
/**
* free sql object, release allocated resource
* @param pObj Free metric/meta information, dynamically allocated payload, and
* response buffer, object itself
* @param pObj
*/
void tscFreeSqlObj(SSqlObj *pObj);
void tscFreeSqlObj(SSqlObj *pSql);
void tscFreeSqlObjInCache(void *pSql);
void tscCloseTscObj(STscObj *pObj);
......@@ -451,9 +449,6 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
bool tscIsUpdateQuery(SSqlObj* pSql);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
// todo remove this function.
bool tscResultsetFetchCompleted(TAOS_RES *result);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
......@@ -468,7 +463,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
int32_t type = pInfo->pSqlExpr->resType;
int32_t bytes = pInfo->pSqlExpr->resBytes;
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
char* pData = pRes->data + (int32_t)(pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row);
// user defined constant value output columns
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
......@@ -502,7 +497,8 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
}
}
extern void * tscCacheHandle;
extern SCacheObj* tscMetaCache;
extern SCacheObj* tscObjCache;
extern void * tscTmr;
extern void * tscQhandle;
extern int tscKeepConn[];
......
......@@ -18,6 +18,7 @@
#include "tnote.h"
#include "trpc.h"
#include "tcache.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tscLocalMerge.h"
......@@ -40,6 +41,8 @@ static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows);
static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows);
void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const char* sqlstr, size_t sqlLen) {
SSqlCmd* pCmd = &pSql->cmd;
pSql->signature = pSql;
pSql->param = param;
pSql->pTscObj = pObj;
......@@ -48,6 +51,11 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
pSql->fp = fp;
pSql->fetchFp = fp;
uint64_t handle = (uint64_t) pSql;
pSql->self = taosCachePut(tscObjCache, &handle, sizeof(uint64_t), &pSql, sizeof(uint64_t), 2*3600*1000);
T_REF_INC(pSql->pTscObj);
pSql->sqlstr = calloc(1, sqlLen + 1);
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
......@@ -59,7 +67,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
pSql->cmd.curSql = pSql->sqlstr;
pCmd->curSql = pSql->sqlstr;
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;
......@@ -69,7 +77,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
tscQueueAsyncRes(pSql);
return;
}
tscDoQuery(pSql);
}
......
......@@ -117,6 +117,10 @@ typedef struct SFirstLastInfo {
typedef struct SFirstLastInfo SLastrowInfo;
typedef struct SPercentileInfo {
tMemBucket *pMemBucket;
int32_t stage;
double minval;
double maxval;
int64_t numOfElems;
} SPercentileInfo;
typedef struct STopBotInfo {
......@@ -302,7 +306,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
} else if (functionId == TSDB_FUNC_PERCT) {
*type = (int16_t)TSDB_DATA_TYPE_DOUBLE;
*bytes = (int16_t)sizeof(double);
*interBytes = (int16_t)sizeof(double);
*interBytes = (int16_t)sizeof(SPercentileInfo);
} else if (functionId == TSDB_FUNC_LEASTSQR) {
*type = TSDB_DATA_TYPE_BINARY;
*bytes = TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE; // string
......@@ -707,13 +711,16 @@ static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY en
if (pCtx->aOutputBuf == NULL) {
return BLK_DATA_ALL_NEEDED;
}
SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
if (pInfo->hasResult != DATA_SET_FLAG) {
return BLK_DATA_ALL_NEEDED;
} else { // data in current block is not earlier than current result
return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
}
return BLK_DATA_ALL_NEEDED;
// TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter
// is invalid
// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
// if (pInfo->hasResult != DATA_SET_FLAG) {
// return BLK_DATA_ALL_NEEDED;
// } else { // data in current block is not earlier than current result
// return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
// }
}
static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
......@@ -726,12 +733,16 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end
return BLK_DATA_ALL_NEEDED;
}
SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
if (pInfo->hasResult != DATA_SET_FLAG) {
return BLK_DATA_ALL_NEEDED;
} else {
return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
}
return BLK_DATA_ALL_NEEDED;
// TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter
// is invalid
// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
// if (pInfo->hasResult != DATA_SET_FLAG) {
// return BLK_DATA_ALL_NEEDED;
// } else {
// return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
// }
}
//////////////////////////////////////////////////////////////////////////////////////////////
......@@ -2428,12 +2439,14 @@ static bool percentile_function_setup(SQLFunctionCtx *pCtx) {
if (!function_setup(pCtx)) {
return false;
}
// in the first round, get the min-max value of all involved data
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo *pInfo = pResInfo->interResultBuf;
pInfo->minval = DBL_MAX;
pInfo->maxval = -DBL_MAX;
pInfo->numOfElems = 0;
((SPercentileInfo *)(pResInfo->interResultBuf))->pMemBucket =
tMemBucketCreate(pCtx->inputBytes, pCtx->inputType);
return true;
}
......@@ -2442,7 +2455,65 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
SResultInfo * pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo *pInfo = pResInfo->interResultBuf;
// the first stage, only acquire the min/max value
if (pInfo->stage == 0) {
if (pCtx->preAggVals.isSet) {
if (pInfo->minval > pCtx->preAggVals.statis.min) {
pInfo->minval = (double)pCtx->preAggVals.statis.min;
}
if (pInfo->maxval < pCtx->preAggVals.statis.max) {
pInfo->maxval = (double)pCtx->preAggVals.statis.max;
}
pInfo->numOfElems += (pCtx->size - pCtx->preAggVals.statis.numOfNull);
} else {
for (int32_t i = 0; i < pCtx->size; ++i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
continue;
}
// TODO extract functions
double v = 0;
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_TINYINT:
v = GET_INT8_VAL(data);
break;
case TSDB_DATA_TYPE_SMALLINT:
v = GET_INT16_VAL(data);
break;
case TSDB_DATA_TYPE_BIGINT:
v = (double)(GET_INT64_VAL(data));
break;
case TSDB_DATA_TYPE_FLOAT:
v = GET_FLOAT_VAL(data);
break;
case TSDB_DATA_TYPE_DOUBLE:
v = GET_DOUBLE_VAL(data);
break;
default:
v = GET_INT32_VAL(data);
break;
}
if (v < pInfo->minval) {
pInfo->minval = v;
}
if (v > pInfo->maxval) {
pInfo->maxval = v;
}
pInfo->numOfElems += 1;
}
}
return;
}
// the second stage, calculate the true percentile value
for (int32_t i = 0; i < pCtx->size; ++i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
......@@ -2462,10 +2533,47 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
return;
}
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo *pInfo = (SPercentileInfo *)pResInfo->interResultBuf;
if (pInfo->stage == 0) {
// TODO extract functions
double v = 0;
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_TINYINT:
v = GET_INT8_VAL(pData);
break;
case TSDB_DATA_TYPE_SMALLINT:
v = GET_INT16_VAL(pData);
break;
case TSDB_DATA_TYPE_BIGINT:
v = (double)(GET_INT64_VAL(pData));
break;
case TSDB_DATA_TYPE_FLOAT:
v = GET_FLOAT_VAL(pData);
break;
case TSDB_DATA_TYPE_DOUBLE:
v = GET_DOUBLE_VAL(pData);
break;
default:
v = GET_INT32_VAL(pData);
break;
}
if (v < pInfo->minval) {
pInfo->minval = v;
}
if (v > pInfo->maxval) {
pInfo->maxval = v;
}
pInfo->numOfElems += 1;
return;
}
tMemBucketPut(pInfo->pMemBucket, pData, 1);
SET_VAL(pCtx, 1, 1);
......@@ -2488,6 +2596,23 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
doFinalizer(pCtx);
}
static void percentile_next_step(SQLFunctionCtx *pCtx) {
SResultInfo * pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo *pInfo = pResInfo->interResultBuf;
if (pInfo->stage == 0) {
// all data are null, set it completed
if (pInfo->numOfElems == 0) {
pResInfo->complete = true;
}
pInfo->stage += 1;
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
} else {
pResInfo->complete = true;
}
}
//////////////////////////////////////////////////////////////////////////////////
static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
......@@ -4513,7 +4638,7 @@ SQLAggFuncElem aAggs[] = {{
percentile_function_setup,
percentile_function,
percentile_function_f,
no_next_step,
percentile_next_step,
percentile_finalizer,
noop1,
noop1,
......
......@@ -16,7 +16,6 @@
#include "os.h"
#include "taosmsg.h"
#include "qExtbuffer.h"
#include "taosdef.h"
#include "tcache.h"
#include "tname.h"
......@@ -430,7 +429,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
pRes->qhandle = 0x1;
pRes->numOfRows = 0;
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
taosCacheEmpty(tscCacheHandle);
taosCacheEmpty(tscMetaCache);
pRes->code = TSDB_CODE_SUCCESS;
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
pRes->code = tscProcessServerVer(pSql);
......
......@@ -368,13 +368,12 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
TSKEY stime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
4096, (int32_t)numOfCols, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit,
4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
tinfo.precision, pQueryInfo->fillType, pFillCol);
}
}
......@@ -472,10 +471,8 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
return;
}
tscDebug("%p start to free local reducer", pSql);
SSqlRes *pRes = &(pSql->res);
if (pRes->pLocalReducer == NULL) {
tscDebug("%p local reducer has been freed, abort", pSql);
return;
}
......@@ -553,7 +550,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
// primary timestamp column is involved in final result
if (pQueryInfo->intervalTime != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pQueryInfo->interval.interval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
numOfGroupByCols++;
}
......@@ -570,7 +567,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
orderIdx[i] = startCols++;
}
if (pQueryInfo->intervalTime != 0) {
if (pQueryInfo->interval.interval != 0) {
// the first column is the timestamp, handles queries like "interval(10m) group by tags"
orderIdx[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
......@@ -614,12 +611,12 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
* super table interval query
* if the order columns is the primary timestamp, all result data belongs to one group
*/
assert(pQueryInfo->intervalTime > 0);
assert(pQueryInfo->interval.interval > 0);
if (numOfCols == 1) {
return true;
}
} else { // simple group by query
assert(pQueryInfo->intervalTime == 0);
assert(pQueryInfo->interval.interval == 0);
}
// only one row exists
......@@ -827,8 +824,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
if (pFillInfo != NULL) {
int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pFillInfo, revisedSTime);
}
......@@ -841,7 +837,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
}
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
assert(pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
......@@ -1222,7 +1218,7 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
#endif
// no interval query, no fill operation
if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo);
} else {
SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
......@@ -1260,13 +1256,10 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
int8_t precision = tinfo.precision;
// for group result interpolation, do not return if not data is generated
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime =
taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
}
}
......
......@@ -142,7 +142,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z);
}
if (getTimestampInUsFromStr(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -526,7 +526,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
int32_t index = 0;
SStrToken sToken;
int16_t numOfRows = 0;
int32_t numOfRows = 0;
SSchema *pSchema = tscGetTableSchema(pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
......
......@@ -259,11 +259,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pSdesc->num = htobe64(pStream->num);
pSdesc->useconds = htobe64(pStream->useconds);
pSdesc->stime = htobe64(pStream->stime - pStream->intervalTime);
pSdesc->stime = htobe64(pStream->stime - pStream->interval.interval);
pSdesc->ctime = htobe64(pStream->ctime);
pSdesc->slidingTime = htobe64(pStream->slidingTime);
pSdesc->interval = htobe64(pStream->intervalTime);
pSdesc->slidingTime = htobe64(pStream->interval.sliding);
pSdesc->interval = htobe64(pStream->interval.interval);
pHeartbeat->numOfStreams++;
pSdesc++;
......
This diff has been collapsed.
......@@ -27,10 +27,7 @@
#include "tutil.h"
#include "tlockfree.h"
#define TSC_MGMT_VNODE 999
SRpcCorEpSet tscMgmtEpSet;
SRpcEpSet tscDnodeEpSet;
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
......@@ -236,20 +233,27 @@ int tscSendMsgToServer(SSqlObj *pSql) {
}
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
SSqlObj *pSql = (SSqlObj *)rpcMsg->ahandle;
if (pSql == NULL || pSql->signature != pSql) {
tscError("%p sql is already released", pSql);
uint64_t handle = (uint64_t) rpcMsg->ahandle;
void** p = taosCacheAcquireByKey(tscObjCache, &handle, sizeof(uint64_t));
if (p == NULL) {
rpcFreeCont(rpcMsg->pCont);
return;
}
SSqlObj* pSql = *p;
assert(pSql != NULL);
STscObj *pObj = pSql->pTscObj;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
assert(*pSql->self == pSql);
if (pObj->signature != pObj) {
tscDebug("%p DB connection is closed, cmd:%d pObj:%p signature:%p", pSql, pCmd->command, pObj, pObj->signature);
tscFreeSqlObj(pSql);
taosCacheRelease(tscObjCache, (void**) &p, true);
rpcFreeCont(rpcMsg->pCont);
return;
}
......@@ -261,18 +265,21 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
tscDebug("%p sqlObj needs to be released or DB connection is closed, cmd:%d type:%d, pObj:%p signature:%p",
pSql, pCmd->command, pQueryInfo->type, pObj, pObj->signature);
tscFreeSqlObj(pSql);
void** p1 = p;
taosCacheRelease(tscObjCache, (void**) &p1, false);
taosCacheRelease(tscObjCache, (void**) &p, true);
rpcFreeCont(rpcMsg->pCont);
return;
}
if (pEpSet) {
if (pEpSet) {
if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if(pCmd->command < TSDB_SQL_MGMT) {
tscUpdateVgroupInfo(pSql, pEpSet);
if (pCmd->command < TSDB_SQL_MGMT) {
tscUpdateVgroupInfo(pSql, pEpSet);
} else {
tscUpdateMgmtEpSet(pEpSet);
}
}
}
}
......@@ -294,7 +301,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
if (pSql->retry > pSql->maxRetry) {
tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
} else {
// wait for a moment and then retry
// wait for a moment and then retry; TODO: do not sleep in the rpc callback thread
if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
int32_t duration = getWaitingTimeInterval(pSql->retry);
taosMsleep(duration);
......@@ -304,6 +311,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
// if there is an error occurring, proceed to the following error handling procedure.
if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosCacheRelease(tscObjCache, (void**) &p, false);
rpcFreeCont(rpcMsg->pCont);
return;
}
......@@ -365,16 +373,18 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
}
bool shouldFree = tscShouldBeFreed(pSql);
if (rpcMsg->code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? (int32_t)pRes->numOfRows : pRes->code;
bool shouldFree = tscShouldBeFreed(pSql);
(*pSql->fp)(pSql->param, pSql, rpcMsg->code);
}
if (shouldFree) {
tscDebug("%p sqlObj is automatically freed", pSql);
tscFreeSqlObj(pSql);
}
void** p1 = p;
taosCacheRelease(tscObjCache, (void**) &p1, false);
if (shouldFree) { // in case of table-meta/vgrouplist query, automatically free it
taosCacheRelease(tscObjCache, (void **)&p, true);
tscDebug("%p sqlObj is automatically freed", pSql);
}
rpcFreeCont(rpcMsg->pCont);
......@@ -637,8 +647,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (pQueryInfo->intervalTime < 0) {
tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->intervalTime);
if (pQueryInfo->interval.interval < 0) {
tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->interval.interval);
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -665,10 +675,12 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->limit = htobe64(pQueryInfo->limit.limit);
pQueryMsg->offset = htobe64(pQueryInfo->limit.offset);
pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList));
pQueryMsg->intervalTime = htobe64(pQueryInfo->intervalTime);
pQueryMsg->slidingTime = htobe64(pQueryInfo->slidingTime);
pQueryMsg->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pQueryMsg->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval);
pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding);
pQueryMsg->interval.offset = htobe64(pQueryInfo->interval.offset);
pQueryMsg->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit;
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
pQueryMsg->numOfTags = htonl(numOfTags);
pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType);
......@@ -1667,8 +1679,10 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
pMetaMsg->contLen = htons(pMetaMsg->contLen);
pMetaMsg->numOfColumns = htons(pMetaMsg->numOfColumns);
if (pMetaMsg->sid < 0 || pMetaMsg->vgroup.numOfEps < 0) {
tscError("invalid meter vgId:%d, sid%d", pMetaMsg->vgroup.numOfEps, pMetaMsg->sid);
if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) &&
(pMetaMsg->sid <= 0 || pMetaMsg->vgroup.vgId < 2 || pMetaMsg->vgroup.numOfEps <= 0)) {
tscError("invalid value in table numOfEps:%d, vgId:%d tid:%d, name:%s", pMetaMsg->vgroup.numOfEps, pMetaMsg->vgroup.vgId,
pMetaMsg->sid, pMetaMsg->tableId);
return TSDB_CODE_TSC_INVALID_VALUE;
}
......@@ -1708,7 +1722,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
assert(pTableMetaInfo->pTableMeta == NULL);
pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscCacheHandle, pTableMetaInfo->name,
pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscMetaCache, pTableMetaInfo->name,
strlen(pTableMetaInfo->name), pTableMeta, size, tsTableMetaKeepTimer * 1000);
// todo handle out of memory case
......@@ -1820,7 +1834,7 @@ int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) {
// int32_t size = (int32_t)(rsp - ((char *)pMeta)); // Consistent with STableMeta in cache
//
// pMeta->index = 0;
// (void)taosCachePut(tscCacheHandle, pMeta->tableId, (char *)pMeta, size, tsTableMetaKeepTimer);
// (void)taosCachePut(tscMetaCache, pMeta->tableId, (char *)pMeta, size, tsTableMetaKeepTimer);
// }
}
......@@ -1907,12 +1921,14 @@ int tscProcessShowRsp(SSqlObj *pSql) {
key[0] = pCmd->msgType + 'a';
strcpy(key + 1, "showlist");
taosCacheRelease(tscCacheHandle, (void *)&(pTableMetaInfo->pTableMeta), false);
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void *)&(pTableMetaInfo->pTableMeta), false);
}
size_t size = 0;
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg, &size);
pTableMetaInfo->pTableMeta = taosCachePut(tscCacheHandle, key, strlen(key), (char *)pTableMeta, size,
pTableMetaInfo->pTableMeta = taosCachePut(tscMetaCache, key, strlen(key), (char *)pTableMeta, size,
tsTableMetaKeepTimer * 1000);
SSchema *pTableSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
......@@ -1971,6 +1987,8 @@ static void createHBObj(STscObj* pObj) {
pSql->pTscObj = pObj;
pSql->signature = pSql;
pObj->pHb = pSql;
T_REF_INC(pObj);
tscAddSubqueryInfo(&pObj->pHb->cmd);
tscDebug("%p HB is allocated, pObj:%p", pObj->pHb, pObj);
......@@ -2000,7 +2018,7 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
createHBObj(pObj);
taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer);
// taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer);
return 0;
}
......@@ -2015,14 +2033,14 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
int tscProcessDropDbRsp(SSqlObj *pSql) {
pSql->pTscObj->db[0] = 0;
taosCacheEmpty(tscCacheHandle);
taosCacheEmpty(tscMetaCache);
return 0;
}
int tscProcessDropTableRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
STableMeta *pTableMeta = taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
STableMeta *pTableMeta = taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
if (pTableMeta == NULL) { /* not in cache, abort */
return 0;
}
......@@ -2035,10 +2053,10 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
* instead.
*/
tscDebug("%p force release table meta after drop table:%s", pSql, pTableMetaInfo->name);
taosCacheRelease(tscCacheHandle, (void **)&pTableMeta, true);
taosCacheRelease(tscMetaCache, (void **)&pTableMeta, true);
if (pTableMetaInfo->pTableMeta) {
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true);
}
return 0;
......@@ -2047,21 +2065,21 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
STableMeta *pTableMeta = taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
STableMeta *pTableMeta = taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
if (pTableMeta == NULL) { /* not in cache, abort */
return 0;
}
tscDebug("%p force release metermeta in cache after alter-table: %s", pSql, pTableMetaInfo->name);
taosCacheRelease(tscCacheHandle, (void **)&pTableMeta, true);
taosCacheRelease(tscMetaCache, (void **)&pTableMeta, true);
if (pTableMetaInfo->pTableMeta) {
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true);
if (isSuperTable) { // if it is a super table, reset whole query cache
tscDebug("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name);
taosCacheEmpty(tscCacheHandle);
taosCacheEmpty(tscMetaCache);
}
}
......@@ -2146,6 +2164,12 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf
pNew->signature = pNew;
pNew->cmd.command = TSDB_SQL_META;
T_REF_INC(pNew->pTscObj);
// TODO add test case on x86 platform
uint64_t adr = (uint64_t) pNew;
pNew->self = taosCachePut(tscObjCache, &adr, sizeof(uint64_t), &pNew, sizeof(uint64_t), 2*60*1000);
tscAddSubqueryInfo(&pNew->cmd);
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
......@@ -2182,10 +2206,10 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
// If this STableMetaInfo owns a table meta, release it first
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), false);
taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), false);
}
pTableMetaInfo->pTableMeta = (STableMeta *)taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
pTableMetaInfo->pTableMeta = (STableMeta *)taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
if (pTableMetaInfo->pTableMeta != NULL) {
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
tscDebug("%p retrieve table Meta from cache, the number of columns:%d, numOfTags:%d, %p", pSql, tinfo.numOfColumns,
......@@ -2220,7 +2244,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid, pTableMeta);
}
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true);
return getTableMetaFromMgmt(pSql, pTableMetaInfo);
}
......@@ -2250,7 +2274,8 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
pNew->signature = pNew;
pNew->cmd.command = TSDB_SQL_STABLEVGROUP;
// TODO TEST IT
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
if (pNewQueryInfo == NULL) {
tscFreeSqlObj(pNew);
......@@ -2260,7 +2285,7 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i);
STableMeta *pTableMeta = taosCacheAcquireByData(tscCacheHandle, pMInfo->pTableMeta);
STableMeta *pTableMeta = taosCacheAcquireByData(tscMetaCache, pMInfo->pTableMeta);
tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList);
}
......@@ -2270,6 +2295,10 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
}
pNewQueryInfo->numOfTables = pQueryInfo->numOfTables;
T_REF_INC(pNew->pTscObj);
uint64_t p = (uint64_t) pNew;
pNew->self = taosCachePut(tscObjCache, &p, sizeof(uint64_t), &pNew, sizeof(uint64_t), 2 * 600 * 1000);
tscDebug("%p new sqlObj:%p to get vgroupInfo, numOfTables:%d", pSql, pNew, pNewQueryInfo->numOfTables);
pNew->fp = tscTableMetaCallBack;
......
......@@ -26,6 +26,7 @@
#include "tsclient.h"
#include "ttokendef.h"
#include "tutil.h"
#include "tscProfile.h"
static bool validImpl(const char* str, size_t maxsize) {
if (str == NULL) {
......@@ -100,6 +101,8 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
}
pObj->signature = pObj;
pObj->pDnodeConn = pDnodeConn;
T_REF_INIT_VAL(pObj, 1);
tstrncpy(pObj->user, user, sizeof(pObj->user));
secretEncryptLen = MIN(secretEncryptLen, sizeof(pObj->pass));
......@@ -132,20 +135,15 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
return NULL;
}
pSql->pTscObj = pObj;
pSql->pTscObj = pObj;
pSql->signature = pSql;
pSql->maxRetry = TSDB_MAX_REPLICA;
pSql->maxRetry = TSDB_MAX_REPLICA;
pSql->fp = fp;
pSql->param = param;
pSql->cmd.command = TSDB_SQL_CONNECT;
tsem_init(&pSql->rspSem, 0, 0);
pObj->pDnodeConn = pDnodeConn;
pSql->fp = fp;
pSql->param = param;
if (taos != NULL) {
*taos = pObj;
}
pSql->cmd.command = TSDB_SQL_CONNECT;
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
rpcClose(pDnodeConn);
......@@ -154,7 +152,16 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
return NULL;
}
if (taos != NULL) {
*taos = pObj;
}
T_REF_INC(pSql->pTscObj);
uint64_t key = (uint64_t) pSql;
pSql->self = taosCachePut(tscObjCache, &key, sizeof(uint64_t), &pSql, sizeof(uint64_t), 2*3600*1000);
tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
return pSql;
}
......@@ -257,6 +264,33 @@ void taos_close(TAOS *taos) {
tscFreeSqlObj(pObj->pHb);
}
// free all sqlObjs created by using this connect before free the STscObj
// while(1) {
// pthread_mutex_lock(&pObj->mutex);
// void* p = pObj->sqlList;
// pthread_mutex_unlock(&pObj->mutex);
//
// if (p == NULL) {
// break;
// }
//
// tscDebug("%p waiting for sqlObj to be freed, %p", pObj, p);
// taosMsleep(100);
//
// // todo fix me!! two threads call taos_free_result will cause problem.
// tscDebug("%p free :%p", pObj, p);
// taos_free_result(p);
// }
int32_t ref = T_REF_DEC(pObj);
assert(ref >= 0);
if (ref > 0) {
tscDebug("%p %d remain sqlObjs, not free tscObj and dnodeConn:%p", pObj, ref, pObj->pDnodeConn);
return;
}
tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn);
tscCloseTscObj(pObj);
}
......@@ -533,57 +567,51 @@ int taos_select_db(TAOS *taos, const char *db) {
}
// send free message to vnode to free qhandle and corresponding resources in vnode
static bool tscKillQueryInVnode(SSqlObj* pSql) {
static UNUSED_FUNC bool tscKillQueryInDnode(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pRes == NULL || pRes->qhandle == 0) {
return true;
}
if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false && !tscIsTwoStageSTableQuery(pQueryInfo, 0) &&
(pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH) &&
(pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
if ((pQueryInfo == NULL) || tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
return true;
}
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
tscRemoveFromSqlList(pSql);
int32_t cmd = pCmd->command;
if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false && pSql->pStream == NULL && (pTableMetaInfo->pTableMeta != NULL) &&
(cmd == TSDB_SQL_SELECT ||
cmd == TSDB_SQL_SHOW ||
cmd == TSDB_SQL_RETRIEVE ||
cmd == TSDB_SQL_FETCH)) {
pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
tscDebug("%p send msg to dnode to free qhandle ASAP, command:%s, ", pSql, sqlCmd[pCmd->command]);
tscDebug("%p send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql, sqlCmd[pCmd->command]);
tscProcessSql(pSql);
return true;
return false;
}
return false;
return true;
}
void taos_free_result(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
SSqlObj* pSql = (SSqlObj*) res;
if (pSql == NULL || pSql->signature != pSql) {
tscDebug("%p sqlObj has been freed", pSql);
return;
}
// The semaphore can not be changed while freeing async sub query objects.
SSqlRes *pRes = &pSql->res;
if (pRes == NULL || pRes->qhandle == 0) {
tscFreeSqlObj(pSql);
tscDebug("%p SqlObj is freed by app, qhandle is null", pSql);
return;
}
// set freeFlag to 1 in retrieve message if there are un-retrieved results data in node
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo == NULL) {
tscFreeSqlObj(pSql);
tscDebug("%p SqlObj is freed by app", pSql);
tscError("%p already released sqlObj", res);
return;
}
pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
if (!tscKillQueryInVnode(pSql)) {
tscFreeSqlObj(pSql);
tscDebug("%p sqlObj is freed by app", pSql);
bool freeNow = tscKillQueryInDnode(pSql);
if (freeNow) {
tscDebug("%p free sqlObj in cache", pSql);
SSqlObj** p = pSql->self;
taosCacheRelease(tscObjCache, (void**) &p, true);
}
}
......
......@@ -51,7 +51,7 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in
int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
// change to ms
if (prec == TSDB_TIME_PRECISION_MICRO) {
slidingTime = slidingTime / 1000;
......@@ -87,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
......@@ -132,15 +132,16 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
}
if (etime > pStream->etime) {
etime = pStream->etime;
} else if (pStream->intervalTimeUnit != 'y' && pStream->intervalTimeUnit != 'n') {
etime = pStream->stime + (etime - pStream->stime) / pStream->intervalTime * pStream->intervalTime;
} else if (pStream->interval.intervalUnit != 'y' && pStream->interval.intervalUnit != 'n') {
etime = pStream->stime + (etime - pStream->stime) / pStream->interval.interval * pStream->interval.interval;
} else {
etime = taosGetIntervalStartTimestamp(etime, pStream->slidingTime, pStream->intervalTime, pStream->slidingTimeUnit, pStream->precision);
etime = taosTimeTruncate(etime, &pStream->interval, pStream->precision);
//etime = taosGetIntervalStartTimestamp(etime, pStream->interval.sliding, pStream->interval.sliding, pStream->interval.slidingUnit, pStream->precision);
}
pQueryInfo->window.ekey = etime;
if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) {
int64_t timer = pStream->slidingTime;
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
int64_t timer = pStream->interval.sliding;
if (pStream->interval.intervalUnit == 'y' || pStream->interval.intervalUnit == 'n') {
timer = 86400 * 1000l;
} else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer /= 1000l;
......@@ -162,12 +163,12 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
retryDelay);
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), true);
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), true);
taosTFree(pTableMetaInfo->vgroupList);
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
......@@ -223,7 +224,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
SSqlObj * pSql = (SSqlObj *)res;
if (pSql == NULL || numOfRows < 0) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
......@@ -246,11 +247,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
}
if (!pStream->isProject) {
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
pStream->stime = taosAddNatualInterval(pStream->stime, pStream->slidingTime, pStream->slidingTimeUnit, pStream->precision);
} else {
pStream->stime += pStream->slidingTime;
}
pStream->stime = taosTimeAdd(pStream->stime, pStream->interval.sliding, pStream->interval.slidingUnit, pStream->precision);
}
// actually only one row is returned. this following is not necessary
taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream);
......@@ -275,7 +272,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
// release the metric/meter meta information reference, so data in cache can be updated
taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), false);
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
tscFreeSqlResult(pSql);
taosTFree(pSql->pSubs);
pSql->numOfSubs = 0;
......@@ -310,7 +307,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
now + timer, timer, delay, pStream->stime, etime);
} else {
tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
pStream->stime, timer, delay, pStream->stime - pStream->intervalTime, pStream->stime - 1);
pStream->stime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1);
}
pSql->cmd.command = TSDB_SQL_SELECT;
......@@ -324,12 +321,12 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
int64_t delayDelta = maxDelay;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
delayDelta = (int64_t)(pStream->interval.sliding * tsStreamComputDelayRatio);
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
int64_t remainTimeWindow = pStream->interval.sliding - delayDelta;
if (maxDelay > remainTimeWindow) {
maxDelay = (int64_t)(remainTimeWindow / 1.5f);
}
......@@ -337,8 +334,8 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
int64_t currentDelay = (rand() % maxDelay); // a random number
currentDelay += delayDelta;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
assert(currentDelay < pStream->slidingTime);
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
assert(currentDelay < pStream->interval.sliding);
}
return currentDelay;
......@@ -353,7 +350,7 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
* for a projection query, no matter whether data is fetched successfully or not, the next launch will be issued
* after more than the sliding time window
*/
timer = pStream->slidingTime;
timer = pStream->interval.sliding;
if (pStream->stime > pStream->etime) {
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
......@@ -366,7 +363,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
return;
}
} else {
int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
int64_t stime = taosTimeTruncate(pStream->stime - 1, &pStream->interval, pStream->precision);
//int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
if (stime >= pStream->etime) {
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
......@@ -400,43 +398,43 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->intervalTime < minIntervalTime) {
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->intervalTime, minIntervalTime);
pQueryInfo->intervalTime = minIntervalTime;
pQueryInfo->interval.interval, minIntervalTime);
pQueryInfo->interval.interval = minIntervalTime;
}
pStream->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pStream->intervalTime = pQueryInfo->intervalTime; // it shall be derived from sql string
pStream->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
pStream->interval.interval = pQueryInfo->interval.interval; // it shall be derived from sql string
if (pQueryInfo->slidingTime <= 0) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
if (pQueryInfo->interval.sliding <= 0) {
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
pQueryInfo->interval.slidingUnit = pQueryInfo->interval.intervalUnit;
}
int64_t minSlidingTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->slidingTime < minSlidingTime) {
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, minSlidingTime);
pQueryInfo->interval.sliding, minSlidingTime);
pQueryInfo->slidingTime = minSlidingTime;
pQueryInfo->interval.sliding = minSlidingTime;
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, pQueryInfo->intervalTime);
pQueryInfo->interval.sliding, pQueryInfo->interval.interval);
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
}
pStream->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pStream->slidingTime = pQueryInfo->slidingTime;
pStream->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
pStream->interval.sliding = pQueryInfo->interval.sliding;
if (pStream->isProject) {
pQueryInfo->intervalTime = 0; // clear the interval value to avoid forcing the time window split by the query processor
pQueryInfo->slidingTime = 0;
pQueryInfo->interval.interval = 0; // clear the interval value to avoid forcing the time window split by the query processor
pQueryInfo->interval.sliding = 0;
}
}
......@@ -445,8 +443,8 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
if (pStream->isProject) {
// no data in table, flush all data till now to destination meter, 10sec delay
pStream->intervalTime = tsProjectExecInterval;
pStream->slidingTime = tsProjectExecInterval;
pStream->interval.interval = tsProjectExecInterval;
pStream->interval.sliding = tsProjectExecInterval;
if (stime != 0) { // the first projection starts from the latest event timestamp
assert(stime >= pQueryInfo->window.skey);
......@@ -459,12 +457,15 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
stime = pQueryInfo->window.skey;
if (stime == INT64_MIN) {
stime = (int64_t)taosGetTimestamp(pStream->precision);
stime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
stime = taosGetIntervalStartTimestamp(stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
stime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
stime = taosTimeTruncate(stime - 1, &pStream->interval, pStream->precision);
//stime = taosGetIntervalStartTimestamp(stime, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
//stime = taosGetIntervalStartTimestamp(stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
}
} else {
int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
//int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
int64_t newStime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
if (newStime != stime) {
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
stime = newStime;
......@@ -534,7 +535,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
pStream, pTableMetaInfo->name, pStream->intervalTime, pStream->slidingTime, starttime, pSql->sqlstr);
pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
}
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
......
......@@ -92,7 +92,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
STSElem elem2 = tsBufGetElem(pSupporter2->pTSBuf);
#ifdef _DEBUG_VIEW
tscInfo("%" PRId64 ", tags:%d \t %" PRId64 ", tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag);
tscInfo("%" PRId64 ", tags:%"PRId64" \t %" PRId64 ", tags:%"PRId64, elem1.ts, elem1.tag.i64Key, elem2.ts, elem2.tag.i64Key);
#endif
int32_t res = tVariantCompare(&elem1.tag, &elem2.tag);
......@@ -113,7 +113,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
* in case of a stable query, limit/offset is not applied here. the limit/offset is applied to the
* final results, which are acquired after the secondary merge in the client.
*/
if (pLimit->offset == 0 || pQueryInfo->intervalTime > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (win->skey > elem1.ts) {
win->skey = elem1.ts;
}
......@@ -178,10 +178,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, in
pSupporter->subqueryIndex = index;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
pSupporter->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pSupporter->slidingTime = pQueryInfo->slidingTimeUnit;
pSupporter->intervalTime = pQueryInfo->intervalTime;
pSupporter->slidingTime = pQueryInfo->slidingTime;
memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval));
pSupporter->limit = pQueryInfo->limit;
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, index);
......@@ -301,28 +298,24 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
success = false;
break;
}
tscClearSubqueryInfo(&pNew->cmd);
pSql->pSubs[i] = pNew;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
pQueryInfo->tsBuf = pTSBuf; // transfer the ownership of timestamp comp-z data to the newly created object
// set the second stage sub query for join process
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE);
pQueryInfo->intervalTimeUnit = pSupporter->intervalTimeUnit;
pQueryInfo->slidingTimeUnit = pSupporter->slidingTimeUnit;
pQueryInfo->intervalTime = pSupporter->intervalTime;
pQueryInfo->slidingTime = pSupporter->slidingTime;
pQueryInfo->groupbyExpr = pSupporter->groupbyExpr;
memcpy(&pQueryInfo->interval, &pSupporter->interval, sizeof(pQueryInfo->interval));
tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond);
pQueryInfo->colList = pSupporter->colList;
pQueryInfo->exprList = pSupporter->exprList;
pQueryInfo->fieldsInfo = pSupporter->fieldsInfo;
pSupporter->exprList = NULL;
pSupporter->colList = NULL;
memset(&pSupporter->fieldsInfo, 0, sizeof(SFieldInfo));
......@@ -1214,14 +1207,13 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
}
pNew->cmd.numOfCols = 0;
pNewQueryInfo->intervalTime = 0;
pNewQueryInfo->interval.interval = 0;
pSupporter->limit = pNewQueryInfo->limit;
pNewQueryInfo->limit.limit = -1;
pNewQueryInfo->limit.offset = 0;
// backup the data and clear it in the sqlcmd object
pSupporter->groupbyExpr = pNewQueryInfo->groupbyExpr;
memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr));
tscInitQueryInfo(pNewQueryInfo);
......@@ -1523,9 +1515,9 @@ static void tscFreeSubSqlObj(SRetrieveSupport *trsupport, SSqlObj *pSql) {
SSqlObj *pParentSql = trsupport->pParentSql;
assert(pSql == pParentSql->pSubs[index]);
pParentSql->pSubs[index] = NULL;
taos_free_result(pSql);
// pParentSql->pSubs[index] = NULL;
//
// taos_free_result(pSql);
taosTFree(trsupport->localBuffer);
taosTFree(trsupport);
}
......@@ -1739,10 +1731,6 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
assert(tres != NULL);
SSqlObj *pSql = (SSqlObj *)tres;
// if (pSql == NULL) { // sql object has been released in error process, return immediately
// tscDebug("%p subquery has been released, idx:%d, abort", pParentSql, idx);
// return;
// }
SSubqueryState* pState = trsupport->pState;
assert(pState->numOfRemain <= pState->numOfTotal && pState->numOfRemain >= 0 && pParentSql->numOfSubs == pState->numOfTotal);
......@@ -1918,9 +1906,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
pParentObj->res.code = pSql->res.code;
}
taos_free_result(tres);
taosTFree(pSupporter);
if (atomic_sub_fetch_32(&pState->numOfRemain, 1) > 0) {
return;
}
......@@ -1964,28 +1950,27 @@ int32_t tscHandleInsertRetry(SSqlObj* pSql) {
}
int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
size_t size = taosArrayGetSize(pCmd->pDataBlocks);
assert(size > 0);
SSqlRes *pRes = &pSql->res;
pSql->numOfSubs = (uint16_t)taosArrayGetSize(pCmd->pDataBlocks);
assert(pSql->numOfSubs > 0);
pRes->code = TSDB_CODE_SUCCESS;
// the number of already initialized subqueries
int32_t numOfSub = 0;
pSql->numOfSubs = (uint16_t)size;
pSql->pSubs = calloc(size, POINTER_BYTES);
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
pState->numOfTotal = pSql->numOfSubs;
pState->numOfRemain = pSql->numOfSubs;
pSql->pSubs = calloc(pSql->numOfSubs, POINTER_BYTES);
if (pSql->pSubs == NULL) {
goto _error;
}
tscDebug("%p submit data to %" PRIzu " vnode(s)", pSql, size);
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
pState->numOfTotal = pSql->numOfSubs;
pState->numOfRemain = pSql->numOfSubs;
pRes->code = TSDB_CODE_SUCCESS;
tscDebug("%p submit data to %d vnode(s)", pSql, pSql->numOfSubs);
while(numOfSub < pSql->numOfSubs) {
SInsertSupporter* pSupporter = calloc(1, sizeof(SInsertSupporter));
......@@ -2016,8 +2001,8 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
tscDebug("%p sub:%p create subObj success. orderOfSub:%d", pSql, pNew, numOfSub);
numOfSub++;
} else {
tscDebug("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%" PRIzu ", code:%s", pSql, numOfSub,
size, tstrerror(pRes->code));
tscDebug("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%s", pSql, numOfSub,
pSql->numOfSubs, tstrerror(pRes->code));
goto _error;
}
}
......@@ -2040,11 +2025,6 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
_error:
for(int32_t j = 0; j < numOfSub; ++j) {
taosTFree(pSql->pSubs[j]->param);
taos_free_result(pSql->pSubs[j]);
}
taosTFree(pState);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
......@@ -2208,7 +2188,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
}
// primary key column cannot be null in interval query, no need to check
if (i == 0 && pQueryInfo->intervalTime > 0) {
if (i == 0 && pQueryInfo->interval.interval > 0) {
continue;
}
......@@ -2220,16 +2200,15 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
// calculate the result from several other columns
if (pSup->pArithExprInfo != NULL) {
if (pRes->pArithSup == NULL) {
SArithmeticSupport *sas = (SArithmeticSupport *) calloc(1, sizeof(SArithmeticSupport));
sas->offset = 0;
sas->pArithExpr = pSup->pArithExprInfo;
sas->numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
sas->exprList = pQueryInfo->exprList;
sas->data = calloc(sas->numOfCols, POINTER_BYTES);
pRes->pArithSup = sas;
pRes->pArithSup = (SArithmeticSupport*)calloc(1, sizeof(SArithmeticSupport));
}
pRes->pArithSup->offset = 0;
pRes->pArithSup->pArithExpr = pSup->pArithExprInfo;
pRes->pArithSup->numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
pRes->pArithSup->exprList = pQueryInfo->exprList;
pRes->pArithSup->data = calloc(pRes->pArithSup->numOfCols, POINTER_BYTES);
if (pRes->buffer[i] == NULL) {
TAOS_FIELD* field = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
pRes->buffer[i] = malloc(field->bytes);
......
......@@ -30,7 +30,8 @@
#include "tlocale.h"
// global, not configurable
void * tscCacheHandle;
SCacheObj* tscMetaCache;
SCacheObj* tscObjCache;
void * tscTmr;
void * tscQhandle;
void * tscCheckDiskUsageTmr;
......@@ -144,8 +145,9 @@ void taos_init_imp(void) {
refreshTime = refreshTime > 10 ? 10 : refreshTime;
refreshTime = refreshTime < 10 ? 10 : refreshTime;
if (tscCacheHandle == NULL) {
tscCacheHandle = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL, "tableMeta");
if (tscMetaCache == NULL) {
tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL, "tableMeta");
tscObjCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, refreshTime/2, false, tscFreeSqlObjInCache, "sqlObj");
}
tscDebug("client is initialized successfully");
......@@ -154,9 +156,12 @@ void taos_init_imp(void) {
void taos_init() { pthread_once(&tscinit, taos_init_imp); }
void taos_cleanup() {
if (tscCacheHandle != NULL) {
taosCacheCleanup(tscCacheHandle);
tscCacheHandle = NULL;
if (tscMetaCache != NULL) {
taosCacheCleanup(tscMetaCache);
tscMetaCache = NULL;
taosCacheCleanup(tscObjCache);
tscObjCache = NULL;
}
if (tscQhandle != NULL) {
......
......@@ -252,11 +252,11 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
if (pRes->tsrow == NULL) {
int32_t numOfOutput = pQueryInfo->fieldsInfo.numOfOutput;
pRes->numOfCols = numOfOutput;
pRes->tsrow = calloc(numOfOutput, POINTER_BYTES);
pRes->length = calloc(numOfOutput, sizeof(int32_t));
pRes->buffer = calloc(numOfOutput, POINTER_BYTES);
// not enough memory
if (pRes->tsrow == NULL || (pRes->buffer == NULL && pRes->numOfCols > 0)) {
taosTFree(pRes->tsrow);
......@@ -268,7 +268,7 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
void tscDestroyResPointerInfo(SSqlRes* pRes) {
static void tscDestroyResPointerInfo(SSqlRes* pRes) {
if (pRes->buffer != NULL) { // free all buffers containing the multibyte string
for (int i = 0; i < pRes->numOfCols; i++) {
taosTFree(pRes->buffer[i]);
......@@ -344,8 +344,6 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
}
SSqlCmd* pCmd = &pSql->cmd;
STscObj* pObj = pSql->pTscObj;
int32_t cmd = pCmd->command;
if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_LOCALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
cmd == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
......@@ -353,26 +351,61 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
}
// pSql->sqlstr will be used by tscBuildQueryStreamDesc
if (pObj->signature == pObj) {
// if (pObj->signature == pObj) {
//pthread_mutex_lock(&pObj->mutex);
taosTFree(pSql->sqlstr);
//pthread_mutex_unlock(&pObj->mutex);
}
// }
tscFreeSqlResult(pSql);
taosTFree(pSql->pSubs);
pSql->numOfSubs = 0;
pSql->self = 0;
tscResetSqlCmdObj(pCmd, false);
}
static UNUSED_FUNC void tscFreeSubobj(SSqlObj* pSql) {
if (pSql->numOfSubs == 0) {
return;
}
tscDebug("%p start to free sub SqlObj, numOfSub:%d", pSql, pSql->numOfSubs);
for(int32_t i = 0; i < pSql->numOfSubs; ++i) {
tscDebug("%p free sub SqlObj:%p, index:%d", pSql, pSql->pSubs[i], i);
taos_free_result(pSql->pSubs[i]);
pSql->pSubs[i] = NULL;
}
pSql->numOfSubs = 0;
}
/**
* The free operation removes the pSql from the hash table and frees it; freeing it in
* the processmsgfromserver function is impossible in this case, since it would fail
* to retrieve the pSqlObj from the hash table.
*
* @param pSql
*/
void tscFreeSqlObjInCache(void *pSql) {
assert(pSql != NULL);
SSqlObj** p = (SSqlObj**)pSql;
assert((*p)->self != 0 && (*p)->self == (p));
tscFreeSqlObj(*p);
}
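// Note: this is the free callback registered when tscObjCache is created (taosCacheInit(...,
// tscFreeSqlObjInCache, "sqlObj") above), so the cache hands it a pointer to the stored
// SSqlObj* value, hence the double indirection before tscFreeSqlObj is called.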
void tscFreeSqlObj(SSqlObj* pSql) {
if (pSql == NULL || pSql->signature != pSql) {
return;
}
tscDebug("%p start to free sql object", pSql);
tscDebug("%p start to free sqlObj", pSql);
STscObj* pTscObj = pSql->pTscObj;
tscFreeSubobj(pSql);
tscPartiallyFreeSqlObj(pSql);
pSql->signature = NULL;
......@@ -388,6 +421,14 @@ void tscFreeSqlObj(SSqlObj* pSql) {
tsem_destroy(&pSql->rspSem);
free(pSql);
tscDebug("%p free sqlObj completed", pSql);
int32_t ref = T_REF_DEC(pTscObj);
assert(ref >= 0);
if (ref == 0) {
tscCloseTscObj(pTscObj);
}
}
void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
......@@ -399,7 +440,10 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
taosTFree(pDataBlock->params);
// free the refcount for metermeta
taosCacheRelease(tscCacheHandle, (void**)&(pDataBlock->pTableMeta), false);
if (pDataBlock->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void**)&(pDataBlock->pTableMeta), false);
}
taosTFree(pDataBlock);
}
......@@ -454,9 +498,12 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
// set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache
if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) {
tstrncpy(pTableMetaInfo->name, pDataBlock->tableId, sizeof(pTableMetaInfo->name));
taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), false);
pTableMetaInfo->pTableMeta = taosCacheTransfer(tscCacheHandle, (void**)&pDataBlock->pTableMeta);
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
}
pTableMetaInfo->pTableMeta = taosCacheTransfer(tscMetaCache, (void**)&pDataBlock->pTableMeta);
} else {
assert(strncmp(pTableMetaInfo->name, pDataBlock->tableId, tListLen(pDataBlock->tableId)) == 0);
}
......@@ -527,7 +574,7 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
* due to operations such as drop database. So here we add the reference count directly instead of invoking
* taosGetDataFromCache, which may return a NULL value.
*/
dataBuf->pTableMeta = taosCacheAcquireByData(tscCacheHandle, pTableMeta);
dataBuf->pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMeta);
assert(initialSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
......@@ -721,17 +768,19 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
// TODO: all subqueries should be freed correctly before closing this connection.
void tscCloseTscObj(STscObj* pObj) {
assert(pObj != NULL);
pObj->signature = NULL;
taosTmrStopA(&(pObj->pTimer));
pthread_mutex_destroy(&pObj->mutex);
void* p = pObj->pDnodeConn;
if (pObj->pDnodeConn != NULL) {
rpcClose(pObj->pDnodeConn);
pObj->pDnodeConn = NULL;
}
tscDebug("%p DB connection is closed, dnodeConn:%p", pObj, pObj->pDnodeConn);
pthread_mutex_destroy(&pObj->mutex);
tscDebug("%p DB connection is closed, dnodeConn:%p", pObj, p);
taosTFree(pObj);
}
......@@ -1531,8 +1580,8 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
pQueryInfo->fieldsInfo.pSupportInfo = taosArrayInit(4, sizeof(SFieldSupInfo));
assert(pQueryInfo->exprList == NULL);
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
}
......@@ -1554,6 +1603,8 @@ int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
}
tscInitQueryInfo(pQueryInfo);
pQueryInfo->window = TSWINDOW_INITIALIZER;
pQueryInfo->msg = pCmd->payload; // pointer to the parent error message buffer
pCmd->pQueryInfo[pCmd->numOfClause++] = pQueryInfo;
......@@ -1665,7 +1716,10 @@ void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache)
return;
}
taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), removeFromCache);
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), removeFromCache);
}
taosTFree(pTableMetaInfo->vgroupList);
tscColumnListDestroy(pTableMetaInfo->tagColList);
......@@ -1689,6 +1743,8 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
}
pNew->pTscObj = pSql->pTscObj;
T_REF_INC(pNew->pTscObj);
pNew->signature = pNew;
SSqlCmd* pCmd = &pNew->cmd;
......@@ -1719,6 +1775,11 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL);
T_REF_INC(pNew->pTscObj);
uint64_t p = (uint64_t) pNew;
pNew->self = taosCachePut(tscObjCache, &p, sizeof(uint64_t), &pNew, sizeof(uint64_t), 2 * 600 * 1000);
return pNew;
}
......@@ -1788,6 +1849,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* p
}
assert(matched);
(void)matched;
}
tscFieldInfoUpdateOffset(pNewQueryInfo);
......@@ -1807,6 +1869,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
pNew->pTscObj = pSql->pTscObj;
pNew->signature = pNew;
T_REF_INC(pNew->pTscObj);
pNew->sqlstr = strdup(pSql->sqlstr);
if (pNew->sqlstr == NULL) {
......@@ -1837,10 +1900,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pNewQueryInfo->command = pQueryInfo->command;
pNewQueryInfo->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pNewQueryInfo->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pNewQueryInfo->intervalTime = pQueryInfo->intervalTime;
pNewQueryInfo->slidingTime = pQueryInfo->slidingTime;
memcpy(&pNewQueryInfo->interval, &pQueryInfo->interval, sizeof(pNewQueryInfo->interval));
pNewQueryInfo->type = pQueryInfo->type;
pNewQueryInfo->window = pQueryInfo->window;
pNewQueryInfo->limit = pQueryInfo->limit;
......@@ -1911,14 +1971,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
STableMetaInfo* pFinalInfo = NULL;
if (pPrevSql == NULL) {
STableMeta* pTableMeta = taosCacheAcquireByData(tscCacheHandle, pTableMetaInfo->pTableMeta); // get by name may failed due to the cache cleanup
STableMeta* pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMetaInfo->pTableMeta); // get by name may fail due to the cache cleanup
assert(pTableMeta != NULL);
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList);
} else { // transfer the ownership of pTableMeta to the newly created sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
STableMeta* pPrevTableMeta = taosCacheTransfer(tscCacheHandle, (void**)&pPrevInfo->pTableMeta);
STableMeta* pPrevTableMeta = taosCacheTransfer(tscMetaCache, (void**)&pPrevInfo->pTableMeta);
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList);
......@@ -1958,6 +2018,10 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
tscDebug("%p new sub insertion: %p, vnodeIdx:%d", pSql, pNew, pTableMetaInfo->vgroupIndex);
}
T_REF_INC(pNew->pTscObj);
uint64_t p = (uint64_t) pNew;
pNew->self = taosCachePut(tscObjCache, &p, sizeof(uint64_t), &pNew, sizeof(uint64_t), 2 * 600 * 10);
return pNew;
_error:
......@@ -2069,6 +2133,7 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
const char* msgFormat1 = "invalid SQL: %s";
const char* msgFormat2 = "invalid SQL: \'%s\' (%s)";
......@@ -2099,11 +2164,6 @@ bool tscHasReachLimitation(SQueryInfo* pQueryInfo, SSqlRes* pRes) {
return (pQueryInfo->clauseLimit > 0 && pRes->numOfClauseTotal >= pQueryInfo->clauseLimit);
}
bool tscResultsetFetchCompleted(TAOS_RES *result) {
SSqlRes* pRes = result;
return pRes->completed;
}
char* tscGetErrorMsgPayload(SSqlCmd* pCmd) { return pCmd->payload; }
/**
......
......@@ -35,8 +35,6 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision);
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision);
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
// int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
#endif // TDENGINE_NAME_H
......@@ -957,17 +957,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// http configs
cfg.option = "httpCacheSessions";
cfg.ptr = &tsHttpCacheSessions;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 1;
cfg.maxValue = 100000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "httpEnableRecordSql";
cfg.ptr = &tsHttpEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT32;
......
......@@ -62,10 +62,9 @@ SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const
if (name != NULL) {
tstrncpy(s.name, name, sizeof(s.name));
} else {
size_t len = strdequote(exprStr->z);
size_t tlen = MIN(sizeof(s.name), len + 1);
size_t tlen = MIN(sizeof(s.name), exprStr->n + 1);
tstrncpy(s.name, exprStr->z, tlen);
strdequote(s.name);
}
return s;
......@@ -100,62 +99,7 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
return pFilter;
}
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision) {
key /= 1000;
if (precision == TSDB_TIME_PRECISION_MICRO) {
key /= 1000;
}
struct tm tm;
time_t t = (time_t)key;
localtime_r(&t, &tm);
if (timeUnit == 'y') {
intervalTime *= 12;
}
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + intervalTime);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
key = mktime(&tm) * 1000L;
if (precision == TSDB_TIME_PRECISION_MICRO) {
key *= 1000L;
}
return key;
}
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision) {
skey /= 1000;
ekey /= 1000;
if (precision == TSDB_TIME_PRECISION_MICRO) {
skey /= 1000;
ekey /= 1000;
}
if (ekey < skey) {
int64_t tmp = ekey;
ekey = skey;
skey = tmp;
}
struct tm tm;
time_t t = (time_t)skey;
localtime_r(&t, &tm);
int smon = tm.tm_year * 12 + tm.tm_mon;
t = (time_t)ekey;
localtime_r(&t, &tm);
int emon = tm.tm_year * 12 + tm.tm_mon;
if (timeUnit == 'y') {
intervalTime *= 12;
}
return (emon - smon) / (int32_t)intervalTime;
}
#if 0
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
......@@ -220,6 +164,8 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in
return start;
}
#endif
/*
* tablePrefix.columnName
* extract table name and save it in pTable, with only column name in pToken
......
......@@ -540,9 +540,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
}
}
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size) {
char tmpBuf[4096] = {0};
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf) {
switch (type) {
case TSDB_DATA_TYPE_INT: {
SWAP(*(int32_t *)(pLeft), *(int32_t *)(pRight), int32_t);
......@@ -575,10 +573,9 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size) {
}
default: {
assert(size <= 4096);
memcpy(tmpBuf, pLeft, size);
memcpy(buf, pLeft, size);
memcpy(pLeft, pRight, size);
memcpy(pRight, tmpBuf, size);
memcpy(pRight, buf, size);
break;
}
}
......
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.0</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<description>TDengine JDBC Driver</description>
<licenses>
<license>
<name>GNU AFFERO GENERAL PUBLIC LICENSE Version 3</name>
<url>https://github.com/taosdata/TDengine/blob/master/LICENSE</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:git://github.com/taosdata/TDengine.git</connection>
<developerConnection>scm:git:git@github.com:taosdata/TDengine.git</developerConnection>
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.0</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<tag>HEAD</tag>
</scm>
<developers>
<developer>
<name>taosdata</name>
<email>support@taosdata.com</email>
<organization>https://www.taosdata.com/</organization>
<organizationUrl>https://www.taosdata.com/</organizationUrl>
</developer>
</developers>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<java.version>1.8</java.version>
<maven-compiler-plugin.version>3.6.0</maven-compiler-plugin.version>
<commons-logging.version>1.1.2</commons-logging.version>
<commons-lang3.version>3.5</commons-lang3.version>
</properties>
<dependencies>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons-logging.version}</version>
<exclusions>
<exclusion>
<groupId>*</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>${commons-lang3.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.0.0</version>
<configuration>
<descriptors>
<descriptor>src/main/assembly/assembly-jar.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<encoding>UTF-8</encoding>
<source>${java.version}</source>
<target>${java.version}</target>
<debug>true</debug>
<showDeprecation>true</showDeprecation>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.12.4</version>
<configuration>
<testFailureIgnore>true</testFailureIgnore>
</configuration>
</plugin>
</plugins>
</build>
<description>TDengine JDBC Driver</description>
<licenses>
<license>
<name>GNU AFFERO GENERAL PUBLIC LICENSE Version 3</name>
<url>https://github.com/taosdata/TDengine/blob/master/LICENSE</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:git://github.com/taosdata/TDengine.git</connection>
<developerConnection>scm:git:git@github.com:taosdata/TDengine.git</developerConnection>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<tag>HEAD</tag>
</scm>
<developers>
<developer>
<name>taosdata</name>
<email>support@taosdata.com</email>
<organization>https://www.taosdata.com/</organization>
<organizationUrl>https://www.taosdata.com/</organizationUrl>
</developer>
</developers>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<java.version>1.8</java.version>
<maven-compiler-plugin.version>3.6.0</maven-compiler-plugin.version>
<commons-logging.version>1.1.2</commons-logging.version>
<commons-lang3.version>3.5</commons-lang3.version>
</properties>
<dependencies>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons-logging.version}</version>
<exclusions>
<exclusion>
<groupId>*</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.0.0</version>
<configuration>
<descriptors>
<descriptor>src/main/assembly/assembly-jar.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<encoding>UTF-8</encoding>
<source>${java.version}</source>
<target>${java.version}</target>
<debug>true</debug>
<showDeprecation>true</showDeprecation>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.12.4</version>
<configuration>
<testFailureIgnore>true</testFailureIgnore>
</configuration>
</plugin>
</plugins>
</build>
</project>
......@@ -53,66 +53,12 @@ public class TSDBConnection implements Connection {
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
this.dbMetaData = meta;
//load taos.cfg start
File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
List<String> endpoints = loadConfigEndpoints(cfgFile);
if (!endpoints.isEmpty()) {
info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
}
//load taos.cfg end
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
}
private List<String> loadConfigEndpoints(File cfgFile) {
List<String> endpoints = new ArrayList<>();
try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
String line = null;
while ((line = reader.readLine()) != null) {
if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
endpoints.add(line.substring(line.indexOf('p') + 1).trim());
}
if (endpoints.size() > 1)
break;
}
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return endpoints;
}
/**
* @param cfgDirPath
* @return return the config dir
**/
private File loadConfigDir(String cfgDirPath) {
if (cfgDirPath == null)
return loadDefaultConfigDir();
File cfgDir = new File(cfgDirPath);
if (!cfgDir.exists())
return loadDefaultConfigDir();
return cfgDir;
}
/**
* @return search the default config dir, if the config dir is not exist will return null
*/
private File loadDefaultConfigDir() {
File cfgDir;
File cfgDir_linux = new File("/etc/taos");
cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
File cfgDir_windows = new File("C:\\TDengine\\cfg");
cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
return cfgDir;
}
private void connect(String host, int port, String dbName, String user, String password) throws SQLException {
this.connector = new TSDBJNIConnector();
this.connector.connect(host, port, dbName, user, password);
......
......@@ -68,15 +68,15 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
}
public boolean nullsAreSortedLow() throws SQLException {
return false;
return !nullsAreSortedHigh();
}
public boolean nullsAreSortedAtStart() throws SQLException {
return false;
return true;
}
public boolean nullsAreSortedAtEnd() throws SQLException {
return false;
return !nullsAreSortedAtStart();
}
public String getDatabaseProductName() throws SQLException {
......
......@@ -242,7 +242,7 @@ public class TSDBStatement implements Statement {
public void addBatch(String sql) throws SQLException {
if (batchedArgs == null) {
batchedArgs = new ArrayList<String>();
batchedArgs = new ArrayList<>();
}
batchedArgs.add(sql);
}
......
......@@ -30,10 +30,12 @@
#include "tlog.h"
#include "twal.h"
#define cError(...) { if (cqDebugFlag & DEBUG_ERROR) { taosPrintLog("ERROR CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cWarn(...) { if (cqDebugFlag & DEBUG_WARN) { taosPrintLog("WARN CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cFatal(...) { if (cqDebugFlag & DEBUG_FATAL) { taosPrintLog("CQ FATAL ", 255, __VA_ARGS__); }}
#define cError(...) { if (cqDebugFlag & DEBUG_ERROR) { taosPrintLog("CQ ERROR ", 255, __VA_ARGS__); }}
#define cWarn(...) { if (cqDebugFlag & DEBUG_WARN) { taosPrintLog("CQ WARN ", 255, __VA_ARGS__); }}
#define cInfo(...) { if (cqDebugFlag & DEBUG_INFO) { taosPrintLog("CQ ", 255, __VA_ARGS__); }}
#define cDebug(...) { if (cqDebugFlag & DEBUG_DEBUG) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cTrace(...) { if (cqDebugFlag & DEBUG_TRACE) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cPrint(...) { taosPrintLog("CQ ", 255, __VA_ARGS__); }
typedef struct {
int vgId;
......@@ -94,7 +96,7 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) {
pthread_mutex_init(&pContext->mutex, NULL);
cTrace("vgId:%d, CQ is opened", pContext->vgId);
cInfo("vgId:%d, CQ is opened", pContext->vgId);
return pContext;
}
......@@ -125,7 +127,7 @@ void cqClose(void *handle) {
taosTmrCleanUp(pContext->tmrCtrl);
pContext->tmrCtrl = NULL;
cTrace("vgId:%d, CQ is closed", pContext->vgId);
cInfo("vgId:%d, CQ is closed", pContext->vgId);
free(pContext);
}
......@@ -133,7 +135,7 @@ void cqStart(void *handle) {
SCqContext *pContext = handle;
if (pContext->dbConn || pContext->master) return;
cTrace("vgId:%d, start all CQs", pContext->vgId);
cInfo("vgId:%d, start all CQs", pContext->vgId);
pthread_mutex_lock(&pContext->mutex);
pContext->master = 1;
......@@ -149,7 +151,7 @@ void cqStart(void *handle) {
void cqStop(void *handle) {
SCqContext *pContext = handle;
cTrace("vgId:%d, stop all CQs", pContext->vgId);
cInfo("vgId:%d, stop all CQs", pContext->vgId);
if (pContext->dbConn == NULL || pContext->master == 0) return;
pthread_mutex_lock(&pContext->mutex);
......@@ -160,7 +162,7 @@ void cqStop(void *handle) {
if (pObj->pStream) {
taos_close_stream(pObj->pStream);
pObj->pStream = NULL;
cTrace("vgId:%d, id:%d CQ:%s is closed", pContext->vgId, pObj->tid, pObj->sqlStr);
cInfo("vgId:%d, id:%d CQ:%s is closed", pContext->vgId, pObj->tid, pObj->sqlStr);
} else {
taosTmrStop(pObj->tmrId);
pObj->tmrId = 0;
......@@ -188,7 +190,7 @@ void *cqCreate(void *handle, uint64_t uid, int tid, char *sqlStr, STSchema *pSch
pObj->pSchema = tdDupSchema(pSchema);
pObj->rowSize = schemaTLen(pSchema);
cTrace("vgId:%d, id:%d CQ:%s is created", pContext->vgId, pObj->tid, pObj->sqlStr);
cInfo("vgId:%d, id:%d CQ:%s is created", pContext->vgId, pObj->tid, pObj->sqlStr);
pthread_mutex_lock(&pContext->mutex);
......@@ -228,7 +230,7 @@ void cqDrop(void *handle) {
pObj->tmrId = 0;
}
cTrace("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
cInfo("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
tdFreeSchema(pObj->pSchema);
free(pObj->sqlStr);
free(pObj);
......@@ -262,7 +264,7 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, 0, pObj, NULL);
if (pObj->pStream) {
pContext->num++;
cTrace("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
cInfo("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
} else {
cError("vgId:%d, id:%d CQ:%s, failed to open", pContext->vgId, pObj->tid, pObj->sqlStr);
}
......@@ -278,7 +280,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
STSchema *pSchema = pObj->pSchema;
if (pObj->pStream == NULL) return;
cTrace("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);
cDebug("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);
int size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_DATA_ROW_HEAD_SIZE + pObj->rowSize;
char *buffer = calloc(size, 1);
......
......@@ -101,6 +101,7 @@ extern const int32_t TYPE_BYTES[11];
#define TSDB_TIME_PRECISION_MILLI 0
#define TSDB_TIME_PRECISION_MICRO 1
#define TSDB_TIME_PRECISION_NANO 2
#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L))
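// e.g. TSDB_TICK_PER_SECOND(TSDB_TIME_PRECISION_MILLI) evaluates to 1e3L, ..._MICRO to 1e6L,
// and any other precision (nanoseconds) to 1e9L, i.e. the number of ticks in one second.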
#define TSDB_TIME_PRECISION_MILLI_STR "ms"
#define TSDB_TIME_PRECISION_MICRO_STR "us"
......@@ -198,7 +199,7 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
void* getNullValue(int32_t type);
void assignVal(char *val, const char *src, int32_t len, int32_t type);
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf);
// TODO: check if below is necessary
#define TSDB_RELATION_INVALID 0
......@@ -209,21 +210,24 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_RELATION_GREATER_EQUAL 5
#define TSDB_RELATION_NOT_EQUAL 6
#define TSDB_RELATION_LIKE 7
#define TSDB_RELATION_IN 8
#define TSDB_RELATION_AND 9
#define TSDB_RELATION_OR 10
#define TSDB_RELATION_NOT 11
#define TSDB_BINARY_OP_ADD 12
#define TSDB_BINARY_OP_SUBTRACT 13
#define TSDB_BINARY_OP_MULTIPLY 14
#define TSDB_BINARY_OP_DIVIDE 15
#define TSDB_BINARY_OP_REMAINDER 16
#define TSDB_RELATION_ISNULL 8
#define TSDB_RELATION_NOTNULL 9
#define TSDB_RELATION_IN 10
#define TSDB_RELATION_AND 11
#define TSDB_RELATION_OR 12
#define TSDB_RELATION_NOT 13
#define TSDB_BINARY_OP_ADD 30
#define TSDB_BINARY_OP_SUBTRACT 31
#define TSDB_BINARY_OP_MULTIPLY 32
#define TSDB_BINARY_OP_DIVIDE 33
#define TSDB_BINARY_OP_REMAINDER 34
#define TS_PATH_DELIMITER_LEN 1
#define TSDB_UNI_LEN 24
#define TSDB_USER_LEN TSDB_UNI_LEN
// ACCOUNT is a 32 bit positive integer
// this is the length of its string representation
// including the terminator zero
......@@ -241,7 +245,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64
#define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE
#define TSDB_MAX_SQL_SHOW_LEN 256
#define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U) // sql length should be less than 8mb
#define TSDB_MAX_ALLOWED_SQL_LEN (1*1024*1024U) // sql length should be less than 1mb
#define TSDB_MAX_BYTES_PER_ROW 16384
#define TSDB_MAX_TAGS_LEN 16384
......
......@@ -460,11 +460,7 @@ typedef struct {
int16_t order;
int16_t orderColId;
int16_t numOfCols; // the number of columns will be load from vnode
int64_t intervalTime; // time interval for aggregation, in million second
int64_t intervalOffset; // start offset for interval query
int64_t slidingTime; // value for sliding window
char intervalTimeUnit;
char slidingTimeUnit; // time interval type, for revisement of interval(1d)
SInterval interval;
uint16_t tagCondLen; // tag length in current query
int16_t numOfGroupCols; // num of group by columns
int16_t orderByIdx;
......
......@@ -765,7 +765,9 @@ void read_history() {
FILE *f = fopen(f_history, "r");
if (f == NULL) {
#ifndef WINDOWS
fprintf(stderr, "Failed to open file %s\n", f_history);
if (errno != ENOENT) {
fprintf(stderr, "Failed to open file %s, reason:%s\n", f_history, strerror(errno));
}
#endif
return;
}
......@@ -792,7 +794,7 @@ void write_history() {
FILE *f = fopen(f_history, "w");
if (f == NULL) {
#ifndef WINDOWS
fprintf(stderr, "Failed to open file %s for write\n", f_history);
fprintf(stderr, "Failed to open file %s for write, reason:%s\n", f_history, strerror(errno));
#endif
return;
}
......
......@@ -30,8 +30,6 @@ extern "C" {
#define MILLISECOND_PER_HOUR (MILLISECOND_PER_MINUTE * 60)
#define MILLISECOND_PER_DAY (MILLISECOND_PER_HOUR * 24)
#define MILLISECOND_PER_WEEK (MILLISECOND_PER_DAY * 7)
#define MILLISECOND_PER_MONTH (MILLISECOND_PER_DAY * 30)
#define MILLISECOND_PER_YEAR (MILLISECOND_PER_DAY * 365)
//@return timestamp in second
int32_t taosGetTimestampSec();
......@@ -63,8 +61,22 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
}
}
int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts);
int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
typedef struct SInterval {
char intervalUnit;
char slidingUnit;
char offsetUnit;
int64_t interval;
int64_t sliding;
int64_t offset;
} SInterval;
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts);
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t daylight);
void deltaToUtcInitOnce();
......
......@@ -321,7 +321,7 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
}
static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
static int32_t getDurationInUs(int64_t val, char unit, int64_t* result) {
*result = val;
int64_t factor = 1000L;
......@@ -342,19 +342,12 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu
case 'w':
(*result) *= MILLISECOND_PER_WEEK*factor;
break;
case 'n':
(*result) *= MILLISECOND_PER_MONTH*factor;
break;
case 'y':
(*result) *= MILLISECOND_PER_YEAR*factor;
break;
case 'a':
(*result) *= factor;
break;
case 'u':
break;
default: {
;
return -1;
}
}
......@@ -373,7 +366,7 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu
* n - Months (30 days)
* y - Years (365 days)
*/
int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration) {
errno = 0;
char* endPtr = NULL;
......@@ -383,10 +376,16 @@ int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
return -1;
}
return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts);
/* natural month/year are not allowed in absolute duration */
char unit = token[tokenlen - 1];
if (unit == 'n' || unit == 'y') {
return -1;
}
return getDurationInUs(timestamp, unit, duration);
}
int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
errno = 0;
/* get the basic numeric value */
......@@ -400,7 +399,121 @@ int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, ch
return 0;
}
return getTimestampInUsFromStrImpl(*duration, *unit, duration);
return getDurationInUs(*duration, *unit, duration);
}
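// A quick illustration of the two parsers above (a sketch, not part of the library tests):
//   int64_t dur = 0; char unit = 0;
//   parseAbsoluteDuration("1w", 2, &dur);        // ok: dur is one week expressed in microseconds
//   parseAbsoluteDuration("1n", 2, &dur);        // -1: natural month/year is not an absolute duration
//   parseNatualDuration("1n", 2, &dur, &unit);   // ok: keeps the raw count with unit = 'n' for calendar math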
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
if (duration == 0) {
return t;
}
if (unit == 'y') {
duration *= 12;
} else if (unit != 'n') {
return t + duration;
}
struct tm tm;
time_t tt = (time_t)(t / TSDB_TICK_PER_SECOND(precision));
localtime_r(&tt, &tm);
int mon = tm.tm_year * 12 + tm.tm_mon + (int)duration;
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
return (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
}
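// Sketch of the natural-unit path (millisecond precision assumed):
//   int64_t next = taosTimeAdd(now, 1, 'n', TSDB_TIME_PRECISION_MILLI);
//   // 'y' is first converted to 12 months; only tm_year/tm_mon are advanced and mktime()
//   // re-normalizes the local calendar time, so the step follows the calendar instead of a
//   // fixed 30/365-day length. Any other unit simply returns t + duration.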
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision) {
if (ekey < skey) {
int64_t tmp = ekey;
ekey = skey;
skey = tmp;
}
if (unit != 'n' && unit != 'y') {
return (int32_t)((ekey - skey) / interval);
}
skey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
ekey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
struct tm tm;
time_t t = (time_t)skey;
localtime_r(&t, &tm);
int smon = tm.tm_year * 12 + tm.tm_mon;
t = (time_t)ekey;
localtime_r(&t, &tm);
int emon = tm.tm_year * 12 + tm.tm_mon;
if (unit == 'y') {
interval *= 12;
}
return (emon - smon) / (int32_t)interval;
}
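// Sketch: for fixed units the count is a plain division (ekey - skey) / interval; for 'n'/'y'
// it is the difference of the local year*12+month indices of the two keys divided by the
// interval (years converted to 12 months first), e.g.
//   int32_t months = taosTimeCountInterval(skey, ekey, 1, 'n', TSDB_TIME_PRECISION_MILLI);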
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision) {
if (pInterval->sliding == 0) {
assert(pInterval->interval == 0);
return t;
}
int64_t start = t;
if (pInterval->slidingUnit == 'n' || pInterval->slidingUnit == 'y') {
start /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
struct tm tm;
time_t tt = (time_t)start;
localtime_r(&tt, &tm);
tm.tm_sec = 0;
tm.tm_min = 0;
tm.tm_hour = 0;
tm.tm_mday = 1;
if (pInterval->slidingUnit == 'y') {
tm.tm_mon = 0;
tm.tm_year = (int)(tm.tm_year / pInterval->sliding * pInterval->sliding);
} else {
int mon = tm.tm_year * 12 + tm.tm_mon;
mon = (int)(mon / pInterval->sliding * pInterval->sliding);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
}
start = (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
} else {
int64_t delta = t - pInterval->interval;
int32_t factor = delta > 0 ? 1 : -1;
start = (delta / pInterval->sliding + factor) * pInterval->sliding;
if (pInterval->intervalUnit == 'd' || pInterval->intervalUnit == 'w') {
/*
* here we revise the start time of the day according to the local time zone,
* but in case of DST, the start time of a day needs to be decided dynamically.
*/
// todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
// see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char** tzname = _tzname;
#endif
start += (int64_t)(timezone * TSDB_TICK_PER_SECOND(precision));
}
int64_t end = start + pInterval->interval - 1;
if (end < t) {
start += pInterval->sliding;
}
}
if (pInterval->offset > 0) {
start = taosTimeAdd(start, pInterval->offset, pInterval->offsetUnit, precision);
if (start > t) {
start = taosTimeAdd(start, -pInterval->interval, pInterval->intervalUnit, precision);
}
}
return start;
}
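// A minimal usage sketch (illustrative values, millisecond precision assumed):
//   SInterval itv = { .intervalUnit = 'h', .slidingUnit = 'h', .offsetUnit = 'h',
//                     .interval = 3600000, .sliding = 3600000, .offset = 0 };
//   int64_t winStart = taosTimeTruncate(now, &itv, TSDB_TIME_PRECISION_MILLI);
//   // for this configuration winStart <= now < winStart + itv.interval, i.e. the timestamp is
//   // aligned to the start of its 1-hour window; with 'n'/'y' sliding units the alignment is
//   // done on calendar month/year boundaries via localtime_r/mktime instead.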
// internal function, when program is paused in debugger,
......@@ -411,24 +524,38 @@ int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, ch
// 2020-07-03 17:48:42
// and the parameter can also be a variable.
const char* fmtts(int64_t ts) {
static char buf[32];
static char buf[96];
size_t pos = 0;
struct tm tm;
time_t tt;
if (ts > -62135625943 && ts < 32503651200) {
tt = ts;
} else if (ts > -62135625943000 && ts < 32503651200000) {
tt = ts / 1000;
} else {
tt = ts / 1000000;
time_t t = (time_t)ts;
localtime_r(&t, &tm);
pos += strftime(buf + pos, sizeof(buf), "s=%Y-%m-%d %H:%M:%S", &tm);
}
struct tm* ptm = localtime(&tt);
size_t pos = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", ptm);
if (ts > -62135625943000 && ts < 32503651200000) {
time_t t = (time_t)(ts / 1000);
localtime_r(&t, &tm);
if (pos > 0) {
buf[pos++] = ' ';
buf[pos++] = '|';
buf[pos++] = ' ';
}
pos += strftime(buf + pos, sizeof(buf), "ms=%Y-%m-%d %H:%M:%S", &tm);
pos += sprintf(buf + pos, ".%03d", (int)(ts % 1000));
}
if (ts <= -62135625943000 || ts >= 32503651200000) {
sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
} else if (ts <= -62135625943 || ts >= 32503651200) {
sprintf(buf + pos, ".%03d", (int)(ts % 1000));
{
time_t t = (time_t)(ts / 1000000);
localtime_r(&t, &tm);
if (pos > 0) {
buf[pos++] = ' ';
buf[pos++] = '|';
buf[pos++] = ' ';
}
pos += strftime(buf + pos, sizeof(buf), "us=%Y-%m-%d %H:%M:%S", &tm);
pos += sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
}
return buf;
......
......@@ -36,7 +36,7 @@
#define HTTP_BUFFER_SIZE 8388608
#define HTTP_STEP_SIZE 4096 //http messages are processed step by step
#define HTTP_METHOD_SCANNER_SIZE 7 //http method fp size
#define TSDB_CODE_HTTP_GC_TARGET_SIZE 512
#define HTTP_GC_TARGET_SIZE 512
#define HTTP_WRITE_RETRY_TIMES 500
#define HTTP_WRITE_WAIT_TIME_MS 5
#define HTTP_SESSION_ID_LEN (TSDB_USER_LEN + TSDB_PASSWORD_LEN)
......
......@@ -29,10 +29,10 @@ void httpFreeMultiCmds(HttpContext *pContext);
HttpSqlCmd *httpNewSqlCmd(HttpContext *pContext);
HttpSqlCmd *httpCurrSqlCmd(HttpContext *pContext);
int32_t httpCurSqlCmdPos(HttpContext *pContext);
int32_t httpCurSqlCmdPos(HttpContext *pContext);
void httpTrimTableName(char *name);
void httpTrimTableName(char *name);
int32_t httpShrinkTableName(HttpContext *pContext, int32_t pos, char *name);
char *httpGetCmdsString(HttpContext *pContext, int32_t pos);
char * httpGetCmdsString(HttpContext *pContext, int32_t pos);
#endif
......@@ -131,8 +131,6 @@ HttpContext *httpCreateContext(int32_t fd) {
HttpContext *httpGetContext(void *ptr) {
uint64_t handleVal = (uint64_t)ptr;
HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &handleVal, sizeof(HttpContext *));
ASSERT(ppContext);
ASSERT(*ppContext);
if (ppContext) {
HttpContext *pContext = *ppContext;
......@@ -232,4 +230,5 @@ void httpCloseContextByServer(HttpContext *pContext) {
pContext->parsed = false;
httpRemoveContextFromEpoll(pContext);
httpReleaseContext(pContext, true);
}
......@@ -228,7 +228,7 @@ bool gcProcessQueryRequest(HttpContext* pContext) {
cmd->values = refIdBuffer;
cmd->table = aliasBuffer;
cmd->numOfRows = 0; // hack way as target flags
cmd->timestamp = httpAddToSqlCmdBufferWithSize(pContext, TSDB_CODE_HTTP_GC_TARGET_SIZE + 1); // hack way
cmd->timestamp = httpAddToSqlCmdBufferWithSize(pContext, HTTP_GC_TARGET_SIZE + 1); // hack way
if (cmd->timestamp == -1) {
httpWarn("context:%p, fd:%d, user:%s, cant't malloc target size, sql buffer is full", pContext, pContext->fd,
......
......@@ -129,48 +129,48 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
// for group by
if (groupFields != -1) {
char target[TSDB_CODE_HTTP_GC_TARGET_SIZE] = {0};
char target[HTTP_GC_TARGET_SIZE] = {0};
int32_t len;
len = snprintf(target,TSDB_CODE_HTTP_GC_TARGET_SIZE,"%s{",aliasBuffer);
len = snprintf(target,HTTP_GC_TARGET_SIZE,"%s{",aliasBuffer);
for (int32_t i = dataFields + 1; i<num_fields; i++){
switch (fields[i].type) {
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT:
len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int8_t *)row[i]));
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int8_t *)row[i]));
break;
case TSDB_DATA_TYPE_SMALLINT:
len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int16_t *)row[i]));
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d", fields[i].name, *((int16_t *)row[i]));
break;
case TSDB_DATA_TYPE_INT:
len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%d,", fields[i].name, *((int32_t *)row[i]));
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%d,", fields[i].name, *((int32_t *)row[i]));
break;
case TSDB_DATA_TYPE_BIGINT:
len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%ld", fields[i].name, *((int64_t *)row[i]));
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%ld", fields[i].name, *((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT:
len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%.5f", fields[i].name, *((float *)row[i]));
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.5f", fields[i].name, *((float *)row[i]));
break;
case TSDB_DATA_TYPE_DOUBLE:
len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%.9f", fields[i].name, *((double *)row[i]));
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%.9f", fields[i].name, *((double *)row[i]));
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
if (row[i]!= NULL){
len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:", fields[i].name);
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:", fields[i].name);
memcpy(target + len, (char *) row[i], length[i]);
len = strlen(target);
}
break;
default:
len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "%s:%s", fields[i].name, "-");
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "%s:%s", fields[i].name, "-");
break;
}
if(i < num_fields - 1 ){
len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, ", ");
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, ", ");
}
}
len += snprintf(target + len, TSDB_CODE_HTTP_GC_TARGET_SIZE - len, "}");
len += snprintf(target + len, HTTP_GC_TARGET_SIZE - len, "}");
if (strcmp(target, targetBuffer) != 0) {
// first target not write this section
......@@ -180,7 +180,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
// start new target
gcWriteTargetStartJson(jsonBuf, refIdBuffer, target);
strncpy(targetBuffer, target, TSDB_CODE_HTTP_GC_TARGET_SIZE);
strncpy(targetBuffer, target, HTTP_GC_TARGET_SIZE);
}
} // end of group by
......
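
The gcBuildQueryJson hunk builds the Grafana target string by repeatedly appending with snprintf bounded by the renamed HTTP_GC_TARGET_SIZE macro. The sketch below shows that accumulate-with-remaining-space pattern in isolation; the buffer size, field names, and the clamp on the running length are illustrative additions, since snprintf returns the length it would have written rather than the length it actually wrote.

```c
#include <stdio.h>

#define TARGET_SIZE 64  /* stand-in for HTTP_GC_TARGET_SIZE */

int main(void) {
  const char *names[]  = {"host", "region", "dc"};
  const int   values[] = {3, 12, 7};
  char   target[TARGET_SIZE] = {0};
  size_t len = 0;

  len += snprintf(target + len, TARGET_SIZE - len, "alias{");
  for (int i = 0; i < 3; i++) {
    if (len >= TARGET_SIZE) break;  /* snprintf reports the desired length, so clamp before reusing it */
    len += snprintf(target + len, TARGET_SIZE - len, "%s:%d%s",
                    names[i], values[i], (i < 2) ? ", " : "");
  }
  if (len < TARGET_SIZE) snprintf(target + len, TARGET_SIZE - len, "}");
  printf("%s\n", target);  /* alias{host:3, region:12, dc:7} */
  return 0;
}
```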
......@@ -238,6 +238,7 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const
httpTrace("context:%p, fd:%d, keepAlive:%d", pContext, pContext->fd, pContext->parser->keepAlive);
}
#if 0
else if (0 == strcasecmp(key, "Content-Encoding")) {
if (0 == strcmp(val, "gzip")) {
parser->contentChunked = 1;
......@@ -245,8 +246,9 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const
}
return 0;
}
#endif
else if (0 == strcasecmp(key, "Transfer-Encoding")) {
else if (0 == strcasecmp(key, "Transfer-Encoding") || 0 == strcasecmp(key, "Content-Encoding")) {
if (strstr(val, "gzip")) {
parser->transferGzip = 1;
ehttp_gzip_conf_t conf = {0};
......
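
The parser hunk above folds the previously disabled Content-Encoding branch into the Transfer-Encoding check, so either header whose value contains "gzip" switches the body to gzip decoding. A standalone sketch of that header test (case-insensitive key match, substring match on the value) follows; the sample header values are made up for illustration.

```c
#include <stdio.h>
#include <string.h>
#include <strings.h>   /* strcasecmp (POSIX) */

/* Returns 1 when the header indicates a gzip-compressed body. */
static int is_gzip_header(const char *key, const char *val) {
  if (strcasecmp(key, "Transfer-Encoding") == 0 || strcasecmp(key, "Content-Encoding") == 0) {
    return strstr(val, "gzip") != NULL;
  }
  return 0;
}

int main(void) {
  printf("%d\n", is_gzip_header("content-encoding", "gzip"));          /* 1 */
  printf("%d\n", is_gzip_header("Transfer-Encoding", "chunked,gzip")); /* 1 */
  printf("%d\n", is_gzip_header("Content-Type", "application/json"));  /* 0 */
  return 0;
}
```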
......@@ -87,15 +87,12 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
JsonBuf *jsonBuf = httpMallocJsonBuf(pContext);
if (jsonBuf == NULL) return false;
cmd->numOfRows += numOfRows;
int32_t num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
for (int32_t k = 0; k < numOfRows; ++k) {
TAOS_ROW row = taos_fetch_row(result);
if (row == NULL) {
cmd->numOfRows--;
continue;
}
int32_t* length = taos_fetch_lengths(result);
......@@ -151,24 +148,23 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
}
// data row array end
httpJsonToken(jsonBuf, JsonArrEnd);
}
httpJsonToken(jsonBuf, JsonArrEnd);
cmd->numOfRows ++;
if (cmd->numOfRows >= tsRestRowLimit) {
httpDebug("context:%p, fd:%d, user:%s, retrieve rows:%d larger than limit:%d, abort retrieve", pContext,
pContext->fd, pContext->user, cmd->numOfRows, tsRestRowLimit);
return false;
} else {
if (pContext->fd <= 0) {
httpError("context:%p, fd:%d, user:%s, connection is closed, abort retrieve", pContext, pContext->fd,
pContext->user);
httpError("context:%p, fd:%d, user:%s, conn closed, abort retrieve", pContext, pContext->fd, pContext->user);
return false;
}
if (cmd->numOfRows >= tsRestRowLimit) {
httpDebug("context:%p, fd:%d, user:%s, retrieve rows:%d larger than limit:%d, abort retrieve", pContext,
pContext->fd, pContext->user, cmd->numOfRows, tsRestRowLimit);
return false;
} else {
httpDebug("context:%p, fd:%d, user:%s, total rows:%d retrieved", pContext, pContext->fd, pContext->user,
cmd->numOfRows);
return true;
}
}
httpDebug("context:%p, fd:%d, user:%s, retrieved row:%d", pContext, pContext->fd, pContext->user, cmd->numOfRows);
return true;
}
bool restBuildSqlTimestampJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, int32_t numOfRows) {
......
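
The reworked restBuildSqlJson skips NULL rows without counting them, aborts as soon as the descriptor is closed, and only then compares the accumulated row count against the limit. A compact sketch of that control flow, with a fake row source standing in for taos_fetch_row and an assumed limit constant, is shown below.

```c
#include <stdio.h>
#include <stdbool.h>

#define ROW_LIMIT 5   /* stand-in for tsRestRowLimit */

/* Fake row source: even indexes yield a row, odd indexes simulate a NULL row. */
static bool fetch_row(int i) { return (i % 2) == 0; }

static bool build_batch(int fd, int *numOfRows, int batchSize) {
  for (int k = 0; k < batchSize; k++) {
    if (!fetch_row(k)) continue;       /* skip NULL rows without counting them */
    (*numOfRows)++;
  }
  if (fd <= 0) {
    fprintf(stderr, "connection closed, abort retrieve\n");
    return false;
  }
  if (*numOfRows >= ROW_LIMIT) {
    fprintf(stderr, "row limit %d reached, abort retrieve\n", ROW_LIMIT);
    return false;
  }
  return true;                         /* caller may fetch the next batch */
}

int main(void) {
  int rows = 0;
  while (build_batch(1 /* open fd */, &rows, 4)) { /* keep fetching */ }
  printf("retrieved %d rows\n", rows);
  return 0;
}
```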
......@@ -308,48 +308,55 @@ static bool httpReadData(HttpContext *pContext) {
httpInitParser(pParser);
}
ASSERT(!pParser->parsed);
if (pParser->parsed) {
httpDebug("context:%p, fd:%d, not in ready state, parsed:%d", pContext, pContext->fd, pParser->parsed);
return false;
}
pContext->accessTimes++;
pContext->lastAccessTime = taosGetTimestampSec();
char buf[HTTP_STEP_SIZE + 1] = {0};
char buf[HTTP_STEP_SIZE + 1] = {0};
int32_t nread = (int32_t)taosReadSocket(pContext->fd, buf, sizeof(buf));
if (nread > 0) {
buf[nread] = '\0';
httpTrace("context:%p, fd:%d, nread:%d", pContext, pContext->fd, nread);
int32_t ok = httpParseBuf(pParser, buf, nread);
if (ok) {
httpError("context:%p, fd:%d, parse failed, ret:%d code:%d close connect", pContext, pContext->fd, ok, pParser->parseCode);
httpSendErrorResp(pContext, pParser->parseCode);
httpNotifyContextClose(pContext);
return false;
}
while (1) {
int32_t nread = (int32_t)taosReadSocket(pContext->fd, buf, HTTP_STEP_SIZE);
if (nread > 0) {
buf[nread] = '\0';
httpTraceL("context:%p, fd:%d, nread:%d content:%s", pContext, pContext->fd, nread, buf);
int32_t ok = httpParseBuf(pParser, buf, nread);
if (ok) {
httpError("context:%p, fd:%d, parse failed, ret:%d code:%d close connect", pContext, pContext->fd, ok,
pParser->parseCode);
httpSendErrorResp(pContext, pParser->parseCode);
httpNotifyContextClose(pContext);
return false;
}
if (pParser->parseCode) {
httpError("context:%p, fd:%d, parse failed, code:%d close connect", pContext, pContext->fd, pParser->parseCode);
httpSendErrorResp(pContext, pParser->parseCode);
httpNotifyContextClose(pContext);
return false;
}
if (pParser->parseCode) {
httpError("context:%p, fd:%d, parse failed, code:%d close connect", pContext, pContext->fd, pParser->parseCode);
httpSendErrorResp(pContext, pParser->parseCode);
httpNotifyContextClose(pContext);
return false;
}
if (!pParser->parsed) {
httpTrace("context:%p, fd:%d, read not over yet, len:%d", pContext, pContext->fd, pParser->body.pos);
return false;
} else {
httpTraceL("context:%p, fd:%d, len:%d, body:%s", pContext, pContext->fd, pParser->body.pos, pParser->body.str);
return true;
}
} else if (nread < 0) {
if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
httpDebug("context:%p, fd:%d, read from socket error:%d, wait another event", pContext, pContext->fd, errno);
return false; // later again
if (!pParser->parsed) {
httpTrace("context:%p, fd:%d, read not finished", pContext, pContext->fd);
continue;
} else {
httpDebug("context:%p, fd:%d, bodyLen:%d", pContext, pContext->fd, pParser->body.pos);
return true;
}
} else if (nread < 0) {
if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
httpDebug("context:%p, fd:%d, read from socket error:%d, wait another event", pContext, pContext->fd, errno);
return false; // later again
} else {
httpError("context:%p, fd:%d, read from socket error:%d, close connect", pContext, pContext->fd, errno);
return false;
}
} else {
httpError("context:%p, fd:%d, read from socket error:%d, close connect", pContext, pContext->fd, errno);
httpError("context:%p, fd:%d, nread:%d, wait another event", pContext, pContext->fd, nread);
return false;
}
} else {
httpError("context:%p, fd:%d, nread:%d, wait another event", pContext, pContext->fd, nread);
return false;
}
}
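
The httpReadData rewrite replaces the single read with a while(1) loop that keeps pulling HTTP_STEP_SIZE chunks, feeds each chunk to the parser, and only returns once the parser reports a complete request or the socket has nothing more to offer. The sketch below reproduces that loop shape against a plain POSIX descriptor; the parser is a trivial stand-in that merely looks for a blank line, not the TDengine HTTP parser, and the data is fed through a pipe so the example runs on its own.

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define STEP_SIZE 4096   /* stand-in for HTTP_STEP_SIZE */

typedef struct { bool parsed; } DemoParser;

/* Trivial stand-in parser: a request is "parsed" once a blank line is seen. */
static int demo_parse(DemoParser *p, const char *buf) {
  if (strstr(buf, "\r\n\r\n") != NULL) p->parsed = true;
  return 0;   /* 0 = no parse error */
}

static bool read_request(int fd, DemoParser *parser) {
  char buf[STEP_SIZE + 1];
  while (1) {
    ssize_t nread = read(fd, buf, STEP_SIZE);
    if (nread > 0) {
      buf[nread] = '\0';
      if (demo_parse(parser, buf) != 0) return false;   /* parse error: give up */
      if (!parser->parsed) continue;                    /* more data expected */
      return true;                                      /* full request in hand */
    } else if (nread < 0) {
      if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
        return false;   /* nothing more for now; wait for the next poll event */
      }
      return false;     /* real socket error: caller closes the connection */
    } else {
      return false;     /* nread == 0: peer closed the connection */
    }
  }
}

int main(void) {
  int fds[2];
  if (pipe(fds) != 0) return 1;
  const char *req = "GET / HTTP/1.1\r\nHost: x\r\n\r\n";
  if (write(fds[1], req, strlen(req)) < 0) return 1;
  close(fds[1]);
  DemoParser parser = {false};
  printf("parsed: %d\n", read_request(fds[0], &parser));
  close(fds[0]);
  return 0;
}
```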
......@@ -51,10 +51,6 @@ void httpProcessMultiSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int n
}
}
// if (tscResultsetFetchCompleted(result)) {
// isContinue = false;
// }
if (isContinue) {
// retrieve next batch of rows
httpDebug("context:%p, fd:%d, user:%s, process pos:%d, continue retrieve, numOfRows:%d, sql:%s", pContext,
......@@ -224,14 +220,6 @@ void httpProcessSingleSqlRetrieveCallBackImp(void *param, TAOS_RES *result, int
}
}
#if 0
// todo refactor
if (tscResultsetFetchCompleted(result)) {
httpDebug("context:%p, fd:%d, user:%s, resultset fetch completed", pContext, pContext->fd, pContext->user);
isContinue = false;
}
#endif
if (isContinue) {
// retrieve next batch of rows
httpDebug("context:%p, fd:%d, user:%s, continue retrieve, numOfRows:%d", pContext, pContext->fd, pContext->user,
......
......@@ -32,6 +32,7 @@ struct tExprNode;
struct SSchema;
enum {
TSQL_NODE_DUMMY = 0x0,
TSQL_NODE_EXPR = 0x1,
TSQL_NODE_COL = 0x2,
TSQL_NODE_VALUE = 0x4,
......
......@@ -57,7 +57,7 @@ typedef struct SWindowResult {
uint16_t numOfRows; // number of rows of current time window
bool closed; // this result status: closed or opened
SResultInfo* resultInfo; // For each result column, there is a resultInfo
TSKEY skey; // start key of current time window
union {STimeWindow win; char* key;}; // start key of current time window
} SWindowResult;
/**
......@@ -132,12 +132,9 @@ typedef struct SQueryCostInfo {
typedef struct SQuery {
int16_t numOfCols;
int16_t numOfTags;
char intervalTimeUnit;
char slidingTimeUnit; // interval data type, used for daytime revise
SOrderVal order;
STimeWindow window;
int64_t intervalTime;
int64_t slidingTime; // sliding time for sliding window query
SInterval interval;
int16_t precision;
int16_t numOfOutput;
int16_t fillType;
......
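
The SQuery hunk replaces the four scattered members (intervalTimeUnit, slidingTimeUnit, intervalTime, slidingTime) with a single SInterval field. The sketch below shows the shape of that consolidation; the real SInterval definition is not part of this excerpt, so the field names here are assumptions for illustration only.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical consolidated interval descriptor (field names assumed, not from the diff). */
typedef struct DemoInterval {
  int64_t interval;       /* window length */
  int64_t sliding;        /* sliding step for sliding-window queries */
  char    intervalUnit;   /* time unit of the window, e.g. 'a' (ms), 's', 'd' */
  char    slidingUnit;    /* time unit of the sliding step */
} DemoInterval;

typedef struct DemoQuery {
  int16_t      numOfCols;
  DemoInterval interval;  /* one member instead of four loose fields */
} DemoQuery;

int main(void) {
  DemoQuery q = {.numOfCols = 2,
                 .interval  = {.interval = 60000, .sliding = 10000,
                               .intervalUnit = 'a', .slidingUnit = 'a'}};
  printf("interval=%lld sliding=%lld\n",
         (long long)q.interval.interval, (long long)q.interval.sliding);
  return 0;
}
```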
......@@ -51,12 +51,11 @@ typedef struct SFillInfo {
int32_t rowSize; // size of each row
// char ** pTags; // tags value for current interpolation
SFillTagColInfo* pTags; // tags value for filling gap
int64_t slidingTime; // sliding value to determine the number of result for a given time window
SInterval interval;
char * prevValues; // previous row of data, to generate the interpolation results
char * nextValues; // next row of data
char** pData; // original result data block involved in filling data
int32_t capacityInRows; // data buffer size in rows
int8_t slidingUnit; // sliding time unit
int8_t precision; // time resoluation
SFillColInfo* pFillCol; // column info for fill operations
} SFillInfo;
......
......@@ -64,11 +64,11 @@ typedef struct tMemBucket {
__perc_hash_func_t hashFunc;
} tMemBucket;
tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType);
tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval);
void tMemBucketDestroy(tMemBucket *pBucket);
void tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size);
int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size);
double getPercentile(tMemBucket *pMemBucket, double percent);
......
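
The tMemBucket hunk changes the constructor to take the value range up front and makes the insert return a status code instead of void. The sketch below mirrors that API shape with a toy fixed-count bucket array; the bucket math and the names are illustrative and are not the percentile implementation behind getPercentile.

```c
#include <stdio.h>
#include <stdlib.h>

#define NUM_BUCKETS 8

typedef struct DemoBucket {
  double min, max;
  int    counts[NUM_BUCKETS];
} DemoBucket;

/* Constructor now receives the value range, so bucket boundaries are fixed at creation. */
static DemoBucket *demo_bucket_create(double minval, double maxval) {
  if (minval >= maxval) return NULL;
  DemoBucket *b = calloc(1, sizeof(*b));
  if (b != NULL) { b->min = minval; b->max = maxval; }
  return b;
}

/* Insert returns a status code: 0 on success, -1 when the value falls outside the range. */
static int demo_bucket_put(DemoBucket *b, double v) {
  if (v < b->min || v > b->max) return -1;
  int idx = (int)((v - b->min) / (b->max - b->min) * (NUM_BUCKETS - 1));
  b->counts[idx]++;
  return 0;
}

int main(void) {
  DemoBucket *b = demo_bucket_create(0.0, 100.0);
  if (b == NULL) return 1;
  printf("put 42  -> %d\n", demo_bucket_put(b, 42.0));    /* 0 */
  printf("put 123 -> %d\n", demo_bucket_put(b, 123.0));   /* -1 */
  free(b);
  return 0;
}
```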
......@@ -73,12 +73,11 @@ typedef struct SDiskbasedResultBuf {
bool comp; // compressed before flushed to disk
int32_t nextPos; // next page flush position
const void* handle; // for debug purpose
const void* handle; // for debug purpose
SResultBufStatis statis;
} SDiskbasedResultBuf;
#define DEFAULT_INTERN_BUF_PAGE_SIZE (4096L)
#define DEFAULT_INMEM_BUF_PAGES 10
#define DEFAULT_INTERN_BUF_PAGE_SIZE (256L) // in bytes
#define PAGE_INFO_INITIALIZER (SPageDiskInfo){-1, -1}
/**
......
......@@ -65,6 +65,11 @@ typedef struct tVariantList {
tVariantListItem *a; /* One entry for each expression */
} tVariantList;
typedef struct SIntervalVal {
SStrToken interval;
SStrToken offset;
} SIntervalVal;
typedef struct SQuerySQL {
struct tSQLExprList *pSelection; // select clause
tVariantList * from; // from clause
......@@ -72,6 +77,7 @@ typedef struct SQuerySQL {
tVariantList * pGroupby; // groupby clause, only for tags[optional]
tVariantList * pSortOrder; // orderby [optional]
SStrToken interval; // interval [optional]
SStrToken offset; // offset window [optional]
SStrToken sliding; // sliding window [optional]
SLimitVal limit; // limit offset [optional]
SLimitVal slimit; // group limit offset [optional]
......@@ -259,7 +265,7 @@ tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken
void tSQLExprListDestroy(tSQLExprList *pList);
SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere,
tVariantList *pGroupby, tVariantList *pSortOrder, SStrToken *pInterval,
tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval,
SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit);
SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SStrToken *pMetricName,
......
......@@ -39,7 +39,6 @@ static FORCE_INLINE SWindowResult *getWindowResult(SWindowResInfo *pWindowResInf
}
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
#define GET_TIMEWINDOW(_winresInfo, _win) (STimeWindow) {(_win)->skey, ((_win)->skey + (_winresInfo)->interval - 1)}
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pSelectExpr[1].base.arg->argValue.i64:1)
bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot);
......@@ -50,14 +49,16 @@ static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int3
tFilePage* page) {
assert(pResult != NULL && pRuntimeEnv != NULL);
SQuery *pQuery = pRuntimeEnv->pQuery;
// tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pos.pageId);
SQuery *pQuery = pRuntimeEnv->pQuery;
int32_t realRowId = (int32_t)(pResult->pos.rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery));
return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage +
pQuery->pSelectExpr[columnIndex].bytes * realRowId;
}
bool isNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval);
bool notNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval);
__filter_func_t *getRangeFilterFuncArray(int32_t type);
__filter_func_t *getValueFilterFuncArray(int32_t type);
......
This diff is collapsed.
......@@ -108,7 +108,7 @@ extern "C" {
#define GET_FORWARD_DIRECTION_FACTOR(ord) (((ord) == TSDB_ORDER_ASC) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP)
#define MAX_INTERVAL_TIME_WINDOW 10000000
#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results
#define TOP_BOTTOM_QUERY_LIMIT 100
enum {
......@@ -177,7 +177,7 @@ typedef struct SQLFunctionCtx {
int16_t outputType;
int16_t outputBytes; // size of results, determined by function and input column data type
bool hasNull; // null value exist in current block
bool requireNull; // require null in some function
bool requireNull; // require null in some function
int16_t functionId; // function id
void * aInputElemBuf;
char * aOutputBuf; // final result output buffer, point to sdata->data
......
The remaining file diffs are collapsed in this view (32 files; one file's mode changed from 100755 to 100644).