Commit 1fb4fb28 authored by: K kailixu

Merge branch '3.0' into enh/TD-23421-M

......@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.3.2")
SET(TD_VER_NUMBER "3.0.4.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
......@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG cb1e89c
GIT_TAG e02ddb2
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
-DLINUX
-DWEBSOCKET
-I/usr/include
-Iinclude
-Iinclude/os
-Iinclude/common
-Iinclude/util
-Iinclude/libs/transport
-Itools/shell/inc
......@@ -27,7 +27,7 @@ The following data types can be used in the schema for standard tables.
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
| 2 | ALTER ALL DNODES | Added | Modifies the configuration of all dnodes.
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. STRICT is now used to specify strong or weak consistency. The STRICT parameter cannot be modified. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strong consistency by default and does not allow switching to weak consistency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
| 4 | ALTER STABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a supertable. </li></ul>
| 5 | ALTER TABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a standard table. </li><li>TTL: Specifies the time-to-live for a standard table. </li></ul>
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>
......
......@@ -459,6 +459,56 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
</TabItem>
</Tabs>
### Schemaless Insert
The connector supports schemaless insert.
<Tabs defaultValue="list">
<TabItem value="list" label="List Insert">
Simple insert
```python
{{#include docs/examples/python/schemaless_insert.py}}
```
Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_ttl.py}}
```
Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
```
</TabItem>
<TabItem value="raw" label="Raw Insert">
Simple insert
```python
{{#include docs/examples/python/schemaless_insert_raw.py}}
```
Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
```
Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
```
</TabItem>
</Tabs>
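For quick reference, the sketch below combines the two optional keyword arguments shown above in a single call. It assumes that `ttl` and `req_id` can be passed together (as the raw examples do) and uses the illustrative database name `sml_demo`:

```python
import taos

# Minimal sketch (assumption: ttl and req_id may be combined in one call).
# The database name sml_demo is illustrative.
conn = taos.connect()
conn.execute("create database if not exists sml_demo precision 'us'")
conn.select_db("sml_demo")
lines = [
    'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
conn.schemaless_insert(
    lines,
    taos.SmlProtocol.LINE_PROTOCOL,
    taos.SmlPrecision.NOT_CONFIGURED,
    ttl=1000,  # time-to-live applied to auto-created tables
    req_id=1,  # request id used to trace this call
)
print("inserted")
conn.close()
```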
### Other sample programs
| Example program links | Example program content |
......
label: "connector"
\ No newline at end of file
label: "Connector"
......@@ -628,6 +628,16 @@ The charset that takes effect is UTF-8.
| Default Value | 1 |
| Note | The core file is generated under the root directory when `systemctl start taosd`/`launchctl start com.tdengine.taosd` is used to start taosd, or under the working directory if `taosd` is started directly from the Linux/macOS shell. |
### enableScience
| Attribute | Description |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| Applicable | TDengine CLI (taos) only |
| Meaning | Whether to display float and double values in scientific notation |
| Value Range | 0: false, 1: true |
| Default Value | 0 |
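For illustration, a client-side configuration fragment that enables this behavior might look like the following (an assumption: the parameter is set in the client's `taos.cfg` using the usual `name value` form):

```
# taos.cfg on the client (illustrative): print float/double values in scientific notation
enableScience 1
```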
### udf
| Attribute | Description |
......
......@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
## 3.0.4.0
<Release type="tdengine" version="3.0.4.0" />
## 3.0.3.2
<Release type="tdengine" version="3.0.3.2" />
......
......@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
## 2.4.12
<Release type="tools" version="2.4.12" />
## 2.4.11
<Release type="tools" version="2.4.11" />
......
import taos
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError, InterfaceError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
res = conn.schemaless_insert_raw(lines, 1, 0)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
res = conn.schemaless_insert_raw(lines, 1, 0)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
res = conn.schemaless_insert_raw(lines, 1, 0)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except SchemalessError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError, InterfaceError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
ttl = 1000
req_id = utils.gen_req_id()
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl, req_id=req_id)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err
import taos
from taos import utils
from taos import TaosConnection
from taos.cinterface import *
from taos.error import OperationalError, SchemalessError, InterfaceError
conn = taos.connect()
dbname = "taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000
st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000
stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print("affected rows: ", res)
assert (res == 3)
lines = '''stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000'''
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print("affected rows: ", res)
assert (res == 1)
result = conn.query("select * from st")
dict2 = result.fetch_all_into_dict()
print(dict2)
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
assert (result.row_count == 2)
# error test
lines = ''',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000'''
try:
ttl = 1000
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=ttl)
print(res)
# assert(False)
except SchemalessError as err:
print('**** error: ', err)
# assert (err.msg == 'Invalid data format')
result = conn.query("select * from st")
print(result.row_count)
all = result.rows_iter()
for row in all:
print(row)
result.close()
conn.execute("drop database if exists %s" % dbname)
conn.close()
except InterfaceError as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
except Exception as err:
conn.execute("drop database if exists %s" % dbname)
conn.close()
print(err)
raise err
import taos
from taos import SmlProtocol, SmlPrecision
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, req_id=1)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, req_id=2)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)
import taos
from taos import SmlProtocol, SmlPrecision
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, ttl=1000)
print("inserted")
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED, ttl=1000)
result = conn.query("show tables")
for row in result:
print(row)
conn.execute("drop database if exists %s" % dbname)
......@@ -484,6 +484,56 @@ The TaosCursor class uses the native connection for write and query operations. When multiple threads are used in the client
</TabItem>
</Tabs>
### Schemaless Insert
The connector supports schemaless insert.
<Tabs defaultValue="list">
<TabItem value="list" label="List Insert">
Simple insert
```python
{{#include docs/examples/python/schemaless_insert.py}}
```
Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_ttl.py}}
```
Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_req_id.py}}
```
</TabItem>
<TabItem value="raw" label="Raw 写入">
简单写入
```python
{{#include docs/examples/python/schemaless_insert_raw.py}}
```
Insert with ttl argument
```python
{{#include docs/examples/python/schemaless_insert_raw_ttl.py}}
```
Insert with req_id argument
```python
{{#include docs/examples/python/schemaless_insert_raw_req_id.py}}
```
</TabItem>
</Tabs>
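For quick reference, a minimal sketch of the raw form that passes both optional arguments in one call, mirroring the included raw examples above (the database name `sml_raw_demo` is illustrative):

```python
import taos
from taos import utils

# Minimal sketch mirroring the raw examples above; sml_raw_demo is an illustrative name.
conn = taos.connect()
conn.execute("create database if not exists sml_raw_demo precision 'ns'")
conn.select_db("sml_raw_demo")
lines = '''st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000'''
# Arguments as in the examples above: 1 = line protocol, 0 = precision not configured.
res = conn.schemaless_insert_raw(lines, 1, 0, ttl=1000, req_id=utils.gen_req_id())
print("affected rows:", res)
conn.close()
```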
### Other sample programs
| Example program links | Example program content |
......
......@@ -27,7 +27,7 @@ description: "Description of syntax changes in TDengine 3.0"
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | Deprecated | This was an Enterprise Edition feature in 2.x and is no longer supported in 3.0. The syntax is retained for now; executing it returns the error "This statement is no longer supported".
| 2 | ALTER ALL DNODES | Added | Modifies the parameters of all dnodes.
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: The number of replica confirmations required for a write. In 3.0, STRICT specifies strong or weak consistency; STRICT cannot be modified in 3.0.0. </li><li>BLOCKS: The number of memory blocks used by each vnode. In 3.0, BUFFER specifies the size of the write cache pool of each vnode. </li><li>UPDATE: The supported mode of update operations. In 3.0, all databases support updating data in certain columns. </li><li>CACHELAST: The mode for caching the newest row of data. Replaced by CACHEMODEL in 3.0. </li><li>COMP: Cannot be modified in 3.0. </li><br/>Added<li>CACHEMODEL: Specifies whether to cache the latest subtable data in memory. </li><li>CACHESIZE: Specifies the memory size used to cache the latest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: Added in 3.0.4.0. Additional retention policy for WAL files, used for data subscription. </li><li>WAL_RETENTION_SIZE: Added in 3.0.4.0. Additional retention policy for WAL files, used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified in 3.0.0. </li><li>KEEP: Now supports settings with units. </li></ul>
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: The number of replica confirmations required for a write. TDengine 3.0 provides strong consistency by default and does not allow switching to weak consistency. </li><li>BLOCKS: The number of memory blocks used by each vnode. In 3.0, BUFFER specifies the size of the write cache pool of each vnode. </li><li>UPDATE: The supported mode of update operations. In 3.0, all databases support updating data in certain columns. </li><li>CACHELAST: The mode for caching the newest row of data. Replaced by CACHEMODEL in 3.0. </li><li>COMP: Cannot be modified in 3.0. </li><br/>Added<li>CACHEMODEL: Specifies whether to cache the latest subtable data in memory. </li><li>CACHESIZE: Specifies the memory size used to cache the latest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: Added in 3.0.4.0. Additional retention policy for WAL files, used for data subscription. </li><li>WAL_RETENTION_SIZE: Added in 3.0.4.0. Additional retention policy for WAL files, used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified in 3.0.0. </li><li>KEEP: Now supports settings with units. </li></ul>
| 4 | ALTER STABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag column. Replaced by RENAME TAG in 3.0. <br/>Added</li><li>RENAME TAG: Replaces the CHANGE TAG clause. </li><li>COMMENT: Modifies the comment of a supertable. </li></ul>
| 5 | ALTER TABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag column. Replaced by RENAME TAG in 3.0. <br/>Added</li><li>RENAME TAG: Replaces the CHANGE TAG clause. </li><li>COMMENT: Modifies the comment of a table. </li><li>TTL: Modifies the time-to-live of a table. </li></ul>
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Modified user permissions. In 3.0, GRANT and REVOKE are used to grant and revoke permissions. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can view system information. </li></ul>
......
......@@ -626,6 +626,15 @@ The charset that takes effect is UTF-8.
| Default Value | 1 |
| Note | The directory where the core file is generated depends on how taosd is started: 1. Started with systemctl start taosd: the core file is generated under the root directory. <br/> 2. Started manually: the core file is generated in the directory where taosd is executed. |
### enableScience
| Attribute | Description |
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| Applicable | TDengine CLI (taos) only |
| Meaning | Whether to display floating-point numbers in scientific notation |
| Value Range | 0: no, 1: yes |
| Default Value | 0 |
### udf
| Attribute | Description |
......
......@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.0.4.0
<Release type="tdengine" version="3.0.4.0" />
## 3.0.3.2
<Release type="tdengine" version="3.0.3.2" />
......
......@@ -10,6 +10,10 @@ Download links for taosTools installation packages of each version are as follows:
import Release from "/components/ReleaseV3";
## 2.4.12
<Release type="tools" version="2.4.12" />
## 2.4.11
<Release type="tools" version="2.4.11" />
......
......@@ -12,7 +12,7 @@ ENV TINI_VERSION v0.19.0
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
ENV DEBIAN_FRONTEND=noninteractive
WORKDIR /root/
RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini
RUN tar -zxf ${pkgFile} && cd /root/${dirName}/ && /bin/bash install.sh -e no && cd /root && rm /root/${pkgFile} && rm -rf /root/${dirName} && apt-get update && apt-get install -y locales tzdata netcat curl gdb vim tmux less net-tools valgrind && locale-gen en_US.UTF-8 && apt-get clean && rm -rf /var/lib/apt/lists/ && chmod +x /tini
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib" \
LC_CTYPE=en_US.UTF-8 \
......
......@@ -575,11 +575,12 @@ function install_config() {
function install_share_etc() {
[ ! -d ${script_dir}/share/etc ] && return
for c in `ls ${script_dir}/share/etc/`; do
if [ -e /etc/$c ]; then
out=/etc/$c.new.`date +%F`
if [ -e /etc/${clientName2}/$c ]; then
out=/etc/${clientName2}/$c.new.`date +%F`
${csudo}cp -f ${script_dir}/share/etc/$c $out ||:
else
${csudo}cp -f ${script_dir}/share/etc/$c /etc/$c ||:
${csudo}mkdir -p /etc/${clientName2} >/dev/null 2>/dev/null ||:
${csudo}cp -f ${script_dir}/share/etc/$c /etc/${clientName2}/$c ||:
fi
done
......
TDengine is an open-source, cloud-native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. With its built-in caching, stream processing, and data subscription capabilities, TDengine offers a simplified solution for time-series data processing.
To configure TDengine : edit /etc/taos/taos.cfg
To start service : launchctl start com.tdengine.taosd
To start Taos Adapter : launchctl start com.tdengine.taosadapter
To access TDengine : use taos in shell
• To configure TDengine, edit /etc/taos/taos.cfg
• To start service, run launchctl start com.tdengine.taosd
• To start Taos Adapter, run launchctl start com.tdengine.taosadapter
• To access TDengine from your local machine, run taos
If you're experiencing problems installing TDengine, check the file /var/log/taos/tdengine_install.log to help troubleshoot the installation.
TDengine is an open-source, cloud-native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. With its built-in caching, stream processing, and data subscription capabilities, TDengine offers a simplified solution for time-series data processing.
Once it's installed, please take the steps below:
1: open a terminal/shell in Mac
2: if connecting to Cloud Service, follow the instructions on your cloud service account and configure the environment variable
3: if connecting to another TDengine Service, you can also view help information via "taos --help"
4: execute command taos
After the installation process is complete, perform the following steps to start using TDengine:
1: Open Terminal on your Mac.
2: To connect to a TDengine server using the default settings and credentials, run the taos command.
3: To connect to a TDengine server using custom settings or credentials, run taos --help for more information.
4: To connect to TDengine Cloud, follow the instructions on the Tools - TDengine CLI page in your TDengine Cloud account.
If you're experiencing problems installing TDengine, check the file /var/log/taos/tdengine_install.log to help troubleshoot the installation.
If any issues occur during installation, check the /var/log/taos/tdengine_install.log file to troubleshoot.
\ No newline at end of file
......@@ -150,7 +150,7 @@ fi
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
mkdir -p ${install_dir}/share && cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||:
# mkdir -p ${install_dir}/share && cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||:
if [ $adapterName != "taosadapter" ]; then
mv ${install_dir}/cfg/${clientName2}adapter.toml ${install_dir}/cfg/$adapterName.toml
......@@ -322,6 +322,7 @@ if [[ $dbName == "taos" ]]; then
mkdir -p ${install_dir}/share/
cp -Rfap ${web_dir}/admin ${install_dir}/share/
cp ${web_dir}/png/taos.png ${install_dir}/share/admin/images/taos.png
cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||:
else
echo "directory not found for enterprise release: ${web_dir}/admin"
fi
......
......@@ -163,8 +163,8 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
if (pVnode == NULL) {
dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
terrstr(), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
dGWarn("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
terrstr(), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
terrno = (terrno != 0) ? terrno : -1;
return terrno;
}
......
......@@ -5,6 +5,7 @@ ENDIF ()
IF (TD_ENTERPRISE)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/privilege/src/privilege.c)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndDb.c)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndVgroup.c)
ENDIF ()
add_library(mnode STATIC ${MNODE_SRC})
......
......@@ -67,7 +67,7 @@ int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len) {
return 0;
}
static SClusterObj *mndAcquireCluster(SMnode *pMnode) {
static SClusterObj *mndAcquireCluster(SMnode *pMnode, void **ppIter) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
......@@ -76,23 +76,27 @@ static SClusterObj *mndAcquireCluster(SMnode *pMnode) {
pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster);
if (pIter == NULL) break;
*ppIter = pIter;
return pCluster;
}
return NULL;
}
static void mndReleaseCluster(SMnode *pMnode, SClusterObj *pCluster) {
static void mndReleaseCluster(SMnode *pMnode, SClusterObj *pCluster, void *pIter) {
SSdb *pSdb = pMnode->pSdb;
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pCluster);
}
int64_t mndGetClusterId(SMnode *pMnode) {
int64_t clusterId = 0;
SClusterObj *pCluster = mndAcquireCluster(pMnode);
void *pIter = NULL;
SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter);
if (pCluster != NULL) {
clusterId = pCluster->id;
mndReleaseCluster(pMnode, pCluster);
mndReleaseCluster(pMnode, pCluster, pIter);
}
return clusterId;
......@@ -100,10 +104,11 @@ int64_t mndGetClusterId(SMnode *pMnode) {
int64_t mndGetClusterCreateTime(SMnode *pMnode) {
int64_t createTime = 0;
SClusterObj *pCluster = mndAcquireCluster(pMnode);
void *pIter = NULL;
SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter);
if (pCluster != NULL) {
createTime = pCluster->createdTime;
mndReleaseCluster(pMnode, pCluster);
mndReleaseCluster(pMnode, pCluster, pIter);
}
return createTime;
......@@ -121,10 +126,11 @@ static int32_t mndGetClusterUpTimeImp(SClusterObj *pCluster) {
float mndGetClusterUpTime(SMnode *pMnode) {
int64_t upTime = 0;
SClusterObj *pCluster = mndAcquireCluster(pMnode);
void *pIter = NULL;
SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter);
if (pCluster != NULL) {
upTime = mndGetClusterUpTimeImp(pCluster);
mndReleaseCluster(pMnode, pCluster);
mndReleaseCluster(pMnode, pCluster, pIter);
}
return upTime / 86400.0f;
......@@ -321,11 +327,12 @@ static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter) {
static int32_t mndProcessUptimeTimer(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SClusterObj clusterObj = {0};
SClusterObj *pCluster = mndAcquireCluster(pMnode);
void *pIter = NULL;
SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter);
if (pCluster != NULL) {
memcpy(&clusterObj, pCluster, sizeof(SClusterObj));
clusterObj.upTime += tsUptimeInterval;
mndReleaseCluster(pMnode, pCluster);
mndReleaseCluster(pMnode, pCluster, pIter);
}
if (clusterObj.id <= 0) {
......
......@@ -1177,15 +1177,13 @@ static void mndLoopHash(SHashObj *hash, char *priType, SSDataBlock *pBlock, int3
sprintf(sql, "error");
}
// char *obj = taosMemoryMalloc(sqlLen + VARSTR_HEADER_SIZE + 1);
char obj[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(obj, sql, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, *numOfRows, (const char *)obj, false);
// taosMemoryFree(obj);
} else {
char condition[20] = {0};
char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, *numOfRows, (const char *)condition, false);
......@@ -1266,12 +1264,12 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)objName, false);
char tableName[20] = {0};
char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(tableName, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false);
char condition[20] = {0};
char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)condition, false);
......@@ -1301,12 +1299,12 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)objName, false);
char tableName[20] = {0};
char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(tableName, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false);
char condition[20] = {0};
char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)condition, false);
......@@ -1338,12 +1336,12 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)topicName, false);
char tableName[20] = {0};
char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(tableName, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)tableName, false);
char condition[20] = {0};
char condition[TSDB_PRIVILEDGE_CONDITION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(condition, "", pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)condition, false);
......
......@@ -1891,56 +1891,17 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
return 0;
}
int32_t mndProcessVgroupBalanceLeaderMsg(SRpcMsg *pReq) {
int32_t code = -1;
SBalanceVgroupLeaderReq req = {0};
if (tDeserializeSBalanceVgroupLeaderReq(pReq->pCont, pReq->contLen, &req) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
return code;
}
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
int32_t total = sdbGetSize(pSdb, SDB_VGROUP);
if(total <= 0) {
terrno = TSDB_CODE_TSC_INVALID_OPERATION;
return code;
}
STrans *pTrans = NULL;
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, "bal-vg-leader");
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to balance vgroup leader", pTrans->id);
void *pIter = NULL;
int32_t count = 0;
while (1) {
SVgObj *pVgroup = NULL;
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
if(mndAddVgroupBalanceToTrans(pMnode, pVgroup, pTrans) == 0){
count++;
}
extern int32_t mndProcessVgroupBalanceLeaderMsgImp(SRpcMsg *pReq);
sdbRelease(pSdb, pVgroup);
}
if(count == 0) {
terrno = TSDB_CODE_TSC_INVALID_OPERATION;
goto _OVER;
}
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
code = 0;
int32_t mndProcessVgroupBalanceLeaderMsg(SRpcMsg *pReq) {
return mndProcessVgroupBalanceLeaderMsgImp(pReq);
}
_OVER:
mndTransDrop(pTrans);
return code;
#ifndef TD_ENTERPRISE
int32_t mndProcessVgroupBalanceLeaderMsgImp(SRpcMsg *pReq) {
return 0;
}
#endif
static int32_t mndCheckDnodeMemory(SMnode *pMnode, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pOldVgroup,
SVgObj *pNewVgroup, SArray *pArray) {
......
......@@ -35,7 +35,11 @@ _err:
static void tsdbCloseBICache(STsdb *pTsdb) {
SLRUCache *pCache = pTsdb->biCache;
if (pCache) {
int32_t elems = taosLRUCacheGetElems(pCache);
tsdbTrace("vgId:%d, elems: %d", TD_VID(pTsdb->pVnode), elems);
taosLRUCacheEraseUnrefEntries(pCache);
elems = taosLRUCacheGetElems(pCache);
tsdbTrace("vgId:%d, elems: %d", TD_VID(pTsdb->pVnode), elems);
taosLRUCacheCleanup(pCache);
......@@ -820,7 +824,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
* &state->blockIdx);
*/
state->pBlockIdx = taosArraySearch(state->aBlockIdx, state->pBlockIdxExp, tCmprBlockIdx, TD_EQ);
if (!state->pBlockIdx) { /*
if (!state->pBlockIdx) {
tsdbBICacheRelease(state->pTsdb->biCache, state->aBlockIdxHandle);
state->aBlockIdxHandle = NULL;
state->aBlockIdx = NULL;
/*
tsdbDataFReaderClose(state->pDataFReader);
*state->pDataFReader = NULL;
resetLastBlockLoadInfo(state->pLoadInfo);*/
......@@ -1469,11 +1478,14 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, SArray **ppCo
hasRow = true;
code = updateTSchema(TSDBROW_SVERSION(pRow), pr, uid);
if (TSDB_CODE_SUCCESS != code) {
goto _err;
int32_t sversion = TSDBROW_SVERSION(pRow);
if (sversion != -1) {
code = updateTSchema(sversion, pr, uid);
if (TSDB_CODE_SUCCESS != code) {
goto _err;
}
pTSchema = pr->pCurrSchema;
}
pTSchema = pr->pCurrSchema;
int16_t nCol = pTSchema->numOfCols;
TSKEY rowTs = TSDBROW_TS(pRow);
......@@ -1623,11 +1635,14 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray, SCach
hasRow = true;
code = updateTSchema(TSDBROW_SVERSION(pRow), pr, uid);
if (TSDB_CODE_SUCCESS != code) {
goto _err;
int32_t sversion = TSDBROW_SVERSION(pRow);
if (sversion != -1) {
code = updateTSchema(sversion, pr, uid);
if (TSDB_CODE_SUCCESS != code) {
goto _err;
}
pTSchema = pr->pCurrSchema;
}
pTSchema = pr->pCurrSchema;
int16_t nCol = pTSchema->numOfCols;
TSKEY rowTs = TSDBROW_TS(pRow);
......@@ -1931,6 +1946,7 @@ int32_t tsdbCacheGetBlockIdx(SLRUCache *pCache, SDataFReader *pFileReader, LRUHa
taosThreadMutexUnlock(&pTsdb->biMutex);
}
tsdbTrace("bi cache:%p, ref", pCache);
*handle = h;
return code;
......@@ -1940,6 +1956,7 @@ int32_t tsdbBICacheRelease(SLRUCache *pCache, LRUHandle *h) {
int32_t code = 0;
taosLRUCacheRelease(pCache, h, false);
tsdbTrace("bi cache:%p, release", pCache);
return code;
}
......@@ -2320,7 +2320,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
tsdbRowMergerAdd(&merge, pRow, pSchema);
} else {
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
// STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
code = tsdbRowMergerInit(&merge, NULL, pRow, pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
......@@ -2352,7 +2352,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
tsdbRowMergerAdd(&merge, piRow, piSchema);
} else {
init = true;
STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
// STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
code = tsdbRowMergerInit(&merge, pSchema, piRow, piSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
......
......@@ -33,7 +33,7 @@
int32_t scanDebug = 0;
#define MULTI_READER_MAX_TABLE_NUM 5000
#define MULTI_READER_MAX_TABLE_NUM 5000
#define SET_REVERSE_SCAN_FLAG(_info) ((_info)->scanFlag = REVERSE_SCAN)
#define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC))
......@@ -330,7 +330,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64 ", uid:%" PRIu64,
GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows,
pBlockInfo->id.uid);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, 1);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows);
pCost->skipBlocks += 1;
tsdbReleaseDataBlock(pTableScanInfo->dataReader);
return TSDB_CODE_SUCCESS;
......@@ -341,7 +341,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead
qDebug("%s data block SMA loaded, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64, GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, 1);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows);
tsdbReleaseDataBlock(pTableScanInfo->dataReader);
return TSDB_CODE_SUCCESS;
} else {
......@@ -708,9 +708,9 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
// todo refactor
/*pTableScanInfo->lastStatus.uid = pBlock->info.id.uid;*/
/*pTableScanInfo->lastStatus.ts = pBlock->info.window.ekey;*/
// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA;
// pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.id.uid;
// pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey;
// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA;
// pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.id.uid;
// pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey;
return pBlock;
}
......@@ -900,7 +900,7 @@ static void destroyTableScanOperatorInfo(void* param) {
SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) {
int32_t code = 0;
int32_t code = 0;
STableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
......@@ -912,7 +912,8 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
SDataBlockDescNode* pDescNode = pScanNode->node.pOutputDataBlockDesc;
int32_t numOfCols = 0;
code = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->base.matchInfo);
code =
extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->base.matchInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
......@@ -1668,8 +1669,9 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
if (pTaskInfo->streamInfo.currentOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
if (pResult && pResult->info.rows > 0) {
qDebug("queue scan tsdb return %"PRId64" rows min:%" PRId64 " max:%" PRId64 " wal curVersion:%" PRId64, pResult->info.rows,
pResult->info.window.skey, pResult->info.window.ekey, pInfo->tqReader->pWalReader->curVersion);
qDebug("queue scan tsdb return %" PRId64 " rows min:%" PRId64 " max:%" PRId64 " wal curVersion:%" PRId64,
pResult->info.rows, pResult->info.window.skey, pResult->info.window.ekey,
pInfo->tqReader->pWalReader->curVersion);
tqOffsetResetToData(&pTaskInfo->streamInfo.currentOffset, pResult->info.id.uid, pResult->info.window.ekey);
return pResult;
}
......@@ -1687,17 +1689,21 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
while (1) {
SFetchRet ret = {0};
tqNextBlock(pInfo->tqReader, &ret);
tqOffsetResetToLog(&pTaskInfo->streamInfo.currentOffset, pInfo->tqReader->pWalReader->curVersion - 1); //curVersion move to next, so currentOffset = curVersion - 1
tqOffsetResetToLog(
&pTaskInfo->streamInfo.currentOffset,
pInfo->tqReader->pWalReader->curVersion - 1); // curVersion move to next, so currentOffset = curVersion - 1
if (ret.fetchType == FETCH_TYPE__DATA) {
qDebug("doQueueScan get data from log %"PRId64" rows, version:%" PRId64, ret.data.info.rows, pTaskInfo->streamInfo.currentOffset.version);
qDebug("doQueueScan get data from log %" PRId64 " rows, version:%" PRId64, ret.data.info.rows,
pTaskInfo->streamInfo.currentOffset.version);
blockDataCleanup(pInfo->pRes);
setBlockIntoRes(pInfo, &ret.data, true);
if (pInfo->pRes->info.rows > 0) {
qDebug("doQueueScan get data from log %"PRId64" rows, return, version:%" PRId64, pInfo->pRes->info.rows, pTaskInfo->streamInfo.currentOffset.version);
qDebug("doQueueScan get data from log %" PRId64 " rows, return, version:%" PRId64, pInfo->pRes->info.rows,
pTaskInfo->streamInfo.currentOffset.version);
return pInfo->pRes;
}
}else if(ret.fetchType == FETCH_TYPE__NONE){
} else if (ret.fetchType == FETCH_TYPE__NONE) {
qDebug("doQueueScan get none from log, return, version:%" PRId64, pTaskInfo->streamInfo.currentOffset.version);
return NULL;
}
......@@ -2176,7 +2182,7 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
}
SMetaTableInfo mtInfo = getUidfromSnapShot(pInfo->sContext);
STqOffsetVal offset = {0};
STqOffsetVal offset = {0};
if (mtInfo.uid == 0) { // read snapshot done, change to get data from wal
qDebug("tmqsnap read snapshot done, change to get data from wal");
tqOffsetResetToLog(&offset, pInfo->sContext->snapVersion);
......
......@@ -3279,7 +3279,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_irowts",
.type = FUNCTION_TYPE_IROWTS,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_INTERP_PC_FUNC|FUNC_MGT_KEEP_ORDER_FUNC,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_INTERP_PC_FUNC | FUNC_MGT_KEEP_ORDER_FUNC,
.translateFunc = translateTimePseudoColumn,
.getEnvFunc = getTimePseudoFuncEnv,
.initFunc = NULL,
......
......@@ -24,7 +24,7 @@ DLL_EXPORT int32_t udf1(SUdfDataBlock *block, SUdfColumn *resultCol) {
}
}
if (j == block->numOfCols) {
int32_t luckyNum = 88;
int32_t luckyNum = 1;
udfColDataSet(resultCol, i, (char *)&luckyNum, false);
}
}
......
......@@ -645,6 +645,10 @@ static bool isSelectStmt(SNode* pCurrStmt) {
return NULL != pCurrStmt && QUERY_NODE_SELECT_STMT == nodeType(pCurrStmt);
}
static bool isDeleteStmt(SNode* pCurrStmt) {
return NULL != pCurrStmt && QUERY_NODE_DELETE_STMT == nodeType(pCurrStmt);
}
static bool isSetOperator(SNode* pCurrStmt) {
return NULL != pCurrStmt && QUERY_NODE_SET_OPERATOR == nodeType(pCurrStmt);
}
......@@ -669,6 +673,9 @@ static uint8_t getPrecisionFromCurrStmt(SNode* pCurrStmt, uint8_t defaultVal) {
if (NULL != pCurrStmt && QUERY_NODE_CREATE_STREAM_STMT == nodeType(pCurrStmt)) {
return getPrecisionFromCurrStmt(((SCreateStreamStmt*)pCurrStmt)->pQuery, defaultVal);
}
if (isDeleteStmt(pCurrStmt)) {
return ((SDeleteStmt*)pCurrStmt)->precision;
}
return defaultVal;
}
......@@ -688,6 +695,10 @@ static bool isWindowPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsWindowPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
}
static bool isInterpPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
}
static bool isTimelineFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsTimelineFunc(((SFunctionNode*)pNode)->funcId));
}
......@@ -1235,6 +1246,10 @@ static int32_t calcTypeBytes(SDataType dt) {
}
static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal) {
if (pVal->translate) {
return TSDB_CODE_SUCCESS;
}
SDataType dt = pVal->node.resType;
dt.bytes = calcTypeBytes(dt);
return translateValueImpl(pCxt, pVal, dt, false);
......@@ -1295,7 +1310,7 @@ static EDealRes translateOperator(STranslateContext* pCxt, SOperatorNode* pOp) {
}
static EDealRes haveVectorFunction(SNode* pNode, void* pContext) {
if (isAggFunc(pNode) || isIndefiniteRowsFunc(pNode) || isWindowPseudoColumnFunc(pNode)) {
if (isAggFunc(pNode) || isIndefiniteRowsFunc(pNode) || isWindowPseudoColumnFunc(pNode) || isInterpPseudoColumnFunc(pNode)) {
*((bool*)pContext) = true;
return DEAL_RES_END;
}
......@@ -1522,6 +1537,21 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc
return TSDB_CODE_SUCCESS;
}
static int32_t translateInterpPseudoColumnFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
if (!fmIsInterpPseudoColumnFunc(pFunc->funcId)) {
return TSDB_CODE_SUCCESS;
}
if (!isSelectStmt(pCxt->pCurrStmt)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
"%s must be used in select statements", pFunc->functionName);
}
if (pCxt->currClause == SQL_CLAUSE_WHERE) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE,
"%s is not allowed in where clause", pFunc->functionName);
}
return TSDB_CODE_SUCCESS;
}
static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
if (!fmIsTimelineFunc(pFunc->funcId)) {
return TSDB_CODE_SUCCESS;
......@@ -1692,7 +1722,8 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) {
pSelect->hasUniqueFunc = pSelect->hasUniqueFunc ? true : (FUNCTION_TYPE_UNIQUE == pFunc->funcType);
pSelect->hasTailFunc = pSelect->hasTailFunc ? true : (FUNCTION_TYPE_TAIL == pFunc->funcType);
pSelect->hasInterpFunc = pSelect->hasInterpFunc ? true : (FUNCTION_TYPE_INTERP == pFunc->funcType);
pSelect->hasInterpPseudoColFunc = pSelect->hasInterpPseudoColFunc ? true : fmIsInterpPseudoColumnFunc(pFunc->funcId);
pSelect->hasInterpPseudoColFunc =
pSelect->hasInterpPseudoColFunc ? true : fmIsInterpPseudoColumnFunc(pFunc->funcId);
pSelect->hasLastRowFunc = pSelect->hasLastRowFunc ? true : (FUNCTION_TYPE_LAST_ROW == pFunc->funcType);
pSelect->hasLastFunc = pSelect->hasLastFunc ? true : (FUNCTION_TYPE_LAST == pFunc->funcType);
pSelect->hasTimeLineFunc = pSelect->hasTimeLineFunc ? true : fmIsTimelineFunc(pFunc->funcId);
......@@ -1819,6 +1850,9 @@ static int32_t translateNormalFunction(STranslateContext* pCxt, SFunctionNode* p
if (TSDB_CODE_SUCCESS == code) {
code = translateInterpFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = translateInterpPseudoColumnFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = translateTimelineFunc(pCxt, pFunc);
}
......@@ -3372,7 +3406,8 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE);
}
if (pSelect->hasInterpPseudoColFunc) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "Has Interp pseudo column(s) but missing interp function");
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
"Has Interp pseudo column(s) but missing interp function");
}
return TSDB_CODE_SUCCESS;
}
......@@ -3741,6 +3776,7 @@ static int32_t translateDelete(STranslateContext* pCxt, SDeleteStmt* pDelete) {
pCxt->pCurrStmt = (SNode*)pDelete;
int32_t code = translateFrom(pCxt, pDelete->pFromTable);
if (TSDB_CODE_SUCCESS == code) {
pDelete->precision = ((STableNode*)pDelete->pFromTable)->precision;
code = translateDeleteWhere(pCxt, pDelete);
}
pCxt->currClause = SQL_CLAUSE_SELECT;
......
......@@ -77,18 +77,19 @@ static FORCE_INLINE int32_t syncLogReplGetNextRetryBackoff(SSyncLogReplMgr* pMgr
SyncTerm syncLogReplGetPrevLogTerm(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index);
int32_t syncLogReplReplicateOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode);
int32_t syncLogReplReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm,
SRaftId* pDestId, bool* pBarrier);
int32_t syncLogReplReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode);
int32_t syncLogReplReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index);
int32_t syncLogReplDoOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode);
int32_t syncLogReplAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode);
int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index);
int32_t syncLogReplRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode);
int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm, SRaftId* pDestId,
bool* pBarrier);
int32_t syncLogReplProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg);
int32_t syncLogReplProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg);
int32_t syncLogReplProcessReplyAsNormal(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg);
int32_t syncLogReplProcessHeartbeatReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncHeartbeatReply* pMsg);
int32_t syncLogReplRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode);
// SSyncLogBuffer
SSyncLogBuffer* syncLogBufferCreate();
......@@ -100,6 +101,7 @@ int32_t syncLogBufferReInit(SSyncLogBuffer* pBuf, SSyncNode* pNode);
int64_t syncLogBufferGetEndIndex(SSyncLogBuffer* pBuf);
SyncTerm syncLogBufferGetLastMatchTerm(SSyncLogBuffer* pBuf);
bool syncLogBufferIsEmpty(SSyncLogBuffer* pBuf);
int32_t syncLogBufferAppend(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEntry* pEntry);
int32_t syncLogBufferAccept(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEntry* pEntry, SyncTerm prevTerm);
int64_t syncLogBufferProceed(SSyncLogBuffer* pBuf, SSyncNode* pNode, SyncTerm* pMatchTerm);
......
......@@ -45,7 +45,7 @@ SSyncRaftEntry* syncEntryBuildNoop(SyncTerm term, SyncIndex index, int32_t vgId)
void syncEntryDestroy(SSyncRaftEntry* pEntry);
void syncEntry2OriginalRpc(const SSyncRaftEntry* pEntry, SRpcMsg* pRpcMsg); // step 7
static FORCE_INLINE bool syncLogIsReplicationBarrier(SSyncRaftEntry* pEntry) {
static FORCE_INLINE bool syncLogReplBarrier(SSyncRaftEntry* pEntry) {
return pEntry->originalRpcType == TDMT_SYNC_NOOP;
}
......
......@@ -617,7 +617,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_
sNTrace(pSyncNode, "propose msg, type:%s", TMSG_INFO(pMsg->msgType));
code = (*pSyncNode->syncEqMsg)(pSyncNode->msgcb, &rpcMsg);
if (code != 0) {
sError("vgId:%d, failed to propose msg while enqueue since %s", pSyncNode->vgId, terrstr());
sWarn("vgId:%d, failed to propose msg while enqueue since %s", pSyncNode->vgId, terrstr());
(void)syncRespMgrDel(pSyncNode->pSyncRespMgr, seqNum);
}
......
......@@ -633,7 +633,7 @@ int32_t syncLogReplRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
SRaftId* pDestId = &pNode->replicasId[pMgr->peerId];
if (pMgr->retryBackoff == SYNC_MAX_RETRY_BACKOFF) {
syncLogReplReset(pMgr);
sWarn("vgId:%d, reset sync log repl mgr since retry backoff exceeding limit. peer:%" PRIx64, pNode->vgId,
sWarn("vgId:%d, reset sync log repl since retry backoff exceeding limit. peer:%" PRIx64, pNode->vgId,
pDestId->addr);
return -1;
}
......@@ -658,15 +658,15 @@ int32_t syncLogReplRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
if (pMgr->states[pos].acked) {
if (pMgr->matchIndex < index && pMgr->states[pos].timeMs + (syncGetRetryMaxWaitMs() << 3) < nowMs) {
syncLogReplReset(pMgr);
sWarn("vgId:%d, reset sync log repl mgr since stagnation. index:%" PRId64 ", peer:%" PRIx64, pNode->vgId,
index, pDestId->addr);
sWarn("vgId:%d, reset sync log repl since stagnation. index:%" PRId64 ", peer:%" PRIx64, pNode->vgId, index,
pDestId->addr);
goto _out;
}
continue;
}
bool barrier = false;
if (syncLogReplReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
if (syncLogReplSendTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
sError("vgId:%d, failed to replicate sync log entry since %s. index:%" PRId64 ", dest:%" PRIx64 "", pNode->vgId,
terrstr(), index, pDestId->addr);
goto _out;
......@@ -708,7 +708,7 @@ int32_t syncLogReplProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNod
ASSERT(pMgr->matchIndex == 0);
if (pMsg->matchIndex < 0) {
pMgr->restored = true;
sInfo("vgId:%d, sync log repl mgr restored. peer: dnode:%d (%" PRIx64 "), mgr: rs(%d) [%" PRId64 " %" PRId64
sInfo("vgId:%d, sync log repl restored. peer: dnode:%d (%" PRIx64 "), mgr: rs(%d) [%" PRId64 " %" PRId64
", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")",
pNode->vgId, DID(&destId), destId.addr, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex,
pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex);
......@@ -725,7 +725,7 @@ int32_t syncLogReplProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNod
if (pMsg->success && pMsg->matchIndex == pMsg->lastSendIndex) {
pMgr->matchIndex = pMsg->matchIndex;
pMgr->restored = true;
sInfo("vgId:%d, sync log repl mgr restored. peer: dnode:%d (%" PRIx64 "), mgr: rs(%d) [%" PRId64 " %" PRId64
sInfo("vgId:%d, sync log repl restored. peer: dnode:%d (%" PRIx64 "), mgr: rs(%d) [%" PRId64 " %" PRId64
", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")",
pNode->vgId, DID(&destId), destId.addr, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex,
pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex);
......@@ -774,14 +774,14 @@ int32_t syncLogReplProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNod
// attempt to replicate the raft log at index
(void)syncLogReplReset(pMgr);
return syncLogReplReplicateProbe(pMgr, pNode, index);
return syncLogReplProbe(pMgr, pNode, index);
}
int32_t syncLogReplProcessHeartbeatReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncHeartbeatReply* pMsg) {
SSyncLogBuffer* pBuf = pNode->pLogBuf;
taosThreadMutexLock(&pBuf->mutex);
if (pMsg->startTime != 0 && pMsg->startTime != pMgr->peerStartTime) {
sInfo("vgId:%d, reset sync log repl mgr in heartbeat. peer:%" PRIx64 ", start time:%" PRId64 ", old:%" PRId64 "",
sInfo("vgId:%d, reset sync log repl in heartbeat. peer:%" PRIx64 ", start time:%" PRId64 ", old:%" PRId64 "",
pNode->vgId, pMsg->srcId.addr, pMsg->startTime, pMgr->peerStartTime);
syncLogReplReset(pMgr);
pMgr->peerStartTime = pMsg->startTime;
......@@ -794,8 +794,7 @@ int32_t syncLogReplProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncApp
SSyncLogBuffer* pBuf = pNode->pLogBuf;
taosThreadMutexLock(&pBuf->mutex);
if (pMsg->startTime != pMgr->peerStartTime) {
sInfo("vgId:%d, reset sync log repl mgr in appendlog reply. peer:%" PRIx64 ", start time:%" PRId64
", old:%" PRId64,
sInfo("vgId:%d, reset sync log repl in appendlog reply. peer:%" PRIx64 ", start time:%" PRId64 ", old:%" PRId64,
pNode->vgId, pMsg->srcId.addr, pMsg->startTime, pMgr->peerStartTime);
syncLogReplReset(pMgr);
pMgr->peerStartTime = pMsg->startTime;
......@@ -810,16 +809,16 @@ int32_t syncLogReplProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncApp
return 0;
}
int32_t syncLogReplReplicateOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
int32_t syncLogReplDoOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
if (pMgr->restored) {
(void)syncLogReplReplicateAttempt(pMgr, pNode);
(void)syncLogReplAttempt(pMgr, pNode);
} else {
(void)syncLogReplReplicateProbe(pMgr, pNode, pNode->pLogBuf->matchIndex);
(void)syncLogReplProbe(pMgr, pNode, pNode->pLogBuf->matchIndex);
}
return 0;
}
int32_t syncLogReplReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index) {
int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index) {
ASSERT(!pMgr->restored);
ASSERT(pMgr->startIndex >= 0);
int64_t retryMaxWaitMs = syncGetRetryMaxWaitMs();
......@@ -834,7 +833,7 @@ int32_t syncLogReplReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncI
SRaftId* pDestId = &pNode->replicasId[pMgr->peerId];
bool barrier = false;
SyncTerm term = -1;
if (syncLogReplReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
if (syncLogReplSendTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
sError("vgId:%d, failed to replicate log entry since %s. index:%" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId,
terrstr(), index, pDestId->addr);
return -1;
......@@ -857,7 +856,7 @@ int32_t syncLogReplReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncI
return 0;
}
int32_t syncLogReplReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
int32_t syncLogReplAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
ASSERT(pMgr->restored);
SRaftId* pDestId = &pNode->replicasId[pMgr->peerId];
......@@ -879,7 +878,7 @@ int32_t syncLogReplReplicateAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
SRaftId* pDestId = &pNode->replicasId[pMgr->peerId];
bool barrier = false;
SyncTerm term = -1;
if (syncLogReplReplicateOneTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
if (syncLogReplSendTo(pMgr, pNode, index, &term, pDestId, &barrier) < 0) {
sError("vgId:%d, failed to replicate log entry since %s. index:%" PRId64 ", dest: 0x%016" PRIx64 "", pNode->vgId,
terrstr(), index, pDestId->addr);
return -1;
......@@ -932,7 +931,7 @@ int32_t syncLogReplProcessReplyAsNormal(SSyncLogReplMgr* pMgr, SSyncNode* pNode,
pMgr->startIndex = pMgr->matchIndex;
}
return syncLogReplReplicateAttempt(pMgr, pNode);
return syncLogReplAttempt(pMgr, pNode);
}
SSyncLogReplMgr* syncLogReplCreate() {
......@@ -1127,8 +1126,8 @@ SSyncRaftEntry* syncLogBufferGetOneEntry(SSyncLogBuffer* pBuf, SSyncNode* pNode,
return pEntry;
}
int32_t syncLogReplReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm,
SRaftId* pDestId, bool* pBarrier) {
int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm, SRaftId* pDestId,
bool* pBarrier) {
SSyncRaftEntry* pEntry = NULL;
SRpcMsg msgOut = {0};
bool inBuf = false;
......@@ -1141,14 +1140,14 @@ int32_t syncLogReplReplicateOneTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncI
if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) {
SSyncLogReplMgr* pMgr = syncNodeGetLogReplMgr(pNode, pDestId);
if (pMgr) {
sInfo("vgId:%d, reset sync log repl mgr of peer:%" PRIx64 " since %s. index:%" PRId64, pNode->vgId,
pDestId->addr, terrstr(), index);
sInfo("vgId:%d, reset sync log repl of peer:%" PRIx64 " since %s. index:%" PRId64, pNode->vgId, pDestId->addr,
terrstr(), index);
(void)syncLogReplReset(pMgr);
}
}
goto _err;
}
*pBarrier = syncLogIsReplicationBarrier(pEntry);
*pBarrier = syncLogReplBarrier(pEntry);
prevLogTerm = syncLogReplGetPrevLogTerm(pMgr, pNode, index);
if (prevLogTerm < 0) {
......
......@@ -74,7 +74,7 @@ int32_t syncNodeReplicateWithoutLock(SSyncNode* pNode) {
continue;
}
SSyncLogReplMgr* pMgr = pNode->logReplMgrs[i];
(void)syncLogReplReplicateOnce(pMgr, pNode);
(void)syncLogReplDoOnce(pMgr, pNode);
}
return 0;
}
......
......@@ -321,7 +321,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SNODE_ALREADY_DEPLOYED, "Snode already deploye
TAOS_DEFINE_ERROR(TSDB_CODE_SNODE_NOT_DEPLOYED, "Snode not deployed")
// vnode
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VGROUP_ID, "Vnode moved to another dnode or was deleted")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VGROUP_ID, "Vnode is closed or removed")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, "Database write operation denied")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_EXIST, "Vnode not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_ALREADY_EXIST, "Vnode already exist")
......
......@@ -89,7 +89,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqUpdateWithConsume.py -N 3 -n 3
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot1.py
# ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDelete-multiCtb.py -N 3 -n 3
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropStbCtb.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot0.py
......@@ -785,6 +785,7 @@
,,y,script,./test.sh -f tsim/insert/query_multi_file.sim
,,y,script,./test.sh -f tsim/insert/tcp.sim
,,y,script,./test.sh -f tsim/insert/update0.sim
,,y,script,./test.sh -f tsim/insert/delete0.sim
,,y,script,./test.sh -f tsim/insert/update1_sort_merge.sim
,,y,script,./test.sh -f tsim/insert/update2.sim
,,y,script,./test.sh -f tsim/parser/alter__for_community_version.sim
......
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
print =============== create database with different precision
sql create database d0 keep 365
sql create database d1 keep 365 precision 'ms'
sql create database d2 keep 365 precision 'us'
sql create database d3 keep 365 precision 'ns'
sql select * from information_schema.ins_databases
if $rows != 6 then
return -1
endi
print $data00 $data01 $data02
sql create table if not exists d0.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
sql create table if not exists d1.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
sql create table if not exists d2.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
sql create table if not exists d3.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned)
sql create table if not exists d0.ntb (ts timestamp, c1 int, c2 float, c3 double)
sql create table if not exists d1.ntb (ts timestamp, c1 int, c2 float, c3 double)
sql create table if not exists d2.ntb (ts timestamp, c1 int, c2 float, c3 double)
sql create table if not exists d3.ntb (ts timestamp, c1 int, c2 float, c3 double)
sql create table d0.ct1 using d0.stb tags(1000)
sql create table d1.ct1 using d1.stb tags(1000)
sql create table d2.ct1 using d2.stb tags(1000)
sql create table d3.ct1 using d3.stb tags(1000)
sql create table d0.ct2 using d0.stb tags(1000)
sql create table d1.ct2 using d1.stb tags(1000)
sql create table d2.ct2 using d2.stb tags(1000)
sql create table d3.ct2 using d3.stb tags(1000)
sql insert into d0.ct1 values(now+0s, 10, 2.0, 3.0)
sql insert into d1.ct1 values(now+0s, 10, 2.0, 3.0)
sql insert into d2.ct1 values(now+0s, 10, 2.0, 3.0)
sql insert into d3.ct1 values(now+0s, 10, 2.0, 3.0)
sql insert into d0.ct2 values(now+0s, 10, 2.0, 3.0)
sql insert into d1.ct2 values(now+0s, 10, 2.0, 3.0)
sql insert into d2.ct2 values(now+0s, 10, 2.0, 3.0)
sql insert into d3.ct2 values(now+0s, 10, 2.0, 3.0)
sql insert into d0.ntb values(now+0s, 10, 2.0, 3.0)
sql insert into d1.ntb values(now+0s, 10, 2.0, 3.0)
sql insert into d2.ntb values(now+0s, 10, 2.0, 3.0)
sql insert into d3.ntb values(now+0s, 10, 2.0, 3.0)
print =============== query data from super table
sql select count(*) from d0.stb
if $data00 != 2 then
return -1
endi
sql select count(*) from d1.stb
if $data00 != 2 then
return -1
endi
sql select count(*) from d2.stb
if $data00 != 2 then
return -1
endi
sql select count(*) from d3.stb
if $data00 != 2 then
return -1
endi
print =============== delete from child table
sql delete from d0.ct1 where ts < now()
sql delete from d1.ct1 where ts < now()
sql delete from d2.ct1 where ts < now()
sql delete from d3.ct1 where ts < now()
print =============== query data from super table
sql select count(*) from d0.stb
if $data00 != 1 then
return -1
endi
sql select count(*) from d1.stb
if $data00 != 1 then
return -1
endi
sql select count(*) from d2.stb
if $data00 != 1 then
return -1
endi
sql select count(*) from d3.stb
if $data00 != 1 then
return -1
endi
print =============== query data from normal table
sql select count(*) from d0.ntb
if $data00 != 1 then
return -1
endi
sql select count(*) from d1.ntb
if $data00 != 1 then
return -1
endi
sql select count(*) from d2.ntb
if $data00 != 1 then
return -1
endi
sql select count(*) from d3.ntb
if $data00 != 1 then
return -1
endi
print =============== delete from super table
sql delete from d0.stb where ts < now()
sql delete from d1.stb where ts < now()
sql delete from d2.stb where ts < now()
sql delete from d3.stb where ts < now()
print =============== query data from super table
sql select count(*) from d0.stb
if $data00 != 0 then
return -1
endi
sql select count(*) from d1.stb
if $data00 != 0 then
return -1
endi
sql select count(*) from d2.stb
if $data00 != 0 then
return -1
endi
sql select count(*) from d3.stb
if $data00 != 0 then
return -1
endi
print =============== delete from normal table
sql delete from d0.ntb where ts < now()
sql delete from d1.ntb where ts < now()
sql delete from d2.ntb where ts < now()
sql delete from d3.ntb where ts < now()
print =============== query data from normal table
sql select count(*) from d0.ntb
if $data00 != 0 then
return -1
endi
sql select count(*) from d1.ntb
if $data00 != 0 then
return -1
endi
sql select count(*) from d2.ntb
if $data00 != 0 then
return -1
endi
sql select count(*) from d3.ntb
if $data00 != 0 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
......@@ -53,7 +53,7 @@ sql insert into tbc values ("2021-05-11 10:12:29",36, 37, NULL, -4005)
sql insert into tbd values ("2021-05-11 10:12:29",NULL,NULL,NULL,NULL )
run tsim/parser/last_cache_query.sim
sql flush database $db
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
......
......@@ -114,6 +114,7 @@ run tsim/insert/basic1.sim
run tsim/insert/commit-merge0.sim
run tsim/insert/basic0.sim
run tsim/insert/update0.sim
run tsim/insert/delete0.sim
run tsim/insert/backquote.sim
run tsim/insert/null.sim
run tsim/catalog/alterInCurrent.sim
......
......@@ -29,10 +29,10 @@ sql select udf1(f) from t;
if $rows != 2 then
return -1
endi
if $data00 != 88 then
if $data00 != 1 then
return -1
endi
if $data10 != 88 then
if $data10 != 1 then
return -1
endi
......@@ -51,10 +51,10 @@ sql select udf1(f1, f2) from t2;
if $rows != 2 then
return -1
endi
if $data00 != 88 then
if $data00 != 1 then
return -1
endi
if $data10 != 88 then
if $data10 != 1 then
return -1
endi
......@@ -72,10 +72,10 @@ print $rows , $data00 , $data10 , $data20 , $data30
if $rows != 4 then
return -1
endi
if $data00 != 88 then
if $data00 != 1 then
return -1
endi
if $data10 != 88 then
if $data10 != 1 then
return -1
endi
......@@ -114,10 +114,10 @@ print $rows , $data00 , $data01
if $rows != 1 then
return -1
endi
if $data00 != 176.000000000 then
if $data00 != 2.000000000 then
return -1
endi
if $data01 != 152.420471066 then
if $data01 != 1.732050808 then
return -1
endi
......
......@@ -191,20 +191,20 @@ class TDTestCase:
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(0,2,1)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.checkData(0,4,1.000000000)
tdSql.checkData(0,5,88)
tdSql.checkData(0,5,1)
tdSql.checkData(0,6,"binary1")
tdSql.checkData(0,7,88)
tdSql.checkData(0,7,1)
tdSql.checkData(3,0,3)
tdSql.checkData(3,1,88)
tdSql.checkData(3,1,1)
tdSql.checkData(3,2,33333)
tdSql.checkData(3,3,88)
tdSql.checkData(3,3,1)
tdSql.checkData(3,4,33.000000000)
tdSql.checkData(3,5,88)
tdSql.checkData(3,5,1)
tdSql.checkData(3,6,"binary1")
tdSql.checkData(3,7,88)
tdSql.checkData(3,7,1)
tdSql.checkData(11,0,None)
tdSql.checkData(11,1,None)
......@@ -213,7 +213,7 @@ class TDTestCase:
tdSql.checkData(11,4,None)
tdSql.checkData(11,5,None)
tdSql.checkData(11,6,"binary1")
tdSql.checkData(11,7,88)
tdSql.checkData(11,7,1)
tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
tdSql.checkData(0,0,None)
......@@ -226,13 +226,13 @@ class TDTestCase:
tdSql.checkData(0,7,None)
tdSql.checkData(20,0,8)
tdSql.checkData(20,1,88)
tdSql.checkData(20,1,1)
tdSql.checkData(20,2,88888)
tdSql.checkData(20,3,88)
tdSql.checkData(20,3,1)
tdSql.checkData(20,4,888)
tdSql.checkData(20,5,88)
tdSql.checkData(20,5,1)
tdSql.checkData(20,6,88)
tdSql.checkData(20,7,88)
tdSql.checkData(20,7,1)
# aggregate functions
......@@ -375,14 +375,14 @@ class TDTestCase:
tdSql.checkRows(25)
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(1,0,88)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,8)
tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
tdSql.checkRows(13)
tdSql.checkData(0,0,88)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,8)
tdSql.checkData(1,0,88)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,7)
# bug fix for crash
......@@ -401,9 +401,9 @@ class TDTestCase:
tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
tdSql.checkRows(3)
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,88)
tdSql.checkData(0,1,1)
tdSql.checkData(0,2,-99.990000000)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,0)
......@@ -412,20 +412,20 @@ class TDTestCase:
tdSql.checkData(1,1,10)
tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,88)
tdSql.checkData(0,1,88)
tdSql.checkData(1,0,88)
tdSql.checkData(1,1,88)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,1)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,1)
tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,88)
tdSql.checkData(0,1,1)
tdSql.checkData(0,2,0)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,88)
tdSql.checkData(1,1,1)
tdSql.checkData(1,2,10)
tdSql.checkData(1,3,88)
tdSql.checkData(1,3,1)
tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,16.881943016)
......
......@@ -193,20 +193,20 @@ class TDTestCase:
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(0,2,1)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.checkData(0,4,1.000000000)
tdSql.checkData(0,5,88)
tdSql.checkData(0,5,1)
tdSql.checkData(0,6,"binary1")
tdSql.checkData(0,7,88)
tdSql.checkData(0,7,1)
tdSql.checkData(3,0,3)
tdSql.checkData(3,1,88)
tdSql.checkData(3,1,1)
tdSql.checkData(3,2,33333)
tdSql.checkData(3,3,88)
tdSql.checkData(3,3,1)
tdSql.checkData(3,4,33.000000000)
tdSql.checkData(3,5,88)
tdSql.checkData(3,5,1)
tdSql.checkData(3,6,"binary1")
tdSql.checkData(3,7,88)
tdSql.checkData(3,7,1)
tdSql.checkData(11,0,None)
tdSql.checkData(11,1,None)
......@@ -215,7 +215,7 @@ class TDTestCase:
tdSql.checkData(11,4,None)
tdSql.checkData(11,5,None)
tdSql.checkData(11,6,"binary1")
tdSql.checkData(11,7,88)
tdSql.checkData(11,7,1)
tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
tdSql.checkData(0,0,None)
......@@ -228,13 +228,13 @@ class TDTestCase:
tdSql.checkData(0,7,None)
tdSql.checkData(20,0,8)
tdSql.checkData(20,1,88)
tdSql.checkData(20,1,1)
tdSql.checkData(20,2,88888)
tdSql.checkData(20,3,88)
tdSql.checkData(20,3,1)
tdSql.checkData(20,4,888)
tdSql.checkData(20,5,88)
tdSql.checkData(20,5,1)
tdSql.checkData(20,6,88)
tdSql.checkData(20,7,88)
tdSql.checkData(20,7,1)
# aggregate functions
......@@ -377,14 +377,14 @@ class TDTestCase:
tdSql.checkRows(25)
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(1,0,88)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,8)
tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
tdSql.checkRows(13)
tdSql.checkData(0,0,88)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,8)
tdSql.checkData(1,0,88)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,7)
# bug fix for crash
......@@ -403,9 +403,9 @@ class TDTestCase:
tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
tdSql.checkRows(3)
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,88)
tdSql.checkData(0,1,1)
tdSql.checkData(0,2,-99.990000000)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,0)
......@@ -414,20 +414,20 @@ class TDTestCase:
tdSql.checkData(1,1,10)
tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,88)
tdSql.checkData(0,1,88)
tdSql.checkData(1,0,88)
tdSql.checkData(1,1,88)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,1)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,1)
tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,88)
tdSql.checkData(0,1,1)
tdSql.checkData(0,2,0)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,88)
tdSql.checkData(1,1,1)
tdSql.checkData(1,2,10)
tdSql.checkData(1,3,88)
tdSql.checkData(1,3,1)
tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,16.881943016)
......
......@@ -193,20 +193,20 @@ class TDTestCase:
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(0,2,1)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.checkData(0,4,1.000000000)
tdSql.checkData(0,5,88)
tdSql.checkData(0,5,1)
tdSql.checkData(0,6,"binary1")
tdSql.checkData(0,7,88)
tdSql.checkData(0,7,1)
tdSql.checkData(3,0,3)
tdSql.checkData(3,1,88)
tdSql.checkData(3,1,1)
tdSql.checkData(3,2,33333)
tdSql.checkData(3,3,88)
tdSql.checkData(3,3,1)
tdSql.checkData(3,4,33.000000000)
tdSql.checkData(3,5,88)
tdSql.checkData(3,5,1)
tdSql.checkData(3,6,"binary1")
tdSql.checkData(3,7,88)
tdSql.checkData(3,7,1)
tdSql.checkData(11,0,None)
tdSql.checkData(11,1,None)
......@@ -215,7 +215,7 @@ class TDTestCase:
tdSql.checkData(11,4,None)
tdSql.checkData(11,5,None)
tdSql.checkData(11,6,"binary1")
tdSql.checkData(11,7,88)
tdSql.checkData(11,7,1)
tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
tdSql.checkData(0,0,None)
......@@ -228,13 +228,13 @@ class TDTestCase:
tdSql.checkData(0,7,None)
tdSql.checkData(20,0,8)
tdSql.checkData(20,1,88)
tdSql.checkData(20,1,1)
tdSql.checkData(20,2,88888)
tdSql.checkData(20,3,88)
tdSql.checkData(20,3,1)
tdSql.checkData(20,4,888)
tdSql.checkData(20,5,88)
tdSql.checkData(20,5,1)
tdSql.checkData(20,6,88)
tdSql.checkData(20,7,88)
tdSql.checkData(20,7,1)
# aggregate functions
......@@ -377,14 +377,14 @@ class TDTestCase:
tdSql.checkRows(25)
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(1,0,88)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,8)
tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
tdSql.checkRows(13)
tdSql.checkData(0,0,88)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,8)
tdSql.checkData(1,0,88)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,7)
# bug fix for crash
......@@ -403,9 +403,9 @@ class TDTestCase:
tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
tdSql.checkRows(3)
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,88)
tdSql.checkData(0,1,1)
tdSql.checkData(0,2,-99.990000000)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,0)
......@@ -414,20 +414,20 @@ class TDTestCase:
tdSql.checkData(1,1,10)
tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,88)
tdSql.checkData(0,1,88)
tdSql.checkData(1,0,88)
tdSql.checkData(1,1,88)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,1)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,1)
tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,88)
tdSql.checkData(0,1,1)
tdSql.checkData(0,2,0)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,88)
tdSql.checkData(1,1,1)
tdSql.checkData(1,2,10)
tdSql.checkData(1,3,88)
tdSql.checkData(1,3,1)
tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,16.881943016)
......
......@@ -190,20 +190,20 @@ class TDTestCase:
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(0,2,1)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.checkData(0,4,1.000000000)
tdSql.checkData(0,5,88)
tdSql.checkData(0,5,1)
tdSql.checkData(0,6,"binary1")
tdSql.checkData(0,7,88)
tdSql.checkData(0,7,1)
tdSql.checkData(3,0,3)
tdSql.checkData(3,1,88)
tdSql.checkData(3,1,1)
tdSql.checkData(3,2,33333)
tdSql.checkData(3,3,88)
tdSql.checkData(3,3,1)
tdSql.checkData(3,4,33.000000000)
tdSql.checkData(3,5,88)
tdSql.checkData(3,5,1)
tdSql.checkData(3,6,"binary1")
tdSql.checkData(3,7,88)
tdSql.checkData(3,7,1)
tdSql.checkData(11,0,None)
tdSql.checkData(11,1,None)
......@@ -212,7 +212,7 @@ class TDTestCase:
tdSql.checkData(11,4,None)
tdSql.checkData(11,5,None)
tdSql.checkData(11,6,"binary1")
tdSql.checkData(11,7,88)
tdSql.checkData(11,7,1)
tdSql.query("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")
tdSql.checkData(0,0,None)
......@@ -225,13 +225,13 @@ class TDTestCase:
tdSql.checkData(0,7,None)
tdSql.checkData(20,0,8)
tdSql.checkData(20,1,88)
tdSql.checkData(20,1,1)
tdSql.checkData(20,2,88888)
tdSql.checkData(20,3,88)
tdSql.checkData(20,3,1)
tdSql.checkData(20,4,888)
tdSql.checkData(20,5,88)
tdSql.checkData(20,5,1)
tdSql.checkData(20,6,88)
tdSql.checkData(20,7,88)
tdSql.checkData(20,7,1)
# aggregate functions
......@@ -374,14 +374,14 @@ class TDTestCase:
tdSql.checkRows(25)
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(1,0,88)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,8)
tdSql.query("select abs(udf1(c1)) , abs(ceil(c1)) from ct1 order by ts;")
tdSql.checkRows(13)
tdSql.checkData(0,0,88)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,8)
tdSql.checkData(1,0,88)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,7)
# bug fix for crash
......@@ -400,9 +400,9 @@ class TDTestCase:
tdSql.query("select c1 ,udf1(c1) , c6 ,udf1(c6) from stb1 where c1 > 8 order by ts")
tdSql.checkRows(3)
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,88)
tdSql.checkData(0,1,1)
tdSql.checkData(0,2,-99.990000000)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.query("select sub1.c1, sub2.c2 from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,0)
......@@ -411,20 +411,20 @@ class TDTestCase:
tdSql.checkData(1,1,10)
tdSql.query("select udf1(sub1.c1), udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,88)
tdSql.checkData(0,1,88)
tdSql.checkData(1,0,88)
tdSql.checkData(1,1,88)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,1)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,1)
tdSql.query("select sub1.c1 , udf1(sub1.c1), sub2.c2 ,udf1(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,88)
tdSql.checkData(0,1,1)
tdSql.checkData(0,2,0)
tdSql.checkData(0,3,88)
tdSql.checkData(0,3,1)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,88)
tdSql.checkData(1,1,1)
tdSql.checkData(1,2,10)
tdSql.checkData(1,3,88)
tdSql.checkData(1,3,1)
tdSql.query("select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null")
tdSql.checkData(0,0,16.881943016)
......@@ -468,12 +468,12 @@ class TDTestCase:
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,1)
tdSql.checkData(1,2,1.110000000)
tdSql.checkData(1,3,88)
tdSql.checkData(1,3,1)
tdSql.query("select c1,c6,udf1(c1,c6) from stb1 order by ts")
tdSql.checkData(1,0,8)
tdSql.checkData(1,1,88.880000000)
tdSql.checkData(1,2,88)
tdSql.checkData(1,2,1)
tdSql.query("select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;")
tdSql.checkRows(22)
......
......@@ -2382,6 +2382,14 @@ class TDTestCase:
tdSql.error(f"select interp('abcd') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdSql.error(f"select interp('中文字符') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
# invalid pseudo column usage
tdSql.error(f"select interp(_irowts) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdSql.error(f"select interp(_isfilled) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdSql.error(f"select interp(c0) from {dbname}.{tbname} where _isfilled = true range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdSql.error(f"select interp(c0) from {dbname}.{tbname} where _irowts > 0 range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdLog.printNoPrefix("==========step13:stable cases")
......
......@@ -20,8 +20,8 @@ class TDTestCase:
intData = []
floatData = []
tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')")
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(t0 tinyint, t1 float, loc nchar(20))''')
tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags(5, 5.5, 'beijing')")
for i in range(self.rowNum):
tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
......@@ -55,13 +55,20 @@ class TDTestCase:
tdSql.checkData(0, 1, np.max(intData))
tdSql.query(f"select ts, min(col9) from {dbname}.stb")
tdSql.checkRows(1)
tdSql.checkRows(1)
tdSql.checkData(0, 1, np.min(floatData))
tdSql.query(f"select ts, min(col9) from {dbname}.stb_1")
tdSql.checkRows(1)
tdSql.checkRows(1)
tdSql.checkData(0, 1, np.min(floatData))
# check tags
tdSql.query(f"select max(t0) from {dbname}.stb")
tdSql.checkData(0,0,5)
tdSql.query(f"select max(t1) from {dbname}.stb")
tdSql.checkData(0,0,5.5)
def max_check_ntb_base(self, dbname="db"):
tdSql.prepare()
intData = []
......
......@@ -243,8 +243,8 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) {
SShellArgs *pArgs = &shell.args;
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "--help") == 0 || strcmp(argv[i], "--usage") == 0 || strcmp(argv[i], "-?") == 0 ||
strcmp(argv[i], "/?") == 0) {
if (strcmp(argv[i], "--help") == 0 || strcmp(argv[i], "--usage") == 0
|| strcmp(argv[i], "-?") == 0 || strcmp(argv[i], "/?") == 0) {
shellParseSingleOpt('?', NULL);
return 0;
}
......@@ -260,8 +260,10 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) {
return -1;
}
if (key[1] == 'h' || key[1] == 'P' || key[1] == 'u' || key[1] == 'a' || key[1] == 'c' || key[1] == 's' ||
key[1] == 'f' || key[1] == 'd' || key[1] == 'w' || key[1] == 'n' || key[1] == 'l' || key[1] == 'N'
if (key[1] == 'h' || key[1] == 'P' || key[1] == 'u'
|| key[1] == 'a' || key[1] == 'c' || key[1] == 's'
|| key[1] == 'f' || key[1] == 'd' || key[1] == 'w'
|| key[1] == 'n' || key[1] == 'l' || key[1] == 'N'
#ifdef WEBSOCKET
|| key[1] == 'E' || key[1] == 'T'
#endif
......@@ -277,10 +279,12 @@ int32_t shellParseArgsWithoutArgp(int argc, char *argv[]) {
}
shellParseSingleOpt(key[1], val);
i++;
} else if (key[1] == 'p' || key[1] == 'A' || key[1] == 'C' || key[1] == 'r' || key[1] == 'k' || key[1] == 't' ||
key[1] == 'V' || key[1] == '?' || key[1] == 1
} else if (key[1] == 'p' || key[1] == 'A' || key[1] == 'C'
|| key[1] == 'r' || key[1] == 'k'
|| key[1] == 't' || key[1] == 'V'
|| key[1] == '?' || key[1] == 1
#ifdef WEBSOCKET
|| key[1] == 'R'
||key[1] == 'R'
#endif
) {
shellParseSingleOpt(key[1], NULL);
......
......@@ -24,7 +24,7 @@ int shell_conn_ws_server(bool first) {
((dsnLen-SHELL_WS_DSN_MASK) > SHELL_WS_DSN_BUFF)?
SHELL_WS_DSN_BUFF:(dsnLen-SHELL_WS_DSN_MASK),
"%s", shell.args.dsn);
fprintf(stdout, "trying to connect %s*** ", cuttedDsn);
fprintf(stdout, "trying to connect %s****** ", cuttedDsn);
fflush(stdout);
for (int i = 0; i < shell.args.timeout; i++) {
shell.ws_conn = ws_connect_with_dsn(shell.args.dsn);
......