Unverified commit f8453c14, authored by slguan, committed by GitHub

Merge branch 'develop' into hotfix/TBASE-1241

......@@ -141,12 +141,12 @@ IF (NOT DEFINED TD_CLUSTER)
SET(RELEASE_FLAGS "-O0")
IF (NOT TD_ARM)
IF (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ELSE ()
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
ELSE ()
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -Wno-char-subscripts -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
ADD_DEFINITIONS(-DLINUX)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
......@@ -156,7 +156,7 @@ IF (NOT DEFINED TD_CLUSTER)
ENDIF ()
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -Wno-char-subscripts -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DLINUX)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
......@@ -171,7 +171,7 @@ IF (NOT DEFINED TD_CLUSTER)
ADD_DEFINITIONS(-DPTW32_BUILD)
ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE)
ELSEIF (TD_DARWIN_64)
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-unused-variable -Wno-bitfield-constant-conversion")
SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
ADD_DEFINITIONS(-DDARWIN)
......
......@@ -114,23 +114,84 @@ public Connection getConn() throws Exception{
</ul>
<p>对于TDengine操作的报错信息,用户可使用JDBCDriver包里提供的枚举类TSDBError.java来获取error message和error code的列表。对于更多的具体操作的相关代码,请参考TDengine提供的使用示范项目<code>JDBCDemo</code></p>
<a class='anchor' id='Python-Connector'></a><h2>Python Connector</h2>
<a class='anchor' id='Python客户端安装'></a><h3>Python客户端安装</h3>
<p>用户可以在源代码的src/connector/python文件夹下找到python2和python3的安装包。用户可以通过pip命令安装: </p>
<p><code>pip install src/connector/python/python2/</code></p>
<p></p>
<p><code>pip install src/connector/python/python3/</code></p>
<p>如果机器上没有pip命令,用户可将src/connector/python/python3或src/connector/python/python2下的taos文件夹拷贝到应用程序的目录使用。</p>
<a class='anchor' id='Python客户端接口'></a><h3>Python客户端接口</h3>
<p>在使用TDengine的python接口时,需导入TDengine客户端模块:</p>
<pre><code>import taos </code></pre>
<p>用户可通过python的帮助信息直接查看模块的使用信息,或者参考code/examples/python中的示例程序。以下为部分常用类和方法:</p>
<a class='anchor' id='安装准备'></a><h3>安装准备</h3>
<li>已安装TDengine, 如果客户端在Windows上,需要安装Windows 版本的TDengine客户端</li>
<li>已安装Python 2.7 或 3.4 及以上版本</li>
<li>已安装pip</li>
<a class='anchor' id='安装'></a><h3>安装</h3>
<a class='anchor' id='Linux'></a><h4>Linux</h4>
<p>用户可以在源代码的src/connector/python文件夹下找到python2和python3的安装包, 然后通过pip命令安装</p>
<pre><code class="cmd language-cmd">pip install src/connector/python/linux/python2/</code></pre>
<p>或者</p>
<pre><code>pip install src/connector/python/linux/python3/</code></pre>
<a class='anchor' id='Windows'></a><h4>Windows</h4>
<p>在已安装Windows TDengine 客户端的情况下, 将文件"C:\TDengine\driver\taos.dll" 拷贝到 "C:\windows\system32" 目录下, 然后进入Windows <em>cmd</em> 命令行界面</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python2\</code></pre>
<p>或者</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python3\</code></pre>
<p>* 如果机器上没有<em>pip</em>命令,用户可将src/connector/python/windows/python3或src/connector/python/windows/python2下的taos文件夹拷贝到应用程序的目录使用。 </p>
<a class='anchor' id='使用'></a><h3>使用</h3>
<a class='anchor' id='代码示例'></a><h4>代码示例</h4>
<li>导入TDengine客户端模块:</li>
<pre><code class="python language-python">import taos </code></pre>
<li>获取连接</li>
<pre><code>
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
</code></pre>
<p>* <em>host 是TDengine 服务端的IP地址, config 为客户端配置文件所在目录</em></p>
<li>写入数据</li>
<pre><code>
import datetime
# 创建数据库
c1.execute('create database db')
c1.execute('use db')
# 建表
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# 插入数据
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# 批量插入数据
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
</code></pre>
<li>查询数据</li>
<pre><code>
c1.execute('select * from tb')
# 拉取查询结果
data = c1.fetchall()
# 返回的结果是一个列表,每一行构成列表的一个元素
numOfRows = c1.rowcount
numOfCols = c1.descriptions
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))
# 直接使用cursor 循环拉取查询结果
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
</code></pre>
<li>关闭连接</li>
<pre><code>
c1.close()
conn.close()
</code></pre>
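<p>作为补充,下面给出一个带资源清理的示意写法(仅为示意,并非连接器API的一部分):用<code>try/finally</code>包裹上述流程,保证即使语句执行出错,游标和连接也会被关闭。</p>
<pre><code class="python language-python">import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
try:
    c1.execute('select * from db.tb')
    # 逐行打印查询结果
    for row in c1:
        print(row)
finally:
    # 无论是否出错,都释放客户端资源
    c1.close()
    conn.close()
</code></pre>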
<a class='anchor' id='帮助信息'></a><h4>帮助信息</h4>
<p>用户可通过python的帮助信息直接查看模块的使用信息,或者参考code/examples/python中的示例程序。以下为部分常用类和方法:</p>
<ul>
<li><p><em>TaosConnection</em></p>
<p>参考python中help(taos.TaosConnection)。</p></li>
<li><p><em>TaosCursor</em></p>
<p>参考python中help(taos.TaosCursor)。</p></li>
<li><p><em>connect</em>方法</p>
<p>用于生成taos.TaosConnection的实例。</p></li>
<li><p><em>TaosConnection</em> </p>
<p>参考python中<code>help(taos.TDengineConnection)</code></p></li>
<li><p><em>TaosCursor</em> </p>
<p>参考python中<code>help(taos.TDengineCursor)</code></p></li>
<li><p>connect 方法</p>
<p>用于生成taos.TDengineConnection的实例。</p></li>
</ul>
<a class='anchor' id='RESTful-Connector'></a><h2>RESTful Connector</h2>
<p>为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。 </p>
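<p>下面是一个示意性的Python例子,演示把SQL语句放在HTTP POST请求的BODY中发送。其中的端口6020(即后文httpPort的缺省值)、路径<code>/rest/sql</code>以及使用缺省账号<code>root</code>/<code>taosdata</code>的Basic认证均为假设,请以RESTful API的正式说明为准:</p>
<pre><code class="python language-python">import base64
import json
import urllib.request

# 假设:服务端地址127.0.0.1,RESTful端口6020,路径/rest/sql,Basic认证
url = "http://127.0.0.1:6020/rest/sql"
token = base64.b64encode(b"root:taosdata").decode("ascii")

req = urllib.request.Request(
    url,
    data="select * from db.tb".encode("utf-8"),  # SQL语句直接放在请求BODY中
    headers={"Authorization": "Basic " + token},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read().decode("utf-8")))
</code></pre>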
......
......@@ -28,7 +28,7 @@
<p>在TDengine终端中,用户可以通过SQL命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的SQL语句需要以分号结束来运行。示例:</p>
<pre><code class="mysql language-mysql">create database db;
use db;
create table t (ts timestamp, cdata int);
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 00:00:00', 10);
insert into t values ('2019-07-15 01:00:00', 20);
select * from t;
......@@ -85,4 +85,4 @@ Query OK, 2 row(s) in set (0.001700s)</code></pre>
</ul>
<p>TDengine是专为物联网、车联网、工业互联网、运维监测等场景优化设计的时序数据处理引擎。与其他方案相比,它的插入查询速度都快10倍以上。单核一秒钟就能插入100万数据点,读出1000万数据点。由于采用列式存储和优化的压缩算法,存储空间不及普通数据库的1/10。</p>
<a class='anchor' id='深入了解TDengine'></a><h2>深入了解TDengine</h2>
<p>请继续阅读<a href="../documentation">文档</a>来深入了解TDengine。</p><a href='../index.html'>回去</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>
\ No newline at end of file
<p>请继续阅读<a href="../documentation">文档</a>来深入了解TDengine。</p><a href='../index.html'>回去</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>
......@@ -122,15 +122,76 @@ public Connection getConn() throws Exception{
</ul>
<p>All the error codes and error messages can be found in <code>TSDBError.java</code> . For a more detailed coding example, please refer to the demo project <code>JDBCDemo</code> in TDengine's code examples. </p>
<a class='anchor' id='Python-Connector'></a><h2>Python Connector</h2>
<a class='anchor' id='Install-TDengine-Python-client'></a><h3>Install TDengine Python client</h3>
<p>Users can find python client packages in our source code directory <em>src/connector/python</em>. There are two directories corresponding two python versions. Please choose the correct package to install. Users can use <em>pip</em> command to install:</p>
<pre><code class="cmd language-cmd">pip install src/connector/python/python2/</code></pre>
<a class='anchor' id='Pre-requirement'></a><h3>Pre-requirement</h3>
<li>TDengine installed, TDengine-client installed if on Windows</li>
<li>python 2.7 or >= 3.4</li>
<li>pip installed </li>
<a class='anchor' id='Installation'></a><h3>Installation</h3>
<a class='anchor' id='Linux'></a><h4>Linux</h4>
<p>Users can find python client packages in our source code directory <em>src/connector/python</em>. There are two directories corresponding to two python versions. Please choose the correct package to install. Users can use <em>pip</em> command to install:</p>
<pre><code class="cmd language-cmd">pip install src/connector/python/linux/python2/</code></pre>
<p>or</p>
<pre><code>pip install src/connector/python/python3/</code></pre>
<p>If <em>pip</em> command is not installed on the system, users can choose to install pip or just copy the <em>taos</em> directory in the python client directory to the application directory to use.</p>
<a class='anchor' id='Python-client-interfaces'></a><h3>Python client interfaces</h3>
<p>To use TDengine Python client, import TDengine module at first:</p>
<pre><code>pip install src/connector/python/linux/python3/</code></pre>
<a class='anchor' id='Windows'></a><h4>Windows</h4>
<p>Assuming the Windows TDengine client has been installed, copy the file "C:\TDengine\driver\taos.dll" to the folder "C:\windows\system32", and then open the Windows <em>cmd</em> command-line interface</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python2\</code></pre>
<p>or</p>
<pre><code>cd C:\TDengine\connector\python\windows</code></pre>
<pre><code>pip install python3\</code></pre>
<p>* If <em>pip</em> command is not installed on the system, users can choose to install pip or just copy the <em>taos</em> directory in the python client directory to the application directory to use.</p>
<a class='anchor' id='Usage'></a><h3>Usage</h3>
<a class='anchor' id='Examples'></a><h4>Examples</h4>
<li>import TDengine module at first:</li>
<pre><code class="python language-python">import taos </code></pre>
<li>get the connection</li>
<pre><code>
conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c1 = conn.cursor()
</code></pre>
<p>* <em>host is the IP address of the TDengine server, and config is the directory where the TDengine client configuration file is located</em></p>
<li>insert records into the database</li>
<pre><code>
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
# create a table
c1.execute('create table tb (ts timestamp, temperature int, humidity float)')
# insert a record
start_time = datetime.datetime(2019, 11, 1)
affected_rows = c1.execute('insert into tb values (\'%s\', 0, 0.0)' %start_time)
# insert multiple records in a batch
time_interval = datetime.timedelta(seconds=60)
sqlcmd = ['insert into tb values']
for irow in range(1,11):
    start_time += time_interval
    sqlcmd.append('(\'%s\', %d, %f)' %(start_time, irow, irow*1.2))
affected_rows = c1.execute(' '.join(sqlcmd))
</code></pre>
<li>query the database</li>
<pre><code>
c1.execute('select * from tb')
# fetch all returned results
data = c1.fetchall()
# data is a list of returned rows with each row being a tuple
numOfRows = c1.rowcount
numOfCols = c1.descriptions
for irow in range(numOfRows):
    print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1], data[irow][2]))
# use the cursor as an iterator to retrieve all returned results
c1.execute('select * from tb')
for data in c1:
    print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1], data[2]))
</code></pre>
<li>close the connection</li>
<pre><code>
c1.close()
conn.close()
</code></pre>
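<p>Building on the batch insert step above, the sketch below (not part of the connector API) generates the multi-row insert statement from a plain list of records; it assumes the cursor <code>c1</code> is still open, so it belongs before the connection is closed.</p>
<pre><code class="python language-python">import datetime

def insert_rows(cursor, rows):
    # rows: a list of (timestamp, temperature, humidity) tuples for table tb
    values = ["('%s', %d, %f)" % (ts, t, h) for ts, t, h in rows]
    return cursor.execute('insert into tb values ' + ' '.join(values))

start = datetime.datetime(2019, 11, 1)
rows = [(start + datetime.timedelta(seconds=60 * i), i, i * 1.2) for i in range(1, 11)]
affected_rows = insert_rows(c1, rows)
</code></pre>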
<a class='anchor' id='Help-information'></a><h4>Help information</h4>
<p>Users can get module information from Python help interface or refer to our [python code example](). We list the main classes and methods below:</p>
<ul>
<li><p><em>TaosConnection</em> class</p>
......
......@@ -28,7 +28,7 @@
<p>In the TDengine shell, you can create databases, create tables and insert/query data with SQL. Each query command ends with a semicolon. It works like MySQL, for example:</p>
<pre><code class="mysql language-mysql">create database db;
use db;
create table t (ts timestamp, cdata int);
create table t (ts timestamp, speed int);
insert into t values ('2019-07-15 10:00:00', 10);
insert into t values ('2019-07-15 10:01:05', 20);
select * from t;
......@@ -85,4 +85,4 @@ Query OK, 2 row(s) in set (0.001700s)</code></pre>
</ul>
<p>TDengine is specially designed and optimized for time-series data processing in IoT, connected cars, Industrial IoT, IT infrastructure and application monitoring, and other scenarios. Compared with other solutions, it is 10x faster on insert/query speed. With a single-core machine, over 20K requests can be processed, millions of data points can be ingested, and over 10 million data points can be retrieved in a second. Via column-based storage and a compression algorithm tuned for different data types, less than 1/10 of the storage space is required. </p>
<a class='anchor' id='Explore-More-on-TDengine'></a><h2>Explore More on TDengine</h2>
<p>Please read through the whole <a href='../documentation'>documentation</a> to learn more about TDengine.</p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>
\ No newline at end of file
<p>Please read through the whole <a href='../documentation'>documentation</a> to learn more about TDengine.</p><a href='../index.html'>Back</a></section></main></div><?php include($s.'/footer.php'); ?><script>$('pre').addClass('prettyprint linenums');PR.prettyPrint()</script><script src='lib/docs/liner.js'></script></body></html>
......@@ -18,6 +18,7 @@ TDengine提供类似SQL语法,用户可以在TDengine Shell中使用SQL语句
- 插入记录时,如果时间戳为0,插入数据时使用服务器当前时间
- Epoch Time: 时间戳也可以是一个长整数,表示从1970-01-01 08:00:00.000开始的毫秒数
- 时间可以加减,比如 now-2h,表明查询时刻向前推2个小时(最近2小时)。数字后面的时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。比如select * from t1 where ts > now-2w and ts <= now-1w, 表示查询两周前整整一周的数据
- TDengine暂不支持时间窗口按照自然年和自然月切分。Where条件中的时间窗口单位的换算关系如下:interval(1y) 等效于 interval(365d), interval(1n) 等效于 interval(30d), interval(1w) 等效于 interval(7d)
TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMicrosecond就可支持微秒。
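下面用一小段Python示意上文列出的时间单位换算关系以及相对时间查询的写法(仅为示意,这些表达式实际由服务端解析):

```python
# 上文给出的换算关系:1y 等效 365d,1n 等效 30d,1w 等效 7d
INTERVAL_IN_DAYS = {"y": 365, "n": 30, "w": 7, "d": 1}

def interval_days(value, unit):
    """把 interval 表达式中的数值和单位换算成天数"""
    return value * INTERVAL_IN_DAYS[unit]

# 与上文示例等价的相对时间查询:查询两周前整整一周的数据
sql = "select * from t1 where ts > now-2w and ts <= now-1w"
print(interval_days(1, "y"), interval_days(1, "n"), interval_days(1, "w"), sql)
```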
......
......@@ -2,15 +2,15 @@
## 文件目录结构
安装TDengine后,默认会在操作系统中生成下列目录或文件:
安装TDengine的过程中,安装程序将在操作系统中创建以下目录或文件:
| 目录/文件 | 说明 |
| ---------------------- | :------------------------------------------------|
| /etc/taos/taos.cfg | TDengine默认[配置文件] |
| /usr/local/taos/driver | TDengine动态链接库目录 |
| /var/lib/taos | TDengine默认数据文件目录,可通过[配置文件]修改位置. |
| /var/log/taos | TDengine默认日志文件目录,可通过[配置文件]修改位置 |
| /usr/local/taos/bin | TDengine可执行文件目录 |
| /etc/taos/taos.cfg | 默认[配置文件] |
| /usr/local/taos/driver | 动态链接库目录 |
| /var/lib/taos | 默认数据文件目录,可通过[配置文件]修改位置. |
| /var/log/taos | 默认日志文件目录,可通过[配置文件]修改位置 |
| /usr/local/taos/bin | 可执行文件目录 |
### 可执行文件
......@@ -25,22 +25,78 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
## 服务端配置
TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修改配置参数,以满足不同场景的需求。配置文件的缺省位置在/etc/taos目录,可以通过taosd命令行执行参数-c指定配置文件目录。比如taosd -c /home/user来指定配置文件位于/home/user这个目录。
TDengine系统后台服务程序是`taosd`,其启动时候读取的配置文件缺省目录是`/etc/taos`。可以通过命令行执行参数-c指定配置文件目录,比如
```
taosd -c /home/user
```
指定`taosd`启动的时候读取`/home/user`目录下的配置文件taos.cfg。
下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节。**注意:配置修改后,需要重启*taosd*服务才能生效。**
- internalIp: 对外提供服务的IP地址,默认取第一个IP地址
- mgmtShellPort:管理节点与客户端通信使用的TCP/UDP端口号(默认值是6030)。此端口号在内向后连续的5个端口都会被UDP通信占用,即UDP占用[6030-6034],同时TCP通信也会使用端口[6030]。
- vnodeShellPort:数据节点与客户端通信使用的TCP/UDP端口号(默认值是6035)。此端口号在内向后连续的5个端口都会被UDP通信占用,即UDP占用[6035-6039],同时TCP通信也会使用端口[6035]
- httpPort:数据节点对外提供RESTful服务使用TCP,端口号[6020]
- dataDir: 数据文件目录,缺省是/var/lib/taos
- maxUsers:用户的最大数量
- maxDbs:数据库的最大数量
- maxTables:数据表的最大数量
- enableMonitor: 系统监测标志位,0:关闭,1:打开
- logDir: 日志文件目录,缺省是/var/log/taos
- numOfLogLines:日志文件的最大行数
- debugFlag: 系统debug日志开关,131:仅错误和报警信息,135:所有
**internalIp**
- 默认值:操作系统配置的IP地址列表中的第一个IP地址
对外提供服务的IP地址。
**mgmtShellPort**
- 默认值: _6030_
数据库服务中管理节点与客户端通信使用的TCP/UDP端口号。
> 从此端口号开始(含)的连续5个端口都会用于UDP通信,即使用的端口是 _6030_ - _6034_ 。此外,TCP还会使用端口 _6030_ 。
**vnodeShellPort**
- 默认值: _6035_
数据节点与客户端通信使用的TCP/UDP端口号。
> 从此端口号开始(含)的连续5个端口都会用于UDP通信,即使用的端口是 _6035_ - _6039_ 。此外,TCP还会使用端口 _6035_ 。
**httpPort**
- 默认值: _6020_
RESTful服务使用的端口号,所有的HTTP请求(TCP)都需要向该接口发起查询/写入请求。
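根据上面三个端口参数的说明(UDP使用从基准端口开始的连续5个端口,TCP只使用基准端口本身,httpPort仅使用TCP),可以用下面的示意代码推算需要开放的端口列表:

```python
# 示意:按上文描述的规则,由配置值推算需要放通的UDP/TCP端口
def ports_to_open(mgmt_shell_port=6030, vnode_shell_port=6035, http_port=6020):
    udp = set()
    tcp = {mgmt_shell_port, vnode_shell_port, http_port}
    for base in (mgmt_shell_port, vnode_shell_port):
        udp.update(range(base, base + 5))  # 基准端口及其后4个端口用于UDP
    return {"udp": sorted(udp), "tcp": sorted(tcp)}

print(ports_to_open())
# {'udp': [6030, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039], 'tcp': [6020, 6030, 6035]}
```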
**dataDir**
- 默认值:/var/lib/taos
数据文件目录,所有的数据文件都将写入该目录。
**logDir**
- 默认值:/var/log/taos
日志文件目录,客户端和服务器的运行日志将写入该目录。
**maxUsers**
- 默认值:10,000
系统允许创建用户数量的上限
**maxDbs**
- 默认值:1,000
系统允许的创建数据库的上限
**maxTables**
- 默认值:650,000
系统允许创建数据表的上限。
>系统能够创建的表受到多种因素的限制,单纯地增大该参数并不能直接增加系统能够创建的表数量。例如,由于每个表创建均需要消耗一定量的缓存空间,系统可用内存一定的情况下,创建表的总数的上限是一个固定的值。
**monitor**
- 默认值:1(激活状态)
服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括CPU、内存、硬盘、网络带宽、HTTP请求量的监控记录,记录信息存储在`LOG`库中。0表示关闭监控服务,1表示激活监控服务。
**numOfLogLines**
- 默认值:10,000,000
单个日志文件允许的最大行数(10,000,000行)。
**debugFlag**
- 默认值:131(仅输出错误和警告信息)
系统(服务端和客户端)运行日志开关:
- 131 仅输出错误和警告信息
- 135 输出错误(ERROR)、警告(WARN)、信息(INFO)
不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数:
......@@ -66,19 +122,139 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修
## 客户端配置
TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见[Shell命令行程序](#_TDengine_Shell命令行程序)。本节主要讲解taos客户端应用在配置文件taos.cfg文件中使用到的参数。
TDengine系统的前台交互客户端应用程序为taos(Windows平台上为taos.exe)。与服务端程序一样,也可以通过设置taos.cfg来配置`taos`启动和运行的配置项。启动的时候如果不指定taos加载配置文件路径,默认读取`/etc/taos/`路径下的`taos.cfg`文件。指定配置文件来启动`taos`的命令如下:
```
taos -c /home/cfg/
```
**注意:启动设置的是配置文件所在目录,而不是配置文件本身**
如果`/home/cfg/`目录下没有配置文件,程序会继续启动并打印如下告警信息:
```markdown
Welcome to the TDengine shell from linux, client version:1.6.4.0
option file:/home/cfg/taos.cfg not found, all options are set to system default
```
更多taos的使用方法请见[Shell命令行程序](#_TDengine_Shell命令行程序)。本节主要讲解taos客户端应用在配置文件taos.cfg文件中使用到的参数。
客户端配置参数说明
**masterIP**
- 默认值:127.0.0.1
客户端连接的TDengine服务器IP地址,如果不设置默认连接127.0.0.1的节点。以下两个命令等效:
```
taos
taos -h 127.0.0.1
```
其中的IP地址是从配置文件中读取的masterIP的值。
**locale**
- 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置
TDengine为存储中文、日文、韩文等非ASCII编码的宽字符,提供一种专门的字段类型`nchar`。写入`nchar`字段的数据将统一采用`UCS4-LE`格式进行编码并发送到服务器。需要注意的是,**编码正确性**是由客户端来保证的。因此,如果用户想要正常使用`nchar`字段来存储诸如中文、日文、韩文等非ASCII字符,需要正确设置客户端的编码格式。
客户端输入的字符均采用操作系统当前默认的编码格式,在Linux系统上多为`UTF-8`,部分中文系统编码则可能是`GB18030`或`GBK`等。在docker环境中默认的编码是`POSIX`。在中文版Windows系统中,编码则是`CP936`。客户端需要确保正确设置自己所使用的字符集,即客户端运行的操作系统当前的编码字符集,才能保证`nchar`中的数据正确转换为`UCS4-LE`编码格式。
在 Linux 中 locale 的命名规则为:
`<语言>_<地区>.<字符集编码>`
如:`zh_CN.UTF-8`,zh代表中文,CN代表大陆地区,UTF-8表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux系统与Mac OSX系统可以通过设置locale来确定系统的字符编码,由于Windows使用的locale中不是POSIX标准的locale格式,因此在Windows下需要采用另一个配置参数`charset`来指定字符编码。在Linux系统中也可以使用charset来指定字符编码。
客户端配置参数列表及解释
**charset**
- 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置
- masterIP:客户端默认发起请求的服务器的IP地址
- charset:指明客户端所使用的字符集,默认值为UTF-8。TDengine存储nchar类型数据时使用的是unicode存储,因此客户端需要告知服务自己所使用的字符集,也即客户端所在系统的字符集。
- locale:设置系统语言环境。Linux上客户端与服务端共享
- defaultUser:默认登录用户,默认值root
- defaultPass:默认登录密码,默认值taosdata
如果配置文件中不设置`charset`,在Linux系统中,taos在启动的时候会自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置;如果读取charset配置也失败,**则中断启动过程**。
TCP/UDP端口,以及日志的配置参数,与server的配置参数完全一样。
在Linux系统中,locale信息包含了字符编码信息,因此正确设置了Linux系统locale以后可以不用再单独设置charset。例如:
```
locale zh_CN.UTF-8
```
在Windows系统中,无法从locale获取系统当前编码。如果无法从配置文件中读取字符串编码信息,`taos`默认将字符编码设置为`CP936`。其等效于在配置文件中添加如下配置:
```
charset CP936
```
如果需要调整字符编码,请查阅当前操作系统使用的编码,并在配置文件中正确设置。
在Linux系统中,如果用户同时设置了locale和字符集编码charset,并且两者不一致,则后设置的值会覆盖先设置的值。
```
locale zh_CN.UTF-8
charset GBK
```
`charset`的有效值是`GBK`
```
charset GBK
locale zh_CN.UTF-8
```
`charset`的有效值是`UTF-8`
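下面的Python片段示意向`nchar`字段写入中文数据(其中的库名、表名均为假设的示例;关键在于客户端locale/charset与程序中字符串的实际编码一致,此处为UTF-8):

```python
# -*- coding: utf-8 -*-
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c = conn.cursor()
# 假设数据库db已存在;content列使用nchar类型存储宽字符
c.execute("create table if not exists db.msg (ts timestamp, content nchar(64))")
c.execute("insert into db.msg values (now, '涛思数据')")  # 客户端按所设charset解码后转为UCS4-LE发送
c.close()
conn.close()
```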
**sockettype**
- 默认值:UDP
客户端连接服务端的套接字的方式,可以使用`UDP``TCP`两种配置。
在客户端和服务端之间的通讯需要经过恶劣网络环境(如公共网络、互联网),或客户端与数据库服务端连接不稳定(由于MTU问题导致UDP丢包)的情况下,可以将连接的套接字类型调整为`TCP`。
>注意:客户端套接字的类型需要和服务端的套接字类型相同,否则无法连接数据库。
**compressMsgSize**
- 默认值:-1(不压缩)
客户端与服务器之间进行消息通讯过程中,对通讯的消息进行压缩的阈值,默认值为-1(不压缩)。如果要压缩消息,建议设置为64330字节,即大于64330字节的消息体才进行压缩。在配置文件中增加如下配置项即可:
```
compressMsgSize 64330
```
如果配置项设置为0,`compressMsgSize 0`表示对所有的消息均进行压缩。
**timezone**
- 默认值:从系统中动态获取当前的时区设置
客户端运行系统所在的时区。为应对多时区的数据写入和查询问题,TDengine采用Unix时间戳([Unix Timestamp](https://en.wikipedia.org/wiki/Unix_time))来记录和存储时间戳。Unix时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix时间戳是在客户端完成转换和记录的。为了确保客户端将以其他形式表示的时间正确地转换为Unix时间戳,需要设置正确的时区。
在Linux系统中,客户端会自动读取系统设置的时区信息。用户也可以采用多种方式在配置文件设置时区。例如:
```
timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```
均是合法的设置东八区时区的格式。
时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词`now`的解析)产生影响。例如:
```
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
```
在东八区,SQL语句等效于
```
SELECT count(*) FROM table_name WHERE TS<1554955268000;
```
在UTC时区,SQL语句等效于
```
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
为了避免使用字符串时间格式带来的不确定性,也可以直接使用Unix时间戳。此外,还可以在SQL语句中使用带有时区的时间戳字符串,例如:RFC3339格式的时间戳字符串,`2013-04-12T15:52:01.123+08:00`或者ISO-8601格式时间戳字符串`2013-04-12T15:52:01.123+0800`。上述两个字符串转化为Unix时间戳不受系统所在时区的影响。
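下面的Python片段复现上文两条等效语句中的数值,说明同一个时间戳字符串在不同时区的客户端会被换算成不同的Unix时间戳(仅为示意):

```python
from datetime import datetime, timezone, timedelta

ts = datetime(2019, 4, 11, 12, 1, 8)

# 客户端位于东八区时,'2019-04-11 12:01:08' 对应的毫秒时间戳
utc8 = int(ts.replace(tzinfo=timezone(timedelta(hours=8))).timestamp() * 1000)
# 客户端位于UTC时区时,同一字符串对应的毫秒时间戳
utc0 = int(ts.replace(tzinfo=timezone.utc).timestamp() * 1000)

print(utc8)  # 1554955268000
print(utc0)  # 1554984068000
```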
**defaultUser**
- 默认值:root
登录用户名,客户端登录的时候,如果不指定用户名,则自动使用该用户名登录。默认情况下,以下的两个命令等效
```
taos
taos -u root
```
用户名为从配置中读取的`defaultUser`配置项。如果更改`defaultUser abc`,则以下两个命令等效:
```
taos
taos -u abc
```
**defaultPass**
- 默认值:taosdata
登录密码,客户端登录的时候,如果不指定密码,则自动使用该密码登录。默认情况下,以下的两个命令等效
```
taos
taos -ptaosdata
```
启动taos时,你也可以从命令行指定IP地址、端口号,用户名和密码,否则就从taos.cfg读取
TCP/UDP端口,以及日志的配置参数,与server的配置参数完全一样。使用命令`taos -?` 可查看`taos`允许的可选项
## 用户管理
......@@ -191,6 +367,6 @@ KILL STREAM <stream-id>
## 系统监控
TDengine启动后,会自动创建一个监测数据库SYS,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在SYS库里。系统管理员可以从CLI直接查看这个数据库,也可以在WEB通过图形化界面查看这些监测信息
TDengine启动后,会自动创建一个监测数据库`LOG`,并自动将服务器的CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度、慢查询等信息定时写入该数据库。TDengine还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息记录下来存放在`LOG`库里。系统管理员可以通过客户端程序查看记录库中的运行负载信息,(在企业版中)还可以通过浏览器查看数据的图表可视化结果。
这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。
\ No newline at end of file
这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项`monitor`将其关闭或打开。
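下面的示意代码演示用客户端程序查看`LOG`库中的监测信息(不同版本中该库内的表名可能不同,因此这里先用`show tables`列出,再针对具体表查询):

```python
import taos

conn = taos.connect(host="127.0.0.1", user="root", password="taosdata", config="/etc/taos")
c = conn.cursor()
c.execute("use log")       # 切换到启动时自动创建的监测数据库
c.execute("show tables")   # 先列出库中的监测表,再针对具体表进行查询
for row in c.fetchall():
    print(row)
c.close()
conn.close()
```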
......@@ -100,6 +100,9 @@
# default system charset
# charset UTF-8
# system time zone
# timezone Asia/Shanghai (CST, +0800)
# enable/disable commit log
# clog 1
......
......@@ -7,6 +7,7 @@
compile_dir=$1
output_dir=$2
tdengine_ver=$3
armver=$4
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/../..)"
......@@ -63,7 +64,16 @@ debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
#get taos version, then set deb name
debname="TDengine-"${tdengine_ver}".deb"
if [ -z "$armver" ]; then
debname="TDengine-"${tdengine_ver}".deb"
elif [ "$armver" == "arm64" ]; then
debname="TDengine-"${tdengine_ver}"-arm64.deb"
elif [ "$armver" == "arm32" ]; then
debname="TDengine-"${tdengine_ver}"-arm32.deb"
else
echo "input parameter error!!!"
return
fi
# make deb package
dpkg -b ${pkg_dir} $debname
......
......@@ -109,6 +109,7 @@ build_time=$(date +"%F %R")
echo "char version[64] = \"${version}\";" > ${versioninfo}
echo "char compatible_version[64] = \"${compatible_version}\";" >> ${versioninfo}
echo "char gitinfo[128] = \"$(git rev-parse --verify HEAD)\";" >> ${versioninfo}
echo "char gitinfoOfInternal[128] = \"\";" >> ${versioninfo}
echo "char buildinfo[512] = \"Built by ${USER} at ${build_time}\";" >> ${versioninfo}
# 2. cmake executable file
......@@ -149,7 +150,7 @@ if [ -d ${output_dir} ]; then
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/deb
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version}
${csudo} ./makedeb.sh ${compile_dir} ${output_dir} ${version} ${armver}
echo "do rpm package for the centos system"
output_dir="${top_dir}/rpms"
......@@ -158,7 +159,7 @@ if [ -d ${output_dir} ]; then
fi
${csudo} mkdir -p ${output_dir}
cd ${script_dir}/rpm
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version}
${csudo} ./makerpm.sh ${compile_dir} ${output_dir} ${version} ${armver}
echo "do tar.gz package for all systems"
cd ${script_dir}/tools
......
......@@ -9,6 +9,7 @@
compile_dir=$1
output_dir=$2
tdengine_ver=$3
armver=$4
script_dir="$(dirname $(readlink -f $0))"
top_dir="$(readlink -m ${script_dir}/../..)"
......@@ -58,5 +59,11 @@ ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_di
#${csudo} cp -rf RPMS/* ${output_dir}
cp_rpm_package ${pkg_dir}/RPMS
if [ "$armver" == "arm64" ]; then
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/TDengine-${tdengine_ver}-arm64.rpm
elif [ "$armver" == "arm32" ]; then
mv ${output_dir}/TDengine-${tdengine_ver}.rpm ${output_dir}/TDengine-${tdengine_ver}-arm32.rpm
fi
cd ..
${csudo} rm -rf ${pkg_dir}
......@@ -313,9 +313,9 @@ vercomp () {
function is_version_compatible() {
curr_version=$(${bin_dir}/taosd -V | head -1 | cut -d ' ' -f 2)
curr_version=$(${bin_dir}/taosd -V | head -1 | cut -d ' ' -f 3)
min_compatible_version=$(${script_dir}/bin/taosd -V | head -1 | cut -d ' ' -f 4)
min_compatible_version=$(${script_dir}/bin/taosd -V | head -1 | cut -d ' ' -f 5)
vercomp $curr_version $min_compatible_version
case $? in
......
......@@ -17,7 +17,6 @@ top_dir="$(readlink -m ${script_dir}/../..)"
build_dir="${compile_dir}/build"
code_dir="${top_dir}/src"
release_dir="${top_dir}/release"
community_dir="${script_dir}/../../../community/src"
#package_name='linux'
install_dir="${release_dir}/TDengine-client-${version}"
......@@ -25,7 +24,7 @@ install_dir="${release_dir}/TDengine-client-${version}"
# Directories and files.
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${script_dir}/remove_client.sh"
lib_files="${build_dir}/lib/libtaos.so.${version}"
header_files="${community_dir}/inc/taos.h ${community_dir}/inc/taoserror.h"
header_files="${code_dir}/inc/taos.h ${code_dir}/inc/taoserror.h"
cfg_dir="${top_dir}/packaging/cfg"
install_files="${script_dir}/install_client.sh"
......@@ -55,7 +54,7 @@ mkdir -p ${install_dir}/driver
cp ${lib_files} ${install_dir}/driver
# Copy connector
connector_dir="${community_dir}/connector"
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp -r ${connector_dir}/grafana ${install_dir}/connector/
......
......@@ -94,7 +94,7 @@ typedef struct SRetrieveSupport {
tOrderDescriptor *pOrderDescriptor;
tColModel * pFinalColModel; // colModel for final result
SSubqueryState * pState;
int32_t vnodeIdx; // index of current vnode in vnode list
int32_t subqueryIndex; // index of the current subquery in the subquery list
SSqlObj * pParentSqlObj;
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
uint32_t numOfRetry; // record the number of retry times
......
......@@ -23,14 +23,14 @@ extern "C" {
/*
* @date 2018/09/30
*/
#include <limits.h>
#include <stdio.h>
#include "os.h"
#include "textbuffer.h"
#include "tscSecondaryMerge.h"
#include "tsclient.h"
#include "tsdb.h"
#include "tscSecondaryMerge.h"
#define UTIL_METER_IS_METRIC(metaInfo) (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC))
#define UTIL_METER_IS_METRIC(metaInfo) \
(((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC))
#define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_METRIC(metaInfo)))
#define UTIL_METER_IS_CREATE_FROM_METRIC(metaInfo) \
(((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_MTABLE))
......@@ -52,7 +52,6 @@ typedef struct SParsedDataColInfo {
typedef struct SJoinSubquerySupporter {
SSubqueryState* pState;
SSqlObj* pObj; // parent SqlObj
bool hasMore; // has data from vnode to fetch
int32_t subqueryIndex; // index of sub query
int64_t interval; // interval time
SLimitVal limit; // limit info
......@@ -62,28 +61,25 @@ typedef struct SJoinSubquerySupporter {
SFieldInfo fieldsInfo;
STagCond tagCond;
SSqlGroupbyExpr groupbyExpr;
struct STSBuf* pTSBuf;
FILE* f;
char path[PATH_MAX];
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path
} SJoinSubquerySupporter;
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
STableDataBlocks* tscCreateDataBlock(int32_t size);
void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes, uint32_t offset);
SDataBlockList* tscCreateBlockArrayList();
void* tscDestroyBlockArrayList(SDataBlockList* pList);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
void tscFreeUnusedDataBlocks(SDataBlockList* pList);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pDataList);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock);
STableDataBlocks* tscCreateDataBlock(size_t initialBufSize, int32_t rowSize, int32_t startOffset, const char* name);
void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks);
SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes,
uint32_t offset);
SDataBlockList* tscCreateBlockArrayList();
void* tscDestroyBlockArrayList(SDataBlockList* pList);
int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock);
void tscFreeUnusedDataBlocks(SDataBlockList* pList);
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pDataList);
STableDataBlocks* tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size,
int32_t startOffset, int32_t rowSize, char* tableId);
STableDataBlocks* tscCreateDataBlockEx(size_t size, int32_t rowSize, int32_t startOffset, char* name);
SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx);
int32_t startOffset, int32_t rowSize, const char* tableId);
SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx);
SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx);
/**
......@@ -97,6 +93,8 @@ SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx);
bool tscIsPointInterpQuery(SSqlCmd* pCmd);
bool tscIsTWAQuery(SSqlCmd* pCmd);
bool tscProjectionQueryOnMetric(SSqlCmd* pCmd);
bool tscProjectionQueryOnTable(SSqlCmd* pCmd);
bool tscIsTwoStageMergeMetricQuery(SSqlCmd* pCmd);
bool tscQueryOnMetric(SSqlCmd* pCmd);
bool tscQueryMetricTags(SSqlCmd* pCmd);
......@@ -107,16 +105,8 @@ void tscAddSpecialColumnForSelect(SSqlCmd* pCmd, int32_t outputColIndex, int16_t
void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex);
//TODO refactor, remove
void SStringFree(SString* str);
void SStringCopy(SString* pDest, const SString* pSrc);
SString SStringCreate(const char* str);
int32_t SStringAlloc(SString* pStr, int32_t size);
int32_t SStringEnsureRemain(SString* pStr, int32_t size);
int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex);
void tscClearInterpInfo(SSqlCmd* pCmd);
void tscClearInterpInfo(SSqlCmd* pCmd);
bool tscIsInsertOrImportData(char* sqlstr);
......@@ -136,9 +126,9 @@ void tscFieldInfoCopy(SFieldInfo* src, SFieldInfo* dst, const int32_t* indexList
void tscFieldInfoCopyAll(SFieldInfo* src, SFieldInfo* dst);
TAOS_FIELD* tscFieldInfoGetField(SSqlCmd* pCmd, int32_t index);
int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index);
int32_t tscGetResRowLength(SSqlCmd* pCmd);
void tscClearFieldInfo(SFieldInfo* pFieldInfo);
int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index);
int32_t tscGetResRowLength(SSqlCmd* pCmd);
void tscClearFieldInfo(SFieldInfo* pFieldInfo);
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex);
......@@ -150,15 +140,15 @@ SSqlExpr* tscSqlExprUpdate(SSqlCmd* pCmd, int32_t index, int16_t functionId, int
int16_t size);
SSqlExpr* tscSqlExprGet(SSqlCmd* pCmd, int32_t index);
void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t uid);
void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t uid);
SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* colIndex);
void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src);
void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src);
void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src);
void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src);
void tscColumnBaseInfoCopy(SColumnBaseInfo* dst, const SColumnBaseInfo* src, int16_t tableIndex);
void tscColumnBaseInfoCopy(SColumnBaseInfo* dst, const SColumnBaseInfo* src, int16_t tableIndex);
SColumnBase* tscColumnBaseInfoGet(SColumnBaseInfo* pColumnBaseInfo, int32_t index);
void tscColumnBaseInfoUpdateTableIndex(SColumnBaseInfo* pColList, int16_t tableIndex);
void tscColumnBaseInfoUpdateTableIndex(SColumnBaseInfo* pColList, int16_t tableIndex);
void tscColumnBaseInfoReserve(SColumnBaseInfo* pColumnBaseInfo, int32_t size);
void tscColumnBaseInfoDestroy(SColumnBaseInfo* pColumnBaseInfo);
......@@ -171,11 +161,10 @@ bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId);
// get starter position of metric query condition (query on tags) in SSqlCmd.payload
SCond* tsGetMetricQueryCondPos(STagCond* pCond, uint64_t tableIndex);
void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str);
void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str);
void tscTagCondCopy(STagCond* dest, const STagCond* src);
void tscTagCondRelease(STagCond* pCond);
void tscTagCondSetQueryCondType(STagCond* pCond, int16_t type);
void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd);
......@@ -184,19 +173,19 @@ bool tscShouldFreeHeatBeat(SSqlObj* pHb);
void tscCleanSqlCmd(SSqlCmd* pCmd);
bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql);
void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache);
void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache);
SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index);
SMeterMetaInfo* tscGetMeterMetaInfoByUid(SSqlCmd* pCmd, uint64_t uid, int32_t* index);
void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache);
void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache);
SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta,
int16_t numOfTags, int16_t* tags);
SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SSqlCmd* pCmd);
void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* keyStr, uint64_t uid);
int tscGetMetricMeta(SSqlObj* pSql);
int tscGetMeterMeta(SSqlObj* pSql, char* meterId, int32_t tableIndex);
int tscGetMeterMetaEx(SSqlObj* pSql, char* meterId, bool createIfNotExists);
int tscGetMetricMeta(SSqlObj* pSql);
int tscGetMeterMeta(SSqlObj* pSql, char* meterId, int32_t tableIndex);
int tscGetMeterMetaEx(SSqlObj* pSql, char* meterId, bool createIfNotExists);
void tscResetForNextRetrieve(SSqlRes* pRes);
......@@ -220,18 +209,20 @@ void tscDoQuery(SSqlObj* pSql);
* @param pPrevSql
* @return
*/
SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex, void (*fp)(), void* param,
SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex);
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex);
void doAddGroupColumnForSubquery(SSqlCmd* pCmd, int32_t tagIndex);
int16_t tscGetJoinTagColIndexByUid(SSqlCmd* pCmd, uint64_t uid);
int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid);
TAOS* taos_connect_a(char* ip, char* user, char* pass, char* db, uint16_t port, void (*fp)(void*, TAOS_RES*, int),
void* param, void** taos);
void sortRemoveDuplicates(STableDataBlocks* dataBuf);
void tscPrintSelectClause(SSqlCmd* pCmd);
#ifdef __cplusplus
}
#endif
......
......@@ -107,22 +107,25 @@ enum _sql_cmd {
struct SSqlInfo;
typedef struct SSqlGroupbyExpr {
int16_t tableIndex;
int16_t tableIndex;
int16_t numOfGroupCols;
SColIndexEx columnInfo[TSDB_MAX_TAGS]; // group by columns information
int16_t orderIndex; // order by column index
int16_t orderType; // order by type: asc/desc
int16_t orderIndex; // order by column index
int16_t orderType; // order by type: asc/desc
} SSqlGroupbyExpr;
typedef struct SMeterMetaInfo {
SMeterMeta * pMeterMeta; // metermeta
SMetricMeta *pMetricMeta; // metricmeta
char name[TSDB_METER_ID_LEN + 1];
int16_t numOfTags; // total required tags in query, including groupby tags
int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection
SMeterMeta * pMeterMeta; // metermeta
SMetricMeta *pMetricMeta; // metricmeta
/*
* 1. keep the vnode index during the multi-vnode super table projection query
* 2. keep the vnode index for multi-vnode insertion
*/
int32_t vnodeIndex;
char name[TSDB_METER_ID_LEN + 1]; // table(super table) name
int16_t numOfTags; // total required tags in query, including groupby tags
int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection
} SMeterMetaInfo;
/* the structure for sql function in select clause */
......@@ -188,7 +191,7 @@ typedef struct SString {
typedef struct SCond {
uint64_t uid;
SString cond;
char * cond;
} SCond;
typedef struct SJoinNode {
......@@ -228,17 +231,22 @@ typedef struct SParamInfo {
typedef struct STableDataBlocks {
char meterId[TSDB_METER_ID_LEN];
int8_t tsSource;
bool ordered;
int64_t vgid;
int64_t prevTS;
int8_t tsSource; // where does the UNIX timestamp come from, server or client
bool ordered; // if current rows are ordered or not
int64_t vgid; // virtual group id
int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending
int32_t numOfMeters; // number of tables in current submit block
int32_t numOfMeters;
int32_t rowSize;
int32_t rowSize; // row size for current table
uint32_t nAllocSize;
uint32_t size;
/*
* the metermeta for current table, the metermeta will be used during submit stage, keep a ref
* to avoid it to be removed from cache
*/
SMeterMeta* pMeterMeta;
union {
char *filename;
char *pData;
......@@ -252,8 +260,8 @@ typedef struct STableDataBlocks {
typedef struct SDataBlockList {
int32_t idx;
int32_t nSize;
int32_t nAlloc;
uint32_t nSize;
uint32_t nAlloc;
char * userParam; /* user assigned parameters for async query */
void * udfp; /* user defined function pointer, used in async model */
STableDataBlocks **pData;
......@@ -262,16 +270,16 @@ typedef struct SDataBlockList {
typedef struct {
SOrderVal order;
int command;
int count;// TODO refactor
int count; // TODO refactor
union {
bool existsCheck; // check if the table exists
int8_t showType; // show command type
bool existsCheck; // check if the table exists
int8_t showType; // show command type
};
int8_t isInsertFromFile; // load data from file or not
bool import; // import/insert type
char msgType;
bool import; // import/insert type
uint8_t msgType;
uint16_t type; // query type
char intervalTimeUnit;
int64_t etime, stime;
......@@ -296,7 +304,6 @@ typedef struct {
SLimitVal slimit;
int64_t globalLimit;
STagCond tagCond;
int16_t vnodeIdx; // vnode index in pMetricMeta for metric query
int16_t interpoType; // interpolate type
int16_t numOfTables;
......@@ -366,26 +373,24 @@ typedef struct _sql_obj {
STscObj *pTscObj;
void (*fp)();
void (*fetchFp)();
void * param;
uint32_t ip;
short vnode;
int64_t stime;
uint32_t queryId;
void * thandle;
void * pStream;
char * sqlstr;
char retry;
char maxRetry;
char index;
char freed : 4;
char listed : 4;
tsem_t rspSem;
tsem_t emptyRspSem;
SSqlCmd cmd;
SSqlRes res;
char numOfSubs;
void * param;
uint32_t ip;
short vnode;
int64_t stime;
uint32_t queryId;
void * thandle;
void * pStream;
char * sqlstr;
char retry;
char maxRetry;
uint8_t index;
char freed : 4;
char listed : 4;
tsem_t rspSem;
tsem_t emptyRspSem;
SSqlCmd cmd;
SSqlRes res;
uint8_t numOfSubs;
struct _sql_obj **pSubs;
struct _sql_obj * prev, *next;
} SSqlObj;
......@@ -477,6 +482,8 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql);
void tscKillMetricQuery(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(STscObj *pObj);
bool tscHasReachLimitation(SSqlObj* pSql);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
// transfer SSqlInfo to SqlCmd struct
......
......@@ -239,7 +239,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
jbyteArray jsql, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
jniError("jobj:%p, connection is already closed", jobj);
return JNI_CONNECTION_NULL;
}
......@@ -252,6 +252,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
char *dst = (char *)calloc(1, sizeof(char) * (len + 1));
if (dst == NULL) {
jniError("jobj:%p, conn:%p, can not alloc memory", jobj, tscon);
return JNI_OUT_OF_MEMORY;
}
......@@ -260,9 +261,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
//todo handle error
}
jniTrace("jobj:%p, conn:%p, sql:%s", jobj, tscon, dst);
int code = taos_query(tscon, dst);
if (code != 0) {
jniError("jobj:%p, conn:%p, code:%d, msg:%s, sql:%s", jobj, tscon, code, taos_errstr(tscon), dst);
jniError("jobj:%p, conn:%p, code:%d, msg:%s", jobj, tscon, code, taos_errstr(tscon));
free(dst);
return JNI_TDENGINE_ERROR;
} else {
......@@ -271,9 +274,9 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J
if (pSql->cmd.command == TSDB_SQL_INSERT) {
affectRows = taos_affected_rows(tscon);
jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d, sql:%s", jobj, tscon, code, affectRows, dst);
jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d", jobj, tscon, code, affectRows);
} else {
jniTrace("jobj:%p, conn:%p, code:%d, sql:%s", jobj, tscon, code, dst);
jniTrace("jobj:%p, conn:%p, code:%d", jobj, tscon, code);
}
free(dst);
......@@ -307,7 +310,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(
if (tscIsUpdateQuery(tscon)) {
ret = 0; // for update query, no result pointer
jniTrace("jobj:%p, conn:%p, no result", jobj, tscon);
jniTrace("jobj:%p, conn:%p, no resultset", jobj, tscon);
} else {
ret = (jlong) taos_use_result(tscon);
jniTrace("jobj:%p, conn:%p, get resultset:%p", jobj, tscon, (void *) ret);
......@@ -496,7 +499,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm
jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
jniError("jobj:%p, connection is already closed", jobj);
return JNI_CONNECTION_NULL;
} else {
jniTrace("jobj:%p, conn:%p, close connection success", jobj, tscon);
......@@ -675,4 +678,4 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(JNIEnv *env, jobject jobj) {
return (*env)->NewStringUTF(env, (const char *)tsCharset);
}
\ No newline at end of file
}
......@@ -108,11 +108,11 @@ static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols,
return NULL;
}
int32_t i = 0;
size_t nodeSize = sizeof(tSQLSyntaxNode);
tSQLSyntaxNode *pNode = NULL;
if (pToken->type == TK_ID || pToken->type == TK_TBNAME) {
int32_t i = 0;
if (pToken->type == TK_ID) {
do {
size_t len = strlen(pSchema[i].name);
......@@ -268,8 +268,8 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha
}
// get the operator of expr
uint8_t optr = getBinaryExprOptr(&t0);
if (optr <= 0) {
uint8_t optr = getBinaryExprOptr(&t0);
if (optr == 0) {
pError("not support binary operator:%d", t0.type);
tSQLSyntaxNodeDestroy(pLeft, NULL);
return NULL;
......@@ -323,9 +323,10 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha
pn->colId = -1;
return pn;
} else {
uint8_t localOptr = getBinaryExprOptr(&t0);
if (localOptr <= 0) {
uint8_t localOptr = getBinaryExprOptr(&t0);
if (localOptr == 0) {
pError("not support binary operator:%d", t0.type);
free(pBinExpr);
return NULL;
}
......@@ -418,16 +419,17 @@ void tSQLBinaryExprToString(tSQLBinaryExpr *pExpr, char *dst, int32_t *len) {
if (pExpr == NULL) {
*dst = 0;
*len = 0;
return;
}
int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType);
int32_t lhs = tSQLBinaryExprToStringImpl(pExpr->pLeft, dst, pExpr->pLeft->nodeType);
dst += lhs;
*len = lhs;
char *start = tSQLOptrToString(pExpr->nSQLBinaryOptr, dst);
char *start = tSQLOptrToString(pExpr->nSQLBinaryOptr, dst);
*len += (start - dst);
*len += tSQLBinaryExprToStringImpl(pExpr->pRight, start, pExpr->pRight->nodeType);
*len += tSQLBinaryExprToStringImpl(pExpr->pRight, start, pExpr->pRight->nodeType);
}
static void UNUSED_FUNC destroySyntaxTree(tSQLSyntaxNode *pNode) { tSQLSyntaxNodeDestroy(pNode, NULL); }
......@@ -641,16 +643,15 @@ int32_t intersect(tQueryResultset *pLeft, tQueryResultset *pRight, tQueryResults
}
/*
*
* traverse the result and apply the function to each item to check if the item is qualified or not
*/
void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, bool (*fp)(tSkipListNode *, void *),
tQueryResultset * pResult) {
static void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, __result_filter_fn_t fp, tQueryResultset *pResult) {
assert(pExpr->pLeft->nodeType == TSQL_NODE_COL && pExpr->pRight->nodeType == TSQL_NODE_VALUE);
// brutal force search
// brutal force scan the result list and check for each item in the list
int64_t num = pResult->num;
for (int32_t i = 0, j = 0; i < pResult->num; ++i) {
if (fp == NULL || (fp != NULL && fp(pResult->pRes[i], pExpr->info) == true)) {
if (fp == NULL || (fp(pResult->pRes[i], pExpr->info) == true)) {
pResult->pRes[j++] = pResult->pRes[i];
} else {
num--;
......@@ -933,4 +934,4 @@ void tQueryResultClean(tQueryResultset *pRes) {
tfree(pRes->pRes);
pRes->num = 0;
}
\ No newline at end of file
}
......@@ -51,7 +51,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, void (*fp)(void *, TAOS_RES *,
}
int32_t sqlLen = strlen(sqlstr);
if (sqlLen > TSDB_MAX_SQL_LEN) {
if (sqlLen > tsMaxSQLStringLen) {
tscError("sql string too long");
tscQueueAsyncError(fp, param);
return;
......@@ -121,7 +121,8 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf
// sequentially retrieve data from remain vnodes first, query vnode specified by vnodeIdx
if (numOfRows == 0 && tscProjectionQueryOnMetric(pCmd)) {
// vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx
assert(pCmd->vnodeIdx >= 0);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pMeterMetaInfo->vnodeIndex >= 0);
/* reach the maximum number of output rows, abort */
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
......@@ -133,8 +134,8 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf
pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal;
pCmd->limit.offset = pRes->offset;
if ((++(pCmd->vnodeIdx)) < tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) {
tscTrace("%p retrieve data from next vnode:%d", pSql, pCmd->vnodeIdx);
if ((++(pMeterMetaInfo->vnodeIndex)) < pMeterMetaInfo->pMetricMeta->numOfVnodes) {
tscTrace("%p retrieve data from next vnode:%d", pSql, pMeterMetaInfo->vnodeIndex);
pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first.
......@@ -157,7 +158,6 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
SSqlObj *pSql = (SSqlObj *)tres;
if (pSql == NULL) { // error
tscError("sql object is NULL");
tscQueueAsyncError(pSql->fetchFp, param);
return;
}
......@@ -272,7 +272,8 @@ void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) {
/*
* vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx till all vnode have been retrieved
*/
assert(pCmd->vnodeIdx >= 1);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pMeterMetaInfo->vnodeIndex >= 0);
/* reach the maximum number of output rows, abort */
if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) {
......@@ -283,7 +284,7 @@ void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) {
/* update the limit value according to current retrieval results */
pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal;
if ((++pCmd->vnodeIdx) <= tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) {
if ((++pMeterMetaInfo->vnodeIndex) <= pMeterMetaInfo->pMetricMeta->numOfVnodes) {
pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first.
tscResetForNextRetrieve(pRes);
......@@ -404,9 +405,12 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows)
int32_t code = TSDB_CODE_SUCCESS;
assert(!pCmd->isInsertFromFile && pSql->signature == pSql);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pCmd->numOfTables == 1);
SDataBlockList *pDataBlocks = pCmd->pDataBlocks;
if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) {
if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) {
// restore user defined fp
pSql->fp = pSql->fetchFp;
tscTrace("%p Async insertion completed, destroy data block list", pSql);
......@@ -418,17 +422,17 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows)
(*pSql->fp)(pSql->param, tres, numOfRows);
} else {
do {
code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pCmd->vnodeIdx++]);
code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pMeterMetaInfo->vnodeIndex++]);
if (code != TSDB_CODE_SUCCESS) {
tscTrace("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%d",
pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize, code);
pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize, code);
}
} while (code != TSDB_CODE_SUCCESS && pCmd->vnodeIdx < pDataBlocks->nSize);
} while (code != TSDB_CODE_SUCCESS && pMeterMetaInfo->vnodeIndex < pDataBlocks->nSize);
// build submit msg may fail
if (code == TSDB_CODE_SUCCESS) {
tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize);
tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize);
tscProcessSql(pSql);
}
}
......@@ -484,11 +488,11 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) {
// check if it is a sub-query of metric query first, if true, enter another routine
if ((pSql->cmd.type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) {
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pCmd->vnodeIdx >= 0 && pSql->param != NULL);
assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pMeterMetaInfo->vnodeIndex >= 0 && pSql->param != NULL);
SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param;
SSqlObj * pParObj = trs->pParentSqlObj;
assert(pParObj->signature == pParObj && trs->vnodeIdx == pCmd->vnodeIdx &&
assert(pParObj->signature == pParObj && trs->subqueryIndex == pMeterMetaInfo->vnodeIndex &&
pMeterMetaInfo->pMeterMeta->numOfTags != 0);
tscTrace("%p get metricMeta during metric query successfully", pSql);
......
This diff has been collapsed.
This diff has been collapsed.
......@@ -64,7 +64,7 @@ static int32_t getToStringLength(const char *pData, int32_t length, int32_t type
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
len = sprintf(buf, "%lld", *(int64_t *)pData);
len = sprintf(buf, "%" PRId64 "", *(int64_t *)pData);
break;
case TSDB_DATA_TYPE_BOOL:
len = MAX_BOOL_TYPE_LENGTH;
......@@ -228,7 +228,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
sprintf(target, "%d", *(int32_t *)pTagValue);
break;
case TSDB_DATA_TYPE_BIGINT:
sprintf(target, "%lld", *(int64_t *)pTagValue);
sprintf(target, "%" PRId64 "", *(int64_t *)pTagValue);
break;
case TSDB_DATA_TYPE_BOOL: {
char *val = (*((int8_t *)pTagValue) == 0) ? "false" : "true";
......
......@@ -18,9 +18,6 @@
#define _XOPEN_SOURCE
#pragma GCC diagnostic ignored "-Woverflow"
#pragma GCC diagnostic ignored "-Wunused-variable"
#include "os.h"
#include "ihash.h"
#include "tscSecondaryMerge.h"
......@@ -74,8 +71,8 @@ static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) {
}
int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
char * token;
int tokenlen;
//char * token; //fang not used
//int tokenlen; //fang not used
int32_t index = 0;
SSQLToken sToken;
int64_t interval;
......@@ -392,9 +389,9 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
}
int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error,
int16_t timePrec) {
int16_t timePrec, int32_t *code, char* tmpTokenBuf) {
int32_t index = 0;
bool isPrevOptr;
//bool isPrevOptr; //fang, never used
SSQLToken sToken = {0};
char * payload = pDataBlocks->pData + pDataBlocks->size;
......@@ -418,6 +415,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
}
strcpy(error, "client out of memory");
*code = TSDB_CODE_CLI_OUT_OF_MEMORY;
return -1;
}
......@@ -425,23 +423,42 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
(sToken.type != TK_FLOAT) && (sToken.type != TK_BOOL) && (sToken.type != TK_NULL)) ||
(sToken.n == 0) || (sToken.type == TK_RP)) {
tscInvalidSQLErrMsg(error, "invalid data or symbol", sToken.z);
*code = TSDB_CODE_INVALID_SQL;
return -1;
}
// Remove quotation marks
if (TK_STRING == sToken.type) {
sToken.z++;
sToken.n -= 2;
// delete escape character: \\, \', \"
char delim = sToken.z[0];
int32_t cnt = 0;
int32_t j = 0;
for (int32_t k = 1; k < sToken.n - 1; ++k) {
if (sToken.z[k] == delim || sToken.z[k] == '\\') {
if (sToken.z[k + 1] == delim) {
cnt++;
continue;
}
}
tmpTokenBuf[j] = sToken.z[k];
j++;
}
tmpTokenBuf[j] = 0;
sToken.z = tmpTokenBuf;
sToken.n -= 2 + cnt;
}
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, error, str, isPrimaryKey, timePrec);
if (ret != TSDB_CODE_SUCCESS) {
*code = TSDB_CODE_INVALID_SQL;
return -1; // NOTE: here 0 mean error!
}
if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
tscInvalidSQLErrMsg(error, "client time/server time can not be mixed up", sToken.z);
*code = TSDB_CODE_INVALID_TIME_STAMP;
return -1;
}
}
......@@ -476,7 +493,7 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
}
int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMeta, int maxRows,
SParsedDataColInfo *spd, char *error) {
SParsedDataColInfo *spd, char *error, int32_t *code, char* tmpTokenBuf) {
int32_t index = 0;
SSQLToken sToken;
......@@ -487,6 +504,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
if (spd->hasVal[0] == false) {
strcpy(error, "primary timestamp column can not be null");
*code = TSDB_CODE_INVALID_SQL;
return -1;
}
......@@ -498,14 +516,16 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
*str += index;
if (numOfRows >= maxRows || pDataBlock->size + pMeterMeta->rowSize >= pDataBlock->nAllocSize) {
int32_t tSize = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize);
if (0 == tSize) {
if (0 == tSize) { //TODO pass the correct error code to client
strcpy(error, "client out of memory");
*code = TSDB_CODE_CLI_OUT_OF_MEMORY;
return -1;
}
maxRows += tSize;
}
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision);
int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision, code, tmpTokenBuf);
if (len <= 0) { // error message has been set in tsParseOneRowData
return -1;
}
......@@ -517,6 +537,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
*str += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
tscInvalidSQLErrMsg(error, ") expected", *str);
*code = TSDB_CODE_INVALID_SQL;
return -1;
}
......@@ -525,6 +546,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe
if (numOfRows <= 0) {
strcpy(error, "no any data points");
*code = TSDB_CODE_INVALID_SQL;
return -1;
} else {
return numOfRows;
......@@ -636,10 +658,17 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableHashList, char
if (0 == maxNumOfRows) {
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
int32_t numOfRows = tsParseValues(str, dataBuf, pMeterMeta, maxNumOfRows, spd, pCmd->payload);
int32_t code = TSDB_CODE_INVALID_SQL;
char* tmpTokenBuf = calloc(1, 4096); // scratch buffer for removing escape characters: \\, \', \"
if (NULL == tmpTokenBuf) {
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}
int32_t numOfRows = tsParseValues(str, dataBuf, pMeterMeta, maxNumOfRows, spd, pCmd->payload, &code, tmpTokenBuf);
free(tmpTokenBuf);
if (numOfRows <= 0) {
return TSDB_CODE_INVALID_SQL;
return code;
}
for (uint32_t i = 0; i < dataBuf->numOfParams; ++i) {
......@@ -953,7 +982,7 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
strcpy(fname, full_path.we_wordv[0]);
wordfree(&full_path);
STableDataBlocks *pDataBlock = tscCreateDataBlockEx(PATH_MAX, pMeterMetaInfo->pMeterMeta->rowSize,
STableDataBlocks *pDataBlock = tscCreateDataBlock(PATH_MAX, pMeterMetaInfo->pMeterMeta->rowSize,
sizeof(SShellSubmitBlock), pMeterMetaInfo->name);
tscAppendDataBlock(pCmd->pDataBlocks, pDataBlock);
......@@ -1060,8 +1089,10 @@ int doParserInsertSql(SSqlObj *pSql, char *str) {
goto _error_clean;
}
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
// set the index of the next vnode to receive data in the data block list
pCmd->vnodeIdx = 1;
pMeterMetaInfo->vnodeIndex = 1;
} else {
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
}
......@@ -1173,7 +1204,7 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock
return TSDB_CODE_SUCCESS;
}
static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) {
static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp, char *tmpTokenBuf) {
size_t readLen = 0;
char * line = NULL;
size_t n = 0;
......@@ -1188,8 +1219,8 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) {
int32_t rowSize = pMeterMeta->rowSize;
pCmd->pDataBlocks = tscCreateBlockArrayList();
STableDataBlocks *pTableDataBlock =
tscCreateDataBlockEx(TSDB_PAYLOAD_SIZE, pMeterMeta->rowSize, sizeof(SShellSubmitBlock), pMeterMetaInfo->name);
STableDataBlocks *pTableDataBlock = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, pMeterMeta->rowSize,
sizeof(SShellSubmitBlock), pMeterMetaInfo->name);
tscAppendDataBlock(pCmd->pDataBlocks, pTableDataBlock);
......@@ -1205,7 +1236,7 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) {
while ((readLen = getline(&line, &n, fp)) != -1) {
// line[--readLen] = '\0';
if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) line[--readLen] = 0;
if (readLen <= 0) continue;
if (readLen == 0) continue; // was <= 0; readLen cannot be negative here
char *lineptr = line;
strtolower(line, line);
......@@ -1216,10 +1247,10 @@ static int tscInsertDataFromFile(SSqlObj *pSql, FILE *fp) {
maxRows += tSize;
}
len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision);
len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, pMeterMeta->precision, &code, tmpTokenBuf);
if (len <= 0 || pTableDataBlock->numOfParams > 0) {
pSql->res.code = TSDB_CODE_INVALID_SQL;
return -1;
pSql->res.code = code;
return (-code);
}
pTableDataBlock->size += len;
......@@ -1279,19 +1310,19 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) {
int32_t code = TSDB_CODE_SUCCESS;
/* the first block has been sent to server in processSQL function */
assert(pCmd->isInsertFromFile != -1 && pCmd->vnodeIdx >= 1 && pCmd->pDataBlocks != NULL);
assert(pCmd->isInsertFromFile != -1 && pMeterMetaInfo->vnodeIndex >= 1 && pCmd->pDataBlocks != NULL);
if (pCmd->vnodeIdx < pCmd->pDataBlocks->nSize) {
if (pMeterMetaInfo->vnodeIndex < pCmd->pDataBlocks->nSize) {
SDataBlockList *pDataBlocks = pCmd->pDataBlocks;
for (int32_t i = pCmd->vnodeIdx; i < pDataBlocks->nSize; ++i) {
for (int32_t i = pMeterMetaInfo->vnodeIndex; i < pDataBlocks->nSize; ++i) {
pDataBlock = pDataBlocks->pData[i];
if (pDataBlock == NULL) {
continue;
}
if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) {
tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx, pDataBlocks->nSize);
tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex, pDataBlocks->nSize);
continue;
}
......@@ -1348,8 +1379,16 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql) {
tscError("%p get meter meta failed, abort", pSql);
continue;
}
char* tmpTokenBuf = calloc(1, 4096); // scratch buffer for removing escape characters: \\, \', \"
if (NULL == tmpTokenBuf) {
tscError("%p calloc failed", pSql);
continue;
}
int nrows = tscInsertDataFromFile(pSql, fp);
int nrows = tscInsertDataFromFile(pSql, fp, tmpTokenBuf);
free(tmpTokenBuf);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
if (nrows < 0) {
......
......@@ -75,7 +75,6 @@ static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_
if (isParam) {
++stmt->numParams;
}
return TSDB_CODE_SUCCESS;
}
......@@ -409,7 +408,9 @@ static int insertStmtReset(STscStmt* pStmt) {
}
}
pCmd->batchSize = 0;
pCmd->vnodeIdx = 0;
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
pMeterMetaInfo->vnodeIndex = 0;
return TSDB_CODE_SUCCESS;
}
......@@ -422,6 +423,8 @@ static int insertStmtExecute(STscStmt* stmt) {
++pCmd->batchSize;
}
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0);
if (pCmd->pDataBlocks->nSize > 0) {
// merge according to vgid
int code = tscMergeTableDataBlocks(stmt->pSql, pCmd->pDataBlocks);
......@@ -436,7 +439,7 @@ static int insertStmtExecute(STscStmt* stmt) {
}
// set the index of the next vnode to receive data in the data block list
pCmd->vnodeIdx = 1;
pMeterMetaInfo->vnodeIndex = 1;
} else {
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
}
......
......@@ -93,10 +93,10 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
const static int64_t SLOW_QUERY_INTERVAL = 3000000L;
if (pSql->res.useconds < SLOW_QUERY_INTERVAL) return;
tscTrace("%p query time:%lld sql:%s", pSql, pSql->res.useconds, pSql->sqlstr);
tscTrace("%p query time:%" PRId64 " sql:%s", pSql, pSql->res.useconds, pSql->sqlstr);
char *sql = malloc(200);
int len = snprintf(sql, 200, "insert into %s.slowquery values(now, '%s', %lld, %lld, '", tsMonitorDbName,
int len = snprintf(sql, 200, "insert into %s.slowquery values(now, '%s', %" PRId64 ", %" PRId64 ", '", tsMonitorDbName,
pSql->pTscObj->user, pSql->stime, pSql->res.useconds);
int sqlLen = snprintf(sql + len, TSDB_SHOW_SQL_LEN, "%s", pSql->sqlstr);
if (sqlLen > TSDB_SHOW_SQL_LEN - 1) {
......@@ -197,8 +197,10 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
}
pthread_mutex_unlock(&pObj->mutex);
tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId);
if (pStream) {
tscTrace("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId);
}
taos_close_stream(pStream);
if (pStream->callback) {
......
......@@ -83,6 +83,13 @@ struct SSchema* tsGetColumnSchema(SMeterMeta* pMeta, int32_t startCol) {
return (SSchema*)(((char*)pMeta + sizeof(SMeterMeta)) + startCol * sizeof(SSchema));
}
struct SSchema tsGetTbnameColumnSchema() {
struct SSchema s = {.colId = TSDB_TBNAME_COLUMN_INDEX, .type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_METER_NAME_LEN};
strcpy(s.name, TSQL_TBNAME_L);
return s;
}
/**
* the MeterMeta data format in memory is as follows:
*
......@@ -123,7 +130,7 @@ bool tsMeterMetaIdentical(SMeterMeta* p1, SMeterMeta* p2) {
return memcmp(p1, p2, size) == 0;
}
//todo refactor
// todo refactor
static FORCE_INLINE char* skipSegments(char* input, char delimiter, int32_t num) {
for (int32_t i = 0; i < num; ++i) {
while (*input != 0 && *input++ != delimiter) {
......
......@@ -1314,8 +1314,10 @@ int32_t tscLocalDoReduce(SSqlObj *pSql) {
tscTrace("%s call the drop local reducer", __FUNCTION__);
tscDestroyLocalReducer(pSql);
pRes->numOfRows = 0;
pRes->row = 0;
if (pRes) {
pRes->numOfRows = 0;
pRes->row = 0;
}
return 0;
}
......
......@@ -85,7 +85,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
tscError("%p stream:%p,get metermeta failed, retry in %lldms", pStream->pSql, pStream, retryDelayTime);
tscError("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
return;
......@@ -136,7 +136,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
int64_t retryDelay = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, query data failed, code:%d, retry in %lldms", pStream->pSql, pStream, numOfRows,
tscError("%p stream:%p, query data failed, code:%d, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
retryDelay);
SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pStream->pSql->cmd, 0);
......@@ -158,7 +158,7 @@ static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) {
if (timestamp != actualTimestamp) {
// reset the timestamp of each agg point by using start time of each interval
*((int64_t *)pRes->data) = actualTimestamp;
tscWarn("%p stream:%p, timestamp of points is:%lld, reset to %lld", pSql, pStream, timestamp, actualTimestamp);
tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64 "", pSql, pStream, timestamp, actualTimestamp);
}
}
......@@ -169,7 +169,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
if (pSql == NULL || numOfRows < 0) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream->slidingTime, pStream->precision);
tscError("%p stream:%p, retrieve data failed, code:%d, retry in %lldms", pSql, pStream, numOfRows, retryDelayTime);
tscError("%p stream:%p, retrieve data failed, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
tscClearMeterMetaInfo(pMeterMetaInfo, true);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
......@@ -235,7 +235,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
/* no results in the query range, retry */
// todo set retry dynamic time
int32_t retry = tsProjectExecInterval;
tscError("%p stream:%p, retrieve no data, code:%d, retry in %lldms", pSql, pStream, numOfRows, retry);
tscError("%p stream:%p, retrieve no data, code:%d, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retry);
tscClearSqlMetaInfoForce(&(pStream->pSql->cmd));
tscSetRetryTimer(pStream, pStream->pSql, retry);
......@@ -265,7 +265,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
/*
* current time window will be closed, since it is too early to exceed the maxRetentWindow value
*/
tscTrace("%p stream:%p, etime:%lld is too old, exceeds the max retention time window:%lld, stop the stream",
tscTrace("%p stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream",
pStream->pSql, pStream, pStream->stime, pStream->etime);
// TODO : How to terminate stream here
taos_close_stream(pStream);
......@@ -276,10 +276,10 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
return;
}
tscTrace("%p stream:%p, next query start at %lld, in %lldms. query range %lld-%lld", pStream->pSql, pStream,
tscTrace("%p stream:%p, next query start at %" PRId64 ", in %" PRId64 "ms. query range %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
now + timer, timer, pStream->stime, etime);
} else {
tscTrace("%p stream:%p, next query start at %lld, in %lldms. query range %lld-%lld", pStream->pSql, pStream,
tscTrace("%p stream:%p, next query start at %" PRId64 ", in %" PRId64 "ms. query range %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
pStream->stime, timer, pStream->stime - pStream->interval, pStream->stime - 1);
}
......@@ -299,7 +299,7 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
*/
timer = pStream->slidingTime;
if (pStream->stime > pStream->etime) {
tscTrace("%p stream:%p, stime:%lld is larger than end time: %lld, stop the stream", pStream->pSql, pStream,
tscTrace("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
// TODO : How to terminate stream here
taos_close_stream(pStream);
......@@ -353,7 +353,7 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
int64_t minIntervalTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime;
if (pCmd->nAggTimeInterval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%lld", pSql, pStream,
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64 "", pSql, pStream,
pCmd->nAggTimeInterval, minIntervalTime);
pCmd->nAggTimeInterval = minIntervalTime;
}
......@@ -368,14 +368,14 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
if (pCmd->nSlidingTime < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%lld too small, reset to:%lld", pSql, pStream, pCmd->nSlidingTime,
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64 "", pSql, pStream, pCmd->nSlidingTime,
minSlidingTime);
pCmd->nSlidingTime = minSlidingTime;
}
if (pCmd->nSlidingTime > pCmd->nAggTimeInterval) {
tscWarn("%p stream:%p, sliding value:%lld can not be larger than interval range, reset to:%lld", pSql, pStream,
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64 "", pSql, pStream,
pCmd->nSlidingTime, pCmd->nAggTimeInterval);
pCmd->nSlidingTime = pCmd->nAggTimeInterval;
......@@ -401,11 +401,11 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
} else { // timewindow based aggregation stream
if (stime == 0) { // no data in meter till now
stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval;
tscWarn("%p stream:%p, last timestamp:0, reset to:%lld", pSql, pStream, stime);
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64 "", pSql, pStream, stime);
} else {
int64_t newStime = (stime / pStream->interval) * pStream->interval;
if (newStime != stime) {
tscWarn("%p stream:%p, last timestamp:%lld, reset to:%lld", pSql, pStream, stime, newStime);
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64 "", pSql, pStream, stime, newStime);
stime = newStime;
}
}
......@@ -447,7 +447,10 @@ static void setErrorInfo(STscObj* pObj, int32_t code, char* info) {
SSqlCmd* pCmd = &pObj->pSql->cmd;
pObj->pSql->res.code = code;
strncpy(pCmd->payload, info, pCmd->payloadLen);
if (info != NULL) {
strncpy(pCmd->payload, info, pCmd->payloadLen);
}
}
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
......@@ -537,7 +540,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
int64_t starttime = tscGetLaunchTimestamp(pStream);
taosTmrReset(tscProcessStreamTimer, starttime, pStream, tscTmr, &pStream->pTimer);
tscTrace("%p stream:%p is opened, query on:%s, interval:%lld, sliding:%lld, first launched in:%lld, sql:%s", pSql,
tscTrace("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
pStream, pMeterMetaInfo->name, pStream->interval, pStream->slidingTime, starttime, sqlstr);
return pStream;
......
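
The logging changes in the hunks above replace %lld with the PRId64 macro from <inttypes.h>; presumably the motivation is portability, since int64_t maps to long on LP64 Linux but long long on 32-bit and Windows builds, so a hard-coded %lld draws format warnings or prints garbage on some platforms. A minimal sketch of the idiom (the variable name is made up for illustration):

#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t useconds = 1234567;  /* e.g. a query's elapsed time */
  /* PRId64 expands to the correct conversion specifier for int64_t on the
   * current platform (typically "ld" on 64-bit Linux, "lld" elsewhere). */
  printf("query time:%" PRId64 " us\n", useconds);
  return 0;
}
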
......@@ -56,7 +56,7 @@ TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, c
if (pSub->taos == NULL) {
tfree(pSub);
} else {
char qstr[128];
char qstr[256] = {0};
sprintf(qstr, "use %s", db);
int res = taos_query(pSub->taos, qstr);
if (res != 0) {
......@@ -64,7 +64,7 @@ TAOS_SUB *taos_subscribe(const char *host, const char *user, const char *pass, c
taos_close(pSub->taos);
tfree(pSub);
} else {
sprintf(qstr, "select * from %s where _c0 > now+1000d", pSub->name);
snprintf(qstr, tListLen(qstr), "select * from %s where _c0 > now+1000d", pSub->name);
if (taos_query(pSub->taos, qstr)) {
tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos));
taos_close(pSub->taos);
......@@ -106,7 +106,7 @@ TAOS_ROW taos_consume(TAOS_SUB *tsub) {
pSub->stime = taosGetTimestampMs();
sprintf(qstr, "select * from %s where _c0 > %lld order by _c0 asc", pSub->name, pSub->lastKey);
sprintf(qstr, "select * from %s where _c0 > %" PRId64 " order by _c0 asc", pSub->name, pSub->lastKey);
if (taos_query(pSub->taos, qstr)) {
tscTrace("failed to select, reason:%s", taos_errstr(pSub->taos));
return NULL;
......
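
In the subscribe hunks above, qstr grows from 128 to 256 bytes and one sprintf becomes snprintf bounded by tListLen(qstr), presumably so a long table name cannot overflow the buffer. A small sketch of that bounded-formatting pattern, using a hypothetical ARRAY_LEN macro in place of tListLen:

#include <stdio.h>

/* same idea as tListLen(): element count of a fixed-size array (name made up) */
#define ARRAY_LEN(x) (sizeof(x) / sizeof((x)[0]))

int main(void) {
  char        qstr[256] = {0};
  const char *name = "db.some_rather_long_table_name";

  /* snprintf never writes past the buffer and always NUL-terminates;
   * a return value >= the buffer size means the output was truncated. */
  int n = snprintf(qstr, ARRAY_LEN(qstr), "select * from %s where _c0 > now+1000d", name);
  if (n < 0 || (size_t)n >= ARRAY_LEN(qstr)) {
    fprintf(stderr, "query string truncated\n");
    return 1;
  }
  printf("%s\n", qstr);
  return 0;
}
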
......@@ -198,7 +198,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
switch (option) {
case TSDB_OPTION_CONFIGDIR:
cfg = tsGetConfigOption("configDir");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
strncpy(configDir, pStr, TSDB_FILENAME_LEN);
cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION;
tscPrint("set config file directory:%s", pStr);
......@@ -210,7 +212,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_SHELL_ACTIVITY_TIMER:
cfg = tsGetConfigOption("shellActivityTimer");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
tsShellActivityTimer = atoi(pStr);
if (tsShellActivityTimer < 1) tsShellActivityTimer = 1;
if (tsShellActivityTimer > 3600) tsShellActivityTimer = 3600;
......@@ -224,13 +228,15 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_LOCALE: { // set locale
cfg = tsGetConfigOption("locale");
assert(cfg != NULL);
size_t len = strlen(pStr);
if (len == 0 || len > TSDB_LOCALE_LEN) {
tscPrint("Invalid locale:%s, use default", pStr);
return -1;
}
if (cfg && cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
char sep = '.';
if (strlen(tsLocale) == 0) { // locale does not set yet
......@@ -285,13 +291,15 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_CHARSET: {
/* set charset will override the value of charset, assigned during system locale changed */
cfg = tsGetConfigOption("charset");
assert(cfg != NULL);
size_t len = strlen(pStr);
if (len == 0 || len > TSDB_LOCALE_LEN) {
tscPrint("failed to set charset:%s", pStr);
return -1;
}
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (taosValidateEncodec(pStr)) {
if (strlen(tsCharset) == 0) {
tscPrint("charset is set:%s", pStr);
......@@ -314,7 +322,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_TIMEZONE:
cfg = tsGetConfigOption("timezone");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
strcpy(tsTimezone, pStr);
tsSetTimeZone();
cfg->cfgStatus = TSDB_CFG_CSTATUS_OPTION;
......@@ -327,7 +337,9 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
case TSDB_OPTION_SOCKET_TYPE:
cfg = tsGetConfigOption("sockettype");
if (cfg && cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
assert(cfg != NULL);
if (cfg->cfgStatus <= TSDB_CFG_CSTATUS_OPTION) {
if (strcasecmp(pStr, TAOS_SOCKET_TYPE_NAME_UDP) != 0 && strcasecmp(pStr, TAOS_SOCKET_TYPE_NAME_TCP) != 0) {
tscError("only 'tcp' or 'udp' allowed for configuring the socket type");
return -1;
......@@ -340,6 +352,7 @@ static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
break;
default:
// TODO return the correct error code to client in the format for taos_errstr()
tscError("Invalid option %d", option);
return -1;
}
......
......@@ -105,7 +105,7 @@ extern SSdbPeer *sdbPeer[];
#endif
void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, char keyType, char *directory,
void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, uint8_t keyType, char *directory,
void *(*appTool)(char, void *, char *, int, int *));
void *sdbGetRow(void *handle, void *key);
......
......@@ -128,7 +128,7 @@ extern "C" {
#define TSDB_CODE_CACHE_BLOCK_TS_DISORDERED 107 // time stamp in cache block is disordered
#define TSDB_CODE_FILE_BLOCK_TS_DISORDERED 108 // time stamp in file block is disordered
#define TSDB_CODE_INVALID_COMMIT_LOG 109 // commit log init failed
#define TSDB_CODE_SERVER_NO_SPACE 110
#define TSDB_CODE_SERV_NO_DISKSPACE 110
#define TSDB_CODE_NOT_SUPER_TABLE 111 // operation only available for super table
#define TSDB_CODE_DUPLICATE_TAGS 112 // tags value for join not unique
#define TSDB_CODE_INVALID_SUBMIT_MSG 113
......@@ -137,6 +137,8 @@ extern "C" {
#define TSDB_CODE_INVALID_VNODE_STATUS 116
#define TSDB_CODE_FAILED_TO_LOCK_RESOURCES 117
#define TSDB_CODE_MAX_ERROR_CODE 118
#ifdef __cplusplus
}
#endif
......
......@@ -280,7 +280,7 @@ typedef struct {
} SShellSubmitMsg;
typedef struct SSchema {
char type;
uint8_t type;
char name[TSDB_COL_NAME_LEN];
short colId;
short bytes;
......@@ -624,7 +624,7 @@ typedef struct {
char repStrategy;
char loadLatest; // load into mem or not
char precision; // time resoluation
uint8_t precision; // time resolution
char reserved[16];
} SVnodeCfg, SCreateDbMsg, SDbCfg, SAlterDbMsg;
......@@ -667,7 +667,7 @@ typedef struct {
uint32_t destIp;
char meterId[TSDB_UNI_LEN];
char empty[3];
char msgType;
uint8_t msgType;
int32_t msgLen;
uint8_t content[0];
} SIntMsg;
......
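
Several fields in the hunks above switch from char to uint8_t (SSchema.type, SVnodeCfg.precision, SIntMsg.msgType, and the keyType parameter of sdbOpenTable). A plausible reason: plain char has implementation-defined signedness, so values above 127 can turn negative and break comparisons or switch statements; uint8_t makes the 0..255 range explicit. A tiny illustration under that assumption:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  char    asChar  = (char)200;     /* implementation-defined where char is signed */
  uint8_t asUint8 = (uint8_t)200;  /* always 200 */

  printf("stored 200 in char:    %d\n", asChar);              /* often prints -56 */
  printf("stored 200 in uint8_t: %u\n", (unsigned)asUint8);   /* prints 200 */
  return 0;
}
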
......@@ -106,7 +106,6 @@ extern int tsMaxDbs;
extern int tsMaxTables;
extern int tsMaxDnodes;
extern int tsMaxVGroups;
extern int tsShellActivityTimer;
extern char tsMgmtZone[];
extern char tsLocalIp[];
......@@ -127,6 +126,7 @@ extern int tsEnableHttpModule;
extern int tsEnableMonitorModule;
extern int tsRestRowLimit;
extern int tsCompressMsgSize;
extern int tsMaxSQLStringLen;
extern char tsSocketType[4];
......@@ -181,6 +181,7 @@ extern int tsUdpDelay;
extern char version[];
extern char compatible_version[];
extern char gitinfo[];
extern char gitinfoOfInternal[];
extern char buildinfo[];
extern char tsTimezone[64];
......
......@@ -47,6 +47,7 @@ struct SSchema *tsGetSchema(SMeterMeta *pMeta);
struct SSchema *tsGetTagSchema(SMeterMeta *pMeta);
struct SSchema *tsGetColumnSchema(SMeterMeta *pMeta, int32_t startCol);
struct SSchema tsGetTbnameColumnSchema();
char *tsGetTagsValue(SMeterMeta *pMeta);
......
......@@ -100,6 +100,7 @@ extern "C" {
#define TSDB_COL_NAME_LEN 64
#define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 16
#define TSDB_MAX_SQL_LEN TSDB_PAYLOAD_SIZE
#define TSDB_MAX_ALLOWED_SQL_LEN (8*1024*1024U) // sql length must be less than 8 MB
#define TSDB_MAX_BYTES_PER_ROW TSDB_MAX_COLUMNS * 16
#define TSDB_MAX_TAGS_LEN 512
......
......@@ -227,8 +227,6 @@ typedef struct SPatternCompareInfo {
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type,
int16_t *len, int16_t *interResBytes, int16_t extLength, bool isSuperTable);
SResultInfo *getResultSupportInfo(SQLFunctionCtx *pCtx);
int patternMatch(const char *zPattern, const char *zString, size_t size, const SPatternCompareInfo *pInfo);
int WCSPatternMatch(const wchar_t *zPattern, const wchar_t *zString, size_t size, const SPatternCompareInfo *pInfo);
......
......@@ -50,7 +50,7 @@ bool isNull(const char *val, int32_t type);
void setNull(char *val, int32_t type, int32_t bytes);
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
void assignVal(char *val, char *src, int32_t len, int32_t type);
void assignVal(char *val, const char *src, int32_t len, int32_t type);
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
// variant, each number/string/field_id has a corresponding struct during parsing sql
......